Unverified commit a51eb05b authored by Oran Agra, committed by GitHub

Release Redis 7.2 RC2

parents e26a769d 986dbf71
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
......@@ -7,6 +6,15 @@
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"
/*
* In auto mode, arenas switch to huge pages for the base allocator on the
* second base block. a0 switches to thp on the 5th block (after 20 megabytes
* of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
*/
#define BASE_AUTO_THP_THRESHOLD 2
#define BASE_AUTO_THP_THRESHOLD_A0 5
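/*
 * Minimal illustrative sketch (not part of the functions below): given a
 * base's arena index, pick which of the two thresholds above applies.
 * auto_thp_threshold_for() is a hypothetical helper name used only for
 * this example.
 */
static size_t
auto_thp_threshold_for(unsigned base_ind) {
	return (base_ind == 0) ? BASE_AUTO_THP_THRESHOLD_A0 :
	    BASE_AUTO_THP_THRESHOLD;
}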
/******************************************************************************/
/* Data. */
......@@ -29,7 +37,7 @@ metadata_thp_madvise(void) {
}
static void *
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) {
void *addr;
bool zero = true;
bool commit = true;
......@@ -37,22 +45,21 @@ base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size)
/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
assert(size == HUGEPAGE_CEILING(size));
size_t alignment = HUGEPAGE;
if (extent_hooks == &extent_hooks_default) {
if (ehooks_are_default(ehooks)) {
addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
if (have_madvise_huge && addr) {
pages_set_thp_state(addr, size);
}
} else {
/* No arena context as we are creating new arenas. */
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
pre_reentrancy(tsd, NULL);
addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
&zero, &commit, ind);
post_reentrancy(tsd);
addr = ehooks_alloc(tsdn, ehooks, NULL, size, alignment, &zero,
&commit);
}
return addr;
}
static void
base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
base_unmap(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr,
size_t size) {
/*
* Cascade through dalloc, decommit, purge_forced, and purge_lazy,
......@@ -64,7 +71,7 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
* may in fact want the end state of all associated virtual memory to be
* in some consistent-but-allocated state.
*/
if (extent_hooks == &extent_hooks_default) {
if (ehooks_are_default(ehooks)) {
if (!extent_dalloc_mmap(addr, size)) {
goto label_done;
}
......@@ -80,31 +87,19 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
/* Nothing worked. This should never happen. */
not_reached();
} else {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
pre_reentrancy(tsd, NULL);
if (extent_hooks->dalloc != NULL &&
!extent_hooks->dalloc(extent_hooks, addr, size, true,
ind)) {
goto label_post_reentrancy;
if (!ehooks_dalloc(tsdn, ehooks, addr, size, true)) {
goto label_done;
}
if (extent_hooks->decommit != NULL &&
!extent_hooks->decommit(extent_hooks, addr, size, 0, size,
ind)) {
goto label_post_reentrancy;
if (!ehooks_decommit(tsdn, ehooks, addr, size, 0, size)) {
goto label_done;
}
if (extent_hooks->purge_forced != NULL &&
!extent_hooks->purge_forced(extent_hooks, addr, size, 0,
size, ind)) {
goto label_post_reentrancy;
if (!ehooks_purge_forced(tsdn, ehooks, addr, size, 0, size)) {
goto label_done;
}
if (extent_hooks->purge_lazy != NULL &&
!extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
ind)) {
goto label_post_reentrancy;
if (!ehooks_purge_lazy(tsdn, ehooks, addr, size, 0, size)) {
goto label_done;
}
/* Nothing worked. That's the application's problem. */
label_post_reentrancy:
post_reentrancy(tsd);
}
label_done:
if (metadata_thp_madvise()) {
......@@ -116,14 +111,14 @@ label_done:
}
static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
base_edata_init(size_t *extent_sn_next, edata_t *edata, void *addr,
size_t size) {
size_t sn;
sn = *extent_sn_next;
(*extent_sn_next)++;
extent_binit(extent, addr, size, sn);
edata_binit(edata, addr, size, sn);
}
static size_t
......@@ -169,7 +164,7 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
pages_huge(block, block->size);
if (config_stats) {
base->n_thp += HUGEPAGE_CEILING(block->size -
extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
edata_bsize_get(&block->edata)) >> LG_HUGEPAGE;
}
block = block->next;
assert(block == NULL || (base_ind_get(base) == 0));
......@@ -177,34 +172,34 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
}
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size,
size_t alignment) {
void *ret;
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
assert(size == ALIGNMENT_CEILING(size, alignment));
*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
alignment) - (uintptr_t)extent_addr_get(extent);
ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
assert(extent_bsize_get(extent) >= *gap_size + size);
extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
*gap_size + size), extent_bsize_get(extent) - *gap_size - size,
extent_sn_get(extent));
*gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata),
alignment) - (uintptr_t)edata_addr_get(edata);
ret = (void *)((uintptr_t)edata_addr_get(edata) + *gap_size);
assert(edata_bsize_get(edata) >= *gap_size + size);
edata_binit(edata, (void *)((uintptr_t)edata_addr_get(edata) +
*gap_size + size), edata_bsize_get(edata) - *gap_size - size,
edata_sn_get(edata));
return ret;
}
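/*
 * To make the bump arithmetic above concrete, here is a standalone sketch
 * with made-up numbers; the ALIGNMENT_CEILING definition below is assumed
 * to match jemalloc's (round up to a multiple of a power-of-two alignment).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT_CEILING(s, alignment) \
	(((s) + (alignment - 1)) & ((~(alignment)) + 1))

int
main(void) {
	uintptr_t addr = 0x1008;	/* made-up edata start address */
	uintptr_t alignment = 64;	/* requested alignment */
	uintptr_t gap = ALIGNMENT_CEILING(addr, alignment) - addr;
	/* gap == 56: the returned pointer is addr + gap == 0x1040 (64-byte
	 * aligned), and the edata is re-initialized to cover what remains
	 * past addr + gap + size, with its size reduced accordingly. */
	printf("gap = %" PRIuPTR ", ret = 0x%" PRIxPTR "\n", gap, addr + gap);
	return 0;
}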
static void
base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
base_extent_bump_alloc_post(base_t *base, edata_t *edata, size_t gap_size,
void *addr, size_t size) {
if (extent_bsize_get(extent) > 0) {
if (edata_bsize_get(edata) > 0) {
/*
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
szind_t index_floor =
sz_size2index(extent_bsize_get(extent) + 1) - 1;
extent_heap_insert(&base->avail[index_floor], extent);
sz_size2index(edata_bsize_get(edata) + 1) - 1;
edata_heap_insert(&base->avail[index_floor], edata);
}
if (config_stats) {
......@@ -229,13 +224,13 @@ base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
}
static void *
base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
base_extent_bump_alloc(base_t *base, edata_t *edata, size_t size,
size_t alignment) {
void *ret;
size_t gap_size;
ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
ret = base_extent_bump_alloc_helper(edata, &gap_size, size, alignment);
base_extent_bump_alloc_post(base, edata, gap_size, ret, size);
return ret;
}
......@@ -245,8 +240,8 @@ base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
* On success a pointer to the initialized base_block_t header is returned.
*/
static base_block_t *
base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
pszind_t *pind_last, size_t *extent_sn_next, size_t size,
size_t alignment) {
alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
size_t usize = ALIGNMENT_CEILING(size, alignment);
......@@ -267,7 +262,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
size_t block_size = (min_block_size > next_block_size) ? min_block_size
: next_block_size;
base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
base_block_t *block = (base_block_t *)base_map(tsdn, ehooks, ind,
block_size);
if (block == NULL) {
return NULL;
......@@ -295,7 +290,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
block->size = block_size;
block->next = NULL;
assert(block_size >= header_size);
base_extent_init(extent_sn_next, &block->extent,
base_edata_init(extent_sn_next, &block->edata,
(void *)((uintptr_t)block + header_size), block_size - header_size);
return block;
}
......@@ -304,17 +299,17 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
* Allocate an extent that is at least as large as specified size, with
* specified alignment.
*/
static extent_t *
static edata_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &base->mtx);
extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
/*
* Drop mutex during base_block_alloc(), because an extent hook will be
* called.
*/
malloc_mutex_unlock(tsdn, &base->mtx);
base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
base_block_t *block = base_block_alloc(tsdn, base, ehooks,
base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
alignment);
malloc_mutex_lock(tsdn, &base->mtx);
......@@ -338,7 +333,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
return &block->extent;
return &block->edata;
}
base_t *
......@@ -347,10 +342,22 @@ b0get(void) {
}
base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
bool metadata_use_hooks) {
pszind_t pind_last = 0;
size_t extent_sn_next = 0;
base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
/*
* The base will contain the ehooks eventually, but it itself is
* allocated using them. So we use some stack ehooks to bootstrap its
* memory, and then initialize the ehooks within the base_t.
*/
ehooks_t fake_ehooks;
ehooks_init(&fake_ehooks, metadata_use_hooks ?
(extent_hooks_t *)extent_hooks :
(extent_hooks_t *)&ehooks_default_extent_hooks, ind);
base_block_t *block = base_block_alloc(tsdn, NULL, &fake_ehooks, ind,
&pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
if (block == NULL) {
return NULL;
......@@ -359,13 +366,15 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
size_t gap_size;
size_t base_alignment = CACHELINE;
size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata,
&gap_size, base_size, base_alignment);
base->ind = ind;
atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
ehooks_init(&base->ehooks, (extent_hooks_t *)extent_hooks, ind);
ehooks_init(&base->ehooks_base, metadata_use_hooks ?
(extent_hooks_t *)extent_hooks :
(extent_hooks_t *)&ehooks_default_extent_hooks, ind);
if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
malloc_mutex_rank_exclusive)) {
base_unmap(tsdn, extent_hooks, ind, block, block->size);
base_unmap(tsdn, &fake_ehooks, ind, block, block->size);
return NULL;
}
base->pind_last = pind_last;
......@@ -373,7 +382,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base->blocks = block;
base->auto_thp_switched = false;
for (szind_t i = 0; i < SC_NSIZES; i++) {
extent_heap_new(&base->avail[i]);
edata_heap_new(&base->avail[i]);
}
if (config_stats) {
base->allocated = sizeof(base_block_t);
......@@ -386,7 +395,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
base_extent_bump_alloc_post(base, &block->edata, gap_size, base,
base_size);
return base;
......@@ -394,26 +403,31 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
void
base_delete(tsdn_t *tsdn, base_t *base) {
extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
base_block_t *next = base->blocks;
do {
base_block_t *block = next;
next = block->next;
base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
base_unmap(tsdn, ehooks, base_ind_get(base), block,
block->size);
} while (next != NULL);
}
extent_hooks_t *
base_extent_hooks_get(base_t *base) {
return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
ATOMIC_ACQUIRE);
ehooks_t *
base_ehooks_get(base_t *base) {
return &base->ehooks;
}
ehooks_t *
base_ehooks_get_for_metadata(base_t *base) {
return &base->ehooks_base;
}
extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
extent_hooks_t *old_extent_hooks =
ehooks_get_extent_hooks_ptr(&base->ehooks);
ehooks_init(&base->ehooks, extent_hooks, ehooks_ind_get(&base->ehooks));
return old_extent_hooks;
}
......@@ -424,28 +438,28 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t asize = usize + alignment - QUANTUM;
extent_t *extent = NULL;
edata_t *edata = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
extent = extent_heap_remove_first(&base->avail[i]);
if (extent != NULL) {
edata = edata_heap_remove_first(&base->avail[i]);
if (edata != NULL) {
/* Use existing space. */
break;
}
}
if (extent == NULL) {
if (edata == NULL) {
/* Try to allocate more space. */
extent = base_extent_alloc(tsdn, base, usize, alignment);
edata = base_extent_alloc(tsdn, base, usize, alignment);
}
void *ret;
if (extent == NULL) {
if (edata == NULL) {
ret = NULL;
goto label_return;
}
ret = base_extent_bump_alloc(base, extent, usize, alignment);
ret = base_extent_bump_alloc(base, edata, usize, alignment);
if (esn != NULL) {
*esn = extent_sn_get(extent);
*esn = (size_t)edata_sn_get(edata);
}
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
......@@ -465,16 +479,16 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
edata_t *
base_alloc_edata(tsdn_t *tsdn, base_t *base) {
size_t esn;
extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
CACHELINE, &esn);
if (extent == NULL) {
edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t),
EDATA_ALIGNMENT, &esn);
if (edata == NULL) {
return NULL;
}
extent_esn_set(extent, esn);
return extent;
edata_esn_set(edata, esn);
return edata;
}
void
......@@ -509,6 +523,7 @@ base_postfork_child(tsdn_t *tsdn, base_t *base) {
bool
base_boot(tsdn_t *tsdn) {
b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
b0 = base_new(tsdn, 0, (extent_hooks_t *)&ehooks_default_extent_hooks,
/* metadata_use_hooks */ true);
return (b0 == NULL);
}
......@@ -6,26 +6,6 @@
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/witness.h"
bin_info_t bin_infos[SC_NBINS];
static void
bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
bin_info_t bin_infos[SC_NBINS]) {
for (unsigned i = 0; i < SC_NBINS; i++) {
bin_info_t *bin_info = &bin_infos[i];
sc_t *sc = &sc_data->sc[i];
bin_info->reg_size = ((size_t)1U << sc->lg_base)
+ ((size_t)sc->ndelta << sc->lg_delta);
bin_info->slab_size = (sc->pgs << LG_PAGE);
bin_info->nregs =
(uint32_t)(bin_info->slab_size / bin_info->reg_size);
bin_info->n_shards = bin_shard_sizes[i];
bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
bin_info->nregs);
bin_info->bitmap_info = bitmap_info;
}
}
bool
bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size,
size_t end_size, size_t nshards) {
......@@ -58,12 +38,6 @@ bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) {
}
}
void
bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
assert(sc_data->initialized);
bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
}
bool
bin_init(bin_t *bin) {
if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
......@@ -71,8 +45,8 @@ bin_init(bin_t *bin) {
return true;
}
bin->slabcur = NULL;
extent_heap_new(&bin->slabs_nonfull);
extent_list_init(&bin->slabs_full);
edata_heap_new(&bin->slabs_nonfull);
edata_list_active_init(&bin->slabs_full);
if (config_stats) {
memset(&bin->stats, 0, sizeof(bin_stats_t));
}
......
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/bin_info.h"
bin_info_t bin_infos[SC_NBINS];
static void
bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
bin_info_t infos[SC_NBINS]) {
for (unsigned i = 0; i < SC_NBINS; i++) {
bin_info_t *bin_info = &infos[i];
sc_t *sc = &sc_data->sc[i];
bin_info->reg_size = ((size_t)1U << sc->lg_base)
+ ((size_t)sc->ndelta << sc->lg_delta);
bin_info->slab_size = (sc->pgs << LG_PAGE);
bin_info->nregs =
(uint32_t)(bin_info->slab_size / bin_info->reg_size);
bin_info->n_shards = bin_shard_sizes[i];
bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
bin_info->nregs);
bin_info->bitmap_info = bitmap_info;
}
}
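/*
 * A standalone worked example of the formulas above, with made-up
 * size-class parameters (not taken from any particular build's size-class
 * table).
 */
#include <stdint.h>
#include <stdio.h>

int
main(void) {
	size_t lg_base = 5, ndelta = 1, lg_delta = 4;	/* 32 + 16 = 48 */
	size_t pgs = 1, lg_page = 12;			/* one 4 KiB page */
	size_t reg_size = ((size_t)1 << lg_base) + (ndelta << lg_delta);
	size_t slab_size = pgs << lg_page;
	uint32_t nregs = (uint32_t)(slab_size / reg_size);
	/* Prints: reg_size=48 slab_size=4096 nregs=85 */
	printf("reg_size=%zu slab_size=%zu nregs=%u\n", reg_size, slab_size,
	    nregs);
	return 0;
}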
void
bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
assert(sc_data->initialized);
bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
}
#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
......
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/buf_writer.h"
#include "jemalloc/internal/malloc_io.h"
static void *
buf_writer_allocate_internal_buf(tsdn_t *tsdn, size_t buf_len) {
#ifdef JEMALLOC_JET
if (buf_len > SC_LARGE_MAXCLASS) {
return NULL;
}
#else
assert(buf_len <= SC_LARGE_MAXCLASS);
#endif
return iallocztm(tsdn, buf_len, sz_size2index(buf_len), false, NULL,
true, arena_get(tsdn, 0, false), true);
}
static void
buf_writer_free_internal_buf(tsdn_t *tsdn, void *buf) {
if (buf != NULL) {
idalloctm(tsdn, buf, NULL, NULL, true, true);
}
}
static void
buf_writer_assert(buf_writer_t *buf_writer) {
assert(buf_writer != NULL);
assert(buf_writer->write_cb != NULL);
if (buf_writer->buf != NULL) {
assert(buf_writer->buf_size > 0);
} else {
assert(buf_writer->buf_size == 0);
assert(buf_writer->internal_buf);
}
assert(buf_writer->buf_end <= buf_writer->buf_size);
}
bool
buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, write_cb_t *write_cb,
void *cbopaque, char *buf, size_t buf_len) {
if (write_cb != NULL) {
buf_writer->write_cb = write_cb;
} else {
buf_writer->write_cb = je_malloc_message != NULL ?
je_malloc_message : wrtmessage;
}
buf_writer->cbopaque = cbopaque;
assert(buf_len >= 2);
if (buf != NULL) {
buf_writer->buf = buf;
buf_writer->internal_buf = false;
} else {
buf_writer->buf = buf_writer_allocate_internal_buf(tsdn,
buf_len);
buf_writer->internal_buf = true;
}
if (buf_writer->buf != NULL) {
buf_writer->buf_size = buf_len - 1; /* Allowing for '\0'. */
} else {
buf_writer->buf_size = 0;
}
buf_writer->buf_end = 0;
buf_writer_assert(buf_writer);
return buf_writer->buf == NULL;
}
void
buf_writer_flush(buf_writer_t *buf_writer) {
buf_writer_assert(buf_writer);
if (buf_writer->buf == NULL) {
return;
}
buf_writer->buf[buf_writer->buf_end] = '\0';
buf_writer->write_cb(buf_writer->cbopaque, buf_writer->buf);
buf_writer->buf_end = 0;
buf_writer_assert(buf_writer);
}
void
buf_writer_cb(void *buf_writer_arg, const char *s) {
buf_writer_t *buf_writer = (buf_writer_t *)buf_writer_arg;
buf_writer_assert(buf_writer);
if (buf_writer->buf == NULL) {
buf_writer->write_cb(buf_writer->cbopaque, s);
return;
}
size_t i, slen, n;
for (i = 0, slen = strlen(s); i < slen; i += n) {
if (buf_writer->buf_end == buf_writer->buf_size) {
buf_writer_flush(buf_writer);
}
size_t s_remain = slen - i;
size_t buf_remain = buf_writer->buf_size - buf_writer->buf_end;
n = s_remain < buf_remain ? s_remain : buf_remain;
memcpy(buf_writer->buf + buf_writer->buf_end, s + i, n);
buf_writer->buf_end += n;
buf_writer_assert(buf_writer);
}
assert(i == slen);
}
void
buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer) {
buf_writer_assert(buf_writer);
buf_writer_flush(buf_writer);
if (buf_writer->internal_buf) {
buf_writer_free_internal_buf(tsdn, buf_writer->buf);
}
}
void
buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
void *read_cbopaque) {
/*
* A tiny local buffer in case the buffered writer failed to allocate
* at init.
*/
static char backup_buf[16];
static buf_writer_t backup_buf_writer;
buf_writer_assert(buf_writer);
assert(read_cb != NULL);
if (buf_writer->buf == NULL) {
buf_writer_init(TSDN_NULL, &backup_buf_writer,
buf_writer->write_cb, buf_writer->cbopaque, backup_buf,
sizeof(backup_buf));
buf_writer = &backup_buf_writer;
}
assert(buf_writer->buf != NULL);
ssize_t nread = 0;
do {
buf_writer->buf_end += nread;
buf_writer_assert(buf_writer);
if (buf_writer->buf_end == buf_writer->buf_size) {
buf_writer_flush(buf_writer);
}
nread = read_cb(read_cbopaque,
buf_writer->buf + buf_writer->buf_end,
buf_writer->buf_size - buf_writer->buf_end);
} while (nread > 0);
buf_writer_flush(buf_writer);
}
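/*
 * A minimal, hypothetical usage sketch of the buffered writer above: the
 * caller supplies its own buffer and write callback, pushes strings through
 * buf_writer_cb(), and terminates to flush.  stderr_write_cb and
 * emit_report are illustrative names, not jemalloc API, and the sketch
 * assumes <stdio.h> for fputs.  With a caller-supplied buffer no internal
 * allocation happens, so passing TSDN_NULL is assumed to be acceptable.
 */
static void
stderr_write_cb(void *cbopaque, const char *s) {
	(void)cbopaque;
	fputs(s, stderr);
}

static void
emit_report(void) {
	static char buf[256];
	buf_writer_t bw;
	buf_writer_init(TSDN_NULL, &bw, stderr_write_cb, NULL, buf,
	    sizeof(buf));
	buf_writer_cb(&bw, "chunk one, ");
	buf_writer_cb(&bw, "chunk two\n");
	buf_writer_terminate(TSDN_NULL, &bw);	/* flushes; nothing to free */
}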
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/cache_bin.h"
#include "jemalloc/internal/safety_check.h"
void
cache_bin_info_init(cache_bin_info_t *info,
cache_bin_sz_t ncached_max) {
assert(ncached_max <= CACHE_BIN_NCACHED_MAX);
size_t stack_size = (size_t)ncached_max * sizeof(void *);
assert(stack_size < ((size_t)1 << (sizeof(cache_bin_sz_t) * 8)));
info->ncached_max = (cache_bin_sz_t)ncached_max;
}
void
cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
size_t *size, size_t *alignment) {
/* For the total bin stack region (per tcache), reserve 2 more slots so
* that
* 1) the empty position can be safely read on the fast path before
* checking "is_empty"; and
* 2) the cur_ptr can go beyond the empty position by 1 step safely on
* the fast path (i.e. no overflow).
*/
*size = sizeof(void *) * 2;
for (szind_t i = 0; i < ninfos; i++) {
assert(infos[i].ncached_max > 0);
*size += infos[i].ncached_max * sizeof(void *);
}
/*
* Align to at least PAGE, to minimize the # of TLBs needed by the
* smaller sizes; also helps if the larger sizes don't get used at all.
*/
*alignment = PAGE;
}
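/*
 * Worked example of the computation above, with made-up ncached_max values
 * and 8-byte pointers: two bins with ncached_max 20 and 100 need
 * 2*8 + (20 + 100)*8 = 976 bytes for their stacks plus the two guard
 * slots, and the whole region is PAGE-aligned.
 */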
void
cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
size_t *cur_offset) {
if (config_debug) {
size_t computed_size;
size_t computed_alignment;
/* Pointer should be as aligned as we asked for. */
cache_bin_info_compute_alloc(infos, ninfos, &computed_size,
&computed_alignment);
assert(((uintptr_t)alloc & (computed_alignment - 1)) == 0);
}
*(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
cache_bin_preceding_junk;
*cur_offset += sizeof(void *);
}
void
cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
size_t *cur_offset) {
*(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
cache_bin_trailing_junk;
*cur_offset += sizeof(void *);
}
void
cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
size_t *cur_offset) {
/*
* The full_position points to the lowest available space. Allocations
* will access the slots toward higher addresses (for the benefit of
* adjacent prefetch).
*/
void *stack_cur = (void *)((uintptr_t)alloc + *cur_offset);
void *full_position = stack_cur;
uint16_t bin_stack_size = info->ncached_max * sizeof(void *);
*cur_offset += bin_stack_size;
void *empty_position = (void *)((uintptr_t)alloc + *cur_offset);
/* Init to the empty position. */
bin->stack_head = (void **)empty_position;
bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
bin->low_bits_full = (uint16_t)(uintptr_t)full_position;
bin->low_bits_empty = (uint16_t)(uintptr_t)empty_position;
cache_bin_sz_t free_spots = cache_bin_diff(bin,
bin->low_bits_full, (uint16_t)(uintptr_t)bin->stack_head,
/* racy */ false);
assert(free_spots == bin_stack_size);
assert(cache_bin_ncached_get_local(bin, info) == 0);
assert(cache_bin_empty_position_get(bin) == empty_position);
assert(bin_stack_size > 0 || empty_position == full_position);
}
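/*
 * Worked example (made-up numbers): with ncached_max == 8 and 8-byte
 * pointers, the bin's stack occupies 64 bytes.  full_position is the lowest
 * slot address, empty_position sits 64 bytes above it, and a freshly
 * initialized (empty) bin has stack_head == empty_position; only the low 16
 * bits of these addresses are cached in the low_bits_* fields.
 */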
bool
cache_bin_still_zero_initialized(cache_bin_t *bin) {
return bin->stack_head == NULL;
}
......@@ -34,7 +34,6 @@
* respectively.
*
******************************************************************************/
#define JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/ckh.h"
......@@ -357,14 +356,14 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
}
bool
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *ckh_hash,
ckh_keycomp_t *keycomp) {
bool ret;
size_t mincells, usize;
unsigned lg_mincells;
assert(minitems > 0);
assert(hash != NULL);
assert(ckh_hash != NULL);
assert(keycomp != NULL);
#ifdef CKH_COUNT
......@@ -393,7 +392,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
}
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->hash = hash;
ckh->hash = ckh_hash;
ckh->keycomp = keycomp;
usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
......
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/counter.h"
bool
counter_accum_init(counter_accum_t *counter, uint64_t interval) {
if (LOCKEDINT_MTX_INIT(counter->mtx, "counter_accum",
WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) {
return true;
}
locked_init_u64_unsynchronized(&counter->accumbytes, 0);
counter->interval = interval;
return false;
}
void
counter_prefork(tsdn_t *tsdn, counter_accum_t *counter) {
LOCKEDINT_MTX_PREFORK(tsdn, counter->mtx);
}
void
counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter) {
LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, counter->mtx);
}
void
counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter) {
LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, counter->mtx);
}
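/*
 * Minimal sketch of wiring up a counter, using only the functions above;
 * stats_interval_counter and stats_counter_boot are illustrative names.
 * The prefork/postfork wrappers are expected to be called from the fork
 * handlers so the counter's mutex stays consistent across fork().
 */
static counter_accum_t stats_interval_counter;

static bool
stats_counter_boot(void) {
	/* Fire the associated event roughly once per 1 MiB accumulated. */
	return counter_accum_init(&stats_interval_counter, (uint64_t)1 << 20);
}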
#define JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
......@@ -6,8 +5,16 @@
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/inspect.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/peak_event.h"
#include "jemalloc/internal/prof_data.h"
#include "jemalloc/internal/prof_log.h"
#include "jemalloc/internal/prof_recent.h"
#include "jemalloc/internal/prof_stats.h"
#include "jemalloc/internal/prof_sys.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/util.h"
......@@ -60,6 +67,8 @@ CTL_PROTO(background_thread)
CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_peak_read)
CTL_PROTO(thread_peak_reset)
CTL_PROTO(thread_prof_name)
CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
......@@ -67,6 +76,7 @@ CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(thread_idle)
CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
......@@ -81,7 +91,20 @@ CTL_PROTO(config_utrace)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_abort_conf)
CTL_PROTO(opt_cache_oblivious)
CTL_PROTO(opt_trust_madvise)
CTL_PROTO(opt_confirm_conf)
CTL_PROTO(opt_hpa)
CTL_PROTO(opt_hpa_slab_max_alloc)
CTL_PROTO(opt_hpa_hugification_threshold)
CTL_PROTO(opt_hpa_hugify_delay_ms)
CTL_PROTO(opt_hpa_min_purge_interval_ms)
CTL_PROTO(opt_hpa_dirty_mult)
CTL_PROTO(opt_hpa_sec_nshards)
CTL_PROTO(opt_hpa_sec_max_alloc)
CTL_PROTO(opt_hpa_sec_max_bytes)
CTL_PROTO(opt_hpa_sec_bytes_after_flush)
CTL_PROTO(opt_hpa_sec_batch_fill_extra)
CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
......@@ -89,19 +112,31 @@ CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_oversize_threshold)
CTL_PROTO(opt_background_thread)
CTL_PROTO(opt_mutex_max_spin)
CTL_PROTO(opt_max_background_threads)
CTL_PROTO(opt_dirty_decay_ms)
CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_stats_print_opts)
CTL_PROTO(opt_stats_interval)
CTL_PROTO(opt_stats_interval_opts)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_experimental_infallible_new)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_tcache_max)
CTL_PROTO(opt_tcache_nslots_small_min)
CTL_PROTO(opt_tcache_nslots_small_max)
CTL_PROTO(opt_tcache_nslots_large)
CTL_PROTO(opt_lg_tcache_nslots_mul)
CTL_PROTO(opt_tcache_gc_incr_bytes)
CTL_PROTO(opt_tcache_gc_delay_bytes)
CTL_PROTO(opt_lg_tcache_flush_small_div)
CTL_PROTO(opt_lg_tcache_flush_large_div)
CTL_PROTO(opt_thp)
CTL_PROTO(opt_lg_extent_max_active_fit)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
......@@ -111,7 +146,14 @@ CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_leak_error)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(opt_prof_recent_alloc_max)
CTL_PROTO(opt_prof_stats)
CTL_PROTO(opt_prof_sys_thread_name)
CTL_PROTO(opt_prof_time_res)
CTL_PROTO(opt_lg_san_uaf_align)
CTL_PROTO(opt_zero_realloc)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
......@@ -121,6 +163,7 @@ CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_oversize_threshold)
CTL_PROTO(arena_i_dirty_decay_ms)
CTL_PROTO(arena_i_muzzy_decay_ms)
CTL_PROTO(arena_i_extent_hooks)
......@@ -148,11 +191,18 @@ CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_gdump)
CTL_PROTO(prof_prefix)
CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
CTL_PROTO(lg_prof_sample)
CTL_PROTO(prof_log_start)
CTL_PROTO(prof_log_stop)
CTL_PROTO(prof_stats_bins_i_live)
CTL_PROTO(prof_stats_bins_i_accum)
INDEX_PROTO(prof_stats_bins_i)
CTL_PROTO(prof_stats_lextents_i_live)
CTL_PROTO(prof_stats_lextents_i_accum)
INDEX_PROTO(prof_stats_lextents_i)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
......@@ -188,6 +238,39 @@ CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
INDEX_PROTO(stats_arenas_i_extents_j)
CTL_PROTO(stats_arenas_i_hpa_shard_npurge_passes)
CTL_PROTO(stats_arenas_i_hpa_shard_npurges)
CTL_PROTO(stats_arenas_i_hpa_shard_nhugifies)
CTL_PROTO(stats_arenas_i_hpa_shard_ndehugifies)
/* We have a set of stats for full slabs. */
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)
/* A parallel set for the empty slabs. */
CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)
/*
* And one for the slabs that are neither empty nor full, but indexed by how
* full they are.
*/
CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)
INDEX_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss)
......@@ -209,8 +292,10 @@ CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes)
CTL_PROTO(stats_arenas_i_tcache_stashed_bytes)
CTL_PROTO(stats_arenas_i_resident)
CTL_PROTO(stats_arenas_i_abandoned_vm)
CTL_PROTO(stats_arenas_i_hpa_sec_bytes)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
......@@ -222,12 +307,21 @@ CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
CTL_PROTO(stats_zero_reallocs)
CTL_PROTO(experimental_hooks_install)
CTL_PROTO(experimental_hooks_remove)
CTL_PROTO(experimental_hooks_prof_backtrace)
CTL_PROTO(experimental_hooks_prof_dump)
CTL_PROTO(experimental_hooks_safety_check_abort)
CTL_PROTO(experimental_thread_activity_callback)
CTL_PROTO(experimental_utilization_query)
CTL_PROTO(experimental_utilization_batch_query)
CTL_PROTO(experimental_arenas_i_pactivep)
INDEX_PROTO(experimental_arenas_i)
CTL_PROTO(experimental_prof_recent_alloc_max)
CTL_PROTO(experimental_prof_recent_alloc_dump)
CTL_PROTO(experimental_batch_alloc)
CTL_PROTO(experimental_arenas_create_ext)
#define MUTEX_STATS_CTL_PROTO_GEN(n) \
CTL_PROTO(stats_##n##_num_ops) \
......@@ -275,6 +369,11 @@ static const ctl_named_node_t thread_tcache_node[] = {
{NAME("flush"), CTL(thread_tcache_flush)}
};
static const ctl_named_node_t thread_peak_node[] = {
{NAME("read"), CTL(thread_peak_read)},
{NAME("reset"), CTL(thread_peak_reset)},
};
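/*
 * The new thread.peak nodes are consumed through the regular mallctl()
 * interface.  A minimal standalone sketch follows; the uint64_t value type
 * for thread.peak.read is an assumption here.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	void *p = malloc(1 << 20);
	free(p);

	uint64_t peak;
	size_t sz = sizeof(peak);
	if (mallctl("thread.peak.read", &peak, &sz, NULL, 0) == 0) {
		printf("peak bytes on this thread: %" PRIu64 "\n", peak);
	}
	/* Reset so a later read covers only subsequent activity. */
	mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
	return 0;
}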
static const ctl_named_node_t thread_prof_node[] = {
{NAME("name"), CTL(thread_prof_name)},
{NAME("active"), CTL(thread_prof_active)}
......@@ -287,7 +386,9 @@ static const ctl_named_node_t thread_node[] = {
{NAME("deallocated"), CTL(thread_deallocated)},
{NAME("deallocatedp"), CTL(thread_deallocatedp)},
{NAME("tcache"), CHILD(named, thread_tcache)},
{NAME("prof"), CHILD(named, thread_prof)}
{NAME("peak"), CHILD(named, thread_peak)},
{NAME("prof"), CHILD(named, thread_prof)},
{NAME("idle"), CTL(thread_idle)}
};
static const ctl_named_node_t config_node[] = {
......@@ -308,27 +409,60 @@ static const ctl_named_node_t config_node[] = {
static const ctl_named_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)},
{NAME("abort_conf"), CTL(opt_abort_conf)},
{NAME("cache_oblivious"), CTL(opt_cache_oblivious)},
{NAME("trust_madvise"), CTL(opt_trust_madvise)},
{NAME("confirm_conf"), CTL(opt_confirm_conf)},
{NAME("hpa"), CTL(opt_hpa)},
{NAME("hpa_slab_max_alloc"), CTL(opt_hpa_slab_max_alloc)},
{NAME("hpa_hugification_threshold"),
CTL(opt_hpa_hugification_threshold)},
{NAME("hpa_hugify_delay_ms"), CTL(opt_hpa_hugify_delay_ms)},
{NAME("hpa_min_purge_interval_ms"), CTL(opt_hpa_min_purge_interval_ms)},
{NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)},
{NAME("hpa_sec_nshards"), CTL(opt_hpa_sec_nshards)},
{NAME("hpa_sec_max_alloc"), CTL(opt_hpa_sec_max_alloc)},
{NAME("hpa_sec_max_bytes"), CTL(opt_hpa_sec_max_bytes)},
{NAME("hpa_sec_bytes_after_flush"),
CTL(opt_hpa_sec_bytes_after_flush)},
{NAME("hpa_sec_batch_fill_extra"),
CTL(opt_hpa_sec_batch_fill_extra)},
{NAME("metadata_thp"), CTL(opt_metadata_thp)},
{NAME("retain"), CTL(opt_retain)},
{NAME("dss"), CTL(opt_dss)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("percpu_arena"), CTL(opt_percpu_arena)},
{NAME("oversize_threshold"), CTL(opt_oversize_threshold)},
{NAME("mutex_max_spin"), CTL(opt_mutex_max_spin)},
{NAME("background_thread"), CTL(opt_background_thread)},
{NAME("max_background_threads"), CTL(opt_max_background_threads)},
{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
{NAME("stats_print"), CTL(opt_stats_print)},
{NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
{NAME("stats_interval"), CTL(opt_stats_interval)},
{NAME("stats_interval_opts"), CTL(opt_stats_interval_opts)},
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)},
{NAME("utrace"), CTL(opt_utrace)},
{NAME("xmalloc"), CTL(opt_xmalloc)},
{NAME("experimental_infallible_new"),
CTL(opt_experimental_infallible_new)},
{NAME("tcache"), CTL(opt_tcache)},
{NAME("tcache_max"), CTL(opt_tcache_max)},
{NAME("tcache_nslots_small_min"),
CTL(opt_tcache_nslots_small_min)},
{NAME("tcache_nslots_small_max"),
CTL(opt_tcache_nslots_small_max)},
{NAME("tcache_nslots_large"), CTL(opt_tcache_nslots_large)},
{NAME("lg_tcache_nslots_mul"), CTL(opt_lg_tcache_nslots_mul)},
{NAME("tcache_gc_incr_bytes"), CTL(opt_tcache_gc_incr_bytes)},
{NAME("tcache_gc_delay_bytes"), CTL(opt_tcache_gc_delay_bytes)},
{NAME("lg_tcache_flush_small_div"),
CTL(opt_lg_tcache_flush_small_div)},
{NAME("lg_tcache_flush_large_div"),
CTL(opt_lg_tcache_flush_large_div)},
{NAME("thp"), CTL(opt_thp)},
{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
{NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
{NAME("prof"), CTL(opt_prof)},
{NAME("prof_prefix"), CTL(opt_prof_prefix)},
{NAME("prof_active"), CTL(opt_prof_active)},
......@@ -338,7 +472,14 @@ static const ctl_named_node_t opt_node[] = {
{NAME("prof_gdump"), CTL(opt_prof_gdump)},
{NAME("prof_final"), CTL(opt_prof_final)},
{NAME("prof_leak"), CTL(opt_prof_leak)},
{NAME("prof_accum"), CTL(opt_prof_accum)}
{NAME("prof_leak_error"), CTL(opt_prof_leak_error)},
{NAME("prof_accum"), CTL(opt_prof_accum)},
{NAME("prof_recent_alloc_max"), CTL(opt_prof_recent_alloc_max)},
{NAME("prof_stats"), CTL(opt_prof_stats)},
{NAME("prof_sys_thread_name"), CTL(opt_prof_sys_thread_name)},
{NAME("prof_time_resolution"), CTL(opt_prof_time_res)},
{NAME("lg_san_uaf_align"), CTL(opt_lg_san_uaf_align)},
{NAME("zero_realloc"), CTL(opt_zero_realloc)}
};
static const ctl_named_node_t tcache_node[] = {
......@@ -354,6 +495,11 @@ static const ctl_named_node_t arena_i_node[] = {
{NAME("reset"), CTL(arena_i_reset)},
{NAME("destroy"), CTL(arena_i_destroy)},
{NAME("dss"), CTL(arena_i_dss)},
/*
* Undocumented for now, since we anticipate an arena API in flux after
* we cut the last 5-series release.
*/
{NAME("oversize_threshold"), CTL(arena_i_oversize_threshold)},
{NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
{NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
{NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
......@@ -408,17 +554,51 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("lookup"), CTL(arenas_lookup)}
};
static const ctl_named_node_t prof_stats_bins_i_node[] = {
{NAME("live"), CTL(prof_stats_bins_i_live)},
{NAME("accum"), CTL(prof_stats_bins_i_accum)}
};
static const ctl_named_node_t super_prof_stats_bins_i_node[] = {
{NAME(""), CHILD(named, prof_stats_bins_i)}
};
static const ctl_indexed_node_t prof_stats_bins_node[] = {
{INDEX(prof_stats_bins_i)}
};
static const ctl_named_node_t prof_stats_lextents_i_node[] = {
{NAME("live"), CTL(prof_stats_lextents_i_live)},
{NAME("accum"), CTL(prof_stats_lextents_i_accum)}
};
static const ctl_named_node_t super_prof_stats_lextents_i_node[] = {
{NAME(""), CHILD(named, prof_stats_lextents_i)}
};
static const ctl_indexed_node_t prof_stats_lextents_node[] = {
{INDEX(prof_stats_lextents_i)}
};
static const ctl_named_node_t prof_stats_node[] = {
{NAME("bins"), CHILD(indexed, prof_stats_bins)},
{NAME("lextents"), CHILD(indexed, prof_stats_lextents)},
};
static const ctl_named_node_t prof_node[] = {
{NAME("thread_active_init"), CTL(prof_thread_active_init)},
{NAME("active"), CTL(prof_active)},
{NAME("dump"), CTL(prof_dump)},
{NAME("gdump"), CTL(prof_gdump)},
{NAME("prefix"), CTL(prof_prefix)},
{NAME("reset"), CTL(prof_reset)},
{NAME("interval"), CTL(prof_interval)},
{NAME("lg_sample"), CTL(lg_prof_sample)},
{NAME("log_start"), CTL(prof_log_start)},
{NAME("log_stop"), CTL(prof_log_stop)}
{NAME("log_stop"), CTL(prof_log_stop)},
{NAME("stats"), CHILD(named, prof_stats)}
};
static const ctl_named_node_t stats_arenas_i_small_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
......@@ -521,6 +701,75 @@ MUTEX_PROF_ARENA_MUTEXES
#undef OP
};
static const ctl_named_node_t stats_arenas_i_hpa_shard_full_slabs_node[] = {
{NAME("npageslabs_nonhuge"),
CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)},
{NAME("npageslabs_huge"),
CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)},
{NAME("nactive_nonhuge"),
CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)},
{NAME("nactive_huge"),
CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)},
{NAME("ndirty_nonhuge"),
CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)},
{NAME("ndirty_huge"),
CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)}
};
static const ctl_named_node_t stats_arenas_i_hpa_shard_empty_slabs_node[] = {
{NAME("npageslabs_nonhuge"),
CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)},
{NAME("npageslabs_huge"),
CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)},
{NAME("nactive_nonhuge"),
CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)},
{NAME("nactive_huge"),
CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)},
{NAME("ndirty_nonhuge"),
CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)},
{NAME("ndirty_huge"),
CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)}
};
static const ctl_named_node_t stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
{NAME("npageslabs_nonhuge"),
CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)},
{NAME("npageslabs_huge"),
CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)},
{NAME("nactive_nonhuge"),
CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)},
{NAME("nactive_huge"),
CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)},
{NAME("ndirty_nonhuge"),
CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)},
{NAME("ndirty_huge"),
CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)}
};
static const ctl_named_node_t super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
{NAME(""),
CHILD(named, stats_arenas_i_hpa_shard_nonfull_slabs_j)}
};
static const ctl_indexed_node_t stats_arenas_i_hpa_shard_nonfull_slabs_node[] =
{
{INDEX(stats_arenas_i_hpa_shard_nonfull_slabs_j)}
};
static const ctl_named_node_t stats_arenas_i_hpa_shard_node[] = {
{NAME("full_slabs"), CHILD(named,
stats_arenas_i_hpa_shard_full_slabs)},
{NAME("empty_slabs"), CHILD(named,
stats_arenas_i_hpa_shard_empty_slabs)},
{NAME("nonfull_slabs"), CHILD(indexed,
stats_arenas_i_hpa_shard_nonfull_slabs)},
{NAME("npurge_passes"), CTL(stats_arenas_i_hpa_shard_npurge_passes)},
{NAME("npurges"), CTL(stats_arenas_i_hpa_shard_npurges)},
{NAME("nhugifies"), CTL(stats_arenas_i_hpa_shard_nhugifies)},
{NAME("ndehugifies"), CTL(stats_arenas_i_hpa_shard_ndehugifies)}
};
static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("uptime"), CTL(stats_arenas_i_uptime)},
......@@ -543,14 +792,18 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("internal"), CTL(stats_arenas_i_internal)},
{NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
{NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
{NAME("tcache_stashed_bytes"),
CTL(stats_arenas_i_tcache_stashed_bytes)},
{NAME("resident"), CTL(stats_arenas_i_resident)},
{NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)},
{NAME("hpa_sec_bytes"), CTL(stats_arenas_i_hpa_sec_bytes)},
{NAME("small"), CHILD(named, stats_arenas_i_small)},
{NAME("large"), CHILD(named, stats_arenas_i_large)},
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
{NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
{NAME("extents"), CHILD(indexed, stats_arenas_i_extents)},
{NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}
{NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)},
{NAME("hpa_shard"), CHILD(named, stats_arenas_i_hpa_shard)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
{NAME(""), CHILD(named, stats_arenas_i)}
......@@ -589,12 +842,21 @@ static const ctl_named_node_t stats_node[] = {
{NAME("background_thread"),
CHILD(named, stats_background_thread)},
{NAME("mutexes"), CHILD(named, stats_mutexes)},
{NAME("arenas"), CHILD(indexed, stats_arenas)}
{NAME("arenas"), CHILD(indexed, stats_arenas)},
{NAME("zero_reallocs"), CTL(stats_zero_reallocs)},
};
static const ctl_named_node_t experimental_hooks_node[] = {
{NAME("install"), CTL(experimental_hooks_install)},
{NAME("remove"), CTL(experimental_hooks_remove)}
{NAME("remove"), CTL(experimental_hooks_remove)},
{NAME("prof_backtrace"), CTL(experimental_hooks_prof_backtrace)},
{NAME("prof_dump"), CTL(experimental_hooks_prof_dump)},
{NAME("safety_check_abort"), CTL(experimental_hooks_safety_check_abort)},
};
static const ctl_named_node_t experimental_thread_node[] = {
{NAME("activity_callback"),
CTL(experimental_thread_activity_callback)}
};
static const ctl_named_node_t experimental_utilization_node[] = {
......@@ -613,10 +875,19 @@ static const ctl_indexed_node_t experimental_arenas_node[] = {
{INDEX(experimental_arenas_i)}
};
static const ctl_named_node_t experimental_prof_recent_node[] = {
{NAME("alloc_max"), CTL(experimental_prof_recent_alloc_max)},
{NAME("alloc_dump"), CTL(experimental_prof_recent_alloc_dump)},
};
static const ctl_named_node_t experimental_node[] = {
{NAME("hooks"), CHILD(named, experimental_hooks)},
{NAME("utilization"), CHILD(named, experimental_utilization)},
{NAME("arenas"), CHILD(indexed, experimental_arenas)}
{NAME("arenas"), CHILD(indexed, experimental_arenas)},
{NAME("arenas_create_ext"), CTL(experimental_arenas_create_ext)},
{NAME("prof_recent"), CHILD(named, experimental_prof_recent)},
{NAME("batch_alloc"), CTL(experimental_batch_alloc)},
{NAME("thread"), CHILD(named, experimental_thread)}
};
static const ctl_named_node_t root_node[] = {
......@@ -650,28 +921,13 @@ static const ctl_named_node_t super_root_node[] = {
* synchronized by the ctl mutex.
*/
static void
ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
#ifdef JEMALLOC_ATOMIC_U64
uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
#else
*dst += *src;
#endif
}
/* Likewise: with ctl mutex synchronization, reading is simple. */
static uint64_t
ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_u64(p, ATOMIC_RELAXED);
#else
return *p;
#endif
ctl_accum_locked_u64(locked_u64_t *dst, locked_u64_t *src) {
locked_inc_u64_unsynchronized(dst,
locked_read_u64_unsynchronized(src));
}
static void
accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
ctl_accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
......@@ -783,11 +1039,15 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) {
ctl_arena->astats->nfills_small = 0;
ctl_arena->astats->nflushes_small = 0;
memset(ctl_arena->astats->bstats, 0, SC_NBINS *
sizeof(bin_stats_t));
sizeof(bin_stats_data_t));
memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
sizeof(arena_stats_large_t));
memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
sizeof(arena_stats_extents_t));
sizeof(pac_estats_t));
memset(&ctl_arena->astats->hpastats, 0,
sizeof(hpa_shard_stats_t));
memset(&ctl_arena->astats->secstats, 0,
sizeof(sec_stats_t));
}
}
......@@ -801,22 +1061,19 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
&ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
&ctl_arena->pdirty, &ctl_arena->pmuzzy,
&ctl_arena->astats->astats, ctl_arena->astats->bstats,
ctl_arena->astats->lstats, ctl_arena->astats->estats);
ctl_arena->astats->lstats, ctl_arena->astats->estats,
&ctl_arena->astats->hpastats, &ctl_arena->astats->secstats);
for (i = 0; i < SC_NBINS; i++) {
ctl_arena->astats->allocated_small +=
ctl_arena->astats->bstats[i].curregs *
bin_stats_t *bstats =
&ctl_arena->astats->bstats[i].stats_data;
ctl_arena->astats->allocated_small += bstats->curregs *
sz_index2size(i);
ctl_arena->astats->nmalloc_small +=
ctl_arena->astats->bstats[i].nmalloc;
ctl_arena->astats->ndalloc_small +=
ctl_arena->astats->bstats[i].ndalloc;
ctl_arena->astats->nrequests_small +=
ctl_arena->astats->bstats[i].nrequests;
ctl_arena->astats->nfills_small +=
ctl_arena->astats->bstats[i].nfills;
ctl_arena->astats->nflushes_small +=
ctl_arena->astats->bstats[i].nflushes;
ctl_arena->astats->nmalloc_small += bstats->nmalloc;
ctl_arena->astats->ndalloc_small += bstats->ndalloc;
ctl_arena->astats->nrequests_small += bstats->nrequests;
ctl_arena->astats->nfills_small += bstats->nfills;
ctl_arena->astats->nflushes_small += bstats->nflushes;
}
} else {
arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
......@@ -848,27 +1105,32 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
ctl_arena_stats_t *astats = ctl_arena->astats;
if (!destroyed) {
accum_atomic_zu(&sdstats->astats.mapped,
&astats->astats.mapped);
accum_atomic_zu(&sdstats->astats.retained,
&astats->astats.retained);
accum_atomic_zu(&sdstats->astats.extent_avail,
&astats->astats.extent_avail);
}
ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
&astats->astats.decay_dirty.npurge);
ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
&astats->astats.decay_dirty.nmadvise);
ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
&astats->astats.decay_dirty.purged);
ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
&astats->astats.decay_muzzy.npurge);
ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
&astats->astats.decay_muzzy.nmadvise);
ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
&astats->astats.decay_muzzy.purged);
sdstats->astats.mapped += astats->astats.mapped;
sdstats->astats.pa_shard_stats.pac_stats.retained
+= astats->astats.pa_shard_stats.pac_stats.retained;
sdstats->astats.pa_shard_stats.edata_avail
+= astats->astats.pa_shard_stats.edata_avail;
}
ctl_accum_locked_u64(
&sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge,
&astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge);
ctl_accum_locked_u64(
&sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise,
&astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise);
ctl_accum_locked_u64(
&sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.purged,
&astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged);
ctl_accum_locked_u64(
&sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge,
&astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge);
ctl_accum_locked_u64(
&sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise,
&astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise);
ctl_accum_locked_u64(
&sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged,
&astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged);
#define OP(mtx) malloc_mutex_prof_merge( \
&(sdstats->astats.mutex_prof_data[ \
......@@ -878,14 +1140,11 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
MUTEX_PROF_ARENA_MUTEXES
#undef OP
if (!destroyed) {
accum_atomic_zu(&sdstats->astats.base,
&astats->astats.base);
accum_atomic_zu(&sdstats->astats.internal,
sdstats->astats.base += astats->astats.base;
sdstats->astats.resident += astats->astats.resident;
sdstats->astats.metadata_thp += astats->astats.metadata_thp;
ctl_accum_atomic_zu(&sdstats->astats.internal,
&astats->astats.internal);
accum_atomic_zu(&sdstats->astats.resident,
&astats->astats.resident);
accum_atomic_zu(&sdstats->astats.metadata_thp,
&astats->astats.metadata_thp);
} else {
assert(atomic_load_zu(
&astats->astats.internal, ATOMIC_RELAXED) == 0);
......@@ -903,23 +1162,23 @@ MUTEX_PROF_ARENA_MUTEXES
sdstats->nflushes_small += astats->nflushes_small;
if (!destroyed) {
accum_atomic_zu(&sdstats->astats.allocated_large,
&astats->astats.allocated_large);
sdstats->astats.allocated_large +=
astats->astats.allocated_large;
} else {
assert(atomic_load_zu(&astats->astats.allocated_large,
ATOMIC_RELAXED) == 0);
assert(astats->astats.allocated_large == 0);
}
ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
&astats->astats.nmalloc_large);
ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
&astats->astats.ndalloc_large);
ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
&astats->astats.nrequests_large);
accum_atomic_zu(&sdstats->astats.abandoned_vm,
&astats->astats.abandoned_vm);
accum_atomic_zu(&sdstats->astats.tcache_bytes,
&astats->astats.tcache_bytes);
sdstats->astats.nmalloc_large += astats->astats.nmalloc_large;
sdstats->astats.ndalloc_large += astats->astats.ndalloc_large;
sdstats->astats.nrequests_large
+= astats->astats.nrequests_large;
sdstats->astats.nflushes_large += astats->astats.nflushes_large;
ctl_accum_atomic_zu(
&sdstats->astats.pa_shard_stats.pac_stats.abandoned_vm,
&astats->astats.pa_shard_stats.pac_stats.abandoned_vm);
sdstats->astats.tcache_bytes += astats->astats.tcache_bytes;
sdstats->astats.tcache_stashed_bytes +=
astats->astats.tcache_stashed_bytes;
if (ctl_arena->arena_ind == 0) {
sdstats->astats.uptime = astats->astats.uptime;
......@@ -927,29 +1186,26 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge bin stats. */
for (i = 0; i < SC_NBINS; i++) {
sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sdstats->bstats[i].nrequests +=
astats->bstats[i].nrequests;
bin_stats_t *bstats = &astats->bstats[i].stats_data;
bin_stats_t *merged = &sdstats->bstats[i].stats_data;
merged->nmalloc += bstats->nmalloc;
merged->ndalloc += bstats->ndalloc;
merged->nrequests += bstats->nrequests;
if (!destroyed) {
sdstats->bstats[i].curregs +=
astats->bstats[i].curregs;
merged->curregs += bstats->curregs;
} else {
assert(astats->bstats[i].curregs == 0);
assert(bstats->curregs == 0);
}
sdstats->bstats[i].nfills += astats->bstats[i].nfills;
sdstats->bstats[i].nflushes +=
astats->bstats[i].nflushes;
sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
merged->nfills += bstats->nfills;
merged->nflushes += bstats->nflushes;
merged->nslabs += bstats->nslabs;
merged->reslabs += bstats->reslabs;
if (!destroyed) {
sdstats->bstats[i].curslabs +=
astats->bstats[i].curslabs;
sdstats->bstats[i].nonfull_slabs +=
astats->bstats[i].nonfull_slabs;
merged->curslabs += bstats->curslabs;
merged->nonfull_slabs += bstats->nonfull_slabs;
} else {
assert(astats->bstats[i].curslabs == 0);
assert(astats->bstats[i].nonfull_slabs == 0);
assert(bstats->curslabs == 0);
assert(bstats->nonfull_slabs == 0);
}
malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
&astats->bstats[i].mutex_data);
......@@ -957,11 +1213,11 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge stats for large allocations. */
for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
ctl_accum_locked_u64(&sdstats->lstats[i].nmalloc,
&astats->lstats[i].nmalloc);
ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
ctl_accum_locked_u64(&sdstats->lstats[i].ndalloc,
&astats->lstats[i].ndalloc);
ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
ctl_accum_locked_u64(&sdstats->lstats[i].nrequests,
&astats->lstats[i].nrequests);
if (!destroyed) {
sdstats->lstats[i].curlextents +=
......@@ -973,19 +1229,21 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge extents stats. */
for (i = 0; i < SC_NPSIZES; i++) {
accum_atomic_zu(&sdstats->estats[i].ndirty,
&astats->estats[i].ndirty);
accum_atomic_zu(&sdstats->estats[i].nmuzzy,
&astats->estats[i].nmuzzy);
accum_atomic_zu(&sdstats->estats[i].nretained,
&astats->estats[i].nretained);
accum_atomic_zu(&sdstats->estats[i].dirty_bytes,
&astats->estats[i].dirty_bytes);
accum_atomic_zu(&sdstats->estats[i].muzzy_bytes,
&astats->estats[i].muzzy_bytes);
accum_atomic_zu(&sdstats->estats[i].retained_bytes,
&astats->estats[i].retained_bytes);
sdstats->estats[i].ndirty += astats->estats[i].ndirty;
sdstats->estats[i].nmuzzy += astats->estats[i].nmuzzy;
sdstats->estats[i].nretained
+= astats->estats[i].nretained;
sdstats->estats[i].dirty_bytes
+= astats->estats[i].dirty_bytes;
sdstats->estats[i].muzzy_bytes
+= astats->estats[i].muzzy_bytes;
sdstats->estats[i].retained_bytes
+= astats->estats[i].retained_bytes;
}
/* Merge HPA stats. */
hpa_shard_stats_accum(&sdstats->hpastats, &astats->hpastats);
sec_stats_accum(&sdstats->secstats, &astats->secstats);
}
}
......@@ -1001,7 +1259,7 @@ ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
}
static unsigned
ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
ctl_arena_init(tsd_t *tsd, const arena_config_t *config) {
unsigned arena_ind;
ctl_arena_t *ctl_arena;
......@@ -1019,7 +1277,7 @@ ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
}
/* Initialize new arena. */
if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
if (arena_init(tsd_tsdn(tsd), arena_ind, config) == NULL) {
return UINT_MAX;
}
......@@ -1036,8 +1294,11 @@ ctl_background_thread_stats_read(tsdn_t *tsdn) {
if (!have_background_thread ||
background_thread_stats_read(tsdn, stats)) {
memset(stats, 0, sizeof(background_thread_stats_t));
nstime_init(&stats->run_interval, 0);
nstime_init_zero(&stats->run_interval);
}
malloc_mutex_prof_copy(
&ctl_stats->mutex_prof_data[global_prof_mutex_max_per_bg_thd],
&stats->max_counter_per_bg_thd);
}
static void
......@@ -1069,21 +1330,17 @@ ctl_refresh(tsdn_t *tsdn) {
if (config_stats) {
ctl_stats->allocated = ctl_sarena->astats->allocated_small +
atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
ATOMIC_RELAXED);
ctl_sarena->astats->astats.allocated_large;
ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
ctl_stats->metadata = atomic_load_zu(
&ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
ctl_stats->metadata = ctl_sarena->astats->astats.base +
atomic_load_zu(&ctl_sarena->astats->astats.internal,
ATOMIC_RELAXED);
ctl_stats->metadata_thp = atomic_load_zu(
&ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
ctl_stats->resident = atomic_load_zu(
&ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
ctl_stats->mapped = atomic_load_zu(
&ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
ctl_stats->retained = atomic_load_zu(
&ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);
ctl_stats->resident = ctl_sarena->astats->astats.resident;
ctl_stats->metadata_thp =
ctl_sarena->astats->astats.metadata_thp;
ctl_stats->mapped = ctl_sarena->astats->astats.mapped;
ctl_stats->retained = ctl_sarena->astats->astats
.pa_shard_stats.pac_stats.retained;
ctl_background_thread_stats_read(tsdn);
......@@ -1093,8 +1350,20 @@ ctl_refresh(tsdn_t *tsdn) {
malloc_mutex_unlock(tsdn, &mtx);
if (config_prof && opt_prof) {
READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
bt2gctx_mtx);
READ_GLOBAL_MUTEX_PROF_DATA(
global_prof_mutex_prof, bt2gctx_mtx);
READ_GLOBAL_MUTEX_PROF_DATA(
global_prof_mutex_prof_thds_data, tdatas_mtx);
READ_GLOBAL_MUTEX_PROF_DATA(
global_prof_mutex_prof_dump, prof_dump_mtx);
READ_GLOBAL_MUTEX_PROF_DATA(
global_prof_mutex_prof_recent_alloc,
prof_recent_alloc_mtx);
READ_GLOBAL_MUTEX_PROF_DATA(
global_prof_mutex_prof_recent_dump,
prof_recent_dump_mtx);
READ_GLOBAL_MUTEX_PROF_DATA(
global_prof_mutex_prof_stats, prof_stats_mtx);
}
if (have_background_thread) {
READ_GLOBAL_MUTEX_PROF_DATA(
......@@ -1191,8 +1460,9 @@ label_return:
}
static int
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
size_t *mibp, size_t *depthp) {
ctl_lookup(tsdn_t *tsdn, const ctl_named_node_t *starting_node,
const char *name, const ctl_named_node_t **ending_nodep, size_t *mibp,
size_t *depthp) {
int ret;
const char *elm, *tdot, *dot;
size_t elen, i, j;
......@@ -1206,7 +1476,7 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
ret = ENOENT;
goto label_return;
}
node = super_root_node;
node = starting_node;
for (i = 0; i < *depthp; i++) {
assert(node);
assert(node->nchildren > 0);
......@@ -1220,10 +1490,6 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
if (strlen(child->name) == elen &&
strncmp(elm, child->name, elen) == 0) {
node = child;
if (nodesp != NULL) {
nodesp[i] =
(const ctl_node_t *)node;
}
mibp[i] = j;
break;
}
......@@ -1250,13 +1516,11 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
goto label_return;
}
if (nodesp != NULL) {
nodesp[i] = (const ctl_node_t *)node;
}
mibp[i] = (size_t)index;
}
if (node->ctl != NULL) {
/* Reached the end? */
if (node->ctl != NULL || *dot == '\0') {
/* Terminal node. */
if (*dot != '\0') {
/*
......@@ -1272,16 +1536,14 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
}
/* Update elm. */
if (*dot == '\0') {
/* No more elements. */
ret = ENOENT;
goto label_return;
}
elm = &dot[1];
dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
strchr(elm, '\0');
elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
}
if (ending_nodep != NULL) {
*ending_nodep = node;
}
ret = 0;
label_return:
......@@ -1293,7 +1555,6 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen) {
int ret;
size_t depth;
ctl_node_t const *nodes[CTL_MAX_DEPTH];
size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node;
......@@ -1303,12 +1564,12 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
}
depth = CTL_MAX_DEPTH;
ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, &node, mib,
&depth);
if (ret != 0) {
goto label_return;
}
node = ctl_named_node(nodes[depth-1]);
if (node != NULL && node->ctl) {
ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
} else {
......@@ -1329,26 +1590,19 @@ ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
goto label_return;
}
ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, NULL, mibp,
miblenp);
label_return:
return(ret);
}
int
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
static int
ctl_lookupbymib(tsdn_t *tsdn, const ctl_named_node_t **ending_nodep,
const size_t *mib, size_t miblen) {
int ret;
const ctl_named_node_t *node;
size_t i;
if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN;
goto label_return;
}
/* Iterate down the tree. */
node = super_root_node;
for (i = 0; i < miblen; i++) {
const ctl_named_node_t *node = super_root_node;
for (size_t i = 0; i < miblen; i++) {
assert(node);
assert(node->nchildren > 0);
if (ctl_named_node(node->children) != NULL) {
......@@ -1363,13 +1617,36 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Indexed element. */
inode = ctl_indexed_node(node->children);
node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
node = inode->index(tsdn, mib, miblen, mib[i]);
if (node == NULL) {
ret = ENOENT;
goto label_return;
}
}
}
assert(ending_nodep != NULL);
*ending_nodep = node;
ret = 0;
label_return:
return(ret);
}
int
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const ctl_named_node_t *node;
if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN;
goto label_return;
}
ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
if (ret != 0) {
goto label_return;
}
/* Call the ctl function. */
if (node && node->ctl) {
......@@ -1383,6 +1660,81 @@ label_return:
return(ret);
}
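For context (illustrative only, not part of this changeset): these internal ctl_byname()/ctl_bymib() routines back the documented public wrappers mallctl(), mallctlnametomib() and mallctlbymib(). A minimal sketch of the usual name-to-MIB pattern they serve, assuming a linked jemalloc:

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Resolve "stats.allocated" to a MIB once, then read it repeatedly. */
static void
print_allocated(void) {
	size_t mib[2];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	if (mallctlnametomib("stats.allocated", mib, &miblen) != 0) {
		return;
	}
	/* Advance the stats epoch so the counters are refreshed. */
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	mallctl("epoch", &epoch, &esz, &epoch, esz);

	size_t allocated;
	size_t sz = sizeof(allocated);
	if (mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0) == 0) {
		printf("allocated: %zu bytes\n", allocated);
	}
}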
int
ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
size_t *miblenp) {
int ret;
const ctl_named_node_t *node;
if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN;
goto label_return;
}
ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
if (ret != 0) {
goto label_return;
}
if (node == NULL || node->ctl != NULL) {
ret = ENOENT;
goto label_return;
}
assert(miblenp != NULL);
assert(*miblenp >= miblen);
*miblenp -= miblen;
ret = ctl_lookup(tsd_tsdn(tsd), node, name, NULL, mib + miblen,
miblenp);
*miblenp += miblen;
label_return:
return(ret);
}
int
ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const ctl_named_node_t *node;
if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN;
goto label_return;
}
ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
if (ret != 0) {
goto label_return;
}
if (node == NULL || node->ctl != NULL) {
ret = ENOENT;
goto label_return;
}
assert(miblenp != NULL);
assert(*miblenp >= miblen);
*miblenp -= miblen;
/*
* The same node supplies the starting node and stores the ending node.
* The same variable supplies the starting node and receives the ending node.
*/
ret = ctl_lookup(tsd_tsdn(tsd), node, name, &node, mib + miblen,
miblenp);
*miblenp += miblen;
if (ret != 0) {
goto label_return;
}
if (node != NULL && node->ctl) {
ret = node->ctl(tsd, mib, *miblenp, oldp, oldlenp, newp,
newlen);
} else {
/* The name refers to a partial path through the ctl tree. */
ret = ENOENT;
}
label_return:
return(ret);
}
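A rough sketch of how the new ctl_mibnametomib()/ctl_bymibname() helpers are meant to compose (hypothetical internal caller, not taken from this changeset; assumes jemalloc's internal ctl.h, an already-initialized ctl tree, and that arena 0 exists): cache the MIB of a fixed prefix once, then resolve only the varying tail by name on each query.

/* Hypothetical internal usage; error handling abbreviated. */
static int
read_arena0_base(tsd_t *tsd, size_t *basep) {
	size_t mib[CTL_MAX_DEPTH];
	size_t miblen = CTL_MAX_DEPTH;

	/* Resolve the fixed "stats.arenas" prefix once (cacheable). */
	int ret = ctl_nametomib(tsd, "stats.arenas", mib, &miblen);
	if (ret != 0) {
		return ret;
	}

	/* Per query: append the varying "0.base" tail by name. */
	size_t total_len = CTL_MAX_DEPTH;
	ret = ctl_mibnametomib(tsd, mib, miblen, "0.base", &total_len);
	if (ret != 0) {
		return ret;
	}

	size_t sz = sizeof(*basep);
	return ctl_bymib(tsd, mib, total_len, basep, &sz, NULL, 0);
}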
bool
ctl_boot(void) {
if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
......@@ -1410,6 +1762,11 @@ ctl_postfork_child(tsdn_t *tsdn) {
malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
void
ctl_mtx_assert_held(tsdn_t *tsdn) {
malloc_mutex_assert_owner(tsdn, &ctl_mtx);
}
/******************************************************************************/
/* *_ctl() functions. */
......@@ -1427,6 +1784,7 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
/* Can read or write, but not both. */
#define READ_XOR_WRITE() do { \
if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
newlen != 0)) { \
......@@ -1435,12 +1793,31 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
/* Can neither read nor write. */
#define NEITHER_READ_NOR_WRITE() do { \
if (oldp != NULL || oldlenp != NULL || newp != NULL || \
newlen != 0) { \
ret = EPERM; \
goto label_return; \
} \
} while (0)
/* Verify that the space provided is enough. */
#define VERIFY_READ(t) do { \
if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(t)) { \
*oldlenp = 0; \
ret = EINVAL; \
goto label_return; \
} \
} while (0)
#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
? sizeof(t) : *oldlenp; \
memcpy(oldp, (void *)&(v), copylen); \
*oldlenp = copylen; \
ret = EINVAL; \
goto label_return; \
} \
......@@ -1458,6 +1835,14 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
#define ASSURED_WRITE(v, t) do { \
if (newp == NULL || newlen != sizeof(t)) { \
ret = EINVAL; \
goto label_return; \
} \
(v) = *(t *)newp; \
} while (0)
#define MIB_UNSIGNED(v, i) do { \
if (mib[i] > UINT_MAX) { \
ret = EFAULT; \
......@@ -1497,8 +1882,8 @@ label_return: \
#define CTL_RO_CGEN(c, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
......@@ -1540,8 +1925,8 @@ label_return: \
*/
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
......@@ -1559,8 +1944,8 @@ label_return: \
#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
......@@ -1573,29 +1958,10 @@ label_return: \
return ret; \
}
#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
if (!(c)) { \
return ENOENT; \
} \
READONLY(); \
oldval = (m(tsd)); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
return ret; \
}
#define CTL_RO_CONFIG_GEN(n, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
......@@ -1761,7 +2127,34 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool)
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
CTL_RO_NL_GEN(opt_cache_oblivious, opt_cache_oblivious, bool)
CTL_RO_NL_GEN(opt_trust_madvise, opt_trust_madvise, bool)
CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
/* HPA options. */
CTL_RO_NL_GEN(opt_hpa, opt_hpa, bool)
CTL_RO_NL_GEN(opt_hpa_hugification_threshold,
opt_hpa_opts.hugification_threshold, size_t)
CTL_RO_NL_GEN(opt_hpa_hugify_delay_ms, opt_hpa_opts.hugify_delay_ms, uint64_t)
CTL_RO_NL_GEN(opt_hpa_min_purge_interval_ms, opt_hpa_opts.min_purge_interval_ms,
uint64_t)
/*
* This will have to change before we publicly document this option; fxp_t and
* its representation are internal implementation details.
*/
CTL_RO_NL_GEN(opt_hpa_dirty_mult, opt_hpa_opts.dirty_mult, fxp_t)
CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_opts.slab_max_alloc, size_t)
/* HPA SEC options */
CTL_RO_NL_GEN(opt_hpa_sec_nshards, opt_hpa_sec_opts.nshards, size_t)
CTL_RO_NL_GEN(opt_hpa_sec_max_alloc, opt_hpa_sec_opts.max_alloc, size_t)
CTL_RO_NL_GEN(opt_hpa_sec_max_bytes, opt_hpa_sec_opts.max_bytes, size_t)
CTL_RO_NL_GEN(opt_hpa_sec_bytes_after_flush, opt_hpa_sec_opts.bytes_after_flush,
size_t)
CTL_RO_NL_GEN(opt_hpa_sec_batch_fill_extra, opt_hpa_sec_opts.batch_fill_extra,
size_t)
CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
const char *)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
......@@ -1769,6 +2162,7 @@ CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
const char *)
CTL_RO_NL_GEN(opt_mutex_max_spin, opt_mutex_max_spin, int64_t)
CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t)
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
......@@ -1776,15 +2170,31 @@ CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
CTL_RO_NL_GEN(opt_stats_interval, opt_stats_interval, int64_t)
CTL_RO_NL_GEN(opt_stats_interval_opts, opt_stats_interval_opts, const char *)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_enable_cxx, opt_experimental_infallible_new,
opt_experimental_infallible_new, bool)
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
CTL_RO_NL_GEN(opt_tcache_max, opt_tcache_max, size_t)
CTL_RO_NL_GEN(opt_tcache_nslots_small_min, opt_tcache_nslots_small_min,
unsigned)
CTL_RO_NL_GEN(opt_tcache_nslots_small_max, opt_tcache_nslots_small_max,
unsigned)
CTL_RO_NL_GEN(opt_tcache_nslots_large, opt_tcache_nslots_large, unsigned)
CTL_RO_NL_GEN(opt_lg_tcache_nslots_mul, opt_lg_tcache_nslots_mul, ssize_t)
CTL_RO_NL_GEN(opt_tcache_gc_incr_bytes, opt_tcache_gc_incr_bytes, size_t)
CTL_RO_NL_GEN(opt_tcache_gc_delay_bytes, opt_tcache_gc_delay_bytes, size_t)
CTL_RO_NL_GEN(opt_lg_tcache_flush_small_div, opt_lg_tcache_flush_small_div,
unsigned)
CTL_RO_NL_GEN(opt_lg_tcache_flush_large_div, opt_lg_tcache_flush_large_div,
unsigned)
CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
size_t)
CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
......@@ -1796,6 +2206,18 @@ CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak_error, opt_prof_leak_error, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_recent_alloc_max,
opt_prof_recent_alloc_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_stats, opt_prof_stats, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_sys_thread_name, opt_prof_sys_thread_name,
bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_time_res,
prof_time_res_mode_names[opt_prof_time_res], const char *)
CTL_RO_NL_CGEN(config_uaf_detection, opt_lg_san_uaf_align,
opt_lg_san_uaf_align, ssize_t)
CTL_RO_NL_GEN(opt_zero_realloc,
zero_realloc_mode_names[opt_zero_realloc_action], const char *)
/******************************************************************************/
......@@ -1843,10 +2265,11 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
/* Set new arena/tcache associations. */
arena_migrate(tsd, oldind, newind);
arena_migrate(tsd, oldarena, newarena);
if (tcache_available(tsd)) {
tcache_arena_reassociate(tsd_tsdn(tsd),
tsd_tcachep_get(tsd), newarena);
tsd_tcache_slowp_get(tsd), tsd_tcachep_get(tsd),
newarena);
}
}
......@@ -1855,14 +2278,10 @@ label_return:
return ret;
}
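Illustrative only (documented public API, not part of this changeset): arena_migrate() above is what runs when a thread rebinds itself through the "thread.arena" mallctl, e.g.:

#include <jemalloc/jemalloc.h>

/* Bind the calling thread to arena 0 and return its previous arena. */
static int
bind_to_arena0(unsigned *old_ind) {
	unsigned new_ind = 0;
	size_t sz = sizeof(*old_ind);
	return mallctl("thread.arena", old_ind, &sz, &new_ind,
	    sizeof(new_ind));
}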
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
uint64_t *)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
tsd_thread_deallocatedp_get, uint64_t *)
CTL_RO_NL_GEN(thread_allocated, tsd_thread_allocated_get(tsd), uint64_t)
CTL_RO_NL_GEN(thread_allocatedp, tsd_thread_allocatedp_get(tsd), uint64_t *)
CTL_RO_NL_GEN(thread_deallocated, tsd_thread_deallocated_get(tsd), uint64_t)
CTL_RO_NL_GEN(thread_deallocatedp, tsd_thread_deallocatedp_get(tsd), uint64_t *)
static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib,
......@@ -1897,8 +2316,7 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
goto label_return;
}
READONLY();
WRITEONLY();
NEITHER_READ_NOR_WRITE();
tcache_flush(tsd);
......@@ -1907,13 +2325,45 @@ label_return:
return ret;
}
static int
thread_peak_read_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
if (!config_stats) {
return ENOENT;
}
READONLY();
peak_event_update(tsd);
uint64_t result = peak_event_max(tsd);
READ(result, uint64_t);
ret = 0;
label_return:
return ret;
}
static int
thread_peak_reset_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
if (!config_stats) {
return ENOENT;
}
NEITHER_READ_NOR_WRITE();
peak_event_zero(tsd);
ret = 0;
label_return:
return ret;
}
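A usage sketch for the two handlers above (the mallctl names "thread.peak.read" and "thread.peak.reset" are inferred from the handler names and assume a stats-enabled build):

#include <stdint.h>
#include <jemalloc/jemalloc.h>

/* Measure the peak of this thread's net allocation across a workload. */
static uint64_t
measure_thread_peak(void (*workload)(void)) {
	mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
	workload();
	uint64_t peak = 0;
	size_t sz = sizeof(peak);
	mallctl("thread.peak.read", &peak, &sz, NULL, 0);
	return peak;
}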
static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
if (!config_prof) {
if (!config_prof || !opt_prof) {
return ENOENT;
}
......@@ -1950,8 +2400,12 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib,
return ENOENT;
}
oldval = prof_thread_active_get(tsd);
oldval = opt_prof ? prof_thread_active_get(tsd) : false;
if (newp != NULL) {
if (!opt_prof) {
ret = ENOENT;
goto label_return;
}
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
......@@ -1968,6 +2422,39 @@ label_return:
return ret;
}
static int
thread_idle_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
NEITHER_READ_NOR_WRITE();
if (tcache_available(tsd)) {
tcache_flush(tsd);
}
/*
* This heuristic is perhaps not the most well-considered. But it
* matches the only idling policy we have experience with in the status
* quo. Over time we should investigate more principled approaches.
*/
if (opt_narenas > ncpus * 2) {
arena_t *arena = arena_choose(tsd, NULL);
if (arena != NULL) {
arena_decay(tsd_tsdn(tsd), arena, false, true);
}
/*
* The missing arena case is not actually an error; a thread
* might be idle before it associates itself to one. This is
* unusual, but not wrong.
*/
}
ret = 0;
label_return:
return ret;
}
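A minimal sketch of the intended call site (the mallctl name "thread.idle" is inferred from the handler above): a thread-pool worker hints that it is going idle, which flushes its tcache and, when arenas outnumber CPUs, nudges its arena to decay.

#include <jemalloc/jemalloc.h>

static void
on_worker_idle(void) {
	/* Neither reads nor writes; all pointers are NULL. */
	mallctl("thread.idle", NULL, NULL, NULL, 0);
}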
/******************************************************************************/
static int
......@@ -1977,7 +2464,8 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
READONLY();
if (tcaches_create(tsd, &tcache_ind)) {
VERIFY_READ(unsigned);
if (tcaches_create(tsd, b0get(), &tcache_ind)) {
ret = EFAULT;
goto label_return;
}
......@@ -1995,12 +2483,7 @@ tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
WRITEONLY();
tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned);
if (tcache_ind == UINT_MAX) {
ret = EFAULT;
goto label_return;
}
ASSURED_WRITE(tcache_ind, unsigned);
tcaches_flush(tsd, tcache_ind);
ret = 0;
......@@ -2015,12 +2498,7 @@ tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
WRITEONLY();
tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned);
if (tcache_ind == UINT_MAX) {
ret = EFAULT;
goto label_return;
}
ASSURED_WRITE(tcache_ind, unsigned);
tcaches_destroy(tsd, tcache_ind);
ret = 0;
......@@ -2105,8 +2583,7 @@ arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
unsigned arena_ind;
READONLY();
WRITEONLY();
NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(arena_ind, 1);
arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
......@@ -2121,8 +2598,7 @@ arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
unsigned arena_ind;
READONLY();
WRITEONLY();
NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(arena_ind, 1);
arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
......@@ -2137,8 +2613,7 @@ arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
arena_t **arena) {
int ret;
READONLY();
WRITEONLY();
NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(*arena_ind, 1);
*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
......@@ -2211,6 +2686,8 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
arena_t *arena;
ctl_arena_t *ctl_darena, *ctl_arena;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
newp, newlen, &arena_ind, &arena);
if (ret != 0) {
......@@ -2241,6 +2718,8 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
assert(ret == 0);
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
......@@ -2306,8 +2785,40 @@ label_return:
}
static int
arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
arena_i_oversize_threshold_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
MIB_UNSIGNED(arena_ind, 1);
arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = atomic_load_zu(
&arena->pa_shard.pac.oversize_threshold, ATOMIC_RELAXED);
READ(oldval, size_t);
}
if (newp != NULL) {
if (newlen != sizeof(size_t)) {
ret = EINVAL;
goto label_return;
}
atomic_store_zu(&arena->pa_shard.pac.oversize_threshold,
*(size_t *)newp, ATOMIC_RELAXED);
}
ret = 0;
label_return:
return ret;
}
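A usage sketch for the new per-arena handler above (the mallctl path "arena.<i>.oversize_threshold" is inferred from the handler name; arena index 3 is arbitrary):

#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Route allocations of 16 MiB and up in arena 3 through the oversize path. */
static int
set_arena3_oversize_threshold(void) {
	size_t threshold = (size_t)16 << 20;
	return mallctl("arena.3.oversize_threshold", NULL, NULL, &threshold,
	    sizeof(threshold));
}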
static int
arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
int ret;
unsigned arena_ind;
arena_t *arena;
......@@ -2318,10 +2829,10 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EFAULT;
goto label_return;
}
extent_state_t state = dirty ? extent_state_dirty : extent_state_muzzy;
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
arena_muzzy_decay_ms_get(arena);
size_t oldval = arena_decay_ms_get(arena, state);
READ(oldval, ssize_t);
}
if (newp != NULL) {
......@@ -2340,9 +2851,9 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
}
if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
*(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
arena, *(ssize_t *)newp)) {
if (arena_decay_ms_set(tsd_tsdn(tsd), arena, state,
*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
......@@ -2385,15 +2896,18 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
old_extent_hooks =
(extent_hooks_t *)&extent_hooks_default;
(extent_hooks_t *)&ehooks_default_extent_hooks;
READ(old_extent_hooks, extent_hooks_t *);
if (newp != NULL) {
/* Initialize a new arena as a side effect. */
extent_hooks_t *new_extent_hooks
JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_extent_hooks, extent_hooks_t *);
arena_config_t config = arena_config_default;
config.extent_hooks = new_extent_hooks;
arena = arena_init(tsd_tsdn(tsd), arena_ind,
new_extent_hooks);
&config);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
......@@ -2404,11 +2918,13 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
extent_hooks_t *new_extent_hooks
JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_extent_hooks, extent_hooks_t *);
old_extent_hooks = extent_hooks_set(tsd, arena,
new_extent_hooks);
old_extent_hooks = arena_set_extent_hooks(tsd,
arena, new_extent_hooks);
READ(old_extent_hooks, extent_hooks_t *);
} else {
old_extent_hooks = extent_hooks_get(arena);
old_extent_hooks =
ehooks_get_extent_hooks_ptr(
arena_get_ehooks(arena));
READ(old_extent_hooks, extent_hooks_t *);
}
}
......@@ -2493,10 +3009,6 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
if (*oldlenp != sizeof(unsigned)) {
ret = EINVAL;
goto label_return;
}
narenas = ctl_arenas->narenas;
READ(narenas, unsigned);
......@@ -2582,14 +3094,14 @@ static int
arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
extent_hooks_t *extent_hooks;
unsigned arena_ind;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
extent_hooks = (extent_hooks_t *)&extent_hooks_default;
WRITE(extent_hooks, extent_hooks_t *);
if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
VERIFY_READ(unsigned);
arena_config_t config = arena_config_default;
WRITE(config.extent_hooks, extent_hooks_t *);
if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
ret = EAGAIN;
goto label_return;
}
......@@ -2601,6 +3113,30 @@ label_return:
return ret;
}
static int
experimental_arenas_create_ext_ctl(tsd_t *tsd,
const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
arena_config_t config = arena_config_default;
VERIFY_READ(unsigned);
WRITE(config, arena_config_t);
if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
ret = EAGAIN;
goto label_return;
}
READ(arena_ind, unsigned);
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
static int
arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
......@@ -2608,20 +3144,22 @@ arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
int ret;
unsigned arena_ind;
void *ptr;
extent_t *extent;
edata_t *edata;
arena_t *arena;
ptr = NULL;
ret = EINVAL;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(ptr, void *);
extent = iealloc(tsd_tsdn(tsd), ptr);
if (extent == NULL)
edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr);
if (edata == NULL) {
goto label_return;
}
arena = extent_arena_get(extent);
if (arena == NULL)
arena = arena_get_from_edata(edata);
if (arena == NULL) {
goto label_return;
}
arena_ind = arena_ind_get(arena);
READ(arena_ind, unsigned);
......@@ -2646,6 +3184,10 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
}
if (newp != NULL) {
if (!opt_prof) {
ret = ENOENT;
goto label_return;
}
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
......@@ -2653,7 +3195,8 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
*(bool *)newp);
} else {
oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
oldval = opt_prof ? prof_thread_active_init_get(tsd_tsdn(tsd)) :
false;
}
READ(oldval, bool);
......@@ -2669,7 +3212,8 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
bool oldval;
if (!config_prof) {
return ENOENT;
ret = ENOENT;
goto label_return;
}
if (newp != NULL) {
......@@ -2677,9 +3221,20 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EINVAL;
goto label_return;
}
oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
bool val = *(bool *)newp;
if (!opt_prof) {
if (val) {
ret = ENOENT;
goto label_return;
} else {
/* No change needed (already off). */
oldval = false;
}
} else {
oldval = prof_active_set(tsd_tsdn(tsd), val);
}
} else {
oldval = prof_active_get(tsd_tsdn(tsd));
oldval = opt_prof ? prof_active_get(tsd_tsdn(tsd)) : false;
}
READ(oldval, bool);
......@@ -2694,7 +3249,7 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
int ret;
const char *filename = NULL;
if (!config_prof) {
if (!config_prof || !opt_prof) {
return ENOENT;
}
......@@ -2722,13 +3277,17 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
}
if (newp != NULL) {
if (!opt_prof) {
ret = ENOENT;
goto label_return;
}
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
} else {
oldval = prof_gdump_get(tsd_tsdn(tsd));
oldval = opt_prof ? prof_gdump_get(tsd_tsdn(tsd)) : false;
}
READ(oldval, bool);
......@@ -2737,13 +3296,33 @@ label_return:
return ret;
}
static int
prof_prefix_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const char *prefix = NULL;
if (!config_prof || !opt_prof) {
return ENOENT;
}
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITEONLY();
WRITE(prefix, const char *);
ret = prof_prefix_set(tsd_tsdn(tsd), prefix) ? EFAULT : 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
size_t lg_sample = lg_prof_sample;
if (!config_prof) {
if (!config_prof || !opt_prof) {
return ENOENT;
}
......@@ -2770,7 +3349,7 @@ prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
const char *filename = NULL;
if (!config_prof) {
if (!config_prof || !opt_prof) {
return ENOENT;
}
......@@ -2790,7 +3369,7 @@ label_return:
static int
prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
if (!config_prof) {
if (!config_prof || !opt_prof) {
return ENOENT;
}
......@@ -2801,6 +3380,87 @@ prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
return 0;
}
static int
experimental_hooks_prof_backtrace_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (oldp == NULL && newp == NULL) {
ret = EINVAL;
goto label_return;
}
if (oldp != NULL) {
prof_backtrace_hook_t old_hook =
prof_backtrace_hook_get();
READ(old_hook, prof_backtrace_hook_t);
}
if (newp != NULL) {
if (!opt_prof) {
ret = ENOENT;
goto label_return;
}
prof_backtrace_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_hook, prof_backtrace_hook_t);
if (new_hook == NULL) {
ret = EINVAL;
goto label_return;
}
prof_backtrace_hook_set(new_hook);
}
ret = 0;
label_return:
return ret;
}
static int
experimental_hooks_prof_dump_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (oldp == NULL && newp == NULL) {
ret = EINVAL;
goto label_return;
}
if (oldp != NULL) {
prof_dump_hook_t old_hook =
prof_dump_hook_get();
READ(old_hook, prof_dump_hook_t);
}
if (newp != NULL) {
if (!opt_prof) {
ret = ENOENT;
goto label_return;
}
prof_dump_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_hook, prof_dump_hook_t);
prof_dump_hook_set(new_hook);
}
ret = 0;
label_return:
return ret;
}
/* For integration test purposes only. No plan to move out of experimental. */
static int
experimental_hooks_safety_check_abort_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
WRITEONLY();
if (newp != NULL) {
if (newlen != sizeof(safety_check_abort_hook_t)) {
ret = EINVAL;
goto label_return;
}
safety_check_abort_hook_t hook JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(hook, safety_check_abort_hook_t);
safety_check_set_abort(hook);
}
ret = 0;
label_return:
return ret;
}
/******************************************************************************/
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
......@@ -2818,6 +3478,9 @@ CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
CTL_RO_CGEN(config_stats, stats_zero_reallocs,
atomic_load_zu(&zero_realloc_count, ATOMIC_RELAXED), size_t)
CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
ssize_t)
......@@ -2830,55 +3493,61 @@ CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
size_t)
arenas_i(mib[2])->astats->astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
size_t)
arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.retained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail,
ATOMIC_RELAXED),
size_t)
arenas_i(mib[2])->astats->astats.pa_shard_stats.edata_avail, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged),
uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_base,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
arenas_i(mib[2])->astats->astats.base,
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
ATOMIC_RELAXED), size_t)
arenas_i(mib[2])->astats->astats.metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
ATOMIC_RELAXED), size_t)
arenas_i(mib[2])->astats->astats.tcache_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_stashed_bytes,
arenas_i(mib[2])->astats->astats.tcache_stashed_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
arenas_i(mib[2])->astats->astats.resident,
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.abandoned_vm,
atomic_load_zu(
&arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.abandoned_vm,
ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_sec_bytes,
arenas_i(mib[2])->astats->secstats.bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
......@@ -2892,27 +3561,21 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills,
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes,
arenas_i(mib[2])->astats->nflushes_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
ATOMIC_RELAXED), size_t)
arenas_i(mib[2])->astats->astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
arenas_i(mib[2])->astats->astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.nrequests_large), uint64_t)
arenas_i(mib[2])->astats->astats.nrequests_large, uint64_t)
/*
* Note: "nmalloc_large" here instead of "nfills" in the read. This is
* intentional (large has no batch fill).
*/
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.nflushes_large), uint64_t)
arenas_i(mib[2])->astats->astats.nflushes_large, uint64_t)
/* Lock profiling related APIs below. */
#define RO_MUTEX_CTL_GEN(n, l) \
......@@ -2972,9 +3635,13 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
}
if (config_prof && opt_prof) {
MUTEX_PROF_RESET(bt2gctx_mtx);
MUTEX_PROF_RESET(tdatas_mtx);
MUTEX_PROF_RESET(prof_dump_mtx);
MUTEX_PROF_RESET(prof_recent_alloc_mtx);
MUTEX_PROF_RESET(prof_recent_dump_mtx);
MUTEX_PROF_RESET(prof_stats_mtx);
}
/* Per arena mutexes. */
unsigned n = narenas_total_get();
......@@ -2984,18 +3651,18 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
continue;
}
MUTEX_PROF_RESET(arena->large_mtx);
MUTEX_PROF_RESET(arena->extent_avail_mtx);
MUTEX_PROF_RESET(arena->extents_dirty.mtx);
MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
MUTEX_PROF_RESET(arena->extents_retained.mtx);
MUTEX_PROF_RESET(arena->decay_dirty.mtx);
MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
MUTEX_PROF_RESET(arena->pa_shard.edata_cache.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_dirty.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_muzzy.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_retained.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.decay_dirty.mtx);
MUTEX_PROF_RESET(arena->pa_shard.pac.decay_muzzy.mtx);
MUTEX_PROF_RESET(arena->tcache_ql_mtx);
MUTEX_PROF_RESET(arena->base->mtx);
for (szind_t i = 0; i < SC_NBINS; i++) {
for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
bin_t *bin = &arena->bins[i].bin_shards[j];
for (szind_t j = 0; j < SC_NBINS; j++) {
for (unsigned k = 0; k < bin_infos[j].n_shards; k++) {
bin_t *bin = arena_get_bin(arena, j, k);
MUTEX_PROF_RESET(bin->lock);
}
}
......@@ -3005,25 +3672,25 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curregs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nfills, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curslabs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs,
arenas_i(mib[2])->astats->bstats[mib[4]].nonfull_slabs, size_t)
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nonfull_slabs, size_t)
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
......@@ -3035,13 +3702,13 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
ctl_arena_stats_read_u64(
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
ctl_arena_stats_read_u64(
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
ctl_arena_stats_read_u64(
locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
......@@ -3056,29 +3723,17 @@ stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
atomic_load_zu(
&arenas_i(mib[2])->astats->estats[mib[4]].ndirty,
ATOMIC_RELAXED), size_t);
arenas_i(mib[2])->astats->estats[mib[4]].ndirty, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
atomic_load_zu(
&arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy,
ATOMIC_RELAXED), size_t);
arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
atomic_load_zu(
&arenas_i(mib[2])->astats->estats[mib[4]].nretained,
ATOMIC_RELAXED), size_t);
arenas_i(mib[2])->astats->estats[mib[4]].nretained, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
atomic_load_zu(
&arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes,
ATOMIC_RELAXED), size_t);
arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
atomic_load_zu(
&arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes,
ATOMIC_RELAXED), size_t);
arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
atomic_load_zu(
&arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes,
ATOMIC_RELAXED), size_t);
arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, size_t);
static const ctl_named_node_t *
stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
......@@ -3089,6 +3744,82 @@ stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
return super_stats_arenas_i_extents_j_node;
}
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurge_passes,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurge_passes, uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurges,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurges, uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nhugifies,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.nhugifies, uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_ndehugifies,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.ndehugifies, uint64_t);
/* Full, nonhuge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].npageslabs,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].ndirty, size_t);
/* Full, huge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].npageslabs,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].ndirty, size_t);
/* Empty, nonhuge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].npageslabs,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].ndirty, size_t);
/* Empty, huge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].npageslabs,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].ndirty, size_t);
/* Nonfull, nonhuge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].npageslabs,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].nactive,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].ndirty,
size_t);
/* Nonfull, huge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].npageslabs,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].nactive,
size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].ndirty,
size_t);
static const ctl_named_node_t *
stats_arenas_i_hpa_shard_nonfull_slabs_j_index(tsdn_t *tsdn, const size_t *mib,
size_t miblen, size_t j) {
if (j >= PSSET_NPSIZES) {
return NULL;
}
return super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node;
}
static bool
ctl_arenas_i_verify(size_t i) {
size_t a = arenas_i2a_impl(i, true, true);
......@@ -3161,6 +3892,32 @@ label_return:
return ret;
}
static int
experimental_thread_activity_callback_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (!config_stats) {
return ENOENT;
}
activity_callback_thunk_t t_old = tsd_activity_callback_thunk_get(tsd);
READ(t_old, activity_callback_thunk_t);
if (newp != NULL) {
/*
* This initialization is unnecessary. If it's omitted, though,
* clang gets confused and warns on the subsequent use of t_new.
*/
activity_callback_thunk_t t_new = {NULL, NULL};
WRITE(t_new, activity_callback_thunk_t);
tsd_activity_callback_thunk_set(tsd, t_new);
}
ret = 0;
label_return:
return ret;
}
/*
* Output six memory utilization entries for an input pointer, the first one of
* type (void *) and the remaining five of type size_t, describing the following
......@@ -3178,7 +3935,8 @@ label_return:
* otherwise their values are undefined.
*
* This API is mainly intended for small class allocations, where extents are
* used as slab.
* used as slab. Note that if the bin the extent belongs to is completely
* full, "(a)" will be NULL.
*
* In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)"
* will be zero (if stats are enabled; otherwise undefined). The other three
......@@ -3232,11 +3990,11 @@ experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
assert(sizeof(extent_util_stats_verbose_t)
assert(sizeof(inspect_extent_util_stats_verbose_t)
== sizeof(void *) + sizeof(size_t) * 5);
if (oldp == NULL || oldlenp == NULL
|| *oldlenp != sizeof(extent_util_stats_verbose_t)
|| *oldlenp != sizeof(inspect_extent_util_stats_verbose_t)
|| newp == NULL) {
ret = EINVAL;
goto label_return;
......@@ -3244,9 +4002,9 @@ experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
void *ptr = NULL;
WRITE(ptr, void *);
extent_util_stats_verbose_t *util_stats
= (extent_util_stats_verbose_t *)oldp;
extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
inspect_extent_util_stats_verbose_t *util_stats
= (inspect_extent_util_stats_verbose_t *)oldp;
inspect_extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
&util_stats->nfree, &util_stats->nregs, &util_stats->size,
&util_stats->bin_nfree, &util_stats->bin_nregs,
&util_stats->slabcur_addr);
......@@ -3357,21 +4115,22 @@ experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
assert(sizeof(extent_util_stats_t) == sizeof(size_t) * 3);
assert(sizeof(inspect_extent_util_stats_t) == sizeof(size_t) * 3);
const size_t len = newlen / sizeof(const void *);
if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0
|| newlen != len * sizeof(const void *)
|| *oldlenp != len * sizeof(extent_util_stats_t)) {
|| *oldlenp != len * sizeof(inspect_extent_util_stats_t)) {
ret = EINVAL;
goto label_return;
}
void **ptrs = (void **)newp;
extent_util_stats_t *util_stats = (extent_util_stats_t *)oldp;
inspect_extent_util_stats_t *util_stats =
(inspect_extent_util_stats_t *)oldp;
size_t i;
for (i = 0; i < len; ++i) {
extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
inspect_extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
&util_stats[i].nfree, &util_stats[i].nregs,
&util_stats[i].size);
}
......@@ -3420,7 +4179,7 @@ experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib,
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \
defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER)
/* Expose the underlying counter for fast read. */
pactivep = (size_t *)&(arena->nactive.repr);
pactivep = (size_t *)&(arena->pa_shard.nactive.repr);
READ(pactivep, size_t *);
ret = 0;
#else
......@@ -3433,3 +4192,223 @@ label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
static int
experimental_prof_recent_alloc_max_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (!(config_prof && opt_prof)) {
ret = ENOENT;
goto label_return;
}
ssize_t old_max;
if (newp != NULL) {
if (newlen != sizeof(ssize_t)) {
ret = EINVAL;
goto label_return;
}
ssize_t max = *(ssize_t *)newp;
if (max < -1) {
ret = EINVAL;
goto label_return;
}
old_max = prof_recent_alloc_max_ctl_write(tsd, max);
} else {
old_max = prof_recent_alloc_max_ctl_read();
}
READ(old_max, ssize_t);
ret = 0;
label_return:
return ret;
}
typedef struct write_cb_packet_s write_cb_packet_t;
struct write_cb_packet_s {
write_cb_t *write_cb;
void *cbopaque;
};
static int
experimental_prof_recent_alloc_dump_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (!(config_prof && opt_prof)) {
ret = ENOENT;
goto label_return;
}
assert(sizeof(write_cb_packet_t) == sizeof(void *) * 2);
WRITEONLY();
write_cb_packet_t write_cb_packet;
ASSURED_WRITE(write_cb_packet, write_cb_packet_t);
prof_recent_alloc_dump(tsd, write_cb_packet.write_cb,
write_cb_packet.cbopaque);
ret = 0;
label_return:
return ret;
}
typedef struct batch_alloc_packet_s batch_alloc_packet_t;
struct batch_alloc_packet_s {
void **ptrs;
size_t num;
size_t size;
int flags;
};
static int
experimental_batch_alloc_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
VERIFY_READ(size_t);
batch_alloc_packet_t batch_alloc_packet;
ASSURED_WRITE(batch_alloc_packet, batch_alloc_packet_t);
size_t filled = batch_alloc(batch_alloc_packet.ptrs,
batch_alloc_packet.num, batch_alloc_packet.size,
batch_alloc_packet.flags);
READ(filled, size_t);
ret = 0;
label_return:
return ret;
}
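A caller-side sketch for the handler above (the mallctl name "experimental.batch_alloc" is inferred from the handler; a caller outside this file must declare a struct whose layout matches batch_alloc_packet_t exactly):

#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Mirror of batch_alloc_packet_t; member order and types must match. */
typedef struct {
	void **ptrs;
	size_t num;
	size_t size;
	int flags;
} batch_alloc_request_t;

/* Fill up to n pointers with 128-byte objects; returns the count filled. */
static size_t
batch_alloc_128(void **ptrs, size_t n) {
	batch_alloc_request_t req = {ptrs, n, 128, 0 /* default flags */};
	size_t filled = 0;
	size_t sz = sizeof(filled);
	if (mallctl("experimental.batch_alloc", &filled, &sz, &req,
	    sizeof(req)) != 0) {
		return 0;
	}
	return filled;
}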
static int
prof_stats_bins_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned binind;
prof_stats_t stats;
if (!(config_prof && opt_prof && opt_prof_stats)) {
ret = ENOENT;
goto label_return;
}
READONLY();
MIB_UNSIGNED(binind, 3);
if (binind >= SC_NBINS) {
ret = EINVAL;
goto label_return;
}
prof_stats_get_live(tsd, (szind_t)binind, &stats);
READ(stats, prof_stats_t);
ret = 0;
label_return:
return ret;
}
static int
prof_stats_bins_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned binind;
prof_stats_t stats;
if (!(config_prof && opt_prof && opt_prof_stats)) {
ret = ENOENT;
goto label_return;
}
READONLY();
MIB_UNSIGNED(binind, 3);
if (binind >= SC_NBINS) {
ret = EINVAL;
goto label_return;
}
prof_stats_get_accum(tsd, (szind_t)binind, &stats);
READ(stats, prof_stats_t);
ret = 0;
label_return:
return ret;
}
static const ctl_named_node_t *
prof_stats_bins_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t i) {
if (!(config_prof && opt_prof && opt_prof_stats)) {
return NULL;
}
if (i >= SC_NBINS) {
return NULL;
}
return super_prof_stats_bins_i_node;
}
static int
prof_stats_lextents_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned lextent_ind;
prof_stats_t stats;
if (!(config_prof && opt_prof && opt_prof_stats)) {
ret = ENOENT;
goto label_return;
}
READONLY();
MIB_UNSIGNED(lextent_ind, 3);
if (lextent_ind >= SC_NSIZES - SC_NBINS) {
ret = EINVAL;
goto label_return;
}
prof_stats_get_live(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
READ(stats, prof_stats_t);
ret = 0;
label_return:
return ret;
}
static int
prof_stats_lextents_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned lextent_ind;
prof_stats_t stats;
if (!(config_prof && opt_prof && opt_prof_stats)) {
ret = ENOENT;
goto label_return;
}
READONLY();
MIB_UNSIGNED(lextent_ind, 3);
if (lextent_ind >= SC_NSIZES - SC_NBINS) {
ret = EINVAL;
goto label_return;
}
prof_stats_get_accum(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
READ(stats, prof_stats_t);
ret = 0;
label_return:
return ret;
}
static const ctl_named_node_t *
prof_stats_lextents_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t i) {
if (!(config_prof && opt_prof && opt_prof_stats)) {
return NULL;
}
if (i >= SC_NSIZES - SC_NBINS) {
return NULL;
}
return super_prof_stats_lextents_i_node;
}
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/decay.h"
static const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y) \
h,
SMOOTHSTEP
#undef STEP
};
/*
* Generate a new deadline that is uniformly random within the next epoch after
* the current one.
*/
void
decay_deadline_init(decay_t *decay) {
nstime_copy(&decay->deadline, &decay->epoch);
nstime_add(&decay->deadline, &decay->interval);
if (decay_ms_read(decay) > 0) {
nstime_t jitter;
nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
nstime_ns(&decay->interval)));
nstime_add(&decay->deadline, &jitter);
}
}
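/*
 * Illustrative example (not part of the upstream source): with a decay_ms of
 * 10000 (the usual dirty-decay default) and SMOOTHSTEP_NSTEPS assumed to be
 * 200 (the stock smoothstep table), decay_reinit() computes
 *
 *   interval = 10000 ms / 200 = 50 ms
 *
 * and each call here picks
 *
 *   deadline = epoch + 50 ms + jitter,  jitter uniform in [0 ms, 50 ms)
 *
 * i.e. the deadline lands uniformly within the epoch after the current one,
 * so deadlines of different decay_t instances do not all line up.
 */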
void
decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) {
atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
if (decay_ms > 0) {
nstime_init(&decay->interval, (uint64_t)decay_ms *
KQU(1000000));
nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
}
nstime_copy(&decay->epoch, cur_time);
decay->jitter_state = (uint64_t)(uintptr_t)decay;
decay_deadline_init(decay);
decay->nunpurged = 0;
memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}
bool
decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) {
if (config_debug) {
for (size_t i = 0; i < sizeof(decay_t); i++) {
assert(((char *)decay)[i] == 0);
}
decay->ceil_npages = 0;
}
if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
malloc_mutex_rank_exclusive)) {
return true;
}
decay->purging = false;
decay_reinit(decay, cur_time, decay_ms);
return false;
}
bool
decay_ms_valid(ssize_t decay_ms) {
if (decay_ms < -1) {
return false;
}
if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
KQU(1000)) {
return true;
}
return false;
}
static void
decay_maybe_update_time(decay_t *decay, nstime_t *new_time) {
if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch,
new_time) > 0)) {
/*
* Time went backwards. Move the epoch back in time and
* generate a new deadline, with the expectation that time
* typically flows forward for long enough periods of time that
* epochs complete. Unfortunately, this strategy is susceptible
* to clock jitter triggering premature epoch advances, but
* clock jitter estimation and compensation isn't feasible here
* because calls into this code are event-driven.
*/
nstime_copy(&decay->epoch, new_time);
decay_deadline_init(decay);
} else {
/* Verify that time does not go backwards. */
assert(nstime_compare(&decay->epoch, new_time) <= 0);
}
}
static size_t
decay_backlog_npages_limit(const decay_t *decay) {
/*
* For each element of decay_backlog, multiply by the corresponding
* fixed-point smoothstep decay factor. Sum the products, then divide
* to round down to the nearest whole number of pages.
*/
uint64_t sum = 0;
for (unsigned i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
sum += decay->backlog[i] * h_steps[i];
}
size_t npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
return npages_limit_backlog;
}
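/*
 * Worked example (illustrative; assumes the stock smoothstep table, in which
 * h_steps[SMOOTHSTEP_NSTEPS - 1] == (1 << SMOOTHSTEP_BFP), i.e. 1.0 in fixed
 * point): 1000 pages recorded in the newest backlog slot contribute roughly
 * 1000 * 1.0 = 1000 pages to the limit (none of them need purging yet); the
 * same 1000 pages aged to the middle slot contribute about 1000 * 0.5 = 500;
 * once they reach slot 0 they contribute ~0, meaning they should all have
 * been purged by then.
 */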
/*
* Update backlog, assuming that 'nadvance_u64' time intervals have passed.
* The oldest 'nadvance_u64' records are dropped and 'current_npages' is
* placed as the newest record.
*/
static void
decay_backlog_update(decay_t *decay, uint64_t nadvance_u64,
size_t current_npages) {
if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
sizeof(size_t));
} else {
size_t nadvance_z = (size_t)nadvance_u64;
assert((uint64_t)nadvance_z == nadvance_u64);
memmove(decay->backlog, &decay->backlog[nadvance_z],
(SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
if (nadvance_z > 1) {
memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
}
}
size_t npages_delta = (current_npages > decay->nunpurged) ?
current_npages - decay->nunpurged : 0;
decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
if (config_debug) {
if (current_npages > decay->ceil_npages) {
decay->ceil_npages = current_npages;
}
size_t npages_limit = decay_backlog_npages_limit(decay);
assert(decay->ceil_npages >= npages_limit);
if (decay->ceil_npages > npages_limit) {
decay->ceil_npages = npages_limit;
}
}
}
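/*
 * Example of the shift above (illustrative): with nadvance_u64 == 3,
 * backlog[0 .. NSTEPS-4] receive the old backlog[3 .. NSTEPS-1], the two
 * slots backlog[NSTEPS-3] and backlog[NSTEPS-2] are zeroed (epochs that
 * passed without a recorded delta), and backlog[NSTEPS-1] gets the fresh
 * npages_delta (NSTEPS == SMOOTHSTEP_NSTEPS).
 */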
static inline bool
decay_deadline_reached(const decay_t *decay, const nstime_t *time) {
return (nstime_compare(&decay->deadline, time) <= 0);
}
uint64_t
decay_npages_purge_in(decay_t *decay, nstime_t *time, size_t npages_new) {
uint64_t decay_interval_ns = decay_epoch_duration_ns(decay);
size_t n_epoch = (size_t)(nstime_ns(time) / decay_interval_ns);
uint64_t npages_purge;
if (n_epoch >= SMOOTHSTEP_NSTEPS) {
npages_purge = npages_new;
} else {
uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
assert(h_steps_max >=
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npages_purge = npages_new * (h_steps_max -
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npages_purge >>= SMOOTHSTEP_BFP;
}
return npages_purge;
}
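/*
 * Illustrative boundary cases: with n_epoch == 0 the factor is
 * h_steps_max - h_steps[SMOOTHSTEP_NSTEPS - 1] == 0, so none of npages_new is
 * expected to be purged within the current epoch; with n_epoch around
 * SMOOTHSTEP_NSTEPS / 2 roughly half of npages_new is; and once
 * n_epoch >= SMOOTHSTEP_NSTEPS the whole of npages_new is reported as
 * purgeable.
 */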
bool
decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
size_t npages_current) {
/* Handle possible non-monotonicity of time. */
decay_maybe_update_time(decay, new_time);
if (!decay_deadline_reached(decay, new_time)) {
return false;
}
nstime_t delta;
nstime_copy(&delta, new_time);
nstime_subtract(&delta, &decay->epoch);
uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
assert(nadvance_u64 > 0);
/* Add nadvance_u64 decay intervals to epoch. */
nstime_copy(&delta, &decay->interval);
nstime_imultiply(&delta, nadvance_u64);
nstime_add(&decay->epoch, &delta);
/* Set a new deadline. */
decay_deadline_init(decay);
/* Update the backlog. */
decay_backlog_update(decay, nadvance_u64, npages_current);
decay->npages_limit = decay_backlog_npages_limit(decay);
decay->nunpurged = (decay->npages_limit > npages_current) ?
decay->npages_limit : npages_current;
return true;
}
/*
* Calculate how many pages should be purged after 'interval'.
*
* First, calculate how many pages should remain at the moment, then subtract
* the number of pages that should remain after 'interval'. The difference is
* how many pages should be purged until then.
*
* The number of pages that should remain at a specific moment is calculated
* like this: pages(now) = sum(backlog[i] * h_steps[i]). After 'interval'
* passes, the backlog would shift 'interval' positions to the left and the
* sigmoid curve would be applied starting with backlog[interval].
*
* The implementation doesn't directly map to the description, but it's
* essentially the same calculation, optimized to avoid iterating over
* [interval..SMOOTHSTEP_NSTEPS) twice.
*/
static inline size_t
decay_npurge_after_interval(decay_t *decay, size_t interval) {
size_t i;
uint64_t sum = 0;
for (i = 0; i < interval; i++) {
sum += decay->backlog[i] * h_steps[i];
}
for (; i < SMOOTHSTEP_NSTEPS; i++) {
sum += decay->backlog[i] *
(h_steps[i] - h_steps[i - interval]);
}
return (size_t)(sum >> SMOOTHSTEP_BFP);
}
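/*
 * Derivation of the two loops above (illustrative):
 *
 *   npurge(interval) = pages(now) - pages(now + interval)
 *                    = sum_{i=0}^{N-1} b[i]*h[i]
 *                      - sum_{i=interval}^{N-1} b[i]*h[i-interval]
 *                    = sum_{i<interval} b[i]*h[i]
 *                      + sum_{i>=interval} b[i]*(h[i] - h[i-interval])
 *
 * with b == backlog, h == h_steps and N == SMOOTHSTEP_NSTEPS. The second sum
 * uses the fact that after 'interval' epochs b[i] moves to slot i - interval,
 * while entries with i < interval fall off and are purged in full.
 */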
uint64_t
decay_ns_until_purge(decay_t *decay, size_t npages_current,
uint64_t npages_threshold) {
if (!decay_gradually(decay)) {
return DECAY_UNBOUNDED_TIME_TO_PURGE;
}
uint64_t decay_interval_ns = decay_epoch_duration_ns(decay);
assert(decay_interval_ns > 0);
if (npages_current == 0) {
unsigned i;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
if (decay->backlog[i] > 0) {
break;
}
}
if (i == SMOOTHSTEP_NSTEPS) {
/* No dirty pages recorded. Sleep indefinitely. */
return DECAY_UNBOUNDED_TIME_TO_PURGE;
}
}
if (npages_current <= npages_threshold) {
/* Use max interval. */
return decay_interval_ns * SMOOTHSTEP_NSTEPS;
}
/* A minimum of 2 intervals ensures the next epoch deadline is reached. */
size_t lb = 2;
size_t ub = SMOOTHSTEP_NSTEPS;
size_t npurge_lb, npurge_ub;
npurge_lb = decay_npurge_after_interval(decay, lb);
if (npurge_lb > npages_threshold) {
return decay_interval_ns * lb;
}
npurge_ub = decay_npurge_after_interval(decay, ub);
if (npurge_ub < npages_threshold) {
return decay_interval_ns * ub;
}
unsigned n_search = 0;
size_t target, npurge;
while ((npurge_lb + npages_threshold < npurge_ub) && (lb + 2 < ub)) {
target = (lb + ub) / 2;
npurge = decay_npurge_after_interval(decay, target);
if (npurge > npages_threshold) {
ub = target;
npurge_ub = npurge;
} else {
lb = target;
npurge_lb = npurge;
}
assert(n_search < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
++n_search;
}
return decay_interval_ns * (ub + lb) / 2;
}
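/*
 * In other words (illustrative summary): the bisection above finds
 * approximately the smallest number of whole epochs after which at least
 * npages_threshold pages become purgeable, and the caller can sleep for that
 * many decay intervals. Because ub - lb roughly halves each round, the loop
 * runs at most about lg(SMOOTHSTEP_NSTEPS) iterations, which the n_search
 * assert checks.
 */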
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/san.h"
bool
ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, unsigned ind,
bool delay_coalesce) {
if (malloc_mutex_init(&ecache->mtx, "extents", WITNESS_RANK_EXTENTS,
malloc_mutex_rank_exclusive)) {
return true;
}
ecache->state = state;
ecache->ind = ind;
ecache->delay_coalesce = delay_coalesce;
eset_init(&ecache->eset, state);
eset_init(&ecache->guarded_eset, state);
return false;
}
void
ecache_prefork(tsdn_t *tsdn, ecache_t *ecache) {
malloc_mutex_prefork(tsdn, &ecache->mtx);
}
void
ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache) {
malloc_mutex_postfork_parent(tsdn, &ecache->mtx);
}
void
ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache) {
malloc_mutex_postfork_child(tsdn, &ecache->mtx);
}
#define JEMALLOC_HASH_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
ph_gen(, edata_avail, edata_t, avail_link,
edata_esnead_comp)
ph_gen(, edata_heap, edata_t, heap_link, edata_snad_comp)
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
bool
edata_cache_init(edata_cache_t *edata_cache, base_t *base) {
edata_avail_new(&edata_cache->avail);
/*
* This is not strictly necessary, since the edata_cache_t is only
* created inside an arena, which is zeroed on creation. But this is
* handy as a safety measure.
*/
atomic_store_zu(&edata_cache->count, 0, ATOMIC_RELAXED);
if (malloc_mutex_init(&edata_cache->mtx, "edata_cache",
WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) {
return true;
}
edata_cache->base = base;
return false;
}
edata_t *
edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache) {
malloc_mutex_lock(tsdn, &edata_cache->mtx);
edata_t *edata = edata_avail_first(&edata_cache->avail);
if (edata == NULL) {
malloc_mutex_unlock(tsdn, &edata_cache->mtx);
return base_alloc_edata(tsdn, edata_cache->base);
}
edata_avail_remove(&edata_cache->avail, edata);
atomic_load_sub_store_zu(&edata_cache->count, 1);
malloc_mutex_unlock(tsdn, &edata_cache->mtx);
return edata;
}
void
edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata) {
malloc_mutex_lock(tsdn, &edata_cache->mtx);
edata_avail_insert(&edata_cache->avail, edata);
atomic_load_add_store_zu(&edata_cache->count, 1);
malloc_mutex_unlock(tsdn, &edata_cache->mtx);
}
void
edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache) {
malloc_mutex_prefork(tsdn, &edata_cache->mtx);
}
void
edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache) {
malloc_mutex_postfork_parent(tsdn, &edata_cache->mtx);
}
void
edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache) {
malloc_mutex_postfork_child(tsdn, &edata_cache->mtx);
}
void
edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback) {
edata_list_inactive_init(&ecs->list);
ecs->fallback = fallback;
ecs->disabled = false;
}
static void
edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn,
edata_cache_fast_t *ecs) {
edata_t *edata;
malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
edata = edata_avail_remove_first(&ecs->fallback->avail);
if (edata == NULL) {
break;
}
edata_list_inactive_append(&ecs->list, edata);
atomic_load_sub_store_zu(&ecs->fallback->count, 1);
}
malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
}
edata_t *
edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_EDATA_CACHE, 0);
if (ecs->disabled) {
assert(edata_list_inactive_first(&ecs->list) == NULL);
return edata_cache_get(tsdn, ecs->fallback);
}
edata_t *edata = edata_list_inactive_first(&ecs->list);
if (edata != NULL) {
edata_list_inactive_remove(&ecs->list, edata);
return edata;
}
/* Slow path; requires synchronization. */
edata_cache_fast_try_fill_from_fallback(tsdn, ecs);
edata = edata_list_inactive_first(&ecs->list);
if (edata != NULL) {
edata_list_inactive_remove(&ecs->list, edata);
} else {
/*
* Slowest path (fallback was also empty); allocate something
* new.
*/
edata = base_alloc_edata(tsdn, ecs->fallback->base);
}
return edata;
}
static void
edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
/*
* You could imagine smarter cache management policies (like
* only flushing down to some threshold in anticipation of
* future get requests). But just flushing everything provides
* a good opportunity to defrag too, and lets us share code between the
* flush and disable pathways.
*/
edata_t *edata;
size_t nflushed = 0;
malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
while ((edata = edata_list_inactive_first(&ecs->list)) != NULL) {
edata_list_inactive_remove(&ecs->list, edata);
edata_avail_insert(&ecs->fallback->avail, edata);
nflushed++;
}
atomic_load_add_store_zu(&ecs->fallback->count, nflushed);
malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
}
void
edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_EDATA_CACHE, 0);
if (ecs->disabled) {
assert(edata_list_inactive_first(&ecs->list) == NULL);
edata_cache_put(tsdn, ecs->fallback, edata);
return;
}
/*
* Prepend rather than append, to do LIFO ordering in the hopes of some
* cache locality.
*/
edata_list_inactive_prepend(&ecs->list, edata);
}
void
edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
edata_cache_fast_flush_all(tsdn, ecs);
ecs->disabled = true;
}
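/*
 * Usage sketch (illustrative only; not called anywhere in this file, and the
 * variable names are hypothetical):
 *
 *   edata_cache_t fallback;
 *   edata_cache_init(&fallback, base);
 *   edata_cache_fast_t ecs;
 *   edata_cache_fast_init(&ecs, &fallback);
 *
 *   edata_t *e = edata_cache_fast_get(tsdn, &ecs); // list -> fallback -> base
 *   // ... use e ...
 *   edata_cache_fast_put(tsdn, &ecs, e);           // LIFO prepend, no lock
 *   edata_cache_fast_disable(tsdn, &ecs);          // flush back to fallback
 */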
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/extent_mmap.h"
void
ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind) {
/* All other hooks are optional; this one is not. */
assert(extent_hooks->alloc != NULL);
ehooks->ind = ind;
ehooks_set_extent_hooks_ptr(ehooks, extent_hooks);
}
/*
* If the caller specifies (!*zero), it is still possible to receive zeroed
* memory, in which case *zero is toggled to true. arena_extent_alloc() takes
* advantage of this to avoid demanding zeroed extents, but taking advantage of
* them if they are returned.
*/
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
void *ret;
assert(size != 0);
assert(alignment != 0);
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
return ret;
}
/* mmap. */
if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
!= NULL) {
return ret;
}
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
return ret;
}
/* All strategies for allocation failed. */
return NULL;
}
void *
ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
arena_t *arena = arena_get(tsdn, arena_ind, false);
/* NULL arena indicates arena_create. */
assert(arena != NULL || alignment == HUGEPAGE);
dss_prec_t dss = (arena == NULL) ? dss_prec_disabled :
(dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED);
void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment,
zero, commit, dss);
if (have_madvise_huge && ret) {
pages_set_thp_state(ret, size);
}
return ret;
}
static void *
ehooks_default_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
return ehooks_default_alloc_impl(tsdn_fetch(), new_addr, size,
ALIGNMENT_CEILING(alignment, PAGE), zero, commit, arena_ind);
}
bool
ehooks_default_dalloc_impl(void *addr, size_t size) {
if (!have_dss || !extent_in_dss(addr)) {
return extent_dalloc_mmap(addr, size);
}
return true;
}
static bool
ehooks_default_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool committed, unsigned arena_ind) {
return ehooks_default_dalloc_impl(addr, size);
}
void
ehooks_default_destroy_impl(void *addr, size_t size) {
if (!have_dss || !extent_in_dss(addr)) {
pages_unmap(addr, size);
}
}
static void
ehooks_default_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool committed, unsigned arena_ind) {
ehooks_default_destroy_impl(addr, size);
}
bool
ehooks_default_commit_impl(void *addr, size_t offset, size_t length) {
return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
static bool
ehooks_default_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
return ehooks_default_commit_impl(addr, offset, length);
}
bool
ehooks_default_decommit_impl(void *addr, size_t offset, size_t length) {
return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
static bool
ehooks_default_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
return ehooks_default_decommit_impl(addr, offset, length);
}
#ifdef PAGES_CAN_PURGE_LAZY
bool
ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length) {
return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
static bool
ehooks_default_purge_lazy(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
assert(addr != NULL);
assert((offset & PAGE_MASK) == 0);
assert(length != 0);
assert((length & PAGE_MASK) == 0);
return ehooks_default_purge_lazy_impl(addr, offset, length);
}
#endif
#ifdef PAGES_CAN_PURGE_FORCED
bool
ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length) {
return pages_purge_forced((void *)((uintptr_t)addr +
(uintptr_t)offset), length);
}
static bool
ehooks_default_purge_forced(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t offset, size_t length, unsigned arena_ind) {
assert(addr != NULL);
assert((offset & PAGE_MASK) == 0);
assert(length != 0);
assert((length & PAGE_MASK) == 0);
return ehooks_default_purge_forced_impl(addr, offset, length);
}
#endif
bool
ehooks_default_split_impl() {
if (!maps_coalesce) {
/*
* Without retain, only whole regions can be purged (required by
* MEM_RELEASE on Windows) -- therefore disallow splitting. See
* comments in extent_head_no_merge().
*/
return !opt_retain;
}
return false;
}
static bool
ehooks_default_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
return ehooks_default_split_impl();
}
bool
ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b) {
assert(addr_a < addr_b);
/*
* For non-DSS cases --
* a) W/o maps_coalesce, merge is not always allowed (Windows):
* 1) w/o retain, never merge (first branch below).
* 2) with retain, only merge extents from the same VirtualAlloc
* region (in which case MEM_DECOMMIT is utilized for purging).
*
* b) With maps_coalesce, it's always possible to merge.
* 1) w/o retain, always allow merge (only about dirty / muzzy).
* 2) with retain, to preserve the SN / first-fit, merge is still
* disallowed if b is a head extent, i.e. no merging across
* different mmap regions.
*
* a2) and b2) are implemented in emap_try_acquire_edata_neighbor, and
* sanity checked in the second branch below.
*/
if (!maps_coalesce && !opt_retain) {
return true;
}
if (config_debug) {
edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global,
addr_a);
bool head_a = edata_is_head_get(a);
edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global,
addr_b);
bool head_b = edata_is_head_get(b);
emap_assert_mapped(tsdn, &arena_emap_global, a);
emap_assert_mapped(tsdn, &arena_emap_global, b);
assert(extent_neighbor_head_state_mergeable(head_a, head_b,
/* forward */ true));
}
if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
return true;
}
return false;
}
bool
ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
tsdn_t *tsdn = tsdn_fetch();
return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
}
void
ehooks_default_zero_impl(void *addr, size_t size) {
/*
* By default, we try to zero out memory using OS-provided demand-zeroed
* pages. If the user has specifically requested hugepages, though, we
* don't want to purge in the middle of a hugepage (which would break it
* up), so we act conservatively and use memset.
*/
bool needs_memset = true;
if (opt_thp != thp_mode_always) {
needs_memset = pages_purge_forced(addr, size);
}
if (needs_memset) {
memset(addr, 0, size);
}
}
void
ehooks_default_guard_impl(void *guard1, void *guard2) {
pages_mark_guards(guard1, guard2);
}
void
ehooks_default_unguard_impl(void *guard1, void *guard2) {
pages_unmark_guards(guard1, guard2);
}
const extent_hooks_t ehooks_default_extent_hooks = {
ehooks_default_alloc,
ehooks_default_dalloc,
ehooks_default_destroy,
ehooks_default_commit,
ehooks_default_decommit,
#ifdef PAGES_CAN_PURGE_LAZY
ehooks_default_purge_lazy,
#else
NULL,
#endif
#ifdef PAGES_CAN_PURGE_FORCED
ehooks_default_purge_forced,
#else
NULL,
#endif
ehooks_default_split,
ehooks_default_merge
};
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/emap.h"
enum emap_lock_result_e {
emap_lock_result_success,
emap_lock_result_failure,
emap_lock_result_no_extent
};
typedef enum emap_lock_result_e emap_lock_result_t;
bool
emap_init(emap_t *emap, base_t *base, bool zeroed) {
return rtree_new(&emap->rtree, base, zeroed);
}
void
emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_state_t state) {
witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE);
edata_state_set(edata, state);
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm1 = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
/* init_missing */ false);
assert(elm1 != NULL);
rtree_leaf_elm_t *elm2 = edata_size_get(edata) == PAGE ? NULL :
rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_last_get(edata), /* dependent */ true,
/* init_missing */ false);
rtree_leaf_elm_state_update(tsdn, &emap->rtree, elm1, elm2, state);
emap_assert_mapped(tsdn, emap, edata);
}
static inline edata_t *
emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_pai_t pai, extent_state_t expected_state, bool forward,
bool expanding) {
witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE);
assert(!edata_guarded_get(edata));
assert(!expanding || forward);
assert(!edata_state_in_transition(expected_state));
assert(expected_state == extent_state_dirty ||
expected_state == extent_state_muzzy ||
expected_state == extent_state_retained);
void *neighbor_addr = forward ? edata_past_get(edata) :
edata_before_get(edata);
/*
* This is subtle; the rtree code asserts that its input pointer is
* non-NULL, and this is a useful thing to check. But it's possible
* that edata corresponds to an address of (void *)PAGE (in practice,
* this has only been observed on FreeBSD when address-space
* randomization is on, but it could in principle happen anywhere). In
* this case, edata_before_get(edata) is NULL, triggering the assert.
*/
if (neighbor_addr == NULL) {
return NULL;
}
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
rtree_ctx, (uintptr_t)neighbor_addr, /* dependent*/ false,
/* init_missing */ false);
if (elm == NULL) {
return NULL;
}
rtree_contents_t neighbor_contents = rtree_leaf_elm_read(tsdn,
&emap->rtree, elm, /* dependent */ true);
if (!extent_can_acquire_neighbor(edata, neighbor_contents, pai,
expected_state, forward, expanding)) {
return NULL;
}
/* From this point, the neighbor edata can be safely acquired. */
edata_t *neighbor = neighbor_contents.edata;
assert(edata_state_get(neighbor) == expected_state);
emap_update_edata_state(tsdn, emap, neighbor, extent_state_merging);
if (expanding) {
extent_assert_can_expand(edata, neighbor);
} else {
extent_assert_can_coalesce(edata, neighbor);
}
return neighbor;
}
edata_t *
emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_pai_t pai, extent_state_t expected_state, bool forward) {
return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai,
expected_state, forward, /* expand */ false);
}
edata_t *
emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
edata_t *edata, extent_pai_t pai, extent_state_t expected_state) {
/* Try expanding forward. */
return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai,
expected_state, /* forward */ true, /* expand */ true);
}
void
emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_state_t new_state) {
assert(emap_edata_in_transition(tsdn, emap, edata));
assert(emap_edata_is_acquired(tsdn, emap, edata));
emap_update_edata_state(tsdn, emap, edata, new_state);
}
static bool
emap_rtree_leaf_elms_lookup(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
const edata_t *edata, bool dependent, bool init_missing,
rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
*r_elm_a = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata), dependent, init_missing);
if (!dependent && *r_elm_a == NULL) {
return true;
}
assert(*r_elm_a != NULL);
*r_elm_b = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_last_get(edata), dependent, init_missing);
if (!dependent && *r_elm_b == NULL) {
return true;
}
assert(*r_elm_b != NULL);
return false;
}
static void
emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm_a,
rtree_leaf_elm_t *elm_b, edata_t *edata, szind_t szind, bool slab) {
rtree_contents_t contents;
contents.edata = edata;
contents.metadata.szind = szind;
contents.metadata.slab = slab;
contents.metadata.is_head = (edata == NULL) ? false :
edata_is_head_get(edata);
contents.metadata.state = (edata == NULL) ? 0 : edata_state_get(edata);
rtree_leaf_elm_write(tsdn, &emap->rtree, elm_a, contents);
if (elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &emap->rtree, elm_b, contents);
}
}
bool
emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
szind_t szind, bool slab) {
assert(edata_state_get(edata) == extent_state_active);
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm_a, *elm_b;
bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
false, true, &elm_a, &elm_b);
if (err) {
return true;
}
assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a,
/* dependent */ false).edata == NULL);
assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b,
/* dependent */ false).edata == NULL);
emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, edata, szind, slab);
return false;
}
/* Invoked *after* emap_register_boundary. */
void
emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
szind_t szind) {
EMAP_DECLARE_RTREE_CTX;
assert(edata_slab_get(edata));
assert(edata_state_get(edata) == extent_state_active);
if (config_debug) {
/* Making sure the boundary is registered already. */
rtree_leaf_elm_t *elm_a, *elm_b;
bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx,
edata, /* dependent */ true, /* init_missing */ false,
&elm_a, &elm_b);
assert(!err);
rtree_contents_t contents_a, contents_b;
contents_a = rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a,
/* dependent */ true);
contents_b = rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b,
/* dependent */ true);
assert(contents_a.edata == edata && contents_b.edata == edata);
assert(contents_a.metadata.slab && contents_b.metadata.slab);
}
rtree_contents_t contents;
contents.edata = edata;
contents.metadata.szind = szind;
contents.metadata.slab = true;
contents.metadata.state = extent_state_active;
contents.metadata.is_head = false; /* Not allowed to access. */
assert(edata_size_get(edata) > (2 << LG_PAGE));
rtree_write_range(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata) + PAGE,
(uintptr_t)edata_last_get(edata) - PAGE, contents);
}
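/*
 * Illustrative example: for an 8-page slab at base B,
 * emap_register_boundary() writes the rtree elements for B and B + 7*PAGE
 * (head and last page), and the rtree_write_range() call above then fills
 * [B + PAGE, B + 6*PAGE], i.e. the six interior pages, so that a pointer into
 * any page of the slab maps back to this edata.
 */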
void
emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
/*
* The edata must be either in an acquired state, or protected by state
* based locks.
*/
if (!emap_edata_is_acquired(tsdn, emap, edata)) {
witness_assert_positive_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
}
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm_a, *elm_b;
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
true, false, &elm_a, &elm_b);
emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, NULL, SC_NSIZES,
false);
}
void
emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
EMAP_DECLARE_RTREE_CTX;
assert(edata_slab_get(edata));
if (edata_size_get(edata) > (2 << LG_PAGE)) {
rtree_clear_range(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata) + PAGE,
(uintptr_t)edata_last_get(edata) - PAGE);
}
}
void
emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
bool slab) {
EMAP_DECLARE_RTREE_CTX;
if (szind != SC_NSIZES) {
rtree_contents_t contents;
contents.edata = edata;
contents.metadata.szind = szind;
contents.metadata.slab = slab;
contents.metadata.is_head = edata_is_head_get(edata);
contents.metadata.state = edata_state_get(edata);
rtree_write(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_addr_get(edata), contents);
/*
* Recall that this is called only for active->inactive and
* inactive->active transitions (since only active extents have
* meaningful values for szind and slab). Active, non-slab
* extents only need to handle lookups at their head (on
* deallocation), so we don't bother filling in the end
* boundary.
*
* For slab extents, we do the end-mapping change. This still
* leaves the interior unmodified; an emap_register_interior
* call is coming in those cases, though.
*/
if (slab && edata_size_get(edata) > PAGE) {
uintptr_t key = (uintptr_t)edata_past_get(edata)
- (uintptr_t)PAGE;
rtree_write(tsdn, &emap->rtree, rtree_ctx, key,
contents);
}
}
}
bool
emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *edata, size_t size_a, edata_t *trail, size_t size_b) {
EMAP_DECLARE_RTREE_CTX;
/*
* We use incorrect constants for things like arena ind, zero, ranged,
* and commit state, and head status. This is a fake edata_t, used to
* facilitate a lookup.
*/
edata_t lead = {0};
edata_init(&lead, 0U, edata_addr_get(edata), size_a, false, 0, 0,
extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, &lead, false, true,
&prepare->lead_elm_a, &prepare->lead_elm_b);
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, false, true,
&prepare->trail_elm_a, &prepare->trail_elm_b);
if (prepare->lead_elm_a == NULL || prepare->lead_elm_b == NULL
|| prepare->trail_elm_a == NULL || prepare->trail_elm_b == NULL) {
return true;
}
return false;
}
void
emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, size_t size_a, edata_t *trail, size_t size_b) {
/*
* We should think about not writing to the lead leaf element. We can
* get into situations where a racing realloc-like call can disagree
* with a size lookup request. I think it's fine to declare that these
* situations are race bugs, but there's an argument to be made that for
* things like xallocx, a size lookup call should return either the old
* size or the new size, but not anything else.
*/
emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a,
prepare->lead_elm_b, lead, SC_NSIZES, /* slab */ false);
emap_rtree_write_acquired(tsdn, emap, prepare->trail_elm_a,
prepare->trail_elm_b, trail, SC_NSIZES, /* slab */ false);
}
void
emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, edata_t *trail) {
EMAP_DECLARE_RTREE_CTX;
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, lead, true, false,
&prepare->lead_elm_a, &prepare->lead_elm_b);
emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, true, false,
&prepare->trail_elm_a, &prepare->trail_elm_b);
}
void
emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, edata_t *trail) {
rtree_contents_t clear_contents;
clear_contents.edata = NULL;
clear_contents.metadata.szind = SC_NSIZES;
clear_contents.metadata.slab = false;
clear_contents.metadata.is_head = false;
clear_contents.metadata.state = (extent_state_t)0;
if (prepare->lead_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &emap->rtree,
prepare->lead_elm_b, clear_contents);
}
rtree_leaf_elm_t *merged_b;
if (prepare->trail_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &emap->rtree,
prepare->trail_elm_a, clear_contents);
merged_b = prepare->trail_elm_b;
} else {
merged_b = prepare->trail_elm_a;
}
emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a, merged_b,
lead, SC_NSIZES, false);
}
void
emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
EMAP_DECLARE_RTREE_CTX;
rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata));
assert(contents.edata == edata);
assert(contents.metadata.is_head == edata_is_head_get(edata));
assert(contents.metadata.state == edata_state_get(edata));
}
void
emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
emap_full_alloc_ctx_t context1 = {0};
emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_base_get(edata),
&context1);
assert(context1.edata == NULL);
emap_full_alloc_ctx_t context2 = {0};
emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_last_get(edata),
&context2);
assert(context2.edata == NULL);
}
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/eset.h"
#define ESET_NPSIZES (SC_NPSIZES + 1)
static void
eset_bin_init(eset_bin_t *bin) {
edata_heap_new(&bin->heap);
/*
* heap_min doesn't need initialization; it gets filled in when the bin
* goes from empty to non-empty.
*/
}
static void
eset_bin_stats_init(eset_bin_stats_t *bin_stats) {
atomic_store_zu(&bin_stats->nextents, 0, ATOMIC_RELAXED);
atomic_store_zu(&bin_stats->nbytes, 0, ATOMIC_RELAXED);
}
void
eset_init(eset_t *eset, extent_state_t state) {
for (unsigned i = 0; i < ESET_NPSIZES; i++) {
eset_bin_init(&eset->bins[i]);
eset_bin_stats_init(&eset->bin_stats[i]);
}
fb_init(eset->bitmap, ESET_NPSIZES);
edata_list_inactive_init(&eset->lru);
eset->state = state;
}
size_t
eset_npages_get(eset_t *eset) {
return atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
}
size_t
eset_nextents_get(eset_t *eset, pszind_t pind) {
return atomic_load_zu(&eset->bin_stats[pind].nextents, ATOMIC_RELAXED);
}
size_t
eset_nbytes_get(eset_t *eset, pszind_t pind) {
return atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
}
static void
eset_stats_add(eset_t *eset, pszind_t pind, size_t sz) {
size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents,
ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nextents, cur + 1,
ATOMIC_RELAXED);
cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nbytes, cur + sz,
ATOMIC_RELAXED);
}
static void
eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) {
size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents,
ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nextents, cur - 1,
ATOMIC_RELAXED);
cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
atomic_store_zu(&eset->bin_stats[pind].nbytes, cur - sz,
ATOMIC_RELAXED);
}
void
eset_insert(eset_t *eset, edata_t *edata) {
assert(edata_state_get(edata) == eset->state);
size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata);
if (edata_heap_empty(&eset->bins[pind].heap)) {
fb_set(eset->bitmap, ESET_NPSIZES, (size_t)pind);
/* Only element is automatically the min element. */
eset->bins[pind].heap_min = edata_cmp_summary;
} else {
/*
* There's already a min element; update the summary if we're
* about to insert a lower one.
*/
if (edata_cmp_summary_comp(edata_cmp_summary,
eset->bins[pind].heap_min) < 0) {
eset->bins[pind].heap_min = edata_cmp_summary;
}
}
edata_heap_insert(&eset->bins[pind].heap, edata);
if (config_stats) {
eset_stats_add(eset, pind, size);
}
edata_list_inactive_append(&eset->lru, edata);
size_t npages = size >> LG_PAGE;
/*
* All modifications to npages hold the mutex (as asserted above), so we
* don't need an atomic fetch-add; we can get by with a load followed by
* a store.
*/
size_t cur_eset_npages =
atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
atomic_store_zu(&eset->npages, cur_eset_npages + npages,
ATOMIC_RELAXED);
}
void
eset_remove(eset_t *eset, edata_t *edata) {
assert(edata_state_get(edata) == eset->state ||
edata_state_in_transition(edata_state_get(edata)));
size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
if (config_stats) {
eset_stats_sub(eset, pind, size);
}
edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata);
edata_heap_remove(&eset->bins[pind].heap, edata);
if (edata_heap_empty(&eset->bins[pind].heap)) {
fb_unset(eset->bitmap, ESET_NPSIZES, (size_t)pind);
} else {
/*
* This is a little weird; we compare if the summaries are
* equal, rather than if the edata we removed was the heap
* minimum. The reason why is that getting the heap minimum
* can cause a pairing heap merge operation. We can avoid this
* if we only update the min if it's changed, in which case the
* summaries of the removed element and the min element should
* compare equal.
*/
if (edata_cmp_summary_comp(edata_cmp_summary,
eset->bins[pind].heap_min) == 0) {
eset->bins[pind].heap_min = edata_cmp_summary_get(
edata_heap_first(&eset->bins[pind].heap));
}
}
edata_list_inactive_remove(&eset->lru, edata);
size_t npages = size >> LG_PAGE;
/*
* As in eset_insert, we hold eset->mtx and so don't need atomic
* operations for updating eset->npages.
*/
size_t cur_extents_npages =
atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
assert(cur_extents_npages >= npages);
atomic_store_zu(&eset->npages,
cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}
/*
* Find an extent with size [min_size, max_size) to satisfy the alignment
* requirement. For each size, try only the first extent in the heap.
*/
static edata_t *
eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
size_t alignment) {
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
pszind_t pind_max = sz_psz2ind(sz_psz_quantize_ceil(max_size));
for (pszind_t i =
(pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
i < pind_max;
i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
assert(i < SC_NPSIZES);
assert(!edata_heap_empty(&eset->bins[i].heap));
edata_t *edata = edata_heap_first(&eset->bins[i].heap);
uintptr_t base = (uintptr_t)edata_base_get(edata);
size_t candidate_size = edata_size_get(edata);
assert(candidate_size >= min_size);
uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
PAGE_CEILING(alignment));
if (base > next_align || base + candidate_size <= next_align) {
/* Overflow or not crossing the next alignment. */
continue;
}
size_t leadsize = next_align - base;
if (candidate_size - leadsize >= min_size) {
return edata;
}
}
return NULL;
}
/*
* Do first-fit extent selection, i.e. select the oldest/lowest extent that is
* large enough.
*
* lg_max_fit is the (log of the) maximum ratio between the requested size and
* the returned size that we'll allow. This can reduce fragmentation by
* avoiding reusing and splitting large extents for smaller sizes. In practice,
* it's set to opt_lg_extent_max_active_fit for the dirty eset and SC_PTR_BITS
* for others.
*/
static edata_t *
eset_first_fit(eset_t *eset, size_t size, bool exact_only,
unsigned lg_max_fit) {
edata_t *ret = NULL;
edata_cmp_summary_t ret_summ JEMALLOC_CC_SILENCE_INIT({0});
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
if (exact_only) {
return edata_heap_empty(&eset->bins[pind].heap) ? NULL :
edata_heap_first(&eset->bins[pind].heap);
}
for (pszind_t i =
(pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
i < ESET_NPSIZES;
i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
assert(!edata_heap_empty(&eset->bins[i].heap));
if (lg_max_fit == SC_PTR_BITS) {
/*
* We'll shift by this below, and shifting out all the
* bits is undefined. Decreasing is safe, since the
* page size is larger than 1 byte.
*/
lg_max_fit = SC_PTR_BITS - 1;
}
if ((sz_pind2sz(i) >> lg_max_fit) > size) {
break;
}
if (ret == NULL || edata_cmp_summary_comp(
eset->bins[i].heap_min, ret_summ) < 0) {
/*
* We grab the edata as early as possible, even though
* we might change it later. Practically, a large
* portion of eset_fit calls succeed at the first valid
* index, so this doesn't cost much, and we get the
* effect of prefetching the edata as early as possible.
*/
edata_t *edata = edata_heap_first(&eset->bins[i].heap);
assert(edata_size_get(edata) >= size);
assert(ret == NULL || edata_snad_comp(edata, ret) < 0);
assert(ret == NULL || edata_cmp_summary_comp(
eset->bins[i].heap_min,
edata_cmp_summary_get(edata)) == 0);
ret = edata;
ret_summ = eset->bins[i].heap_min;
}
if (i == SC_NPSIZES) {
break;
}
assert(i < SC_NPSIZES);
}
return ret;
}
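/*
 * Example of the lg_max_fit cutoff (illustrative; assumes the stock default
 * of 6 for opt_lg_extent_max_active_fit): a 16 KiB request scanning the dirty
 * eset stops considering bins once sz_pind2sz(i) >> 6 > 16 KiB, i.e. it will
 * not break up a cached extent from a size class larger than 1 MiB just to
 * carve 16 KiB out of it.
 */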
edata_t *
eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only,
unsigned lg_max_fit) {
size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (max_size < esize) {
return NULL;
}
edata_t *edata = eset_first_fit(eset, max_size, exact_only, lg_max_fit);
if (alignment > PAGE && edata == NULL) {
/*
* max_size guarantees the alignment requirement but is rather
* pessimistic. Next we try to satisfy the aligned allocation
* with sizes in [esize, max_size).
*/
edata = eset_fit_alignment(eset, esize, max_size, alignment);
}
return edata;
}
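/*
 * Why max_size works (illustrative): the extent base is page-aligned, so its
 * distance to the next PAGE_CEILING(alignment) boundary is at most
 * PAGE_CEILING(alignment) - PAGE; any extent of at least
 * esize + PAGE_CEILING(alignment) - PAGE bytes therefore still has esize
 * bytes left after skipping that lead. E.g. esize = 24 KiB, alignment =
 * 16 KiB, PAGE = 4 KiB: max_size = 36 KiB; in the worst case the base sits
 * 4 KiB past a 16 KiB boundary, leadsize = 12 KiB, leaving exactly 24 KiB
 * aligned to 16 KiB.
 */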
#define JEMALLOC_PRNG_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
void
exp_grow_init(exp_grow_t *exp_grow) {
exp_grow->next = sz_psz2ind(HUGEPAGE);
exp_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
}
#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
/******************************************************************************/
/* Data. */
rtree_t extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t extent_mutex_pool;
size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
static const bitmap_info_t extents_bitmap_info =
BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit,
unsigned arena_ind);
static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, bool committed, unsigned arena_ind);
static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, bool committed, unsigned arena_ind);
static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained);
static bool extent_decommit_default(extent_hooks_t *extent_hooks,
void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained);
#ifdef PAGES_CAN_PURGE_FORCED
static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained);
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t size_a, size_t size_b, bool committed,
unsigned arena_ind);
static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
bool growing_retained);
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
size_t size_a, void *addr_b, size_t size_b, bool committed,
unsigned arena_ind);
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
bool growing_retained);
const extent_hooks_t extent_hooks_default = {
extent_alloc_default,
extent_dalloc_default,
extent_destroy_default,
extent_commit_default,
extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
,
extent_purge_lazy_default
#else
,
NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
,
extent_purge_forced_default
#else
,
NULL
#endif
,
extent_split_default,
extent_merge_default
};
static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length, bool growing_retained);
static bool extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks,
edata_t *edata, size_t offset, size_t length, bool growing_retained);
static bool extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks,
edata_t *edata, size_t offset, size_t length, bool growing_retained);
static edata_t *extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks);
static bool extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *a, edata_t *b, bool holding_core_locks);
/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
......@@ -99,503 +34,158 @@ static atomic_zu_t highpages;
* definition.
*/
static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
bool *zero, bool *commit, bool growing_retained);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
extent_t *extent, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
bool growing_retained);
static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *expand_edata, size_t usize, size_t alignment,
bool zero, bool *commit, bool growing_retained, bool guarded);
static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *edata, bool *coalesced);
static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac,
ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment,
bool zero, bool *commit, bool guarded);
/******************************************************************************/
#define ATTR_NONE /* does nothing */
ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
extent_esnead_comp)
#undef ATTR_NONE
typedef enum {
lock_result_success,
lock_result_failure,
lock_result_no_extent
} lock_result_t;
static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
extent_t **result, bool inactive_only) {
extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
elm, true);
/* Slab implies active extents and should be skipped. */
if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
&extents_rtree, elm, true))) {
return lock_result_no_extent;
}
/*
* It's possible that the extent changed out from under us, and with it
* the leaf->extent mapping. We have to recheck while holding the lock.
*/
extent_lock(tsdn, extent1);
extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
&extents_rtree, elm, true);
if (extent1 == extent2) {
*result = extent1;
return lock_result_success;
} else {
extent_unlock(tsdn, extent1);
return lock_result_failure;
}
}
/*
* Returns a pool-locked extent_t * if there's one associated with the given
* address, and NULL otherwise.
*/
static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
bool inactive_only) {
extent_t *ret = NULL;
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)addr, false, false);
if (elm == NULL) {
return NULL;
}
lock_result_t lock_result;
do {
lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
inactive_only);
} while (lock_result == lock_result_failure);
return ret;
}
extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
extent_t *extent = extent_avail_first(&arena->extent_avail);
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
return base_alloc_extent(tsdn, arena->base);
}
extent_avail_remove(&arena->extent_avail, extent);
atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
return extent;
}
void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
extent_avail_insert(&arena->extent_avail, extent);
atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
}
extent_hooks_t *
extent_hooks_get(arena_t *arena) {
return base_extent_hooks_get(arena->base);
}
extent_hooks_t *
extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
background_thread_info_t *info;
if (have_background_thread) {
info = arena_background_thread_info_get(arena);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
}
extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
if (have_background_thread) {
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
return ret;
}
static void
extent_hooks_assure_initialized(arena_t *arena,
extent_hooks_t **r_extent_hooks) {
if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
*r_extent_hooks = extent_hooks_get(arena);
}
}
#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
size_t ret;
pszind_t pind;
assert(size > 0);
assert((size & PAGE_MASK) == 0);
pind = sz_psz2ind(size - sz_large_pad + 1);
if (pind == 0) {
/*
* Avoid underflow. This short-circuit would also do the right
* thing for all sizes in the range for which there are
* PAGE-spaced size classes, but it's simplest to just handle
* the one case that would cause erroneous results.
*/
return size;
}
ret = sz_pind2sz(pind - 1) + sz_large_pad;
assert(ret <= size);
return ret;
extent_sn_next(pac_t *pac) {
return atomic_fetch_add_zu(&pac->extent_sn_next, 1, ATOMIC_RELAXED);
}
#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_ceil(size_t size) {
size_t ret;
assert(size > 0);
assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
ret = extent_size_quantize_floor(size);
if (ret < size) {
/*
* Skip a quantization that may have an adequately large extent,
* because under-sized extents may be mixed in. This only
* happens when an unusual size is requested, i.e. for aligned
* allocation, and is just one of several places where linear
* search would potentially find sufficiently aligned available
* memory somewhere lower.
*/
ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
sz_large_pad;
}
return ret;
static inline bool
extent_may_force_decay(pac_t *pac) {
return !(pac_decay_ms_get(pac, extent_state_dirty) == -1
|| pac_decay_ms_get(pac, extent_state_muzzy) == -1);
}
/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *edata) {
emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
bool
extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
bool delay_coalesce) {
if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
malloc_mutex_rank_exclusive)) {
bool coalesced;
edata = extent_try_coalesce(tsdn, pac, ehooks, ecache,
edata, &coalesced);
emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
if (!coalesced) {
return true;
}
for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
extent_heap_new(&extents->heaps[i]);
}
bitmap_init(extents->bitmap, &extents_bitmap_info, true);
extent_list_init(&extents->lru);
atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
extents->state = state;
extents->delay_coalesce = delay_coalesce;
eset_insert(&ecache->eset, edata);
return false;
}
extent_state_t
extents_state_get(const extents_t *extents) {
return extents->state;
}
size_t
extents_npages_get(extents_t *extents) {
return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
}
size_t
extents_nextents_get(extents_t *extents, pszind_t pind) {
return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
}
size_t
extents_nbytes_get(extents_t *extents, pszind_t pind) {
return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
}
static void
extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) {
size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED);
cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED);
}
static void
extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) {
size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED);
cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED);
}
static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
malloc_mutex_assert_owner(tsdn, &extents->mtx);
assert(extent_state_get(extent) == extents->state);
size_t size = extent_size_get(extent);
size_t psz = extent_size_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
if (extent_heap_empty(&extents->heaps[pind])) {
bitmap_unset(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
}
extent_heap_insert(&extents->heaps[pind], extent);
if (config_stats) {
extents_stats_add(extents, pind, size);
}
extent_list_append(&extents->lru, extent);
size_t npages = size >> LG_PAGE;
/*
* All modifications to npages hold the mutex (as asserted above), so we
* don't need an atomic fetch-add; we can get by with a load followed by
* a store.
*/
size_t cur_extents_npages =
atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
atomic_store_zu(&extents->npages, cur_extents_npages + npages,
ATOMIC_RELAXED);
}
static void
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
malloc_mutex_assert_owner(tsdn, &extents->mtx);
assert(extent_state_get(extent) == extents->state);
size_t size = extent_size_get(extent);
size_t psz = extent_size_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
extent_heap_remove(&extents->heaps[pind], extent);
if (config_stats) {
extents_stats_sub(extents, pind, size);
}
if (extent_heap_empty(&extents->heaps[pind])) {
bitmap_set(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
}
extent_list_remove(&extents->lru, extent);
size_t npages = size >> LG_PAGE;
/*
* As in extents_insert_locked, we hold extents->mtx and so don't need
* atomic operations for updating extents->npages.
*/
size_t cur_extents_npages =
atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
assert(cur_extents_npages >= npages);
atomic_store_zu(&extents->npages,
cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}
/*
* Find an extent with size [min_size, max_size) to satisfy the alignment
* requirement. For each size, try only the first extent in the heap.
*/
static extent_t *
extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
size_t alignment) {
pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
&extents_bitmap_info, (size_t)pind); i < pind_max; i =
(pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)i+1)) {
assert(i < SC_NPSIZES);
assert(!extent_heap_empty(&extents->heaps[i]));
extent_t *extent = extent_heap_first(&extents->heaps[i]);
uintptr_t base = (uintptr_t)extent_base_get(extent);
size_t candidate_size = extent_size_get(extent);
assert(candidate_size >= min_size);
uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
PAGE_CEILING(alignment));
if (base > next_align || base + candidate_size <= next_align) {
/* Overflow or not crossing the next alignment. */
continue;
}
size_t leadsize = next_align - base;
if (candidate_size - leadsize >= min_size) {
return extent;
}
}
edata_t *
ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
edata_t *expand_edata, size_t size, size_t alignment, bool zero,
bool guarded) {
assert(size != 0);
assert(alignment != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
return NULL;
bool commit = true;
edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata,
size, alignment, zero, &commit, false, guarded);
assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
assert(edata == NULL || edata_guarded_get(edata) == guarded);
return edata;
}
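/*
 * Illustrative standalone sketch (not part of this patch): the alignment-fit
 * search above (extents_fit_alignment) rounds a candidate extent's base up to
 * the requested alignment and then checks that the remainder of the extent can
 * still hold the request. ALIGN_UP and range_fits_aligned are hypothetical
 * stand-ins for ALIGNMENT_CEILING and the in-tree logic.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

/* alignment must be a power of two. */
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

/* Report whether [base, base+range_size) can serve size bytes at alignment. */
static bool
range_fits_aligned(uintptr_t base, size_t range_size, size_t size,
    size_t alignment, size_t *leadsize) {
    uintptr_t aligned = ALIGN_UP(base, alignment);
    if (aligned < base || aligned >= base + range_size) {
        return false;  /* ALIGN_UP overflowed, or no aligned address inside. */
    }
    *leadsize = (size_t)(aligned - base);
    return range_size - *leadsize >= size;
}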
/*
* Do first-fit extent selection, i.e. select the oldest/lowest extent that is
* large enough.
*/
static extent_t *
extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
size_t size) {
extent_t *ret = NULL;
pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
if (!maps_coalesce && !opt_retain) {
/*
* No split / merge allowed (Windows w/o retain). Try exact fit
* only.
*/
return extent_heap_empty(&extents->heaps[pind]) ? NULL :
extent_heap_first(&extents->heaps[pind]);
}
edata_t *
ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
edata_t *expand_edata, size_t size, size_t alignment, bool zero,
bool guarded) {
assert(size != 0);
assert(alignment != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
&extents_bitmap_info, (size_t)pind);
i < SC_NPSIZES + 1;
i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)i+1)) {
assert(!extent_heap_empty(&extents->heaps[i]));
extent_t *extent = extent_heap_first(&extents->heaps[i]);
assert(extent_size_get(extent) >= size);
/*
* In order to reduce fragmentation, avoid reusing and splitting
* large extents for much smaller sizes.
*
* Only do this check for dirty extents (delay_coalesce).

*/
if (extents->delay_coalesce &&
(sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
break;
}
if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
ret = extent;
bool commit = true;
edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata,
size, alignment, zero, &commit, guarded);
if (edata == NULL) {
if (opt_retain && expand_edata != NULL) {
/*
* When retain is enabled and trying to expand, we do
* not attempt extent_alloc_wrapper which does mmap that
* is very unlikely to succeed (unless it happens to be
* at the end).
*/
return NULL;
}
if (i == SC_NPSIZES) {
break;
if (guarded) {
/*
* Means no cached guarded extents available (and no
* grow_retained was attempted). The pac_alloc flow
* will alloc regular extents to make new guarded ones.
*/
return NULL;
}
assert(i < SC_NPSIZES);
}
return ret;
}
/*
* Do first-fit extent selection, where the selection policy choice is
* based on extents->delay_coalesce.
*/
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
size_t esize, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &extents->mtx);
size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (max_size < esize) {
return NULL;
}
extent_t *extent =
extents_first_fit_locked(tsdn, arena, extents, max_size);
if (alignment > PAGE && extent == NULL) {
/*
* max_size guarantees the alignment requirement but is rather
* pessimistic. Next we try to satisfy the aligned allocation
* with sizes in [esize, max_size).
*/
extent = extents_fit_alignment(extents, esize, max_size,
alignment);
}
return extent;
}
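/*
 * Illustrative standalone sketch (not part of this patch): extents_fit_locked()
 * above pads the search size to size + PAGE_CEILING(alignment) - PAGE so that
 * any first-fit hit is guaranteed to contain an aligned sub-range, and it
 * checks the sum for size_t wrap-around first. PAGE_SZ is a hypothetical
 * stand-in for the real PAGE constant.
 */
#include <stdbool.h>
#include <stddef.h>

#define PAGE_SZ ((size_t)4096)

/* Compute the padded search size; returns false on size_t wrap-around. */
static bool
padded_search_size(size_t size, size_t alignment, size_t *max_size) {
    /* Round the alignment up to a page multiple, as the allocator does. */
    size_t pg_align = (alignment + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
    *max_size = size + pg_align - PAGE_SZ;
    return *max_size >= size;  /* Beware size_t wrap-around. */
}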
static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
extent_t *extent) {
extent_state_set(extent, extent_state_active);
bool coalesced;
extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
extents, extent, &coalesced, false);
extent_state_set(extent, extents_state_get(extents));
if (!coalesced) {
return true;
void *new_addr = (expand_edata == NULL) ? NULL :
edata_past_get(expand_edata);
edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr,
size, alignment, zero, &commit,
/* growing_retained */ false);
}
extents_insert_locked(tsdn, extents, extent);
return false;
}
extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
assert(size + pad != 0);
assert(alignment != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
new_addr, size, pad, alignment, slab, szind, zero, commit, false);
assert(extent == NULL || extent_dumpable_get(extent));
return extent;
assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
return edata;
}
void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *extent) {
assert(extent_base_get(extent) != NULL);
assert(extent_size_get(extent) != 0);
assert(extent_dumpable_get(extent));
ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
edata_t *edata) {
assert(edata_base_get(edata) != NULL);
assert(edata_size_get(edata) != 0);
assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extent_addr_set(extent, extent_base_get(extent));
extent_zeroed_set(extent, false);
edata_addr_set(edata, edata_base_get(edata));
edata_zeroed_set(edata, false);
extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
extent_record(tsdn, pac, ehooks, ecache, edata);
}
extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, size_t npages_min) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
malloc_mutex_lock(tsdn, &extents->mtx);
edata_t *
ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, size_t npages_min) {
malloc_mutex_lock(tsdn, &ecache->mtx);
/*
* Get the LRU coalesced extent, if any. If coalescing was delayed,
* the loop will iterate until the LRU extent is fully coalesced.
*/
extent_t *extent;
edata_t *edata;
while (true) {
/* Get the LRU extent, if any. */
extent = extent_list_first(&extents->lru);
if (extent == NULL) {
goto label_return;
eset_t *eset = &ecache->eset;
edata = edata_list_inactive_first(&eset->lru);
if (edata == NULL) {
/*
* Next check if there are guarded extents. They are
* more expensive to purge (since they are not
* mergeable), thus in favor of caching them longer.
*/
eset = &ecache->guarded_eset;
edata = edata_list_inactive_first(&eset->lru);
if (edata == NULL) {
goto label_return;
}
}
/* Check the eviction limit. */
size_t extents_npages = atomic_load_zu(&extents->npages,
ATOMIC_RELAXED);
size_t extents_npages = ecache_npages_get(ecache);
if (extents_npages <= npages_min) {
extent = NULL;
edata = NULL;
goto label_return;
}
extents_remove_locked(tsdn, extents, extent);
if (!extents->delay_coalesce) {
eset_remove(eset, edata);
if (!ecache->delay_coalesce || edata_guarded_get(edata)) {
break;
}
/* Try to coalesce. */
if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
rtree_ctx, extents, extent)) {
if (extent_try_delayed_coalesce(tsdn, pac, ehooks, ecache,
edata)) {
break;
}
/*
@@ -608,23 +198,24 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
* Either mark the extent active or deregister it to protect against
* concurrent operations.
*/
switch (extents_state_get(extents)) {
switch (ecache->state) {
case extent_state_active:
not_reached();
case extent_state_dirty:
case extent_state_muzzy:
extent_state_set(extent, extent_state_active);
emap_update_edata_state(tsdn, pac->emap, edata,
extent_state_active);
break;
case extent_state_retained:
extent_deregister(tsdn, extent);
extent_deregister(tsdn, pac, edata);
break;
default:
not_reached();
}
label_return:
malloc_mutex_unlock(tsdn, &extents->mtx);
return extent;
malloc_mutex_unlock(tsdn, &ecache->mtx);
return edata;
}
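/*
 * Illustrative standalone sketch (not part of this patch): ecache_evict()
 * above pops the least-recently-used extent, but refuses to drop the cache
 * below a caller-supplied floor of pages (npages_min). The intrusive list and
 * names here are hypothetical simplifications of the real eset/LRU machinery.
 */
#include <stddef.h>

typedef struct cached_range_s cached_range_t;
struct cached_range_s {
    cached_range_t *next;  /* singly linked, oldest first */
    size_t npages;
};

typedef struct {
    cached_range_t *lru_head;  /* oldest entry */
    size_t total_npages;
} range_cache_t;

/* Pop the oldest range, unless doing so would go below the floor. */
static cached_range_t *
range_cache_evict(range_cache_t *rc, size_t npages_min) {
    cached_range_t *oldest = rc->lru_head;
    if (oldest == NULL || rc->total_npages <= npages_min) {
        return NULL;
    }
    rc->lru_head = oldest->next;
    rc->total_npages -= oldest->npages;
    return oldest;
}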
/*
@@ -632,123 +223,73 @@ label_return:
* indicates OOM), e.g. when trying to split an existing extent.
*/
static void
extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *extent, bool growing_retained) {
size_t sz = extent_size_get(extent);
extents_abandon_vm(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
edata_t *edata, bool growing_retained) {
size_t sz = edata_size_get(edata);
if (config_stats) {
arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
atomic_fetch_add_zu(&pac->stats->abandoned_vm, sz,
ATOMIC_RELAXED);
}
/*
* Leak extent after making sure its pages have already been purged, so
* that this is only a virtual memory leak.
*/
if (extents_state_get(extents) == extent_state_dirty) {
if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
extent, 0, sz, growing_retained)) {
extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
extent, 0, extent_size_get(extent),
growing_retained);
if (ecache->state == extent_state_dirty) {
if (extent_purge_lazy_impl(tsdn, ehooks, edata, 0, sz,
growing_retained)) {
extent_purge_forced_impl(tsdn, ehooks, edata, 0,
edata_size_get(edata), growing_retained);
}
}
extent_dalloc(tsdn, arena, extent);
}
void
extents_prefork(tsdn_t *tsdn, extents_t *extents) {
malloc_mutex_prefork(tsdn, &extents->mtx);
}
void
extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
malloc_mutex_postfork_parent(tsdn, &extents->mtx);
}
void
extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
malloc_mutex_postfork_child(tsdn, &extents->mtx);
edata_cache_put(tsdn, pac->edata_cache, edata);
}
static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
extent_t *extent) {
assert(extent_arena_get(extent) == arena);
assert(extent_state_get(extent) == extent_state_active);
extent_deactivate_locked_impl(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
edata_t *edata) {
malloc_mutex_assert_owner(tsdn, &ecache->mtx);
assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
extent_state_set(extent, extents_state_get(extents));
extents_insert_locked(tsdn, extents, extent);
emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
eset_t *eset = edata_guarded_get(edata) ? &ecache->guarded_eset :
&ecache->eset;
eset_insert(eset, edata);
}
static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
extent_t *extent) {
malloc_mutex_lock(tsdn, &extents->mtx);
extent_deactivate_locked(tsdn, arena, extents, extent);
malloc_mutex_unlock(tsdn, &extents->mtx);
extent_deactivate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
edata_t *edata) {
assert(edata_state_get(edata) == extent_state_active);
extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
}
static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
extent_t *extent) {
assert(extent_arena_get(extent) == arena);
assert(extent_state_get(extent) == extents_state_get(extents));
extents_remove_locked(tsdn, extents, extent);
extent_state_set(extent, extent_state_active);
}
static bool
extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
const extent_t *extent, bool dependent, bool init_missing,
rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent), dependent, init_missing);
if (!dependent && *r_elm_a == NULL) {
return true;
}
assert(*r_elm_a != NULL);
*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_last_get(extent), dependent, init_missing);
if (!dependent && *r_elm_b == NULL) {
return true;
}
assert(*r_elm_b != NULL);
return false;
extent_deactivate_check_state_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
edata_t *edata, extent_state_t expected_state) {
assert(edata_state_get(edata) == expected_state);
extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
}
static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
if (elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
slab);
}
}
extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset,
edata_t *edata) {
assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
assert(edata_state_get(edata) == ecache->state ||
edata_state_get(edata) == extent_state_merging);
static void
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
szind_t szind) {
assert(extent_slab_get(extent));
/* Register interior. */
for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
rtree_write(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
LG_PAGE), extent, szind, true);
}
eset_remove(eset, edata);
emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
}
static void
extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
void
extent_gdump_add(tsdn_t *tsdn, const edata_t *edata) {
cassert(config_prof);
/* prof_gdump() requirement. */
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
if (opt_prof && extent_state_get(extent) == extent_state_active) {
size_t nadd = extent_size_get(extent) >> LG_PAGE;
if (opt_prof && edata_state_get(edata) == extent_state_active) {
size_t nadd = edata_size_get(edata) >> LG_PAGE;
size_t cur = atomic_fetch_add_zu(&curpages, nadd,
ATOMIC_RELAXED) + nadd;
size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
@@ -767,232 +308,184 @@ extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
}
static void
extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
extent_gdump_sub(tsdn_t *tsdn, const edata_t *edata) {
cassert(config_prof);
if (opt_prof && extent_state_get(extent) == extent_state_active) {
size_t nsub = extent_size_get(extent) >> LG_PAGE;
if (opt_prof && edata_state_get(edata) == extent_state_active) {
size_t nsub = edata_size_get(edata) >> LG_PAGE;
assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
}
}
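/*
 * Illustrative standalone sketch (not part of this patch): extent_gdump_add()
 * / extent_gdump_sub() above maintain a global count of active pages plus a
 * high watermark using atomics only. The watermark below is raised with a
 * standard compare-and-swap retry loop (one common way to do it); the names
 * are hypothetical.
 */
#include <stdatomic.h>
#include <stddef.h>

static atomic_size_t cur_pages_g;   /* pages currently in active extents */
static atomic_size_t high_pages_g;  /* high watermark of cur_pages_g */

static void
active_pages_add(size_t nadd) {
    size_t cur = atomic_fetch_add_explicit(&cur_pages_g, nadd,
        memory_order_relaxed) + nadd;
    size_t high = atomic_load_explicit(&high_pages_g, memory_order_relaxed);
    /* Raise the watermark; a failed CAS reloads `high` and retries. */
    while (cur > high && !atomic_compare_exchange_weak_explicit(
        &high_pages_g, &high, cur, memory_order_relaxed,
        memory_order_relaxed)) {
    }
}

static void
active_pages_sub(size_t nsub) {
    atomic_fetch_sub_explicit(&cur_pages_g, nsub, memory_order_relaxed);
}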
static bool
extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *elm_a, *elm_b;
extent_register_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata, bool gdump_add) {
assert(edata_state_get(edata) == extent_state_active);
/*
* We need to hold the lock to protect against a concurrent coalesce
* operation that sees us in a partial state.
* No locking needed, as the edata must be in active state, which
* prevents other threads from accessing the edata.
*/
extent_lock(tsdn, extent);
if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
&elm_a, &elm_b)) {
extent_unlock(tsdn, extent);
if (emap_register_boundary(tsdn, pac->emap, edata, SC_NSIZES,
/* slab */ false)) {
return true;
}
szind_t szind = extent_szind_get_maybe_invalid(extent);
bool slab = extent_slab_get(extent);
extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
if (slab) {
extent_interior_register(tsdn, rtree_ctx, extent, szind);
}
extent_unlock(tsdn, extent);
if (config_prof && gdump_add) {
extent_gdump_add(tsdn, extent);
extent_gdump_add(tsdn, edata);
}
return false;
}
static bool
extent_register(tsdn_t *tsdn, extent_t *extent) {
return extent_register_impl(tsdn, extent, true);
extent_register(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
return extent_register_impl(tsdn, pac, edata, true);
}
static bool
extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
return extent_register_impl(tsdn, extent, false);
extent_register_no_gdump_add(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
return extent_register_impl(tsdn, pac, edata, false);
}
static void
extent_reregister(tsdn_t *tsdn, extent_t *extent) {
bool err = extent_register(tsdn, extent);
extent_reregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
bool err = extent_register(tsdn, pac, edata);
assert(!err);
}
/*
* Removes all pointers to the given extent from the global rtree indices for
* its interior. This is relevant for slab extents, for which we need to do
* metadata lookups at places other than the head of the extent. We deregister
* on the interior, then, when an extent moves from being an active slab to an
* inactive state.
*/
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
extent_t *extent) {
size_t i;
assert(extent_slab_get(extent));
for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
rtree_clear(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
LG_PAGE));
}
}
/*
* Removes all pointers to the given extent from the global rtree.
*/
static void
extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *elm_a, *elm_b;
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
&elm_a, &elm_b);
extent_lock(tsdn, extent);
extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
if (extent_slab_get(extent)) {
extent_interior_deregister(tsdn, rtree_ctx, extent);
extent_slab_set(extent, false);
}
extent_unlock(tsdn, extent);
extent_deregister_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata,
bool gdump) {
emap_deregister_boundary(tsdn, pac->emap, edata);
if (config_prof && gdump) {
extent_gdump_sub(tsdn, extent);
extent_gdump_sub(tsdn, edata);
}
}
static void
extent_deregister(tsdn_t *tsdn, extent_t *extent) {
extent_deregister_impl(tsdn, extent, true);
extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
extent_deregister_impl(tsdn, pac, edata, true);
}
static void
extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
extent_deregister_impl(tsdn, extent, false);
extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac,
edata_t *edata) {
extent_deregister_impl(tsdn, pac, edata, false);
}
/*
* Tries to find and remove an extent from extents that can be used for the
* Tries to find and remove an extent from ecache that can be used for the
* given allocation request.
*/
static extent_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
static edata_t *
extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
bool guarded) {
malloc_mutex_assert_owner(tsdn, &ecache->mtx);
assert(alignment > 0);
if (config_debug && new_addr != NULL) {
if (config_debug && expand_edata != NULL) {
/*
* Non-NULL new_addr has two use cases:
*
* 1) Recycle a known-extant extent, e.g. during purging.
* 2) Perform in-place expanding reallocation.
*
* Regardless of use case, new_addr must either refer to a
* non-existing extent, or to the base of an extant extent,
* since only active slabs support interior lookups (which of
* course cannot be recycled).
* Non-NULL expand_edata indicates in-place expanding realloc.
* new_addr must either refer to a non-existing extent, or to
* the base of an extant extent, since only active slabs support
* interior lookups (which of course cannot be recycled).
*/
void *new_addr = edata_past_get(expand_edata);
assert(PAGE_ADDR2BASE(new_addr) == new_addr);
assert(pad == 0);
assert(alignment <= PAGE);
}
size_t esize = size + pad;
malloc_mutex_lock(tsdn, &extents->mtx);
extent_hooks_assure_initialized(arena, r_extent_hooks);
extent_t *extent;
if (new_addr != NULL) {
extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
false);
if (extent != NULL) {
/*
* We might null-out extent to report an error, but we
* still need to unlock the associated mutex after.
*/
extent_t *unlock_extent = extent;
assert(extent_base_get(extent) == new_addr);
if (extent_arena_get(extent) != arena ||
extent_size_get(extent) < esize ||
extent_state_get(extent) !=
extents_state_get(extents)) {
extent = NULL;
edata_t *edata;
eset_t *eset = guarded ? &ecache->guarded_eset : &ecache->eset;
if (expand_edata != NULL) {
edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap,
expand_edata, EXTENT_PAI_PAC, ecache->state);
if (edata != NULL) {
extent_assert_can_expand(expand_edata, edata);
if (edata_size_get(edata) < size) {
emap_release_edata(tsdn, pac->emap, edata,
ecache->state);
edata = NULL;
}
extent_unlock(tsdn, unlock_extent);
}
} else {
extent = extents_fit_locked(tsdn, arena, extents, esize,
alignment);
/*
* A large extent might be broken up from its original size to
* some small size to satisfy a small request. When that small
* request is freed, though, it won't merge back with the larger
* extent if delayed coalescing is on. The large extent can
* then no longer satisfy a request for its original size. To
* limit this effect, when delayed coalescing is enabled, we
* put a cap on how big an extent we can split for a request.
*/
unsigned lg_max_fit = ecache->delay_coalesce
? (unsigned)opt_lg_extent_max_active_fit : SC_PTR_BITS;
/*
* If split and merge are not allowed (Windows w/o retain), try
* exact fit only.
*
* For simplicity purposes, splitting guarded extents is not
* supported. Hence, we do only exact fit for guarded
* allocations.
*/
bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
edata = eset_fit(eset, size, alignment, exact_only,
lg_max_fit);
}
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &extents->mtx);
if (edata == NULL) {
return NULL;
}
assert(!guarded || edata_guarded_get(edata));
extent_activate_locked(tsdn, pac, ecache, eset, edata);
extent_activate_locked(tsdn, arena, extents, extent);
malloc_mutex_unlock(tsdn, &extents->mtx);
return extent;
return edata;
}
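/*
 * Illustrative standalone sketch (not part of this patch): the eset_fit() call
 * above walks non-empty size-class bins starting from the smallest class that
 * can hold the request, and when coalescing is delayed it refuses to split an
 * extent more than 2^lg_max_fit times larger than the request, to limit
 * fragmentation. Bins are simplified here to power-of-two classes; all names
 * are hypothetical.
 */
#include <stddef.h>

#define NBINS 32

typedef struct {
    unsigned nranges[NBINS];  /* nranges[i]: cached ranges of size 2^i bytes */
} simple_eset_t;

/* Return the index of the first bin that can serve `size`, or -1. */
static int
simple_eset_fit(const simple_eset_t *es, size_t size, unsigned lg_max_fit) {
    for (int i = 0; i < NBINS; i++) {
        size_t bin_size = (size_t)1 << i;
        if (bin_size < size || es->nranges[i] == 0) {
            continue;  /* Too small, or bin is empty. */
        }
        if ((bin_size >> lg_max_fit) > size) {
            break;     /* Would split a much larger extent; give up. */
        }
        return i;      /* First (smallest) usable bin. */
    }
    return -1;
}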
/*
* Given an allocation request and an extent guaranteed to be able to satisfy
* it, this splits off lead and trail extents, leaving extent pointing to an
* it, this splits off lead and trail extents, leaving edata pointing to an
* extent satisfying the allocation.
* This function doesn't put lead or trail into any extents_t; it's the caller's
* This function doesn't put lead or trail into any ecache; it's the caller's
* job to ensure that they can be reused.
*/
typedef enum {
/*
* Split successfully. lead, extent, and trail, are modified to extents
* Split successfully. lead, edata, and trail, are modified to extents
* describing the ranges before, in, and after the given allocation.
*/
extent_split_interior_ok,
/*
* The extent can't satisfy the given allocation request. None of the
* input extent_t *s are touched.
* input edata_t *s are touched.
*/
extent_split_interior_cant_alloc,
/*
* In a potentially invalid state. Must leak (if *to_leak is non-NULL),
* and salvage what's still salvageable (if *to_salvage is non-NULL).
* None of lead, extent, or trail are valid.
* None of lead, edata, or trail are valid.
*/
extent_split_interior_error
} extent_split_interior_result_t;
static extent_split_interior_result_t
extent_split_interior(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
/* The result of splitting, in case of success. */
extent_t **extent, extent_t **lead, extent_t **trail,
edata_t **edata, edata_t **lead, edata_t **trail,
/* The mess to clean up, in case of error. */
extent_t **to_leak, extent_t **to_salvage,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, bool growing_retained) {
size_t esize = size + pad;
size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
assert(new_addr == NULL || leadsize == 0);
if (extent_size_get(*extent) < leadsize + esize) {
edata_t **to_leak, edata_t **to_salvage,
edata_t *expand_edata, size_t size, size_t alignment) {
size_t leadsize = ALIGNMENT_CEILING((uintptr_t)edata_base_get(*edata),
PAGE_CEILING(alignment)) - (uintptr_t)edata_base_get(*edata);
assert(expand_edata == NULL || leadsize == 0);
if (edata_size_get(*edata) < leadsize + size) {
return extent_split_interior_cant_alloc;
}
size_t trailsize = extent_size_get(*extent) - leadsize - esize;
size_t trailsize = edata_size_get(*edata) - leadsize - size;
*lead = NULL;
*trail = NULL;
@@ -1001,11 +494,11 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
/* Split the lead. */
if (leadsize != 0) {
*lead = *extent;
*extent = extent_split_impl(tsdn, arena, r_extent_hooks,
*lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
slab, growing_retained);
if (*extent == NULL) {
assert(!edata_guarded_get(*edata));
*lead = *edata;
*edata = extent_split_impl(tsdn, pac, ehooks, *lead, leadsize,
size + trailsize, /* holding_core_locks*/ true);
if (*edata == NULL) {
*to_leak = *lead;
*lead = NULL;
return extent_split_interior_error;
@@ -1014,36 +507,18 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
/* Split the trail. */
if (trailsize != 0) {
*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
esize, szind, slab, trailsize, SC_NSIZES, false,
growing_retained);
assert(!edata_guarded_get(*edata));
*trail = extent_split_impl(tsdn, pac, ehooks, *edata, size,
trailsize, /* holding_core_locks */ true);
if (*trail == NULL) {
*to_leak = *extent;
*to_leak = *edata;
*to_salvage = *lead;
*lead = NULL;
*extent = NULL;
*edata = NULL;
return extent_split_interior_error;
}
}
if (leadsize == 0 && trailsize == 0) {
/*
* Splitting causes szind to be set as a side effect, but no
* splitting occurred.
*/
extent_szind_set(*extent, szind);
if (szind != SC_NSIZES) {
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_addr_get(*extent), szind, slab);
if (slab && extent_size_get(*extent) > PAGE) {
rtree_szind_slab_update(tsdn, &extents_rtree,
rtree_ctx,
(uintptr_t)extent_past_get(*extent) -
(uintptr_t)PAGE, szind, slab);
}
}
}
return extent_split_interior_ok;
}
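/*
 * Illustrative standalone sketch (not part of this patch): extent_split_interior()
 * above carves one big range into an optional lead (to reach the alignment),
 * the allocation itself, and an optional trail; lead and trail go back to the
 * cache. The arithmetic below mirrors that leadsize/trailsize computation on a
 * plain [base, base+total) range; ALIGN_UP and split_plan_t are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

/* alignment must be a power of two. */
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

typedef struct {
    size_t leadsize;   /* bytes before the aligned allocation */
    size_t trailsize;  /* bytes after the allocation */
} split_plan_t;

/* Plan a lead/alloc/trail split; returns false if the range is too small. */
static bool
plan_split(uintptr_t base, size_t total, size_t size, size_t alignment,
    split_plan_t *plan) {
    uintptr_t aligned = ALIGN_UP(base, alignment);
    plan->leadsize = (size_t)(aligned - base);
    if (total < plan->leadsize + size) {
        return false;  /* Cannot satisfy the request. */
    }
    plan->trailsize = total - plan->leadsize - size;
    return true;
}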
@@ -1051,42 +526,43 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
* This fulfills the indicated allocation request out of the given extent (which
* the caller should have ensured was big enough). If there's any unused space
* before or after the resulting allocation, that space is given its own extent
* and put back into extents.
* and put back into ecache.
*/
static extent_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, extent_t *extent, bool growing_retained) {
extent_t *lead;
extent_t *trail;
extent_t *to_leak;
extent_t *to_salvage;
static edata_t *
extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
edata_t *edata, bool growing_retained) {
assert(!edata_guarded_get(edata) || size == edata_size_get(edata));
malloc_mutex_assert_owner(tsdn, &ecache->mtx);
edata_t *lead;
edata_t *trail;
edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);
extent_split_interior_result_t result = extent_split_interior(
tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
&to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
growing_retained);
tsdn, pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage,
expand_edata, size, alignment);
if (!maps_coalesce && result != extent_split_interior_ok
&& !opt_retain) {
/*
* Split isn't supported (implies Windows w/o retain). Avoid
* leaking the extents.
* leaking the extent.
*/
assert(to_leak != NULL && lead == NULL && trail == NULL);
extent_deactivate(tsdn, arena, extents, to_leak);
extent_deactivate_locked(tsdn, pac, ecache, to_leak);
return NULL;
}
if (result == extent_split_interior_ok) {
if (lead != NULL) {
extent_deactivate(tsdn, arena, extents, lead);
extent_deactivate_locked(tsdn, pac, ecache, lead);
}
if (trail != NULL) {
extent_deactivate(tsdn, arena, extents, trail);
extent_deactivate_locked(tsdn, pac, ecache, trail);
}
return extent;
return edata;
} else {
/*
* We should have picked an extent that was large enough to
@@ -1094,294 +570,144 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
*/
assert(result == extent_split_interior_error);
if (to_salvage != NULL) {
extent_deregister(tsdn, to_salvage);
extent_deregister(tsdn, pac, to_salvage);
}
if (to_leak != NULL) {
void *leak = extent_base_get(to_leak);
extent_deregister_no_gdump_sub(tsdn, to_leak);
extents_abandon_vm(tsdn, arena, r_extent_hooks, extents,
to_leak, growing_retained);
assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
false) == NULL);
extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
/*
* May go down the purge path (which assume no ecache
* locks). Only happens with OOM caused split failures.
*/
malloc_mutex_unlock(tsdn, &ecache->mtx);
extents_abandon_vm(tsdn, pac, ehooks, ecache, to_leak,
growing_retained);
malloc_mutex_lock(tsdn, &ecache->mtx);
}
return NULL;
}
unreachable();
}
static bool
extent_need_manual_zero(arena_t *arena) {
/*
* Need to manually zero the extent on repopulating if either: 1)
* non-default extent hooks are installed (in which case the purge
* semantics may change); or 2) transparent huge pages are enabled.
*/
return (!arena_has_default_hooks(arena) ||
(opt_thp == thp_mode_always));
}
/*
* Tries to satisfy the given allocation request by reusing one of the extents
* in the given extents_t.
* in the given ecache_t.
*/
static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
bool growing_retained) {
static edata_t *
extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
edata_t *expand_edata, size_t size, size_t alignment, bool zero,
bool *commit, bool growing_retained, bool guarded) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
assert(new_addr == NULL || !slab);
assert(pad == 0 || !slab);
assert(!*zero || !slab);
assert(!guarded || expand_edata == NULL);
assert(!guarded || alignment <= PAGE);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
malloc_mutex_lock(tsdn, &ecache->mtx);
extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
rtree_ctx, extents, new_addr, size, pad, alignment, slab,
growing_retained);
if (extent == NULL) {
edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache,
expand_edata, size, alignment, guarded);
if (edata == NULL) {
malloc_mutex_unlock(tsdn, &ecache->mtx);
return NULL;
}
extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
extents, new_addr, size, pad, alignment, slab, szind, extent,
growing_retained);
if (extent == NULL) {
edata = extent_recycle_split(tsdn, pac, ehooks, ecache, expand_edata,
size, alignment, edata, growing_retained);
malloc_mutex_unlock(tsdn, &ecache->mtx);
if (edata == NULL) {
return NULL;
}
if (*commit && !extent_committed_get(extent)) {
if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
0, extent_size_get(extent), growing_retained)) {
extent_record(tsdn, arena, r_extent_hooks, extents,
extent, growing_retained);
return NULL;
}
if (!extent_need_manual_zero(arena)) {
extent_zeroed_set(extent, true);
}
assert(edata_state_get(edata) == extent_state_active);
if (extent_commit_zero(tsdn, ehooks, edata, *commit, zero,
growing_retained)) {
extent_record(tsdn, pac, ehooks, ecache, edata);
return NULL;
}
if (extent_committed_get(extent)) {
if (edata_committed_get(edata)) {
/*
* This flips the role of this variable: it came in as an input
* parameter (the caller's commit request) and goes out as an
* output parameter, reporting whether the edata has actually
* been committed.
*/
*commit = true;
}
if (extent_zeroed_get(extent)) {
*zero = true;
}
if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
}
assert(extent_state_get(extent) == extent_state_active);
if (slab) {
extent_slab_set(extent, slab);
extent_interior_register(tsdn, rtree_ctx, extent, szind);
}
if (*zero) {
void *addr = extent_base_get(extent);
if (!extent_zeroed_get(extent)) {
size_t size = extent_size_get(extent);
if (extent_need_manual_zero(arena) ||
pages_purge_forced(addr, size)) {
memset(addr, 0, size);
}
} else if (config_debug) {
size_t *p = (size_t *)(uintptr_t)addr;
/* Check the first page only. */
for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
assert(p[i] == 0);
}
}
}
return extent;
return edata;
}
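/*
 * Illustrative standalone sketch (not part of this patch): in extent_recycle()
 * above, *commit goes in as "the caller wants committed memory" and comes back
 * out as "the memory really is committed" (a recycled extent may already be
 * committed even if the caller did not ask). The helper below shows that
 * in/out convention with a hypothetical commit callback.
 */
#include <stdbool.h>
#include <stddef.h>

typedef bool (*commit_fn_t)(void *addr, size_t size);  /* returns true on error */

/* Ensure the range matches the caller's commit request; update *commit. */
static bool
ensure_committed(void *addr, size_t size, bool already_committed,
    bool *commit, commit_fn_t commit_cb) {
    if (*commit && !already_committed) {
        if (commit_cb(addr, size)) {
            return true;  /* Commit failed; caller must put the extent back. */
        }
        already_committed = true;
    }
    /* Report the final state, even if the caller did not ask for commit. */
    if (already_committed) {
        *commit = true;
    }
    return false;
}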
/*
* If the caller specifies (!*zero), it is still possible to receive zeroed
* memory, in which case *zero is toggled to true. arena_extent_alloc() takes
* advantage of this to avoid demanding zeroed extents, but taking advantage of
* them if they are returned.
* If virtual memory is retained, create increasingly larger extents from which
* to split requested extents in order to limit the total number of disjoint
* virtual memory ranges retained by each shard.
*/
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
void *ret;
static edata_t *
extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
size_t size, size_t alignment, bool zero, bool *commit) {
malloc_mutex_assert_owner(tsdn, &pac->grow_mtx);
assert(size != 0);
assert(alignment != 0);
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
return ret;
}
/* mmap. */
if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
!= NULL) {
return ret;
}
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
return ret;
}
/* All strategies for allocation failed. */
return NULL;
}
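/*
 * Illustrative standalone sketch (not part of this patch): extent_alloc_core()
 * above tries its backends in an order controlled by the dss precedence:
 * sbrk-based allocation first when dss is "primary", mmap first otherwise,
 * with the other backend as fallback. The table-driven version below is a
 * hypothetical simplification with stubbed backends.
 */
#include <stddef.h>

typedef void *(*backend_fn_t)(size_t size);

static void *backend_dss(size_t size) { (void)size; return NULL; }   /* stub */
static void *backend_mmap(size_t size) { (void)size; return NULL; }  /* stub */

/* Try each backend in order; the order encodes the dss precedence. */
static void *
alloc_cascade(size_t size, int dss_primary) {
    backend_fn_t order[2];
    if (dss_primary) {
        order[0] = backend_dss;
        order[1] = backend_mmap;
    } else {
        order[0] = backend_mmap;
        order[1] = backend_dss;
    }
    for (int i = 0; i < 2; i++) {
        void *ret = order[i](size);
        if (ret != NULL) {
            return ret;
        }
    }
    return NULL;  /* All strategies for allocation failed. */
}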
static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit) {
void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
ATOMIC_RELAXED));
if (have_madvise_huge && ret) {
pages_set_thp_state(ret, size);
}
return ret;
}
static void *
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
tsdn_t *tsdn;
arena_t *arena;
tsdn = tsdn_fetch();
arena = arena_get(tsdn, arena_ind, false);
/*
* The arena we're allocating on behalf of must have been initialized
* already.
*/
assert(arena != NULL);
return extent_alloc_default_impl(tsdn, arena, new_addr, size,
ALIGNMENT_CEILING(alignment, PAGE), zero, commit);
}
static void
extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
/*
* The only legitimate case of customized extent hooks for a0 is
* hooks with no allocation activities. One such example is to
* place metadata on pre-allocated resources such as huge pages.
* In that case, rely on reentrancy_level checks to catch
* infinite recursions.
*/
pre_reentrancy(tsd, NULL);
} else {
pre_reentrancy(tsd, arena);
}
}
static void
extent_hook_post_reentrancy(tsdn_t *tsdn) {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
post_reentrancy(tsd);
}
/*
* If virtual memory is retained, create increasingly larger extents from which
* to split requested extents in order to limit the total number of disjoint
* virtual memory ranges retained by each arena.
*/
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
bool slab, szind_t szind, bool *zero, bool *commit) {
malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
assert(pad == 0 || !slab);
assert(!*zero || !slab);
size_t esize = size + pad;
size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size_min < esize) {
if (alloc_size_min < size) {
goto label_err;
}
/*
* Find the next extent size in the series that would be large enough to
* satisfy this request.
*/
pszind_t egn_skip = 0;
size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
while (alloc_size < alloc_size_min) {
egn_skip++;
if (arena->extent_grow_next + egn_skip >=
sz_psz2ind(SC_LARGE_MAXCLASS)) {
/* Outside legal range. */
goto label_err;
}
alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
size_t alloc_size;
pszind_t exp_grow_skip;
bool err = exp_grow_size_prepare(&pac->exp_grow, alloc_size_min,
&alloc_size, &exp_grow_skip);
if (err) {
goto label_err;
}
extent_t *extent = extent_alloc(tsdn, arena);
if (extent == NULL) {
edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
if (edata == NULL) {
goto label_err;
}
bool zeroed = false;
bool committed = false;
void *ptr;
if (*r_extent_hooks == &extent_hooks_default) {
ptr = extent_alloc_default_impl(tsdn, arena, NULL,
alloc_size, PAGE, &zeroed, &committed);
} else {
extent_hook_pre_reentrancy(tsdn, arena);
ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
alloc_size, PAGE, &zeroed, &committed,
arena_ind_get(arena));
extent_hook_post_reentrancy(tsdn);
}
void *ptr = ehooks_alloc(tsdn, ehooks, NULL, alloc_size, PAGE, &zeroed,
&committed);
extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
arena_extent_sn_next(arena), extent_state_active, zeroed,
committed, true, EXTENT_IS_HEAD);
if (ptr == NULL) {
extent_dalloc(tsdn, arena, extent);
edata_cache_put(tsdn, pac->edata_cache, edata);
goto label_err;
}
if (extent_register_no_gdump_add(tsdn, extent)) {
extent_dalloc(tsdn, arena, extent);
edata_init(edata, ecache_ind_get(&pac->ecache_retained), ptr,
alloc_size, false, SC_NSIZES, extent_sn_next(pac),
extent_state_active, zeroed, committed, EXTENT_PAI_PAC,
EXTENT_IS_HEAD);
if (extent_register_no_gdump_add(tsdn, pac, edata)) {
edata_cache_put(tsdn, pac->edata_cache, edata);
goto label_err;
}
if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
*zero = true;
}
if (extent_committed_get(extent)) {
if (edata_committed_get(edata)) {
*commit = true;
}
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
edata_t *lead;
edata_t *trail;
edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);
extent_t *lead;
extent_t *trail;
extent_t *to_leak;
extent_t *to_salvage;
extent_split_interior_result_t result = extent_split_interior(
tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
&to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
true);
extent_split_interior_result_t result = extent_split_interior(tsdn,
pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, NULL,
size, alignment);
if (result == extent_split_interior_ok) {
if (lead != NULL) {
extent_record(tsdn, arena, r_extent_hooks,
&arena->extents_retained, lead, true);
extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
lead);
}
if (trail != NULL) {
extent_record(tsdn, arena, r_extent_hooks,
&arena->extents_retained, trail, true);
extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
trail);
}
} else {
/*
@@ -1393,26 +719,32 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
if (config_prof) {
extent_gdump_add(tsdn, to_salvage);
}
extent_record(tsdn, arena, r_extent_hooks,
&arena->extents_retained, to_salvage, true);
extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
to_salvage);
}
if (to_leak != NULL) {
extent_deregister_no_gdump_sub(tsdn, to_leak);
extents_abandon_vm(tsdn, arena, r_extent_hooks,
&arena->extents_retained, to_leak, true);
extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
extents_abandon_vm(tsdn, pac, ehooks,
&pac->ecache_retained, to_leak, true);
}
goto label_err;
}
if (*commit && !extent_committed_get(extent)) {
if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
extent_size_get(extent), true)) {
extent_record(tsdn, arena, r_extent_hooks,
&arena->extents_retained, extent, true);
if (*commit && !edata_committed_get(edata)) {
if (extent_commit_impl(tsdn, ehooks, edata, 0,
edata_size_get(edata), true)) {
extent_record(tsdn, pac, ehooks,
&pac->ecache_retained, edata);
goto label_err;
}
if (!extent_need_manual_zero(arena)) {
extent_zeroed_set(extent, true);
/* A successful commit should return zeroed memory. */
if (config_debug) {
void *addr = edata_addr_get(edata);
size_t *p = (size_t *)(uintptr_t)addr;
/* Check the first page only. */
for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
assert(p[i] == 0);
}
}
}
@@ -1420,187 +752,74 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
* Increment extent_grow_next if doing so wouldn't exceed the allowed
* range.
*/
if (arena->extent_grow_next + egn_skip + 1 <=
arena->retain_grow_limit) {
arena->extent_grow_next += egn_skip + 1;
} else {
arena->extent_grow_next = arena->retain_grow_limit;
}
/* All opportunities for failure are past. */
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
exp_grow_size_commit(&pac->exp_grow, exp_grow_skip);
malloc_mutex_unlock(tsdn, &pac->grow_mtx);
if (config_prof) {
/* Adjust gdump stats now that extent is final size. */
extent_gdump_add(tsdn, extent);
extent_gdump_add(tsdn, edata);
}
if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
}
if (slab) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
&rtree_ctx_fallback);
extent_slab_set(extent, true);
extent_interior_register(tsdn, rtree_ctx, extent, szind);
if (zero && !edata_zeroed_get(edata)) {
ehooks_zero(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata));
}
if (*zero && !extent_zeroed_get(extent)) {
void *addr = extent_base_get(extent);
size_t size = extent_size_get(extent);
if (extent_need_manual_zero(arena) ||
pages_purge_forced(addr, size)) {
memset(addr, 0, size);
}
}
return extent;
return edata;
label_err:
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
malloc_mutex_unlock(tsdn, &pac->grow_mtx);
return NULL;
}
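/*
 * Illustrative standalone sketch (not part of this patch): extent_grow_retained()
 * above maps geometrically growing chunks so that the number of disjoint
 * retained mappings stays logarithmic in the total retained size, then bumps
 * the grow index after a successful allocation (capped at a limit). The sketch
 * uses plain powers of two instead of jemalloc's psz size classes; names are
 * hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct {
    unsigned lg_next;   /* log2 of the next grow size */
    unsigned lg_limit;  /* do not grow past 2^lg_limit */
} exp_grow_state_t;

/* Pick the first size in the series that covers min_size; false if none does. */
static bool
exp_grow_prepare(const exp_grow_state_t *g, size_t min_size,
    size_t *alloc_size, unsigned *skip) {
    for (unsigned lg = g->lg_next; lg <= g->lg_limit; lg++) {
        size_t sz = (size_t)1 << lg;
        if (sz >= min_size) {
            *alloc_size = sz;
            *skip = lg - g->lg_next;
            return true;
        }
    }
    return false;  /* Outside the legal range. */
}

/* After a successful grow, start the next search one step further along. */
static void
exp_grow_commit(exp_grow_state_t *g, unsigned skip) {
    unsigned next = g->lg_next + skip + 1;
    g->lg_next = next <= g->lg_limit ? next : g->lg_limit;
}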
static extent_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
static edata_t *
extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *expand_edata, size_t size, size_t alignment, bool zero,
bool *commit, bool guarded) {
assert(size != 0);
assert(alignment != 0);
malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
malloc_mutex_lock(tsdn, &pac->grow_mtx);
extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
&arena->extents_retained, new_addr, size, pad, alignment, slab,
szind, zero, commit, true);
if (extent != NULL) {
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
edata_t *edata = extent_recycle(tsdn, pac, ehooks,
&pac->ecache_retained, expand_edata, size, alignment, zero, commit,
/* growing_retained */ true, guarded);
if (edata != NULL) {
malloc_mutex_unlock(tsdn, &pac->grow_mtx);
if (config_prof) {
extent_gdump_add(tsdn, extent);
extent_gdump_add(tsdn, edata);
}
} else if (opt_retain && new_addr == NULL) {
extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
pad, alignment, slab, szind, zero, commit);
/* extent_grow_retained() always releases extent_grow_mtx. */
} else {
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
}
malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
return extent;
}
static extent_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
size_t esize = size + pad;
extent_t *extent = extent_alloc(tsdn, arena);
if (extent == NULL) {
return NULL;
}
void *addr;
size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
if (*r_extent_hooks == &extent_hooks_default) {
/* Call directly to propagate tsdn. */
addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
palignment, zero, commit);
} else if (opt_retain && expand_edata == NULL && !guarded) {
edata = extent_grow_retained(tsdn, pac, ehooks, size,
alignment, zero, commit);
/* extent_grow_retained() always releases pac->grow_mtx. */
} else {
extent_hook_pre_reentrancy(tsdn, arena);
addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
esize, palignment, zero, commit, arena_ind_get(arena));
extent_hook_post_reentrancy(tsdn);
}
if (addr == NULL) {
extent_dalloc(tsdn, arena, extent);
return NULL;
}
extent_init(extent, arena, addr, esize, slab, szind,
arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
true, EXTENT_NOT_HEAD);
if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
}
if (extent_register(tsdn, extent)) {
extent_dalloc(tsdn, arena, extent);
return NULL;
}
return extent;
}
extent_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extent_hooks_assure_initialized(arena, r_extent_hooks);
extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
new_addr, size, pad, alignment, slab, szind, zero, commit);
if (extent == NULL) {
if (opt_retain && new_addr != NULL) {
/*
* When retain is enabled and new_addr is set, we do not
* attempt extent_alloc_wrapper_hard which does mmap
* that is very unlikely to succeed (unless it happens
* to be at the end).
*/
return NULL;
}
extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
new_addr, size, pad, alignment, slab, szind, zero, commit);
malloc_mutex_unlock(tsdn, &pac->grow_mtx);
}
malloc_mutex_assert_not_owner(tsdn, &pac->grow_mtx);
assert(extent == NULL || extent_dumpable_get(extent));
return extent;
return edata;
}
static bool
extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
const extent_t *outer) {
assert(extent_arena_get(inner) == arena);
if (extent_arena_get(outer) != arena) {
return false;
}
assert(extent_state_get(inner) == extent_state_active);
if (extent_state_get(outer) != extents->state) {
return false;
}
if (extent_committed_get(inner) != extent_committed_get(outer)) {
return false;
}
return true;
}
static bool
extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
bool growing_retained) {
assert(extent_can_coalesce(arena, extents, inner, outer));
extent_activate_locked(tsdn, arena, extents, outer);
malloc_mutex_unlock(tsdn, &extents->mtx);
bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
forward ? inner : outer, forward ? outer : inner, growing_retained);
malloc_mutex_lock(tsdn, &extents->mtx);
extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
edata_t *inner, edata_t *outer, bool forward) {
extent_assert_can_coalesce(inner, outer);
eset_remove(&ecache->eset, outer);
bool err = extent_merge_impl(tsdn, pac, ehooks,
forward ? inner : outer, forward ? outer : inner,
/* holding_core_locks */ true);
if (err) {
extent_deactivate_locked(tsdn, arena, extents, outer);
extent_deactivate_check_state_locked(tsdn, pac, ecache, outer,
extent_state_merging);
}
return err;
}
static extent_t *
extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
extent_t *extent, bool *coalesced, bool growing_retained,
bool inactive_only) {
static edata_t *
extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *edata, bool *coalesced) {
assert(!edata_guarded_get(edata));
/*
* We avoid checking / locking inactive neighbors for large size
* classes, since they are eagerly coalesced on deallocation which can
@@ -1615,467 +834,333 @@ extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
again = false;
/* Try to coalesce forward. */
extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
extent_past_get(extent), inactive_only);
edata_t *next = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
edata, EXTENT_PAI_PAC, ecache->state, /* forward */ true);
if (next != NULL) {
/*
* extents->mtx only protects against races for
* like-state extents, so call extent_can_coalesce()
* before releasing next's pool lock.
*/
bool can_coalesce = extent_can_coalesce(arena, extents,
extent, next);
extent_unlock(tsdn, next);
if (can_coalesce && !extent_coalesce(tsdn, arena,
r_extent_hooks, extents, extent, next, true,
growing_retained)) {
if (extents->delay_coalesce) {
if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
next, true)) {
if (ecache->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
return extent;
return edata;
}
again = true;
}
}
/* Try to coalesce backward. */
extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
extent_before_get(extent), inactive_only);
edata_t *prev = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
edata, EXTENT_PAI_PAC, ecache->state, /* forward */ false);
if (prev != NULL) {
bool can_coalesce = extent_can_coalesce(arena, extents,
extent, prev);
extent_unlock(tsdn, prev);
if (can_coalesce && !extent_coalesce(tsdn, arena,
r_extent_hooks, extents, extent, prev, false,
growing_retained)) {
extent = prev;
if (extents->delay_coalesce) {
if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
prev, false)) {
edata = prev;
if (ecache->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
return extent;
return edata;
}
again = true;
}
}
} while (again);
if (extents->delay_coalesce) {
if (ecache->delay_coalesce) {
*coalesced = false;
}
return extent;
return edata;
}
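/*
 * Illustrative standalone sketch (not part of this patch): extent_try_coalesce_impl()
 * above repeatedly tries to merge a freed range with its forward and backward
 * neighbors until a full pass makes no progress (or stops after one merge when
 * coalescing is delayed). The address-ordered doubly linked free list below is
 * a hypothetical simplification of the emap/rtree neighbor lookups; merged
 * neighbors' metadata would be returned to a cache in real code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct free_range_s free_range_t;
struct free_range_s {
    uintptr_t base;
    size_t size;
    free_range_t *prev, *next;  /* neighbors in address order */
};

static bool
adjacent(const free_range_t *a, const free_range_t *b) {
    return a->base + a->size == b->base;
}

/* Merge r with its free neighbors; returns the surviving (leftmost) range. */
static free_range_t *
coalesce(free_range_t *r) {
    bool again;
    do {
        again = false;
        if (r->next != NULL && adjacent(r, r->next)) {  /* forward */
            free_range_t *n = r->next;
            r->size += n->size;
            r->next = n->next;
            if (n->next != NULL) n->next->prev = r;
            again = true;
        }
        if (r->prev != NULL && adjacent(r->prev, r)) {  /* backward */
            free_range_t *p = r->prev;
            p->size += r->size;
            p->next = r->next;
            if (r->next != NULL) r->next->prev = p;
            r = p;
            again = true;
        }
    } while (again);
    return r;
}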
static extent_t *
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
extent_t *extent, bool *coalesced, bool growing_retained) {
return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
extents, extent, coalesced, growing_retained, false);
static edata_t *
extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *edata, bool *coalesced) {
return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
coalesced);
}
static extent_t *
extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
extent_t *extent, bool *coalesced, bool growing_retained) {
return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
extents, extent, coalesced, growing_retained, true);
static edata_t *
extent_try_coalesce_large(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *edata, bool *coalesced) {
return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
coalesced);
}
/* Purge a single extent to retained / unmapped directly. */
static void
extent_maximally_purge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata) {
size_t extent_size = edata_size_get(edata);
extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
if (config_stats) {
/* Update stats accordingly. */
LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx);
locked_inc_u64(tsdn,
LOCKEDINT_MTX(*pac->stats_mtx),
&pac->stats->decay_dirty.nmadvise, 1);
locked_inc_u64(tsdn,
LOCKEDINT_MTX(*pac->stats_mtx),
&pac->stats->decay_dirty.purged,
extent_size >> LG_PAGE);
LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx);
atomic_fetch_sub_zu(&pac->stats->pac_mapped, extent_size,
ATOMIC_RELAXED);
}
}
/*
* Does the metadata management portions of putting an unused extent into the
* given extents_t (coalesces, deregisters slab interiors, the heap operations).
* given ecache_t (coalesces and inserts into the eset).
*/
static void
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *extent, bool growing_retained) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
assert((extents_state_get(extents) != extent_state_dirty &&
extents_state_get(extents) != extent_state_muzzy) ||
!extent_zeroed_get(extent));
malloc_mutex_lock(tsdn, &extents->mtx);
extent_hooks_assure_initialized(arena, r_extent_hooks);
extent_szind_set(extent, SC_NSIZES);
if (extent_slab_get(extent)) {
extent_interior_deregister(tsdn, rtree_ctx, extent);
extent_slab_set(extent, false);
}
void
extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
edata_t *edata) {
assert((ecache->state != extent_state_dirty &&
ecache->state != extent_state_muzzy) ||
!edata_zeroed_get(edata));
assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent), true) == extent);
malloc_mutex_lock(tsdn, &ecache->mtx);
if (!extents->delay_coalesce) {
extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
rtree_ctx, extents, extent, NULL, growing_retained);
} else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
assert(extents == &arena->extents_dirty);
emap_assert_mapped(tsdn, pac->emap, edata);
if (edata_guarded_get(edata)) {
goto label_skip_coalesce;
}
if (!ecache->delay_coalesce) {
edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata,
NULL);
} else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
assert(ecache == &pac->ecache_dirty);
/* Always coalesce large extents eagerly. */
bool coalesced;
do {
assert(extent_state_get(extent) == extent_state_active);
extent = extent_try_coalesce_large(tsdn, arena,
r_extent_hooks, rtree_ctx, extents, extent,
&coalesced, growing_retained);
assert(edata_state_get(edata) == extent_state_active);
edata = extent_try_coalesce_large(tsdn, pac, ehooks,
ecache, edata, &coalesced);
} while (coalesced);
if (extent_size_get(extent) >= oversize_threshold) {
if (edata_size_get(edata) >=
atomic_load_zu(&pac->oversize_threshold, ATOMIC_RELAXED)
&& extent_may_force_decay(pac)) {
/* Shortcut to purge the oversize extent eagerly. */
malloc_mutex_unlock(tsdn, &extents->mtx);
arena_decay_extent(tsdn, arena, r_extent_hooks, extent);
malloc_mutex_unlock(tsdn, &ecache->mtx);
extent_maximally_purge(tsdn, pac, ehooks, edata);
return;
}
}
extent_deactivate_locked(tsdn, arena, extents, extent);
label_skip_coalesce:
extent_deactivate_locked(tsdn, pac, ecache, edata);
malloc_mutex_unlock(tsdn, &extents->mtx);
malloc_mutex_unlock(tsdn, &ecache->mtx);
}
void
extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
if (extent_register(tsdn, extent)) {
extent_dalloc(tsdn, arena, extent);
if (extent_register(tsdn, pac, edata)) {
edata_cache_put(tsdn, pac->edata_cache, edata);
return;
}
extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
}
static bool
extent_may_dalloc(void) {
/* With retain enabled, the default dalloc always fails. */
return !opt_retain;
}
static bool
extent_dalloc_default_impl(void *addr, size_t size) {
if (!have_dss || !extent_in_dss(addr)) {
return extent_dalloc_mmap(addr, size);
}
return true;
}
static bool
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool committed, unsigned arena_ind) {
return extent_dalloc_default_impl(addr, size);
}
static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent) {
extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata) {
bool err;
assert(extent_base_get(extent) != NULL);
assert(extent_size_get(extent) != 0);
assert(edata_base_get(edata) != NULL);
assert(edata_size_get(edata) != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extent_addr_set(extent, extent_base_get(extent));
edata_addr_set(edata, edata_base_get(edata));
extent_hooks_assure_initialized(arena, r_extent_hooks);
/* Try to deallocate. */
if (*r_extent_hooks == &extent_hooks_default) {
/* Call directly to propagate tsdn. */
err = extent_dalloc_default_impl(extent_base_get(extent),
extent_size_get(extent));
} else {
extent_hook_pre_reentrancy(tsdn, arena);
err = ((*r_extent_hooks)->dalloc == NULL ||
(*r_extent_hooks)->dalloc(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent),
extent_committed_get(extent), arena_ind_get(arena)));
extent_hook_post_reentrancy(tsdn);
}
err = ehooks_dalloc(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata), edata_committed_get(edata));
if (!err) {
extent_dalloc(tsdn, arena, extent);
edata_cache_put(tsdn, pac->edata_cache, edata);
}
return err;
}
edata_t *
extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
if (edata == NULL) {
return NULL;
}
size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment,
&zero, commit);
if (addr == NULL) {
edata_cache_put(tsdn, pac->edata_cache, edata);
return NULL;
}
edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr,
size, /* slab */ false, SC_NSIZES, extent_sn_next(pac),
extent_state_active, zero, *commit, EXTENT_PAI_PAC,
opt_retain ? EXTENT_IS_HEAD : EXTENT_NOT_HEAD);
/*
* Retained memory is not counted towards gdump; gdump is updated only
* when an extent is allocated as a separate mapping, i.e. when
* growing_retained is false.
*/
bool gdump_add = !growing_retained;
if (extent_register_impl(tsdn, pac, edata, gdump_add)) {
edata_cache_put(tsdn, pac->edata_cache, edata);
return NULL;
}
return edata;
}
void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent) {
assert(extent_dumpable_get(extent));
extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata) {
assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
/* Avoid calling the default extent_dalloc unless we have to. */
if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
if (!ehooks_dalloc_will_fail(ehooks)) {
/* Remove guard pages for dalloc / unmap. */
if (edata_guarded_get(edata)) {
assert(ehooks_are_default(ehooks));
san_unguard_pages_two_sided(tsdn, ehooks, edata,
pac->emap);
}
/*
* Deregister first to avoid a race with other allocating
* threads, and reregister if deallocation fails.
*/
extent_deregister(tsdn, extent);
if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks,
extent)) {
extent_deregister(tsdn, pac, edata);
if (!extent_dalloc_wrapper_try(tsdn, pac, ehooks, edata)) {
return;
}
extent_reregister(tsdn, extent);
extent_reregister(tsdn, pac, edata);
}
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_pre_reentrancy(tsdn, arena);
}
/* Try to decommit; purge if that fails. */
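/*
 * Decommitted or force-purged pages read back as zeros on next access, so
 * 'zeroed' can be set in those cases; lazily purged (or already muzzy)
 * pages may retain their old contents, so 'zeroed' stays false.
 */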
bool zeroed;
if (!extent_committed_get(extent)) {
if (!edata_committed_get(edata)) {
zeroed = true;
} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
0, extent_size_get(extent))) {
} else if (!extent_decommit_wrapper(tsdn, ehooks, edata, 0,
edata_size_get(edata))) {
zeroed = true;
} else if ((*r_extent_hooks)->purge_forced != NULL &&
!(*r_extent_hooks)->purge_forced(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0,
extent_size_get(extent), arena_ind_get(arena))) {
} else if (!ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata), 0, edata_size_get(edata))) {
zeroed = true;
} else if (extent_state_get(extent) == extent_state_muzzy ||
((*r_extent_hooks)->purge_lazy != NULL &&
!(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0,
extent_size_get(extent), arena_ind_get(arena)))) {
} else if (edata_state_get(edata) == extent_state_muzzy ||
!ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata), 0, edata_size_get(edata))) {
zeroed = false;
} else {
zeroed = false;
}
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_post_reentrancy(tsdn);
}
extent_zeroed_set(extent, zeroed);
edata_zeroed_set(edata, zeroed);
if (config_prof) {
extent_gdump_sub(tsdn, extent);
extent_gdump_sub(tsdn, edata);
}
extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
extent, false);
}
static void
extent_destroy_default_impl(void *addr, size_t size) {
if (!have_dss || !extent_in_dss(addr)) {
pages_unmap(addr, size);
}
}
static void
extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool committed, unsigned arena_ind) {
extent_destroy_default_impl(addr, size);
extent_record(tsdn, pac, ehooks, &pac->ecache_retained, edata);
}
void
extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent) {
assert(extent_base_get(extent) != NULL);
assert(extent_size_get(extent) != 0);
extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata) {
assert(edata_base_get(edata) != NULL);
assert(edata_size_get(edata) != 0);
extent_state_t state = edata_state_get(edata);
assert(state == extent_state_retained || state == extent_state_active);
assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
/* Deregister first to avoid a race with other allocating threads. */
extent_deregister(tsdn, extent);
extent_addr_set(extent, extent_base_get(extent));
extent_hooks_assure_initialized(arena, r_extent_hooks);
/* Try to destroy; silently fail otherwise. */
if (*r_extent_hooks == &extent_hooks_default) {
/* Call directly to propagate tsdn. */
extent_destroy_default_impl(extent_base_get(extent),
extent_size_get(extent));
} else if ((*r_extent_hooks)->destroy != NULL) {
extent_hook_pre_reentrancy(tsdn, arena);
(*r_extent_hooks)->destroy(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent),
extent_committed_get(extent), arena_ind_get(arena));
extent_hook_post_reentrancy(tsdn);
if (edata_guarded_get(edata)) {
assert(opt_retain);
san_unguard_pages_pre_destroy(tsdn, ehooks, edata, pac->emap);
}
edata_addr_set(edata, edata_base_get(edata));
extent_dalloc(tsdn, arena, extent);
}
/* Try to destroy; silently fail otherwise. */
ehooks_destroy(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata), edata_committed_get(edata));
static bool
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
edata_cache_put(tsdn, pac->edata_cache, edata);
}
static bool
extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained) {
extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
extent_hooks_assure_initialized(arena, r_extent_hooks);
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_pre_reentrancy(tsdn, arena);
}
bool err = ((*r_extent_hooks)->commit == NULL ||
(*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
extent_size_get(extent), offset, length, arena_ind_get(arena)));
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_post_reentrancy(tsdn);
}
extent_committed_set(extent, extent_committed_get(extent) || !err);
bool err = ehooks_commit(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata), offset, length);
edata_committed_set(edata, edata_committed_get(edata) || !err);
return err;
}
bool
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length) {
return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
length, false);
}
static bool
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length) {
return extent_commit_impl(tsdn, ehooks, edata, offset, length,
/* growing_retained */ false);
}
bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length) {
extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
extent_hooks_assure_initialized(arena, r_extent_hooks);
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_pre_reentrancy(tsdn, arena);
}
bool err = ((*r_extent_hooks)->decommit == NULL ||
(*r_extent_hooks)->decommit(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), offset, length,
arena_ind_get(arena)));
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_post_reentrancy(tsdn);
}
extent_committed_set(extent, extent_committed_get(extent) && err);
bool err = ehooks_decommit(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata), offset, length);
edata_committed_set(edata, edata_committed_get(edata) && err);
return err;
}
#ifdef PAGES_CAN_PURGE_LAZY
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
assert(addr != NULL);
assert((offset & PAGE_MASK) == 0);
assert(length != 0);
assert((length & PAGE_MASK) == 0);
return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
#endif
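/*
 * Hypothetical sketch of a matching user purge_lazy hook, assuming a Linux
 * target where MADV_FREE is available. Returning false reports that
 * [addr + offset, addr + offset + length) was lazily purged; returning true
 * makes jemalloc treat the pages as not purged.
 */
#include <stdbool.h>
#include <stdint.h>
#include <sys/mman.h>

static bool
example_extent_purge_lazy(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
	(void)extent_hooks; (void)size; (void)arena_ind;
	return madvise((void *)((uintptr_t)addr + offset), length,
	    MADV_FREE) != 0;
}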
static bool
extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained) {
extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
extent_hooks_assure_initialized(arena, r_extent_hooks);
if ((*r_extent_hooks)->purge_lazy == NULL) {
return true;
}
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_pre_reentrancy(tsdn, arena);
}
bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), offset, length,
arena_ind_get(arena));
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_post_reentrancy(tsdn);
}
bool err = ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata), offset, length);
return err;
}
bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length) {
return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
offset, length, false);
}
#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t offset, size_t length, unsigned arena_ind) {
assert(addr != NULL);
assert((offset & PAGE_MASK) == 0);
assert(length != 0);
assert((length & PAGE_MASK) == 0);
return pages_purge_forced((void *)((uintptr_t)addr +
(uintptr_t)offset), length);
extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length) {
return extent_purge_lazy_impl(tsdn, ehooks, edata, offset,
length, false);
}
#endif
static bool
extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained) {
extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
extent_hooks_assure_initialized(arena, r_extent_hooks);
if ((*r_extent_hooks)->purge_forced == NULL) {
return true;
}
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_pre_reentrancy(tsdn, arena);
}
bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), offset, length,
arena_ind_get(arena));
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_post_reentrancy(tsdn);
}
bool err = ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata), offset, length);
return err;
}
bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length) {
return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
offset, length, false);
}
static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
if (!maps_coalesce) {
/*
* Without retain, only whole regions can be purged (required by
* MEM_RELEASE on Windows) -- therefore disallow splitting. See
* comments in extent_head_no_merge().
*/
return !opt_retain;
}
return false;
extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length) {
return extent_purge_forced_impl(tsdn, ehooks, edata, offset, length,
false);
}
/*
@@ -2085,183 +1170,95 @@ extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
* with the trail (the higher addressed portion). This makes 'extent' the lead,
* and returns the trail (except in case of error).
*/
static extent_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
bool growing_retained) {
assert(extent_size_get(extent) == size_a + size_b);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
extent_hooks_assure_initialized(arena, r_extent_hooks);
static edata_t *
extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks) {
assert(edata_size_get(edata) == size_a + size_b);
/* Only the shrink path may split w/o holding core locks. */
if (holding_core_locks) {
witness_assert_positive_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
} else {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
}
if ((*r_extent_hooks)->split == NULL) {
if (ehooks_split_will_fail(ehooks)) {
return NULL;
}
extent_t *trail = extent_alloc(tsdn, arena);
edata_t *trail = edata_cache_get(tsdn, pac->edata_cache);
if (trail == NULL) {
goto label_error_a;
}
extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
extent_state_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent), extent_dumpable_get(extent),
EXTENT_NOT_HEAD);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
{
extent_t lead;
extent_init(&lead, arena, extent_addr_get(extent), size_a,
slab_a, szind_a, extent_sn_get(extent),
extent_state_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent), extent_dumpable_get(extent),
EXTENT_NOT_HEAD);
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
true, &lead_elm_a, &lead_elm_b);
}
rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
&trail_elm_a, &trail_elm_b);
if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
|| trail_elm_b == NULL) {
edata_init(trail, edata_arena_ind_get(edata),
(void *)((uintptr_t)edata_base_get(edata) + size_a), size_b,
/* slab */ false, SC_NSIZES, edata_sn_get(edata),
edata_state_get(edata), edata_zeroed_get(edata),
edata_committed_get(edata), EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
emap_prepare_t prepare;
bool err = emap_split_prepare(tsdn, pac->emap, &prepare, edata,
size_a, trail, size_b);
if (err) {
goto label_error_b;
}
extent_lock2(tsdn, extent, trail);
/*
* No need to acquire trail or edata, because: 1) trail was new (just
* allocated); and 2) edata is either an active allocation (the shrink
* path), or in an acquired state (extracted from the ecache on the
* extent_recycle_split path).
*/
assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
assert(emap_edata_is_acquired(tsdn, pac->emap, trail));
err = ehooks_split(tsdn, ehooks, edata_base_get(edata), size_a + size_b,
size_a, size_b, edata_committed_get(edata));
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_pre_reentrancy(tsdn, arena);
}
bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
size_a + size_b, size_a, size_b, extent_committed_get(extent),
arena_ind_get(arena));
if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_post_reentrancy(tsdn);
}
if (err) {
goto label_error_c;
goto label_error_b;
}
extent_size_set(extent, size_a);
extent_szind_set(extent, szind_a);
extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
szind_a, slab_a);
extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
szind_b, slab_b);
extent_unlock2(tsdn, extent, trail);
edata_size_set(edata, size_a);
emap_split_commit(tsdn, pac->emap, &prepare, edata, size_a, trail,
size_b);
return trail;
label_error_c:
extent_unlock2(tsdn, extent, trail);
label_error_b:
extent_dalloc(tsdn, arena, trail);
edata_cache_put(tsdn, pac->edata_cache, trail);
label_error_a:
return NULL;
}
extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
szind_a, slab_a, size_b, szind_b, slab_b, false);
edata_t *
extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata,
size_t size_a, size_t size_b, bool holding_core_locks) {
return extent_split_impl(tsdn, pac, ehooks, edata, size_a, size_b,
holding_core_locks);
}
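/*
 * Worked example with hypothetical sizes: splitting an 8 MiB extent with
 * size_a = 6 MiB and size_b = 2 MiB leaves 'edata' as the lead covering
 * [base, base + 6 MiB) and returns a freshly initialized trail covering
 * [base + 6 MiB, base + 8 MiB); on any failure NULL is returned and the
 * original extent is left unchanged.
 */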
static bool
extent_merge_default_impl(void *addr_a, void *addr_b) {
if (!maps_coalesce && !opt_retain) {
return true;
}
if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
return true;
}
return false;
}
/*
* Returns true if the given extents can't be merged because of their head bit
* settings. Assumes the second extent has the higher address.
*/
static bool
extent_head_no_merge(extent_t *a, extent_t *b) {
assert(extent_base_get(a) < extent_base_get(b));
/*
* When coalesce is not always allowed (Windows), only merge extents
* from the same VirtualAlloc region under opt.retain (in which case
* MEM_DECOMMIT is utilized for purging).
*/
if (maps_coalesce) {
return false;
}
if (!opt_retain) {
return true;
}
/* If b is a head extent, disallow the cross-region merge. */
if (extent_is_head_get(b)) {
/*
* Additionally, sn should not overflow with retain; sanity
* check that different regions have unique sn.
*/
assert(extent_sn_comp(a, b) != 0);
return true;
}
assert(extent_sn_comp(a, b) == 0);
return false;
}
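/*
 * In table form, extent_head_no_merge() resolves to:
 *   maps_coalesce                 -> mergeable (false)
 *   !maps_coalesce && !opt_retain -> never merge (true)
 *   !maps_coalesce && opt_retain  -> merge only when 'b' is not a head extent
 */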
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
if (!maps_coalesce) {
tsdn_t *tsdn = tsdn_fetch();
extent_t *a = iealloc(tsdn, addr_a);
extent_t *b = iealloc(tsdn, addr_b);
if (extent_head_no_merge(a, b)) {
return true;
}
extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a,
edata_t *b, bool holding_core_locks) {
/* Only the expanding path may merge w/o holding ecache locks. */
if (holding_core_locks) {
witness_assert_positive_depth_to_rank(
tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
} else {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
}
return extent_merge_default_impl(addr_a, addr_b);
}
static bool
extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
assert(extent_base_get(a) < extent_base_get(b));
extent_hooks_assure_initialized(arena, r_extent_hooks);
if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) {
return true;
}
assert(edata_base_get(a) < edata_base_get(b));
assert(edata_arena_ind_get(a) == edata_arena_ind_get(b));
assert(edata_arena_ind_get(a) == ehooks_ind_get(ehooks));
emap_assert_mapped(tsdn, pac->emap, a);
emap_assert_mapped(tsdn, pac->emap, b);
bool err;
if (*r_extent_hooks == &extent_hooks_default) {
/* Call directly to propagate tsdn. */
err = extent_merge_default_impl(extent_base_get(a),
extent_base_get(b));
} else {
extent_hook_pre_reentrancy(tsdn, arena);
err = (*r_extent_hooks)->merge(*r_extent_hooks,
extent_base_get(a), extent_size_get(a), extent_base_get(b),
extent_size_get(b), extent_committed_get(a),
arena_ind_get(arena));
extent_hook_post_reentrancy(tsdn);
}
bool err = ehooks_merge(tsdn, ehooks, edata_base_get(a),
edata_size_get(a), edata_base_get(b), edata_size_get(b),
edata_committed_get(a));
if (err) {
return true;
@@ -2272,132 +1269,58 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
* owned, so the following code uses decomposed helper functions rather
* than extent_{,de}register() to do things in the right order.
*/
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
&a_elm_b);
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
&b_elm_b);
extent_lock2(tsdn, a, b);
if (a_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
SC_NSIZES, false);
}
if (b_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
SC_NSIZES, false);
} else {
b_elm_b = b_elm_a;
}
extent_size_set(a, extent_size_get(a) + extent_size_get(b));
extent_szind_set(a, SC_NSIZES);
extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
extent_sn_get(a) : extent_sn_get(b));
extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
emap_prepare_t prepare;
emap_merge_prepare(tsdn, pac->emap, &prepare, a, b);
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
false);
assert(edata_state_get(a) == extent_state_active ||
edata_state_get(a) == extent_state_merging);
edata_state_set(a, extent_state_active);
edata_size_set(a, edata_size_get(a) + edata_size_get(b));
edata_sn_set(a, (edata_sn_get(a) < edata_sn_get(b)) ?
edata_sn_get(a) : edata_sn_get(b));
edata_zeroed_set(a, edata_zeroed_get(a) && edata_zeroed_get(b));
extent_unlock2(tsdn, a, b);
emap_merge_commit(tsdn, pac->emap, &prepare, a, b);
extent_dalloc(tsdn, extent_arena_get(b), b);
edata_cache_put(tsdn, pac->edata_cache, b);
return false;
}
bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *a, edata_t *b) {
return extent_merge_impl(tsdn, pac, ehooks, a, b,
/* holding_core_locks */ false);
}
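/*
 * After a successful merge 'a' absorbs 'b': its size grows by
 * edata_size_get(b), it keeps the smaller serial number, and it is only
 * considered zeroed if both inputs were; 'b' itself is returned to the
 * edata cache and must not be used afterwards.
 */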
bool
extent_boot(void) {
if (rtree_new(&extents_rtree, true)) {
return true;
}
extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
bool commit, bool zero, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
WITNESS_RANK_EXTENT_POOL)) {
return true;
if (commit && !edata_committed_get(edata)) {
if (extent_commit_impl(tsdn, ehooks, edata, 0,
edata_size_get(edata), growing_retained)) {
return true;
}
}
if (have_dss) {
extent_dss_boot();
if (zero && !edata_zeroed_get(edata)) {
void *addr = edata_base_get(edata);
size_t size = edata_size_get(edata);
ehooks_zero(tsdn, ehooks, addr, size);
}
return false;
}
void
extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
size_t *nfree, size_t *nregs, size_t *size) {
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
const extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(extent == NULL)) {
*nfree = *nregs = *size = 0;
return;
}
*size = extent_size_get(extent);
if (!extent_slab_get(extent)) {
*nfree = 0;
*nregs = 1;
} else {
*nfree = extent_nfree_get(extent);
*nregs = bin_infos[extent_szind_get(extent)].nregs;
assert(*nfree <= *nregs);
assert(*nfree * extent_usize_get(extent) <= *size);
}
}
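/*
 * For example (hypothetical slab geometry): a pointer into an 8 KiB slab
 * holding 32 regions of 256 bytes with 5 of them free yields *size == 8192,
 * *nregs == 32 and *nfree == 5, while a pointer to a large (non-slab)
 * allocation yields *nregs == 1 and *nfree == 0 with *size equal to the
 * extent size.
 */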
void
extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
size_t *nfree, size_t *nregs, size_t *size,
size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) {
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
&& bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
const extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(extent == NULL)) {
*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
*slabcur_addr = NULL;
return;
}
bool
extent_boot(void) {
assert(sizeof(slab_data_t) >= sizeof(e_prof_info_t));
*size = extent_size_get(extent);
if (!extent_slab_get(extent)) {
*nfree = *bin_nfree = *bin_nregs = 0;
*nregs = 1;
*slabcur_addr = NULL;
return;
if (have_dss) {
extent_dss_boot();
}
*nfree = extent_nfree_get(extent);
const szind_t szind = extent_szind_get(extent);
*nregs = bin_infos[szind].nregs;
assert(*nfree <= *nregs);
assert(*nfree * extent_usize_get(extent) <= *size);
const arena_t *arena = extent_arena_get(extent);
assert(arena != NULL);
const unsigned binshard = extent_binshard_get(extent);
bin_t *bin = &arena->bins[szind].bin_shards[binshard];
malloc_mutex_lock(tsdn, &bin->lock);
if (config_stats) {
*bin_nregs = *nregs * bin->stats.curslabs;
assert(*bin_nregs >= bin->stats.curregs);
*bin_nfree = *bin_nregs - bin->stats.curregs;
} else {
*bin_nfree = *bin_nregs = 0;
}
*slabcur_addr = extent_addr_get(bin->slabcur);
assert(*slabcur_addr != NULL);
malloc_mutex_unlock(tsdn, &bin->lock);
return false;
}
#define JEMALLOC_EXTENT_DSS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -109,7 +108,7 @@ extent_dss_max_update(void *new_addr) {
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit) {
extent_t *gap;
edata_t *gap;
cassert(have_dss);
assert(size > 0);
@@ -123,7 +122,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
return NULL;
}
gap = extent_alloc(tsdn, arena);
gap = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
if (gap == NULL) {
return NULL;
}
@@ -141,6 +140,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
goto label_oom;
}
bool head_state = opt_retain ? EXTENT_IS_HEAD :
EXTENT_NOT_HEAD;
/*
* Compute how much page-aligned gap space (if any) is
* necessary to satisfy alignment. This space can be
@@ -153,11 +154,12 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t gap_size_page = (uintptr_t)ret -
(uintptr_t)gap_addr_page;
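/*
 * For instance (hypothetical addresses): with 4 KiB pages and a 2 MiB
 * alignment request, a page-aligned dss break at ...001000 gives
 * gap_addr_page == ...001000, ret == ...200000 (the next 2 MiB boundary)
 * and gap_size_page == 0x1ff000, which is recorded in 'gap' below.
 */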
if (gap_size_page != 0) {
extent_init(gap, arena, gap_addr_page,
gap_size_page, false, SC_NSIZES,
arena_extent_sn_next(arena),
extent_state_active, false, true, true,
EXTENT_NOT_HEAD);
edata_init(gap, arena_ind_get(arena),
gap_addr_page, gap_size_page, false,
SC_NSIZES, extent_sn_next(
&arena->pa_shard.pac),
extent_state_active, false, true,
EXTENT_PAI_PAC, head_state);
}
/*
* Compute the address just past the end of the desired
@@ -186,25 +188,29 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
extent_dss_extending_finish();
if (gap_size_page != 0) {
extent_dalloc_gap(tsdn, arena, gap);
ehooks_t *ehooks = arena_get_ehooks(
arena);
extent_dalloc_gap(tsdn,
&arena->pa_shard.pac, ehooks, gap);
} else {
extent_dalloc(tsdn, arena, gap);
edata_cache_put(tsdn,
&arena->pa_shard.edata_cache, gap);
}
if (!*commit) {
*commit = pages_decommit(ret, size);
}
if (*zero && *commit) {
extent_hooks_t *extent_hooks =
EXTENT_HOOKS_INITIALIZER;
extent_t extent;
edata_t edata = {0};
ehooks_t *ehooks = arena_get_ehooks(
arena);
extent_init(&extent, arena, ret, size,
edata_init(&edata,
arena_ind_get(arena), ret, size,
size, false, SC_NSIZES,
extent_state_active, false, true,
true, EXTENT_NOT_HEAD);
EXTENT_PAI_PAC, head_state);
if (extent_purge_forced_wrapper(tsdn,
arena, &extent_hooks, &extent, 0,
size)) {
ehooks, &edata, 0, size)) {
memset(ret, 0, size);
}
}
@@ -224,7 +230,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
}
label_oom:
extent_dss_extending_finish();
extent_dalloc(tsdn, arena, gap);
edata_cache_put(tsdn, &arena->pa_shard.edata_cache, gap);
return NULL;
}
#define JEMALLOC_EXTENT_MMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"