Commit a78e148b authored by antirez

jemalloc source added

parent 07486df6
/*
* This radix tree implementation is tailored to the singular purpose of
* tracking which chunks are currently owned by jemalloc. This functionality
* is mandatory for OS X, where jemalloc must be able to respond to object
* ownership queries.
*
*******************************************************************************
*/
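/*
* Editor's illustration (not part of the original source): the lookup loop
* below consumes the key from its most significant bits downward. Assuming
* 64-bit pointers (LG_SIZEOF_PTR == 3) and a hypothetical tree with
* level2bits[] = {16, 16, 16}:
*
*   level 0: subkey = (key << 0)  >> (64 - 16)  -> key bits 63..48
*   level 1: subkey = (key << 16) >> (64 - 16)  -> key bits 47..32
*   level 2: subkey = (key << 32) >> (64 - 16)  -> key bits 31..16
*
* The low 16 bits are never consumed; they correspond to offsets within a
* chunk, which is why the least significant bits of the key are ignored.
*/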
#ifdef JEMALLOC_H_TYPES
typedef struct rtree_s rtree_t;
/*
* Size of each radix tree node (must be a power of 2). This impacts tree
* depth.
*/
#if (LG_SIZEOF_PTR == 2)
# define RTREE_NODESIZE (1U << 14)
#else
# define RTREE_NODESIZE CACHELINE
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct rtree_s {
malloc_mutex_t mutex;
void **root;
unsigned height;
unsigned level2bits[1]; /* Dynamically sized. */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
rtree_t *rtree_new(unsigned bits);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
#ifndef JEMALLOC_DEBUG
void *rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
void *rtree_get(rtree_t *rtree, uintptr_t key);
bool rtree_set(rtree_t *rtree, uintptr_t key, void *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
#define RTREE_GET_GENERATE(f) \
/* The least significant bits of the key are ignored. */ \
JEMALLOC_INLINE void * \
f(rtree_t *rtree, uintptr_t key) \
{ \
void *ret; \
uintptr_t subkey; \
unsigned i, lshift, height, bits; \
void **node, **child; \
\
RTREE_LOCK(&rtree->mutex); \
for (i = lshift = 0, height = rtree->height, node = rtree->root;\
i < height - 1; \
i++, lshift += bits, node = child) { \
bits = rtree->level2bits[i]; \
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
3)) - bits); \
child = (void**)node[subkey]; \
if (child == NULL) { \
RTREE_UNLOCK(&rtree->mutex); \
return (NULL); \
} \
} \
\
/* \
* node is a leaf, so it contains values rather than node \
* pointers. \
*/ \
bits = rtree->level2bits[i]; \
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
bits); \
ret = node[subkey]; \
RTREE_UNLOCK(&rtree->mutex); \
\
RTREE_GET_VALIDATE \
return (ret); \
}
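/*
* Editor's note: under JEMALLOC_DEBUG, RTREE_GET_GENERATE is expanded twice
* below: first with real lock macros to produce rtree_get_locked(), then
* with no-op lock macros to produce rtree_get(), whose RTREE_GET_VALIDATE
* hook asserts that the unlocked result matches the locked one. Without
* JEMALLOC_DEBUG, only the unlocked rtree_get() is generated.
*/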
#ifdef JEMALLOC_DEBUG
# define RTREE_LOCK(l) malloc_mutex_lock(l)
# define RTREE_UNLOCK(l) malloc_mutex_unlock(l)
# define RTREE_GET_VALIDATE
RTREE_GET_GENERATE(rtree_get_locked)
# undef RTREE_LOCK
# undef RTREE_UNLOCK
# undef RTREE_GET_VALIDATE
#endif
#define RTREE_LOCK(l)
#define RTREE_UNLOCK(l)
#ifdef JEMALLOC_DEBUG
/*
* Suppose that it were possible for a jemalloc-allocated chunk to be
* munmap()ped, followed by a different allocator in another thread re-using
* overlapping virtual memory, all without invalidating the cached rtree
* value. The result would be a false positive (the rtree would claim that
* jemalloc owns memory that it had actually discarded). This scenario
* seems impossible, but the following assertion is a prudent sanity check.
*/
# define RTREE_GET_VALIDATE \
assert(rtree_get_locked(rtree, key) == ret);
#else
# define RTREE_GET_VALIDATE
#endif
RTREE_GET_GENERATE(rtree_get)
#undef RTREE_LOCK
#undef RTREE_UNLOCK
#undef RTREE_GET_VALIDATE
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, void *val)
{
uintptr_t subkey;
unsigned i, lshift, height, bits;
void **node, **child;
malloc_mutex_lock(&rtree->mutex);
for (i = lshift = 0, height = rtree->height, node = rtree->root;
i < height - 1;
i++, lshift += bits, node = child) {
bits = rtree->level2bits[i];
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
bits);
child = (void**)node[subkey];
if (child == NULL) {
child = (void**)base_alloc(sizeof(void *) <<
rtree->level2bits[i+1]);
if (child == NULL) {
malloc_mutex_unlock(&rtree->mutex);
return (true);
}
memset(child, 0, sizeof(void *) <<
rtree->level2bits[i+1]);
node[subkey] = child;
}
}
/* node is a leaf, so it contains values rather than node pointers. */
bits = rtree->level2bits[i];
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
node[subkey] = val;
malloc_mutex_unlock(&rtree->mutex);
return (false);
}
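#if 0
/*
* Editor's usage sketch (compiled out). The 44-bit key width and the chunk
* address are hypothetical; they model 64-bit pointers with 1MiB-aligned
* chunks (64 - 20 == 44 significant bits).
*/
static void
rtree_example(void)
{
rtree_t *rt = rtree_new(44);
void *chunk = (void *)(uintptr_t)0x7f1234500000;

if (rtree_set(rt, (uintptr_t)chunk, chunk))
return; /* true indicates allocation failure. */
assert(rtree_get(rt, (uintptr_t)chunk) == chunk);
rtree_set(rt, (uintptr_t)chunk, NULL); /* Chunk no longer owned. */
}
#endif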
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#define UMAX2S_BUFSIZE 65
#ifdef JEMALLOC_STATS
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct arena_stats_s arena_stats_t;
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
typedef struct chunk_stats_s chunk_stats_t;
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#ifdef JEMALLOC_STATS
#ifdef JEMALLOC_TCACHE
struct tcache_bin_stats_s {
/*
* Number of allocation requests that corresponded to the size of this
* bin.
*/
uint64_t nrequests;
};
#endif
struct malloc_bin_stats_s {
/*
* Current number of bytes allocated, including objects currently
* cached by tcache.
*/
size_t allocated;
/*
* Total number of allocation/deallocation requests served directly by
* the bin. Note that tcache may allocate an object, then recycle it
* many times, resulting in many increments to nrequests, but only one
* each to nmalloc and ndalloc.
*/
uint64_t nmalloc;
uint64_t ndalloc;
/*
* Number of allocation requests that correspond to the size of this
* bin. This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
uint64_t nrequests;
#ifdef JEMALLOC_TCACHE
/* Number of tcache fills from this bin. */
uint64_t nfills;
/* Number of tcache flushes to this bin. */
uint64_t nflushes;
#endif
/* Total number of runs created for this bin's size class. */
uint64_t nruns;
/*
* Total number of runs reused by extracting them from the runs tree for
* this bin's size class.
*/
uint64_t reruns;
/* High-water mark for this bin. */
size_t highruns;
/* Current number of runs in this bin. */
size_t curruns;
};
struct malloc_large_stats_s {
/*
* Total number of allocation/deallocation requests served directly by
* the arena. Note that tcache may allocate an object, then recycle it
* many times, resulting in many increments to nrequests, but only one
* each to nmalloc and ndalloc.
*/
uint64_t nmalloc;
uint64_t ndalloc;
/*
* Number of allocation requests that correspond to this size class.
* This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
uint64_t nrequests;
/* High-water mark for this size class. */
size_t highruns;
/* Current number of runs of this size class. */
size_t curruns;
};
struct arena_stats_s {
/* Number of bytes currently mapped. */
size_t mapped;
/*
* Total number of purge sweeps, total number of madvise calls made,
* and total pages purged in order to keep dirty unused memory under
* control.
*/
uint64_t npurge;
uint64_t nmadvise;
uint64_t purged;
/* Per-size-category statistics. */
size_t allocated_large;
uint64_t nmalloc_large;
uint64_t ndalloc_large;
uint64_t nrequests_large;
/*
* One element for each possible size class, including sizes that
* overlap with bin size classes. This is necessary because ipalloc()
* sometimes has to use such large objects in order to assure proper
* alignment.
*/
malloc_large_stats_t *lstats;
};
#endif /* JEMALLOC_STATS */
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
struct chunk_stats_s {
# ifdef JEMALLOC_STATS
/* Number of chunks that were allocated. */
uint64_t nchunks;
# endif
/* High-water mark for number of chunks allocated. */
size_t highchunks;
/*
* Current number of chunks allocated. This value isn't maintained for
* any other purpose; it is tracked solely so that highchunks can be
* updated.
*/
size_t curchunks;
};
#endif /* JEMALLOC_STATS || JEMALLOC_PROF */
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_stats_print;
#ifdef JEMALLOC_STATS
extern size_t stats_cactive;
#endif
char *u2s(uint64_t x, unsigned base, char *s);
#ifdef JEMALLOC_STATS
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_printf(const char *format, ...)
JEMALLOC_ATTR(format(printf, 1, 2));
#endif
void stats_print(void (*write)(void *, const char *), void *cbopaque,
const char *opts);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifdef JEMALLOC_STATS
#ifndef JEMALLOC_ENABLE_INLINE
size_t stats_cactive_get(void);
void stats_cactive_add(size_t size);
void stats_cactive_sub(size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
JEMALLOC_INLINE size_t
stats_cactive_get(void)
{
return (atomic_read_z(&stats_cactive));
}
JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{
atomic_add_z(&stats_cactive, size);
}
JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{
atomic_sub_z(&stats_cactive, size);
}
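/*
* Editor's note on usage (based on the arena code later in this commit):
* callers update stats_cactive only when an arena's active page count
* crosses a chunk-size multiple, so atomic traffic stays proportional to
* chunk-granularity changes. For example, with 4MiB chunks and 4KiB pages
* (1024 pages per chunk), growing nactive from 1020 to 1030 pages crosses
* one chunk boundary and results in a single stats_cactive_add(chunksize)
* call.
*/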
#endif
#endif /* JEMALLOC_STATS */
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#ifdef JEMALLOC_TCACHE
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
/*
* Absolute maximum number of cache slots for each small bin in the thread
* cache. This is an additional constraint beyond the default limit of
* twice the number of regions per run for this size class.
*
* This constant must be an even number.
*/
#define TCACHE_NSLOTS_SMALL_MAX 200
/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
/*
* (1U << opt_lg_tcache_gc_sweep) is the approximate number of allocation
* events between full GC sweeps (-1: disabled). Integer rounding may cause
* the actual number to be slightly higher, since GC is performed
* incrementally.
*/
#define LG_TCACHE_GC_SWEEP_DEFAULT 13
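/*
* Editor's example: with the default of 13, a full sweep happens roughly
* every 2^13 = 8192 allocation events. Each incremental step visits one
* bin, so each bin is inspected approximately every 8192 / nhbins events.
*/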
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/*
* Read-only information associated with each element of tcache_t's tbins array
* is stored separately, mainly to reduce memory usage.
*/
struct tcache_bin_info_s {
unsigned ncached_max; /* Upper limit on ncached. */
};
struct tcache_bin_s {
# ifdef JEMALLOC_STATS
tcache_bin_stats_t tstats;
# endif
int low_water; /* Min # cached since last GC. */
unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
unsigned ncached; /* # of cached objects. */
void **avail; /* Stack of available objects. */
};
struct tcache_s {
# ifdef JEMALLOC_STATS
ql_elm(tcache_t) link; /* Used for aggregating stats. */
# endif
# ifdef JEMALLOC_PROF
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */
# endif
arena_t *arena; /* This thread's arena. */
unsigned ev_cnt; /* Event count since incremental GC. */
unsigned next_gc_bin; /* Next bin to GC. */
tcache_bin_t tbins[1]; /* Dynamically sized. */
/*
* The pointer stacks associated with tbins follow as a contiguous
* array. During tcache initialization, the avail pointer in each
* element of tbins is initialized to point to the proper offset within
* this array.
*/
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;
extern ssize_t opt_lg_tcache_gc_sweep;
extern tcache_bin_info_t *tcache_bin_info;
/* Map of thread-specific caches. */
#ifndef NO_TLS
extern __thread tcache_t *tcache_tls
JEMALLOC_ATTR(tls_model("initial-exec"));
# define TCACHE_GET() tcache_tls
# define TCACHE_SET(v) do { \
tcache_tls = (tcache_t *)(v); \
pthread_setspecific(tcache_tsd, (void *)(v)); \
} while (0)
#else
# define TCACHE_GET() ((tcache_t *)pthread_getspecific(tcache_tsd))
# define TCACHE_SET(v) do { \
pthread_setspecific(tcache_tsd, (void *)(v)); \
} while (0)
#endif
extern pthread_key_t tcache_tsd;
/*
* Number of tcache bins. There are nbins small-object bins, plus 0 or more
* large-object bins.
*/
extern size_t nhbins;
/* Maximum cached size class. */
extern size_t tcache_maxclass;
/* Number of tcache allocation/deallocation events between incremental GCs. */
extern unsigned tcache_gc_incr;
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache_t *tcache
#endif
);
void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache_t *tcache
#endif
);
tcache_t *tcache_create(arena_t *arena);
void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
size_t binind);
void tcache_destroy(tcache_t *tcache);
#ifdef JEMALLOC_STATS
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
#endif
bool tcache_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void tcache_event(tcache_t *tcache);
tcache_t *tcache_get(void);
void *tcache_alloc_easy(tcache_bin_t *tbin);
void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void tcache_dalloc_small(tcache_t *tcache, void *ptr);
void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
JEMALLOC_INLINE tcache_t *
tcache_get(void)
{
tcache_t *tcache;
if ((isthreaded & opt_tcache) == false)
return (NULL);
tcache = TCACHE_GET();
if ((uintptr_t)tcache <= (uintptr_t)2) {
if (tcache == NULL) {
tcache = tcache_create(choose_arena());
if (tcache == NULL)
return (NULL);
} else {
if (tcache == (void *)(uintptr_t)1) {
/*
* Make a note that an allocator function was
* called after the tcache_thread_cleanup() was
* called.
*/
TCACHE_SET((uintptr_t)2);
}
return (NULL);
}
}
return (tcache);
}
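/*
* Editor's note on the sentinel values tested above (inferred from the
* code): tcache == NULL means no cache has been created yet; (uintptr_t)1
* means tcache_thread_cleanup() already ran; (uintptr_t)2 records that an
* allocator function ran after cleanup. Any value above 2 is a live
* tcache_t pointer. The bitwise & of the two bools in the threading test
* appears to be a deliberate branch-avoidance idiom rather than a typo.
*/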
JEMALLOC_INLINE void
tcache_event(tcache_t *tcache)
{
if (tcache_gc_incr == 0)
return;
tcache->ev_cnt++;
assert(tcache->ev_cnt <= tcache_gc_incr);
if (tcache->ev_cnt == tcache_gc_incr) {
size_t binind = tcache->next_gc_bin;
tcache_bin_t *tbin = &tcache->tbins[binind];
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
if (tbin->low_water > 0) {
/*
* Flush (ceiling) 3/4 of the objects below the low
* water mark.
*/
if (binind < nbins) {
tcache_bin_flush_small(tbin, binind,
tbin->ncached - tbin->low_water +
(tbin->low_water >> 2)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
} else {
tcache_bin_flush_large(tbin, binind,
tbin->ncached - tbin->low_water +
(tbin->low_water >> 2)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
}
/*
* Reduce fill count by 2X. Limit lg_fill_div such that
* the fill count is always at least 1.
*/
if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1))
>= 1)
tbin->lg_fill_div++;
} else if (tbin->low_water < 0) {
/*
* Increase fill count by 2X. Make sure lg_fill_div
* stays greater than 0.
*/
if (tbin->lg_fill_div > 1)
tbin->lg_fill_div--;
}
tbin->low_water = tbin->ncached;
tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins)
tcache->next_gc_bin = 0;
tcache->ev_cnt = 0;
}
}
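/*
* Editor's worked example for the flush arithmetic above: if ncached == 20
* and low_water == 8 at sweep time, the remainder passed to
* tcache_bin_flush_*() is 20 - 8 + (8 >> 2) == 14 objects, so 6 objects
* (3/4 of the 8 that sat unused below the low water mark) are returned to
* the arena.
*/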
JEMALLOC_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
void *ret;
if (tbin->ncached == 0) {
tbin->low_water = -1;
return (NULL);
}
tbin->ncached--;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
ret = tbin->avail[tbin->ncached];
return (ret);
}
JEMALLOC_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
void *ret;
size_t binind;
tcache_bin_t *tbin;
binind = SMALL_SIZE2BIN(size);
assert(binind < nbins);
tbin = &tcache->tbins[binind];
ret = tcache_alloc_easy(tbin);
if (ret == NULL) {
ret = tcache_alloc_small_hard(tcache, tbin, binind);
if (ret == NULL)
return (NULL);
}
assert(arena_salloc(ret) == arena_bin_info[binind].reg_size);
if (zero == false) {
#ifdef JEMALLOC_FILL
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
#endif
} else
memset(ret, 0, size);
#ifdef JEMALLOC_STATS
tbin->tstats.nrequests++;
#endif
#ifdef JEMALLOC_PROF
tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
#endif
tcache_event(tcache);
return (ret);
}
JEMALLOC_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
void *ret;
size_t binind;
tcache_bin_t *tbin;
size = PAGE_CEILING(size);
assert(size <= tcache_maxclass);
binind = nbins + (size >> PAGE_SHIFT) - 1;
assert(binind < nhbins);
tbin = &tcache->tbins[binind];
ret = tcache_alloc_easy(tbin);
if (ret == NULL) {
/*
* Only allocate one large object at a time, because it's quite
* expensive to create one and not use it.
*/
ret = arena_malloc_large(tcache->arena, size, zero);
if (ret == NULL)
return (NULL);
} else {
#ifdef JEMALLOC_PROF
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
PAGE_SHIFT);
chunk->map[pageind-map_bias].bits &= ~CHUNK_MAP_CLASS_MASK;
#endif
if (zero == false) {
#ifdef JEMALLOC_FILL
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
#endif
} else
memset(ret, 0, size);
#ifdef JEMALLOC_STATS
tbin->tstats.nrequests++;
#endif
#ifdef JEMALLOC_PROF
tcache->prof_accumbytes += size;
#endif
}
tcache_event(tcache);
return (ret);
}
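/*
* Editor's note: the large-bin index computed above maps page-multiple size
* classes densely after the small bins. Assuming PAGE_SHIFT == 12 (4KiB
* pages): size 4096 -> nbins + 0, size 8192 -> nbins + 1, size 12288 ->
* nbins + 2, and so on up to tcache_maxclass.
*/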
JEMALLOC_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr)
{
arena_t *arena;
arena_chunk_t *chunk;
arena_run_t *run;
arena_bin_t *bin;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
size_t pageind, binind;
arena_chunk_map_t *mapelm;
assert(arena_salloc(ptr) <= small_maxclass);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = chunk->arena;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
mapelm = &chunk->map[pageind-map_bias];
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
(mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
dassert(run->magic == ARENA_RUN_MAGIC);
bin = run->bin;
binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
sizeof(arena_bin_t);
assert(binind < nbins);
#ifdef JEMALLOC_FILL
if (opt_junk)
memset(ptr, 0x5a, arena_bin_info[binind].reg_size);
#endif
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
if (tbin->ncached == tbin_info->ncached_max) {
tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
1)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->avail[tbin->ncached] = ptr;
tbin->ncached++;
tcache_event(tcache);
}
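/*
* Editor's note on the pointer-to-run mapping above: for pages of a small
* run, the page map entry's size bits hold the page's offset from the run
* header. For example, if ptr lands on page 37 of its chunk and
* (mapelm->bits >> PAGE_SHIFT) == 2, the run header is at page 35, i.e. at
* chunk + (35 << PAGE_SHIFT).
*/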
JEMALLOC_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
arena_t *arena;
arena_chunk_t *chunk;
size_t pageind, binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
assert((size & PAGE_MASK) == 0);
assert(arena_salloc(ptr) > small_maxclass);
assert(arena_salloc(ptr) <= tcache_maxclass);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = chunk->arena;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
binind = nbins + (size >> PAGE_SHIFT) - 1;
#ifdef JEMALLOC_FILL
if (opt_junk)
memset(ptr, 0x5a, size);
#endif
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
if (tbin->ncached == tbin_info->ncached_max) {
tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
1)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->avail[tbin->ncached] = ptr;
tbin->ncached++;
tcache_event(tcache);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_TCACHE */
#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
#endif
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
malloc_zone_t *create_zone(void);
void szone2ozone(malloc_zone_t *zone);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <limits.h>
#include <strings.h>
#define JEMALLOC_VERSION "@jemalloc_version@"
#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
#include "jemalloc_defs@install_suffix@.h"
#ifndef JEMALLOC_P
# define JEMALLOC_P(s) s
#endif
#define ALLOCM_LG_ALIGN(la) (la)
#if LG_SIZEOF_PTR == 2
#define ALLOCM_ALIGN(a) (ffs(a)-1)
#else
#define ALLOCM_ALIGN(a) ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
#endif
#define ALLOCM_ZERO ((int)0x40)
#define ALLOCM_NO_MOVE ((int)0x80)
#define ALLOCM_SUCCESS 0
#define ALLOCM_ERR_OOM 1
#define ALLOCM_ERR_NOT_MOVED 2
extern const char *JEMALLOC_P(malloc_conf);
extern void (*JEMALLOC_P(malloc_message))(void *, const char *);
void *JEMALLOC_P(malloc)(size_t size) JEMALLOC_ATTR(malloc);
void *JEMALLOC_P(calloc)(size_t num, size_t size) JEMALLOC_ATTR(malloc);
int JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
JEMALLOC_ATTR(nonnull(1));
void *JEMALLOC_P(realloc)(void *ptr, size_t size);
void JEMALLOC_P(free)(void *ptr);
size_t JEMALLOC_P(malloc_usable_size)(const void *ptr);
void JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
void *cbopaque, const char *opts);
int JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
int JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp,
size_t *miblenp);
int JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
int JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
JEMALLOC_ATTR(nonnull(1));
int JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size,
size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
int JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
JEMALLOC_ATTR(nonnull(1));
int JEMALLOC_P(dallocm)(void *ptr, int flags) JEMALLOC_ATTR(nonnull(1));
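#if 0
/*
* Editor's usage sketch for the experimental *allocm() API declared above
* (compiled out; the sizes and flags are arbitrary examples).
*/
static int
allocm_example(void)
{
void *p;
size_t rsize;

/* 4KiB-aligned, zeroed allocation of at least 100 bytes. */
if (JEMALLOC_P(allocm)(&p, &rsize, 100,
ALLOCM_LG_ALIGN(12) | ALLOCM_ZERO) != ALLOCM_SUCCESS)
return (ALLOCM_ERR_OOM);
/* Try to resize in place to 4096 (+ up to 4000 extra) bytes; never move. */
JEMALLOC_P(rallocm)(&p, &rsize, 4096, 4000, ALLOCM_NO_MOVE);
JEMALLOC_P(dallocm)(p, 0);
return (ALLOCM_SUCCESS);
}
#endif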
#ifdef __cplusplus
};
#endif
#endif /* JEMALLOC_H_ */
#ifndef JEMALLOC_DEFS_H_
#define JEMALLOC_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined, it will cause all public APIs to be prefixed.
* This makes it possible, with some care, to use multiple allocators
* simultaneously.
*
* In many cases it is more convenient to manually prefix allocator function
* calls than to let macros do it automatically, particularly when using
* multiple allocators simultaneously. Define JEMALLOC_MANGLE before
* #include'ing jemalloc.h in order to cause name mangling that corresponds to
* the API prefixing.
*/
#undef JEMALLOC_PREFIX
#undef JEMALLOC_CPREFIX
#if (defined(JEMALLOC_PREFIX) && defined(JEMALLOC_MANGLE))
#undef JEMALLOC_P
#endif
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#undef CPU_SPINWAIT
/*
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
* documented in the atomic(3) manual page.
*/
#undef JEMALLOC_OSATOMIC
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
#undef JEMALLOC_OSSPIN
/* Defined if __attribute__((...)) syntax is supported. */
#undef JEMALLOC_HAVE_ATTR
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
#else
# define JEMALLOC_ATTR(s)
#endif
/* JEMALLOC_CC_SILENCE enables code that silences unhelpful compiler warnings. */
#undef JEMALLOC_CC_SILENCE
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
#undef JEMALLOC_DEBUG
/* JEMALLOC_STATS enables statistics calculation. */
#undef JEMALLOC_STATS
/* JEMALLOC_PROF enables allocation profiling. */
#undef JEMALLOC_PROF
/* Use libunwind for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBUNWIND
/* Use libgcc for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBGCC
/* Use gcc intrinsics for profile backtracing if defined. */
#undef JEMALLOC_PROF_GCC
/*
* JEMALLOC_TINY enables support for tiny objects, which are smaller than one
* quantum.
*/
#undef JEMALLOC_TINY
/*
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
* This makes it possible to allocate/deallocate objects without any locking
* when the cache is in the steady state.
*/
#undef JEMALLOC_TCACHE
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
* segment (DSS).
*/
#undef JEMALLOC_DSS
/* JEMALLOC_SWAP enables mmap()ed swap file support. */
#undef JEMALLOC_SWAP
/* Support memory filling (junk/zero). */
#undef JEMALLOC_FILL
/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC
/* Support SYSV semantics. */
#undef JEMALLOC_SYSV
/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK
/* Determine page size at run time if defined. */
#undef DYNAMIC_PAGE_SHIFT
/* One page is 2^STATIC_PAGE_SHIFT bytes. */
#undef STATIC_PAGE_SHIFT
/* TLS is used to map arenas and magazine caches to threads. */
#undef NO_TLS
/*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them.
*/
#undef JEMALLOC_IVSALLOC
/*
* Define overrides for non-standard allocator-related functions if they
* are present on the system.
*/
#undef JEMALLOC_OVERRIDE_MEMALIGN
#undef JEMALLOC_OVERRIDE_VALLOC
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
#undef JEMALLOC_ZONE_VERSION
/* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). */
#undef JEMALLOC_MREMAP_FIXED
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched.
* madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
* unused, such that they will be discarded rather
* than swapped out.
*/
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_FREE
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#undef LG_SIZEOF_PTR
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#undef LG_SIZEOF_INT
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG
#endif /* JEMALLOC_DEFS_H_ */
#! /bin/sh
#
# install - install a program, script, or datafile
# This comes from X11R5 (mit/util/scripts/install.sh).
#
# Copyright 1991 by the Massachusetts Institute of Technology
#
# Permission to use, copy, modify, distribute, and sell this software and its
# documentation for any purpose is hereby granted without fee, provided that
# the above copyright notice appear in all copies and that both that
# copyright notice and this permission notice appear in supporting
# documentation, and that the name of M.I.T. not be used in advertising or
# publicity pertaining to distribution of the software without specific,
# written prior permission. M.I.T. makes no representations about the
# suitability of this software for any purpose. It is provided "as is"
# without express or implied warranty.
#
# Calling this script install-sh is preferred over install.sh, to prevent
# `make' implicit rules from creating a file called install from it
# when there is no Makefile.
#
# This script is compatible with the BSD install script, but was written
# from scratch. It can only install one file at a time, a restriction
# shared with many OS's install programs.
# set DOITPROG to echo to test this script
# Don't use :- since 4.3BSD and earlier shells don't like it.
doit="${DOITPROG-}"
# put in absolute paths if you don't have them in your path; or use env. vars.
mvprog="${MVPROG-mv}"
cpprog="${CPPROG-cp}"
chmodprog="${CHMODPROG-chmod}"
chownprog="${CHOWNPROG-chown}"
chgrpprog="${CHGRPPROG-chgrp}"
stripprog="${STRIPPROG-strip}"
rmprog="${RMPROG-rm}"
mkdirprog="${MKDIRPROG-mkdir}"
transformbasename=""
transformarg=""
instcmd="$mvprog"
chmodcmd="$chmodprog 0755"
chowncmd=""
chgrpcmd=""
stripcmd=""
rmcmd="$rmprog -f"
mvcmd="$mvprog"
src=""
dst=""
dir_arg=""
while [ x"$1" != x ]; do
case $1 in
-c) instcmd="$cpprog"
shift
continue;;
-d) dir_arg=true
shift
continue;;
-m) chmodcmd="$chmodprog $2"
shift
shift
continue;;
-o) chowncmd="$chownprog $2"
shift
shift
continue;;
-g) chgrpcmd="$chgrpprog $2"
shift
shift
continue;;
-s) stripcmd="$stripprog"
shift
continue;;
-t=*) transformarg=`echo $1 | sed 's/-t=//'`
shift
continue;;
-b=*) transformbasename=`echo $1 | sed 's/-b=//'`
shift
continue;;
*) if [ x"$src" = x ]
then
src=$1
else
# this colon is to work around a 386BSD /bin/sh bug
:
dst=$1
fi
shift
continue;;
esac
done
if [ x"$src" = x ]
then
echo "install: no input file specified"
exit 1
else
true
fi
if [ x"$dir_arg" != x ]; then
dst=$src
src=""
if [ -d $dst ]; then
instcmd=:
else
instcmd=mkdir
fi
else
# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
# might cause directories to be created, which would be especially bad
# if $src (and thus $dsttmp) contains '*'.
if [ -f $src -o -d $src ]
then
true
else
echo "install: $src does not exist"
exit 1
fi
if [ x"$dst" = x ]
then
echo "install: no destination specified"
exit 1
else
true
fi
# If destination is a directory, append the input filename; if your system
# does not like double slashes in filenames, you may need to add some logic
if [ -d $dst ]
then
dst="$dst"/`basename $src`
else
true
fi
fi
## this sed command emulates the dirname command
dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
# Make sure that the destination directory exists.
# this part is taken from Noah Friedman's mkinstalldirs script
# Skip lots of stat calls in the usual case.
if [ ! -d "$dstdir" ]; then
defaultIFS='
'
IFS="${IFS-${defaultIFS}}"
oIFS="${IFS}"
# Some sh's can't handle IFS=/ for some reason.
IFS='%'
set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
IFS="${oIFS}"
pathcomp=''
while [ $# -ne 0 ] ; do
pathcomp="${pathcomp}${1}"
shift
if [ ! -d "${pathcomp}" ] ;
then
$mkdirprog "${pathcomp}"
else
true
fi
pathcomp="${pathcomp}/"
done
fi
if [ x"$dir_arg" != x ]
then
$doit $instcmd $dst &&
if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
else
# If we're going to rename the final executable, determine the name now.
if [ x"$transformarg" = x ]
then
dstfile=`basename $dst`
else
dstfile=`basename $dst $transformbasename |
sed $transformarg`$transformbasename
fi
# don't allow the sed command to completely eliminate the filename
if [ x"$dstfile" = x ]
then
dstfile=`basename $dst`
else
true
fi
# Make a temp file name in the proper directory.
dsttmp=$dstdir/#inst.$$#
# Move or copy the file name to the temp name
$doit $instcmd $src $dsttmp &&
trap "rm -f ${dsttmp}" 0 &&
# and set any options; do chmod last to preserve setuid bits
# If any of these fail, we abort the whole thing. If we want to
# ignore errors from any of these, just make sure not to ignore
# errors from the above "$doit $instcmd $src $dsttmp" command.
if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
# Now rename the file to the real destination.
$doit $rmcmd -f $dstdir/$dstfile &&
$doit $mvcmd $dsttmp $dstdir/$dstfile
fi &&
exit 0
#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
size_t opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
size_t opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
uint8_t const *small_size2bin;
arena_bin_info_t *arena_bin_info;
/* Various bin-related settings. */
unsigned nqbins;
unsigned ncbins;
unsigned nsbins;
unsigned nbins;
size_t qspace_max;
size_t cspace_min;
size_t cspace_max;
size_t sspace_min;
size_t sspace_max;
size_t lg_mspace;
size_t mspace_mask;
/*
* const_small_size2bin is a static constant lookup table that in the common
* case can be used as-is for small_size2bin.
*/
#if (LG_TINY_MIN == 2)
#define S2B_4(i) i,
#define S2B_8(i) S2B_4(i) S2B_4(i)
#elif (LG_TINY_MIN == 3)
#define S2B_8(i) i,
#else
# error "Unsupported LG_TINY_MIN"
#endif
#define S2B_16(i) S2B_8(i) S2B_8(i)
#define S2B_32(i) S2B_16(i) S2B_16(i)
#define S2B_64(i) S2B_32(i) S2B_32(i)
#define S2B_128(i) S2B_64(i) S2B_64(i)
#define S2B_256(i) S2B_128(i) S2B_128(i)
/*
* The number of elements in const_small_size2bin is dependent on the
* definition for SUBPAGE.
*/
static JEMALLOC_ATTR(aligned(CACHELINE))
const uint8_t const_small_size2bin[] = {
#if (LG_QUANTUM == 4)
/* 16-byte quantum **********************/
# ifdef JEMALLOC_TINY
# if (LG_TINY_MIN == 2)
S2B_4(0) /* 4 */
S2B_4(1) /* 8 */
S2B_8(2) /* 16 */
# define S2B_QMIN 2
# elif (LG_TINY_MIN == 3)
S2B_8(0) /* 8 */
S2B_8(1) /* 16 */
# define S2B_QMIN 1
# else
# error "Unsupported LG_TINY_MIN"
# endif
# else
S2B_16(0) /* 16 */
# define S2B_QMIN 0
# endif
S2B_16(S2B_QMIN + 1) /* 32 */
S2B_16(S2B_QMIN + 2) /* 48 */
S2B_16(S2B_QMIN + 3) /* 64 */
S2B_16(S2B_QMIN + 4) /* 80 */
S2B_16(S2B_QMIN + 5) /* 96 */
S2B_16(S2B_QMIN + 6) /* 112 */
S2B_16(S2B_QMIN + 7) /* 128 */
# define S2B_CMIN (S2B_QMIN + 8)
#else
/* 8-byte quantum ***********************/
# ifdef JEMALLOC_TINY
# if (LG_TINY_MIN == 2)
S2B_4(0) /* 4 */
S2B_4(1) /* 8 */
# define S2B_QMIN 1
# else
# error "Unsupported LG_TINY_MIN"
# endif
# else
S2B_8(0) /* 8 */
# define S2B_QMIN 0
# endif
S2B_8(S2B_QMIN + 1) /* 16 */
S2B_8(S2B_QMIN + 2) /* 24 */
S2B_8(S2B_QMIN + 3) /* 32 */
S2B_8(S2B_QMIN + 4) /* 40 */
S2B_8(S2B_QMIN + 5) /* 48 */
S2B_8(S2B_QMIN + 6) /* 56 */
S2B_8(S2B_QMIN + 7) /* 64 */
S2B_8(S2B_QMIN + 8) /* 72 */
S2B_8(S2B_QMIN + 9) /* 80 */
S2B_8(S2B_QMIN + 10) /* 88 */
S2B_8(S2B_QMIN + 11) /* 96 */
S2B_8(S2B_QMIN + 12) /* 104 */
S2B_8(S2B_QMIN + 13) /* 112 */
S2B_8(S2B_QMIN + 14) /* 120 */
S2B_8(S2B_QMIN + 15) /* 128 */
# define S2B_CMIN (S2B_QMIN + 16)
#endif
/****************************************/
S2B_64(S2B_CMIN + 0) /* 192 */
S2B_64(S2B_CMIN + 1) /* 256 */
S2B_64(S2B_CMIN + 2) /* 320 */
S2B_64(S2B_CMIN + 3) /* 384 */
S2B_64(S2B_CMIN + 4) /* 448 */
S2B_64(S2B_CMIN + 5) /* 512 */
# define S2B_SMIN (S2B_CMIN + 6)
S2B_256(S2B_SMIN + 0) /* 768 */
S2B_256(S2B_SMIN + 1) /* 1024 */
S2B_256(S2B_SMIN + 2) /* 1280 */
S2B_256(S2B_SMIN + 3) /* 1536 */
S2B_256(S2B_SMIN + 4) /* 1792 */
S2B_256(S2B_SMIN + 5) /* 2048 */
S2B_256(S2B_SMIN + 6) /* 2304 */
S2B_256(S2B_SMIN + 7) /* 2560 */
S2B_256(S2B_SMIN + 8) /* 2816 */
S2B_256(S2B_SMIN + 9) /* 3072 */
S2B_256(S2B_SMIN + 10) /* 3328 */
S2B_256(S2B_SMIN + 11) /* 3584 */
S2B_256(S2B_SMIN + 12) /* 3840 */
#if (STATIC_PAGE_SHIFT == 13)
S2B_256(S2B_SMIN + 13) /* 4096 */
S2B_256(S2B_SMIN + 14) /* 4352 */
S2B_256(S2B_SMIN + 15) /* 4608 */
S2B_256(S2B_SMIN + 16) /* 4864 */
S2B_256(S2B_SMIN + 17) /* 5120 */
S2B_256(S2B_SMIN + 18) /* 5376 */
S2B_256(S2B_SMIN + 19) /* 5632 */
S2B_256(S2B_SMIN + 20) /* 5888 */
S2B_256(S2B_SMIN + 21) /* 6144 */
S2B_256(S2B_SMIN + 22) /* 6400 */
S2B_256(S2B_SMIN + 23) /* 6656 */
S2B_256(S2B_SMIN + 24) /* 6912 */
S2B_256(S2B_SMIN + 25) /* 7168 */
S2B_256(S2B_SMIN + 26) /* 7424 */
S2B_256(S2B_SMIN + 27) /* 7680 */
S2B_256(S2B_SMIN + 28) /* 7936 */
#endif
};
#undef S2B_1
#undef S2B_2
#undef S2B_4
#undef S2B_8
#undef S2B_16
#undef S2B_32
#undef S2B_64
#undef S2B_128
#undef S2B_256
#undef S2B_QMIN
#undef S2B_CMIN
#undef S2B_SMIN
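/*
* Editor's worked example (assuming the conventional lookup
* SMALL_SIZE2BIN(s) == small_size2bin[(s-1) >> LG_TINY_MIN], which is
* defined elsewhere): with LG_QUANTUM == 4, JEMALLOC_TINY, and
* LG_TINY_MIN == 3, a request for 100 bytes indexes entry
* (100-1) >> 3 == 12. Entries 12 and 13 both hold bin S2B_QMIN + 6, whose
* regions are 112 bytes, so the request is rounded up to the 112-byte size
* class.
*/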
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
bool large, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
bool zero);
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, size_t oldsize, size_t newsize);
static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t oldsize, size_t size);
static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
static bool small_size2bin_init(void);
#ifdef JEMALLOC_DEBUG
static void small_size2bin_validate(void);
#endif
static bool small_size2bin_init_hard(void);
static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
size_t min_run_size);
static bool bin_info_init(void);
/******************************************************************************/
static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
uintptr_t a_mapelm = (uintptr_t)a;
uintptr_t b_mapelm = (uintptr_t)b;
assert(a != NULL);
assert(b != NULL);
return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}
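/*
* Editor's note: the (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm) idiom
* above yields -1, 0, or 1 without the overflow risk of subtracting the
* two values directly.
*/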
/* Generate red-black tree functions. */
rb_gen(static JEMALLOC_ATTR(unused), arena_run_tree_, arena_run_tree_t,
arena_chunk_map_t, u.rb_link, arena_run_comp)
static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
int ret;
size_t a_size = a->bits & ~PAGE_MASK;
size_t b_size = b->bits & ~PAGE_MASK;
assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));
ret = (a_size > b_size) - (a_size < b_size);
if (ret == 0) {
uintptr_t a_mapelm, b_mapelm;
if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
a_mapelm = (uintptr_t)a;
else {
/*
* Treat keys as though they are lower than anything
* else.
*/
a_mapelm = 0;
}
b_mapelm = (uintptr_t)b;
ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
}
return (ret);
}
/* Generate red-black tree functions. */
rb_gen(static JEMALLOC_ATTR(unused), arena_avail_tree_, arena_avail_tree_t,
arena_chunk_map_t, u.rb_link, arena_avail_comp)
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
void *ret;
unsigned regind;
bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
(uintptr_t)bin_info->bitmap_offset);
dassert(run->magic == ARENA_RUN_MAGIC);
assert(run->nfree > 0);
assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
(uintptr_t)(bin_info->reg_size * regind));
run->nfree--;
if (regind == run->nextind)
run->nextind++;
assert(regind < run->nextind);
return (ret);
}
static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
size_t binind = arena_bin_index(chunk->arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind = arena_run_regind(run, bin_info, ptr);
bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
(uintptr_t)bin_info->bitmap_offset);
assert(run->nfree < bin_info->nregs);
/* Freeing an interior pointer can cause assertion failure. */
assert(((uintptr_t)ptr - ((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset)) % (uintptr_t)bin_info->reg_size
== 0);
assert((uintptr_t)ptr >= (uintptr_t)run +
(uintptr_t)bin_info->reg0_offset);
/* Freeing an unallocated pointer can cause assertion failure. */
assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
run->nfree++;
}
#ifdef JEMALLOC_DEBUG
static inline void
arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
size_t i;
size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << PAGE_SHIFT));
for (i = 0; i < PAGE_SIZE / sizeof(size_t); i++)
assert(p[i] == 0);
}
#endif
static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
bool zero)
{
arena_chunk_t *chunk;
size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
size_t flag_dirty;
arena_avail_tree_t *runs_avail;
#ifdef JEMALLOC_STATS
size_t cactive_diff;
#endif
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
old_ndirty = chunk->ndirty;
run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
>> PAGE_SHIFT);
flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY;
runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
&arena->runs_avail_clean;
total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >>
PAGE_SHIFT;
assert((chunk->map[run_ind+total_pages-1-map_bias].bits &
CHUNK_MAP_DIRTY) == flag_dirty);
need_pages = (size >> PAGE_SHIFT);
assert(need_pages > 0);
assert(need_pages <= total_pages);
rem_pages = total_pages - need_pages;
arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
#ifdef JEMALLOC_STATS
/* Update stats_cactive if nactive is crossing a chunk multiple. */
cactive_diff = CHUNK_CEILING((arena->nactive + need_pages) <<
PAGE_SHIFT) - CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
if (cactive_diff != 0)
stats_cactive_add(cactive_diff);
#endif
arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) {
if (flag_dirty != 0) {
chunk->map[run_ind+need_pages-map_bias].bits =
(rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY;
chunk->map[run_ind+total_pages-1-map_bias].bits =
(rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY;
} else {
chunk->map[run_ind+need_pages-map_bias].bits =
(rem_pages << PAGE_SHIFT) |
(chunk->map[run_ind+need_pages-map_bias].bits &
CHUNK_MAP_UNZEROED);
chunk->map[run_ind+total_pages-1-map_bias].bits =
(rem_pages << PAGE_SHIFT) |
(chunk->map[run_ind+total_pages-1-map_bias].bits &
CHUNK_MAP_UNZEROED);
}
arena_avail_tree_insert(runs_avail,
&chunk->map[run_ind+need_pages-map_bias]);
}
/* Update dirty page accounting. */
if (flag_dirty != 0) {
chunk->ndirty -= need_pages;
arena->ndirty -= need_pages;
}
/*
* Update the page map separately for large vs. small runs, since it is
* possible to avoid iteration for large mallocs.
*/
if (large) {
if (zero) {
if (flag_dirty == 0) {
/*
* The run is clean, so some pages may be
* zeroed (i.e. never before touched).
*/
for (i = 0; i < need_pages; i++) {
if ((chunk->map[run_ind+i-map_bias].bits
& CHUNK_MAP_UNZEROED) != 0) {
memset((void *)((uintptr_t)
chunk + ((run_ind+i) <<
PAGE_SHIFT)), 0,
PAGE_SIZE);
}
#ifdef JEMALLOC_DEBUG
else {
arena_chunk_validate_zeroed(
chunk, run_ind+i);
}
#endif
}
} else {
/*
* The run is dirty, so all pages must be
* zeroed.
*/
memset((void *)((uintptr_t)chunk + (run_ind <<
PAGE_SHIFT)), 0, (need_pages <<
PAGE_SHIFT));
}
}
/*
* Set the last element first, in case the run only contains one
* page (i.e. both statements set the same element).
*/
chunk->map[run_ind+need_pages-1-map_bias].bits =
CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty;
chunk->map[run_ind-map_bias].bits = size | flag_dirty |
CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
} else {
assert(zero == false);
/*
* Propagate the dirty and unzeroed flags to the allocated
* small run, so that arena_dalloc_bin_run() has the ability to
* conditionally trim clean pages.
*/
chunk->map[run_ind-map_bias].bits =
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
CHUNK_MAP_ALLOCATED | flag_dirty;
#ifdef JEMALLOC_DEBUG
/*
* The first page will always be dirtied during small run
* initialization, so a validation failure here would not
* actually cause an observable failure.
*/
if (flag_dirty == 0 &&
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED)
== 0)
arena_chunk_validate_zeroed(chunk, run_ind);
#endif
for (i = 1; i < need_pages - 1; i++) {
chunk->map[run_ind+i-map_bias].bits = (i << PAGE_SHIFT)
| (chunk->map[run_ind+i-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
#ifdef JEMALLOC_DEBUG
if (flag_dirty == 0 &&
(chunk->map[run_ind+i-map_bias].bits &
CHUNK_MAP_UNZEROED) == 0)
arena_chunk_validate_zeroed(chunk, run_ind+i);
#endif
}
chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
- 1) << PAGE_SHIFT) |
(chunk->map[run_ind+need_pages-1-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
#ifdef JEMALLOC_DEBUG
if (flag_dirty == 0 &&
(chunk->map[run_ind+need_pages-1-map_bias].bits &
CHUNK_MAP_UNZEROED) == 0) {
arena_chunk_validate_zeroed(chunk,
run_ind+need_pages-1);
}
#endif
}
}
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
arena_chunk_t *chunk;
size_t i;
if (arena->spare != NULL) {
arena_avail_tree_t *runs_avail;
chunk = arena->spare;
arena->spare = NULL;
/* Insert the run into the appropriate runs_avail_* tree. */
if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
runs_avail = &arena->runs_avail_clean;
else
runs_avail = &arena->runs_avail_dirty;
assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass);
assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK)
== arena_maxclass);
assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) ==
(chunk->map[chunk_npages-1-map_bias].bits &
CHUNK_MAP_DIRTY));
arena_avail_tree_insert(runs_avail, &chunk->map[0]);
} else {
bool zero;
size_t unzeroed;
zero = false;
malloc_mutex_unlock(&arena->lock);
chunk = (arena_chunk_t *)chunk_alloc(chunksize, false, &zero);
malloc_mutex_lock(&arena->lock);
if (chunk == NULL)
return (NULL);
#ifdef JEMALLOC_STATS
arena->stats.mapped += chunksize;
#endif
chunk->arena = arena;
ql_elm_new(chunk, link_dirty);
chunk->dirtied = false;
/*
* Claim that no pages are in use, since the header is merely
* overhead.
*/
chunk->ndirty = 0;
/*
* Initialize the map to contain one maximal free untouched run.
* Mark the pages as zeroed iff chunk_alloc() returned a zeroed
* chunk.
*/
unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
chunk->map[0].bits = arena_maxclass | unzeroed;
/*
* There is no need to initialize the internal page map entries
* unless the chunk is not zeroed.
*/
if (zero == false) {
for (i = map_bias+1; i < chunk_npages-1; i++)
chunk->map[i-map_bias].bits = unzeroed;
}
#ifdef JEMALLOC_DEBUG
else {
for (i = map_bias+1; i < chunk_npages-1; i++)
assert(chunk->map[i-map_bias].bits == unzeroed);
}
#endif
chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass |
unzeroed;
/* Insert the run into the runs_avail_clean tree. */
arena_avail_tree_insert(&arena->runs_avail_clean,
&chunk->map[0]);
}
return (chunk);
}
static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
arena_avail_tree_t *runs_avail;
/*
* Remove run from the appropriate runs_avail_* tree, so that the arena
* does not use it.
*/
if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
runs_avail = &arena->runs_avail_clean;
else
runs_avail = &arena->runs_avail_dirty;
arena_avail_tree_remove(runs_avail, &chunk->map[0]);
if (arena->spare != NULL) {
arena_chunk_t *spare = arena->spare;
arena->spare = chunk;
if (spare->dirtied) {
ql_remove(&chunk->arena->chunks_dirty, spare,
link_dirty);
arena->ndirty -= spare->ndirty;
}
malloc_mutex_unlock(&arena->lock);
chunk_dealloc((void *)spare, chunksize);
malloc_mutex_lock(&arena->lock);
#ifdef JEMALLOC_STATS
arena->stats.mapped -= chunksize;
#endif
} else
arena->spare = chunk;
}
static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
{
arena_chunk_t *chunk;
arena_run_t *run;
arena_chunk_map_t *mapelm, key;
assert(size <= arena_maxclass);
assert((size & PAGE_MASK) == 0);
/* Search the arena's chunks for the lowest best fit. */
key.bits = size | CHUNK_MAP_KEY;
mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
if (mapelm != NULL) {
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
size_t pageind = (((uintptr_t)mapelm -
(uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+ map_bias;
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
PAGE_SHIFT));
arena_run_split(arena, run, size, large, zero);
return (run);
}
mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
if (mapelm != NULL) {
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
size_t pageind = (((uintptr_t)mapelm -
(uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+ map_bias;
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
PAGE_SHIFT));
arena_run_split(arena, run, size, large, zero);
return (run);
}
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
run = (arena_run_t *)((uintptr_t)chunk + (map_bias <<
PAGE_SHIFT));
arena_run_split(arena, run, size, large, zero);
return (run);
}
/*
* arena_chunk_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped arena->lock in
* arena_chunk_alloc(), so search one more time.
*/
mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
if (mapelm != NULL) {
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
size_t pageind = (((uintptr_t)mapelm -
(uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+ map_bias;
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
PAGE_SHIFT));
arena_run_split(arena, run, size, large, zero);
return (run);
}
mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
if (mapelm != NULL) {
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
size_t pageind = (((uintptr_t)mapelm -
(uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+ map_bias;
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
PAGE_SHIFT));
arena_run_split(arena, run, size, large, zero);
return (run);
}
return (NULL);
}
static inline void
arena_maybe_purge(arena_t *arena)
{
/* Enforce opt_lg_dirty_mult. */
if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
(arena->ndirty - arena->npurgatory) > chunk_npages &&
(arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
arena->npurgatory))
arena_purge(arena, false);
}
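/*
* Editor's worked example (assuming opt_lg_dirty_mult == 5, i.e. a 32:1
* active:dirty page ratio, and chunk_npages == 1024): an arena with
* nactive == 65536 pages starts purging once its unclaimed dirty pages
* (ndirty - npurgatory) exceed max(1024, 65536 >> 5) == 2048.
*/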
static inline void
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
{
ql_head(arena_chunk_map_t) mapelms;
arena_chunk_map_t *mapelm;
size_t pageind, flag_unzeroed;
#ifdef JEMALLOC_DEBUG
size_t ndirty;
#endif
#ifdef JEMALLOC_STATS
size_t nmadvise;
#endif
ql_new(&mapelms);
flag_unzeroed =
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
/*
* madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
* mappings, but not for file-backed mappings.
*/
# ifdef JEMALLOC_SWAP
swap_enabled ? CHUNK_MAP_UNZEROED :
# endif
0;
#else
CHUNK_MAP_UNZEROED;
#endif
/*
* If chunk is the spare, temporarily re-allocate it, 1) so that its
* run is reinserted into runs_avail_dirty, and 2) so that it cannot be
* completely discarded by another thread while arena->lock is dropped
* by this thread. Note that the arena_run_dalloc() call will
* implicitly deallocate the chunk, so no explicit action is required
* in this function to deallocate the chunk.
*
* Note that once a chunk contains dirty pages, it cannot again contain
* a single run unless 1) it is a dirty run, or 2) this function purges
* dirty pages and causes the transition to a single clean run. Thus
* (chunk == arena->spare) is possible, but it is not possible for
* this function to be called on the spare unless it contains a dirty
* run.
*/
if (chunk == arena->spare) {
assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0);
arena_chunk_alloc(arena);
}
/* Temporarily allocate all free dirty runs within chunk. */
for (pageind = map_bias; pageind < chunk_npages;) {
mapelm = &chunk->map[pageind-map_bias];
if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) {
size_t npages;
npages = mapelm->bits >> PAGE_SHIFT;
assert(pageind + npages <= chunk_npages);
if (mapelm->bits & CHUNK_MAP_DIRTY) {
size_t i;
#ifdef JEMALLOC_STATS
size_t cactive_diff;
#endif
arena_avail_tree_remove(
&arena->runs_avail_dirty, mapelm);
mapelm->bits = (npages << PAGE_SHIFT) |
flag_unzeroed | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
/*
* Update internal elements in the page map, so
* that CHUNK_MAP_UNZEROED is properly set.
*/
for (i = 1; i < npages - 1; i++) {
chunk->map[pageind+i-map_bias].bits =
flag_unzeroed;
}
if (npages > 1) {
chunk->map[
pageind+npages-1-map_bias].bits =
flag_unzeroed | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED;
}
#ifdef JEMALLOC_STATS
/*
* Update stats_cactive if nactive is crossing a
* chunk multiple.
*/
cactive_diff = CHUNK_CEILING((arena->nactive +
npages) << PAGE_SHIFT) -
CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
if (cactive_diff != 0)
stats_cactive_add(cactive_diff);
#endif
arena->nactive += npages;
/* Append to list for later processing. */
ql_elm_new(mapelm, u.ql_link);
ql_tail_insert(&mapelms, mapelm, u.ql_link);
}
pageind += npages;
} else {
/* Skip allocated run. */
if (mapelm->bits & CHUNK_MAP_LARGE)
pageind += mapelm->bits >> PAGE_SHIFT;
else {
arena_run_t *run = (arena_run_t *)((uintptr_t)
chunk + (uintptr_t)(pageind << PAGE_SHIFT));
assert((mapelm->bits >> PAGE_SHIFT) == 0);
dassert(run->magic == ARENA_RUN_MAGIC);
size_t binind = arena_bin_index(arena,
run->bin);
arena_bin_info_t *bin_info =
&arena_bin_info[binind];
pageind += bin_info->run_size >> PAGE_SHIFT;
}
}
}
assert(pageind == chunk_npages);
#ifdef JEMALLOC_DEBUG
ndirty = chunk->ndirty;
#endif
#ifdef JEMALLOC_STATS
arena->stats.purged += chunk->ndirty;
#endif
arena->ndirty -= chunk->ndirty;
chunk->ndirty = 0;
ql_remove(&arena->chunks_dirty, chunk, link_dirty);
chunk->dirtied = false;
malloc_mutex_unlock(&arena->lock);
#ifdef JEMALLOC_STATS
nmadvise = 0;
#endif
ql_foreach(mapelm, &mapelms, u.ql_link) {
size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
sizeof(arena_chunk_map_t)) + map_bias;
size_t npages = mapelm->bits >> PAGE_SHIFT;
assert(pageind + npages <= chunk_npages);
#ifdef JEMALLOC_DEBUG
assert(ndirty >= npages);
ndirty -= npages;
#endif
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
(npages << PAGE_SHIFT), MADV_DONTNEED);
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
(npages << PAGE_SHIFT), MADV_FREE);
#else
# error "No method defined for purging unused dirty pages."
#endif
#ifdef JEMALLOC_STATS
nmadvise++;
#endif
}
#ifdef JEMALLOC_DEBUG
assert(ndirty == 0);
#endif
malloc_mutex_lock(&arena->lock);
#ifdef JEMALLOC_STATS
arena->stats.nmadvise += nmadvise;
#endif
/* Deallocate runs. */
for (mapelm = ql_first(&mapelms); mapelm != NULL;
mapelm = ql_first(&mapelms)) {
size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
sizeof(arena_chunk_map_t)) + map_bias;
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)(pageind << PAGE_SHIFT));
ql_remove(&mapelms, mapelm, u.ql_link);
arena_run_dalloc(arena, run, false);
}
}
static void
arena_purge(arena_t *arena, bool all)
{
arena_chunk_t *chunk;
size_t npurgatory;
#ifdef JEMALLOC_DEBUG
size_t ndirty = 0;
ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
assert(chunk->dirtied);
ndirty += chunk->ndirty;
}
assert(ndirty == arena->ndirty);
#endif
assert(arena->ndirty > arena->npurgatory || all);
assert(arena->ndirty > chunk_npages || all);
assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
arena->npurgatory) || all);
#ifdef JEMALLOC_STATS
arena->stats.npurge++;
#endif
/*
* Compute the minimum number of pages that this thread should try to
* purge, and add the result to arena->npurgatory. This will keep
* multiple threads from racing to reduce ndirty below the threshold.
*/
npurgatory = arena->ndirty - arena->npurgatory;
if (all == false) {
assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult);
npurgatory -= arena->nactive >> opt_lg_dirty_mult;
}
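/*
 * Worked example (hypothetical numbers): with arena->ndirty == 600,
 * arena->npurgatory == 0, and (arena->nactive >> opt_lg_dirty_mult) ==
 * 100, a non-"all" purge sets npurgatory to 500, i.e. this thread
 * commits to purging exactly the pages above the dirty threshold.
 */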
arena->npurgatory += npurgatory;
while (npurgatory > 0) {
/* Get next chunk with dirty pages. */
chunk = ql_first(&arena->chunks_dirty);
if (chunk == NULL) {
/*
* This thread was unable to purge as many pages as
* originally intended, due to races with other threads
* that either did some of the purging work, or re-used
* dirty pages.
*/
arena->npurgatory -= npurgatory;
return;
}
while (chunk->ndirty == 0) {
ql_remove(&arena->chunks_dirty, chunk, link_dirty);
chunk->dirtied = false;
chunk = ql_first(&arena->chunks_dirty);
if (chunk == NULL) {
/* Same logic as above. */
arena->npurgatory -= npurgatory;
return;
}
}
if (chunk->ndirty > npurgatory) {
/*
* This thread will, at a minimum, purge all the dirty
* pages in chunk, so set npurgatory to reflect this
* thread's commitment to purge the pages. This tends
* to reduce the chances of the following scenario:
*
* 1) This thread sets arena->npurgatory such that
* (arena->ndirty - arena->npurgatory) is at the
* threshold.
* 2) This thread drops arena->lock.
* 3) Another thread causes one or more pages to be
* dirtied, and immediately determines that it must
* purge dirty pages.
*
* If this scenario *does* play out, that's okay,
* because all of the purging work being done really
* needs to happen.
*/
arena->npurgatory += chunk->ndirty - npurgatory;
npurgatory = chunk->ndirty;
}
arena->npurgatory -= chunk->ndirty;
npurgatory -= chunk->ndirty;
arena_chunk_purge(arena, chunk);
}
}
void
arena_purge_all(arena_t *arena)
{
malloc_mutex_lock(&arena->lock);
arena_purge(arena, true);
malloc_mutex_unlock(&arena->lock);
}
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
{
arena_chunk_t *chunk;
size_t size, run_ind, run_pages, flag_dirty;
arena_avail_tree_t *runs_avail;
#ifdef JEMALLOC_STATS
size_t cactive_diff;
#endif
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
>> PAGE_SHIFT);
assert(run_ind >= map_bias);
assert(run_ind < chunk_npages);
if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) {
size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK;
assert(size == PAGE_SIZE ||
(chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
~PAGE_MASK) == 0);
assert((chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
CHUNK_MAP_LARGE) != 0);
assert((chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
CHUNK_MAP_ALLOCATED) != 0);
} else {
size_t binind = arena_bin_index(arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
size = bin_info->run_size;
}
run_pages = (size >> PAGE_SHIFT);
#ifdef JEMALLOC_STATS
/* Update stats_cactive if nactive is crossing a chunk multiple. */
cactive_diff = CHUNK_CEILING(arena->nactive << PAGE_SHIFT) -
CHUNK_CEILING((arena->nactive - run_pages) << PAGE_SHIFT);
if (cactive_diff != 0)
stats_cactive_sub(cactive_diff);
#endif
arena->nactive -= run_pages;
/*
* The run is dirty if the caller claims to have dirtied it, as well as
* if it was already dirty before being allocated.
*/
if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0)
dirty = true;
flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
runs_avail = dirty ? &arena->runs_avail_dirty :
&arena->runs_avail_clean;
/* Mark pages as unallocated in the chunk map. */
if (dirty) {
chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY;
chunk->map[run_ind+run_pages-1-map_bias].bits = size |
CHUNK_MAP_DIRTY;
chunk->ndirty += run_pages;
arena->ndirty += run_pages;
} else {
chunk->map[run_ind-map_bias].bits = size |
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED);
chunk->map[run_ind+run_pages-1-map_bias].bits = size |
(chunk->map[run_ind+run_pages-1-map_bias].bits &
CHUNK_MAP_UNZEROED);
}
/* Try to coalesce forward. */
if (run_ind + run_pages < chunk_npages &&
(chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED)
== 0 && (chunk->map[run_ind+run_pages-map_bias].bits &
CHUNK_MAP_DIRTY) == flag_dirty) {
size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits &
~PAGE_MASK;
size_t nrun_pages = nrun_size >> PAGE_SHIFT;
/*
* Remove successor from runs_avail; the coalesced run is
* inserted later.
*/
assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
& ~PAGE_MASK) == nrun_size);
assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
& CHUNK_MAP_ALLOCATED) == 0);
assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
& CHUNK_MAP_DIRTY) == flag_dirty);
arena_avail_tree_remove(runs_avail,
&chunk->map[run_ind+run_pages-map_bias]);
size += nrun_size;
run_pages += nrun_pages;
chunk->map[run_ind-map_bias].bits = size |
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
chunk->map[run_ind+run_pages-1-map_bias].bits = size |
(chunk->map[run_ind+run_pages-1-map_bias].bits &
CHUNK_MAP_FLAGS_MASK);
}
/* Try to coalesce backward. */
if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits &
CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits &
CHUNK_MAP_DIRTY) == flag_dirty) {
size_t prun_size = chunk->map[run_ind-1-map_bias].bits &
~PAGE_MASK;
size_t prun_pages = prun_size >> PAGE_SHIFT;
run_ind -= prun_pages;
/*
* Remove predecessor from runs_avail; the coalesced run is
* inserted later.
*/
assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK)
== prun_size);
assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED)
== 0);
assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY)
== flag_dirty);
arena_avail_tree_remove(runs_avail,
&chunk->map[run_ind-map_bias]);
size += prun_size;
run_pages += prun_pages;
chunk->map[run_ind-map_bias].bits = size |
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
chunk->map[run_ind+run_pages-1-map_bias].bits = size |
(chunk->map[run_ind+run_pages-1-map_bias].bits &
CHUNK_MAP_FLAGS_MASK);
}
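/*
 * Illustration (hypothetical page counts): freeing a 3-page run whose
 * successor is a 5-page free run and whose predecessor is a 2-page
 * free run, all with matching dirty flags, yields one 10-page run.
 * Only the first and last map entries of the coalesced run need to
 * carry its size, which is why only those entries are rewritten above.
 */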
/* Insert into runs_avail, now that coalescing is complete. */
assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) ==
(chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK));
assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) ==
(chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY));
arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]);
if (dirty) {
/*
* Insert into chunks_dirty before potentially calling
* arena_chunk_dealloc(), so that chunks_dirty and
* arena->ndirty are consistent.
*/
if (chunk->dirtied == false) {
ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
chunk->dirtied = true;
}
}
/*
* Deallocate chunk if it is now completely unused. The bit
* manipulation checks whether the first run is unallocated and extends
* to the end of the chunk.
*/
if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) ==
arena_maxclass)
arena_chunk_dealloc(arena, chunk);
/*
* It is okay to do dirty page processing here even if the chunk was
* deallocated above, since in that case it is the spare. Waiting
* until after possible chunk deallocation to do dirty processing
* allows for an old spare to be fully deallocated, thus decreasing the
* chances of spuriously crossing the dirty page purging threshold.
*/
if (dirty)
arena_maybe_purge(arena);
}
static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t oldsize, size_t newsize)
{
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT;
size_t head_npages = (oldsize - newsize) >> PAGE_SHIFT;
size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY;
assert(oldsize > newsize);
/*
* Update the chunk map so that arena_run_dalloc() can treat the
* leading run as separately allocated. Set the last element of each
* run first, in case of single-page runs.
*/
assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
(chunk->map[pageind+head_npages-1-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
chunk->map[pageind-map_bias].bits = (oldsize - newsize)
| flag_dirty | (chunk->map[pageind-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
#ifdef JEMALLOC_DEBUG
{
size_t tail_npages = newsize >> PAGE_SHIFT;
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
.bits & ~PAGE_MASK) == 0);
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
.bits & CHUNK_MAP_DIRTY) == flag_dirty);
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
.bits & CHUNK_MAP_LARGE) != 0);
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
.bits & CHUNK_MAP_ALLOCATED) != 0);
}
#endif
chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty |
(chunk->map[pageind+head_npages-map_bias].bits &
CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
arena_run_dalloc(arena, run, false);
}
static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t oldsize, size_t newsize, bool dirty)
{
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT;
size_t head_npages = newsize >> PAGE_SHIFT;
size_t tail_npages = (oldsize - newsize) >> PAGE_SHIFT;
size_t flag_dirty = chunk->map[pageind-map_bias].bits &
CHUNK_MAP_DIRTY;
assert(oldsize > newsize);
/*
* Update the chunk map so that arena_run_dalloc() can treat the
* trailing run as separately allocated. Set the last element of each
* run first, in case of single-page runs.
*/
assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
(chunk->map[pageind+head_npages-1-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
chunk->map[pageind-map_bias].bits = newsize | flag_dirty |
(chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) |
CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
~PAGE_MASK) == 0);
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
CHUNK_MAP_LARGE) != 0);
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
CHUNK_MAP_ALLOCATED) != 0);
chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits =
flag_dirty |
(chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) |
flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
dirty);
}
static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
arena_chunk_map_t *mapelm;
arena_run_t *run;
size_t binind;
arena_bin_info_t *bin_info;
/* Look for a usable run. */
mapelm = arena_run_tree_first(&bin->runs);
if (mapelm != NULL) {
arena_chunk_t *chunk;
size_t pageind;
/* run is guaranteed to have available space. */
arena_run_tree_remove(&bin->runs, mapelm);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
sizeof(arena_chunk_map_t))) + map_bias;
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
(mapelm->bits >> PAGE_SHIFT))
<< PAGE_SHIFT));
#ifdef JEMALLOC_STATS
bin->stats.reruns++;
#endif
return (run);
}
/* No existing runs have any space available. */
binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
/* Allocate a new run. */
malloc_mutex_unlock(&bin->lock);
/******************************/
malloc_mutex_lock(&arena->lock);
run = arena_run_alloc(arena, bin_info->run_size, false, false);
if (run != NULL) {
bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
(uintptr_t)bin_info->bitmap_offset);
/* Initialize run internals. */
run->bin = bin;
run->nextind = 0;
run->nfree = bin_info->nregs;
bitmap_init(bitmap, &bin_info->bitmap_info);
#ifdef JEMALLOC_DEBUG
run->magic = ARENA_RUN_MAGIC;
#endif
}
malloc_mutex_unlock(&arena->lock);
/********************************/
malloc_mutex_lock(&bin->lock);
if (run != NULL) {
#ifdef JEMALLOC_STATS
bin->stats.nruns++;
bin->stats.curruns++;
if (bin->stats.curruns > bin->stats.highruns)
bin->stats.highruns = bin->stats.curruns;
#endif
return (run);
}
/*
* arena_run_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped bin->lock above,
* so search one more time.
*/
mapelm = arena_run_tree_first(&bin->runs);
if (mapelm != NULL) {
arena_chunk_t *chunk;
size_t pageind;
/* run is guaranteed to have available space. */
arena_run_tree_remove(&bin->runs, mapelm);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
sizeof(arena_chunk_map_t))) + map_bias;
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
(mapelm->bits >> PAGE_SHIFT))
<< PAGE_SHIFT));
#ifdef JEMALLOC_STATS
bin->stats.reruns++;
#endif
return (run);
}
return (NULL);
}
/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
void *ret;
size_t binind;
arena_bin_info_t *bin_info;
arena_run_t *run;
binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
bin->runcur = NULL;
run = arena_bin_nonfull_run_get(arena, bin);
if (bin->runcur != NULL && bin->runcur->nfree > 0) {
/*
* Another thread updated runcur while this one ran without the
* bin lock in arena_bin_nonfull_run_get().
*/
dassert(bin->runcur->magic == ARENA_RUN_MAGIC);
assert(bin->runcur->nfree > 0);
ret = arena_run_reg_alloc(bin->runcur, bin_info);
if (run != NULL) {
arena_chunk_t *chunk;
/*
* arena_run_alloc() may have allocated run, or it may
* have pulled run from the bin's run tree. Therefore
* it is unsafe to make any assumptions about how run
* has previously been used, and arena_bin_lower_run()
* must be called, as if a region were just deallocated
* from the run.
*/
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
if (run->nfree == bin_info->nregs)
arena_dalloc_bin_run(arena, chunk, run, bin);
else
arena_bin_lower_run(arena, chunk, run, bin);
}
return (ret);
}
if (run == NULL)
return (NULL);
bin->runcur = run;
dassert(bin->runcur->magic == ARENA_RUN_MAGIC);
assert(bin->runcur->nfree > 0);
return (arena_run_reg_alloc(bin->runcur, bin_info));
}
#ifdef JEMALLOC_PROF
void
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{
if (prof_interval != 0) {
arena->prof_accumbytes += accumbytes;
if (arena->prof_accumbytes >= prof_interval) {
prof_idump();
arena->prof_accumbytes -= prof_interval;
}
}
}
#endif
#ifdef JEMALLOC_TCACHE
void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
# ifdef JEMALLOC_PROF
, uint64_t prof_accumbytes
# endif
)
{
unsigned i, nfill;
arena_bin_t *bin;
arena_run_t *run;
void *ptr;
assert(tbin->ncached == 0);
#ifdef JEMALLOC_PROF
malloc_mutex_lock(&arena->lock);
arena_prof_accum(arena, prof_accumbytes);
malloc_mutex_unlock(&arena->lock);
#endif
bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
tbin->lg_fill_div); i < nfill; i++) {
if ((run = bin->runcur) != NULL && run->nfree > 0)
ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
ptr = arena_bin_malloc_hard(arena, bin);
if (ptr == NULL)
break;
/* Insert such that low regions get used first. */
tbin->avail[nfill - 1 - i] = ptr;
}
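/*
 * Sketch of the fill order (hypothetical nfill == 4, assuming the
 * fill completes): allocation i == 0 lands in avail[3] and i == 3 in
 * avail[0]. The cache consumes avail[] from the high end, so the
 * regions allocated first (the lowest-addressed ones handed out by
 * arena_run_reg_alloc()) are also the first to be reused.
 */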
#ifdef JEMALLOC_STATS
bin->stats.allocated += i * arena_bin_info[binind].reg_size;
bin->stats.nmalloc += i;
bin->stats.nrequests += tbin->tstats.nrequests;
bin->stats.nfills++;
tbin->tstats.nrequests = 0;
#endif
malloc_mutex_unlock(&bin->lock);
tbin->ncached = i;
}
#endif
void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
void *ret;
arena_bin_t *bin;
arena_run_t *run;
size_t binind;
binind = SMALL_SIZE2BIN(size);
assert(binind < nbins);
bin = &arena->bins[binind];
size = arena_bin_info[binind].reg_size;
malloc_mutex_lock(&bin->lock);
if ((run = bin->runcur) != NULL && run->nfree > 0)
ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
ret = arena_bin_malloc_hard(arena, bin);
if (ret == NULL) {
malloc_mutex_unlock(&bin->lock);
return (NULL);
}
#ifdef JEMALLOC_STATS
bin->stats.allocated += size;
bin->stats.nmalloc++;
bin->stats.nrequests++;
#endif
malloc_mutex_unlock(&bin->lock);
#ifdef JEMALLOC_PROF
if (isthreaded == false) {
malloc_mutex_lock(&arena->lock);
arena_prof_accum(arena, size);
malloc_mutex_unlock(&arena->lock);
}
#endif
if (zero == false) {
#ifdef JEMALLOC_FILL
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
#endif
} else
memset(ret, 0, size);
return (ret);
}
void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
void *ret;
/* Large allocation. */
size = PAGE_CEILING(size);
malloc_mutex_lock(&arena->lock);
ret = (void *)arena_run_alloc(arena, size, true, zero);
if (ret == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
}
#ifdef JEMALLOC_STATS
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
}
#endif
#ifdef JEMALLOC_PROF
arena_prof_accum(arena, size);
#endif
malloc_mutex_unlock(&arena->lock);
if (zero == false) {
#ifdef JEMALLOC_FILL
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
#endif
}
return (ret);
}
void *
arena_malloc(size_t size, bool zero)
{
assert(size != 0);
assert(QUANTUM_CEILING(size) <= arena_maxclass);
if (size <= small_maxclass) {
#ifdef JEMALLOC_TCACHE
tcache_t *tcache;
if ((tcache = tcache_get()) != NULL)
return (tcache_alloc_small(tcache, size, zero));
else
#endif
return (arena_malloc_small(choose_arena(), size, zero));
} else {
#ifdef JEMALLOC_TCACHE
if (size <= tcache_maxclass) {
tcache_t *tcache;
if ((tcache = tcache_get()) != NULL)
return (tcache_alloc_large(tcache, size, zero));
else {
return (arena_malloc_large(choose_arena(),
size, zero));
}
} else
#endif
return (arena_malloc_large(choose_arena(), size, zero));
}
}
/* Only handles large allocations that require more than page alignment. */
void *
arena_palloc(arena_t *arena, size_t size, size_t alloc_size, size_t alignment,
bool zero)
{
void *ret;
size_t offset;
arena_chunk_t *chunk;
assert((size & PAGE_MASK) == 0);
alignment = PAGE_CEILING(alignment);
malloc_mutex_lock(&arena->lock);
ret = (void *)arena_run_alloc(arena, alloc_size, true, zero);
if (ret == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
offset = (uintptr_t)ret & (alignment - 1);
assert((offset & PAGE_MASK) == 0);
assert(offset < alloc_size);
if (offset == 0)
arena_run_trim_tail(arena, chunk, ret, alloc_size, size, false);
else {
size_t leadsize, trailsize;
leadsize = alignment - offset;
if (leadsize > 0) {
arena_run_trim_head(arena, chunk, ret, alloc_size,
alloc_size - leadsize);
ret = (void *)((uintptr_t)ret + leadsize);
}
trailsize = alloc_size - leadsize - size;
if (trailsize != 0) {
/* Trim trailing space. */
assert(trailsize < alloc_size);
arena_run_trim_tail(arena, chunk, ret, size + trailsize,
size, false);
}
}
#ifdef JEMALLOC_STATS
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
}
#endif
malloc_mutex_unlock(&arena->lock);
#ifdef JEMALLOC_FILL
if (zero == false) {
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
}
#endif
return (ret);
}
/* Return the size of the allocation pointed to by ptr. */
size_t
arena_salloc(const void *ptr)
{
size_t ret;
arena_chunk_t *chunk;
size_t pageind, mapbits;
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
mapbits = chunk->map[pageind-map_bias].bits;
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapbits & CHUNK_MAP_LARGE) == 0) {
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
PAGE_SHIFT));
dassert(run->magic == ARENA_RUN_MAGIC);
size_t binind = arena_bin_index(chunk->arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
assert(((uintptr_t)ptr - ((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset)) % bin_info->reg_size ==
0);
ret = bin_info->reg_size;
} else {
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
ret = mapbits & ~PAGE_MASK;
assert(ret != 0);
}
return (ret);
}
#ifdef JEMALLOC_PROF
void
arena_prof_promoted(const void *ptr, size_t size)
{
arena_chunk_t *chunk;
size_t pageind, binind;
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
assert(isalloc(ptr) == PAGE_SIZE);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
binind = SMALL_SIZE2BIN(size);
assert(binind < nbins);
chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT);
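/*
 * Note the +1 bias: a zero CHUNK_MAP_CLASS field means "not promoted",
 * and arena_salloc_demote() below subtracts the bias back out when
 * decoding binind.
 */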
}
size_t
arena_salloc_demote(const void *ptr)
{
size_t ret;
arena_chunk_t *chunk;
size_t pageind, mapbits;
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
mapbits = chunk->map[pageind-map_bias].bits;
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapbits & CHUNK_MAP_LARGE) == 0) {
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
PAGE_SHIFT));
dassert(run->magic == ARENA_RUN_MAGIC);
size_t binind = arena_bin_index(chunk->arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
assert(((uintptr_t)ptr - ((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset)) % bin_info->reg_size ==
0);
ret = bin_info->reg_size;
} else {
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
ret = mapbits & ~PAGE_MASK;
if (prof_promote && ret == PAGE_SIZE && (mapbits &
CHUNK_MAP_CLASS_MASK) != 0) {
size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
CHUNK_MAP_CLASS_SHIFT) - 1;
assert(binind < nbins);
ret = arena_bin_info[binind].reg_size;
}
assert(ret != 0);
}
return (ret);
}
#endif
static void
arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
{
/* Dissociate run from bin. */
if (run == bin->runcur)
bin->runcur = NULL;
else {
size_t binind = arena_bin_index(chunk->arena, bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
if (bin_info->nregs != 1) {
size_t run_pageind = (((uintptr_t)run -
(uintptr_t)chunk)) >> PAGE_SHIFT;
arena_chunk_map_t *run_mapelm =
&chunk->map[run_pageind-map_bias];
/*
* This block's conditional is necessary because if the
* run only contains one region, then it never gets
* inserted into the non-full runs tree.
*/
arena_run_tree_remove(&bin->runs, run_mapelm);
}
}
}
static void
arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
{
size_t binind;
arena_bin_info_t *bin_info;
size_t npages, run_ind, past;
assert(run != bin->runcur);
assert(arena_run_tree_search(&bin->runs, &chunk->map[
(((uintptr_t)run-(uintptr_t)chunk)>>PAGE_SHIFT)-map_bias]) == NULL);
binind = arena_bin_index(chunk->arena, run->bin);
bin_info = &arena_bin_info[binind];
malloc_mutex_unlock(&bin->lock);
/******************************/
npages = bin_info->run_size >> PAGE_SHIFT;
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT);
past = (size_t)(PAGE_CEILING((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
bin_info->reg_size) - (uintptr_t)chunk) >> PAGE_SHIFT);
malloc_mutex_lock(&arena->lock);
/*
* If the run was originally clean, and some pages were never touched,
* trim the clean pages before deallocating the dirty portion of the
* run.
*/
if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past
- run_ind < npages) {
/*
* Trim clean pages. Convert to large run beforehand. Set the
* last map element first, in case this is a one-page run.
*/
chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE |
(chunk->map[run_ind+npages-1-map_bias].bits &
CHUNK_MAP_FLAGS_MASK);
chunk->map[run_ind-map_bias].bits = bin_info->run_size |
CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits &
CHUNK_MAP_FLAGS_MASK);
arena_run_trim_tail(arena, chunk, run, (npages << PAGE_SHIFT),
((past - run_ind) << PAGE_SHIFT), false);
/* npages = past - run_ind; */
}
#ifdef JEMALLOC_DEBUG
run->magic = 0;
#endif
arena_run_dalloc(arena, run, true);
malloc_mutex_unlock(&arena->lock);
/****************************/
malloc_mutex_lock(&bin->lock);
#ifdef JEMALLOC_STATS
bin->stats.curruns--;
#endif
}
static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
{
/*
* Make sure that bin->runcur always refers to the lowest non-full run,
* if one exists.
*/
if (bin->runcur == NULL)
bin->runcur = run;
else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
/* Switch runcur. */
if (bin->runcur->nfree > 0) {
arena_chunk_t *runcur_chunk = (arena_chunk_t *)
CHUNK_ADDR2BASE(bin->runcur);
size_t runcur_pageind = (((uintptr_t)bin->runcur -
(uintptr_t)runcur_chunk)) >> PAGE_SHIFT;
arena_chunk_map_t *runcur_mapelm =
&runcur_chunk->map[runcur_pageind-map_bias];
/* Insert runcur. */
arena_run_tree_insert(&bin->runs, runcur_mapelm);
}
bin->runcur = run;
} else {
size_t run_pageind = (((uintptr_t)run -
(uintptr_t)chunk)) >> PAGE_SHIFT;
arena_chunk_map_t *run_mapelm =
&chunk->map[run_pageind-map_bias];
assert(arena_run_tree_search(&bin->runs, run_mapelm) == NULL);
arena_run_tree_insert(&bin->runs, run_mapelm);
}
}
void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm)
{
size_t pageind;
arena_run_t *run;
arena_bin_t *bin;
#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
size_t size;
#endif
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
(mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
dassert(run->magic == ARENA_RUN_MAGIC);
bin = run->bin;
size_t binind = arena_bin_index(arena, bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
size = bin_info->reg_size;
#endif
#ifdef JEMALLOC_FILL
if (opt_junk)
memset(ptr, 0x5a, size);
#endif
arena_run_reg_dalloc(run, ptr);
if (run->nfree == bin_info->nregs) {
arena_dissociate_bin_run(chunk, run, bin);
arena_dalloc_bin_run(arena, chunk, run, bin);
} else if (run->nfree == 1 && run != bin->runcur)
arena_bin_lower_run(arena, chunk, run, bin);
#ifdef JEMALLOC_STATS
bin->stats.allocated -= size;
bin->stats.ndalloc++;
#endif
}
#ifdef JEMALLOC_STATS
void
arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats)
{
unsigned i;
malloc_mutex_lock(&arena->lock);
*nactive += arena->nactive;
*ndirty += arena->ndirty;
astats->mapped += arena->stats.mapped;
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
astats->allocated_large += arena->stats.allocated_large;
astats->nmalloc_large += arena->stats.nmalloc_large;
astats->ndalloc_large += arena->stats.ndalloc_large;
astats->nrequests_large += arena->stats.nrequests_large;
for (i = 0; i < nlclasses; i++) {
lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
lstats[i].nrequests += arena->stats.lstats[i].nrequests;
lstats[i].highruns += arena->stats.lstats[i].highruns;
lstats[i].curruns += arena->stats.lstats[i].curruns;
}
malloc_mutex_unlock(&arena->lock);
for (i = 0; i < nbins; i++) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
bstats[i].allocated += bin->stats.allocated;
bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests;
#ifdef JEMALLOC_TCACHE
bstats[i].nfills += bin->stats.nfills;
bstats[i].nflushes += bin->stats.nflushes;
#endif
bstats[i].nruns += bin->stats.nruns;
bstats[i].reruns += bin->stats.reruns;
bstats[i].highruns += bin->stats.highruns;
bstats[i].curruns += bin->stats.curruns;
malloc_mutex_unlock(&bin->lock);
}
}
#endif
void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
/* Large allocation. */
#ifdef JEMALLOC_FILL
# ifndef JEMALLOC_STATS
if (opt_junk)
# endif
#endif
{
#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
PAGE_SHIFT;
size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK;
#endif
#ifdef JEMALLOC_FILL
# ifdef JEMALLOC_STATS
if (opt_junk)
# endif
memset(ptr, 0x5a, size);
#endif
#ifdef JEMALLOC_STATS
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= size;
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].ndalloc++;
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns--;
#endif
}
arena_run_dalloc(arena, (arena_run_t *)ptr, true);
}
static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t oldsize, size_t size)
{
assert(size < oldsize);
/*
* Shrink the run, and make trailing pages available for other
* allocations.
*/
malloc_mutex_lock(&arena->lock);
arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
true);
#ifdef JEMALLOC_STATS
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= oldsize;
arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
}
#endif
malloc_mutex_unlock(&arena->lock);
}
static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t oldsize, size_t size, size_t extra, bool zero)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
size_t npages = oldsize >> PAGE_SHIFT;
size_t followsize;
assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK));
/* Try to extend the run. */
assert(size + extra > oldsize);
malloc_mutex_lock(&arena->lock);
if (pageind + npages < chunk_npages &&
(chunk->map[pageind+npages-map_bias].bits
& CHUNK_MAP_ALLOCATED) == 0 && (followsize =
chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size -
oldsize) {
/*
* The next run is available and sufficiently large. Split the
* following run, then merge the first part with the existing
* allocation.
*/
size_t flag_dirty;
size_t splitsize = (oldsize + followsize <= size + extra)
? followsize : size + extra - oldsize;
arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
((pageind+npages) << PAGE_SHIFT)), splitsize, true, zero);
size = oldsize + splitsize;
npages = size >> PAGE_SHIFT;
/*
* Mark the extended run as dirty if either portion of the run
* was dirty before allocation. This is rather pedantic,
* because there's not actually any sequence of events that
* could cause the resulting run to be passed to
* arena_run_dalloc() with the dirty argument set to false
* (which is when dirty flag consistency would really matter).
*/
flag_dirty = (chunk->map[pageind-map_bias].bits &
CHUNK_MAP_DIRTY) |
(chunk->map[pageind+npages-1-map_bias].bits &
CHUNK_MAP_DIRTY);
chunk->map[pageind-map_bias].bits = size | flag_dirty
| CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
chunk->map[pageind+npages-1-map_bias].bits = flag_dirty |
CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
#ifdef JEMALLOC_STATS
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= oldsize;
arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
arena->stats.lstats[(size >> PAGE_SHIFT) -
1].curruns;
}
#endif
malloc_mutex_unlock(&arena->lock);
return (false);
}
malloc_mutex_unlock(&arena->lock);
return (true);
}
/*
* Try to resize a large allocation, in order to avoid copying. Growing an
* object always fails when the run that follows it is already in use.
*/
static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{
size_t psize;
psize = PAGE_CEILING(size + extra);
if (psize == oldsize) {
/* Same size class. */
#ifdef JEMALLOC_FILL
if (opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
size);
}
#endif
return (false);
} else {
arena_chunk_t *chunk;
arena_t *arena;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = chunk->arena;
dassert(arena->magic == ARENA_MAGIC);
if (psize < oldsize) {
#ifdef JEMALLOC_FILL
/* Fill before shrinking in order to avoid a race. */
if (opt_junk) {
memset((void *)((uintptr_t)ptr + size), 0x5a,
oldsize - size);
}
#endif
arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
psize);
return (false);
} else {
bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
oldsize, PAGE_CEILING(size),
psize - PAGE_CEILING(size), zero);
#ifdef JEMALLOC_FILL
if (ret == false && zero == false && opt_zero) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
size - oldsize);
}
#endif
return (ret);
}
}
}
void *
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{
/*
* Avoid moving the allocation if the size class can be left the same.
*/
if (oldsize <= arena_maxclass) {
if (oldsize <= small_maxclass) {
assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
== oldsize);
if ((size + extra <= small_maxclass &&
SMALL_SIZE2BIN(size + extra) ==
SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
size + extra >= oldsize)) {
#ifdef JEMALLOC_FILL
if (opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size),
0x5a, oldsize - size);
}
#endif
return (ptr);
}
} else {
assert(size <= arena_maxclass);
if (size + extra > small_maxclass) {
if (arena_ralloc_large(ptr, oldsize, size,
extra, zero) == false)
return (ptr);
}
}
}
/* Reallocation would require a move. */
return (NULL);
}
void *
arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero)
{
void *ret;
size_t copysize;
/* Try to avoid moving the allocation. */
ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
if (ret != NULL)
return (ret);
/*
* size and oldsize are different enough that we need to move the
* object, so fall back to allocating new space and copying.
*/
if (alignment != 0) {
size_t usize = sa2u(size + extra, alignment, NULL);
if (usize == 0)
return (NULL);
ret = ipalloc(usize, alignment, zero);
} else
ret = arena_malloc(size + extra, zero);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment != 0) {
size_t usize = sa2u(size, alignment, NULL);
if (usize == 0)
return (NULL);
ret = ipalloc(usize, alignment, zero);
} else
ret = arena_malloc(size, zero);
if (ret == NULL)
return (NULL);
}
/* Junk/zero-filling was already done by ipalloc()/arena_malloc(). */
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
idalloc(ptr);
return (ret);
}
bool
arena_new(arena_t *arena, unsigned ind)
{
unsigned i;
arena_bin_t *bin;
arena->ind = ind;
arena->nthreads = 0;
if (malloc_mutex_init(&arena->lock))
return (true);
#ifdef JEMALLOC_STATS
memset(&arena->stats, 0, sizeof(arena_stats_t));
arena->stats.lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
sizeof(malloc_large_stats_t));
if (arena->stats.lstats == NULL)
return (true);
memset(arena->stats.lstats, 0, nlclasses *
sizeof(malloc_large_stats_t));
# ifdef JEMALLOC_TCACHE
ql_new(&arena->tcache_ql);
# endif
#endif
#ifdef JEMALLOC_PROF
arena->prof_accumbytes = 0;
#endif
/* Initialize chunks. */
ql_new(&arena->chunks_dirty);
arena->spare = NULL;
arena->nactive = 0;
arena->ndirty = 0;
arena->npurgatory = 0;
arena_avail_tree_new(&arena->runs_avail_clean);
arena_avail_tree_new(&arena->runs_avail_dirty);
/* Initialize bins. */
i = 0;
#ifdef JEMALLOC_TINY
/* (2^n)-spaced tiny bins. */
for (; i < ntbins; i++) {
bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock))
return (true);
bin->runcur = NULL;
arena_run_tree_new(&bin->runs);
#ifdef JEMALLOC_STATS
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
}
#endif
/* Quantum-spaced bins. */
for (; i < ntbins + nqbins; i++) {
bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock))
return (true);
bin->runcur = NULL;
arena_run_tree_new(&bin->runs);
#ifdef JEMALLOC_STATS
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
}
/* Cacheline-spaced bins. */
for (; i < ntbins + nqbins + ncbins; i++) {
bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock))
return (true);
bin->runcur = NULL;
arena_run_tree_new(&bin->runs);
#ifdef JEMALLOC_STATS
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
}
/* Subpage-spaced bins. */
for (; i < nbins; i++) {
bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock))
return (true);
bin->runcur = NULL;
arena_run_tree_new(&bin->runs);
#ifdef JEMALLOC_STATS
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
}
#ifdef JEMALLOC_DEBUG
arena->magic = ARENA_MAGIC;
#endif
return (false);
}
#ifdef JEMALLOC_DEBUG
static void
small_size2bin_validate(void)
{
size_t i, size, binind;
i = 1;
# ifdef JEMALLOC_TINY
/* Tiny. */
for (; i < (1U << LG_TINY_MIN); i++) {
size = pow2_ceil(1U << LG_TINY_MIN);
binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
assert(SMALL_SIZE2BIN(i) == binind);
}
for (; i < qspace_min; i++) {
size = pow2_ceil(i);
binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
assert(SMALL_SIZE2BIN(i) == binind);
}
# endif
/* Quantum-spaced. */
for (; i <= qspace_max; i++) {
size = QUANTUM_CEILING(i);
binind = ntbins + (size >> LG_QUANTUM) - 1;
assert(SMALL_SIZE2BIN(i) == binind);
}
/* Cacheline-spaced. */
for (; i <= cspace_max; i++) {
size = CACHELINE_CEILING(i);
binind = ntbins + nqbins + ((size - cspace_min) >>
LG_CACHELINE);
assert(SMALL_SIZE2BIN(i) == binind);
}
/* Sub-page. */
for (; i <= sspace_max; i++) {
size = SUBPAGE_CEILING(i);
binind = ntbins + nqbins + ncbins + ((size - sspace_min)
>> LG_SUBPAGE);
assert(SMALL_SIZE2BIN(i) == binind);
}
}
#endif
static bool
small_size2bin_init(void)
{
if (opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT
|| opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT
|| (sizeof(const_small_size2bin) != ((small_maxclass-1) >>
LG_TINY_MIN) + 1))
return (small_size2bin_init_hard());
small_size2bin = const_small_size2bin;
#ifdef JEMALLOC_DEBUG
small_size2bin_validate();
#endif
return (false);
}
static bool
small_size2bin_init_hard(void)
{
size_t i, size, binind;
uint8_t *custom_small_size2bin;
#define CUSTOM_SMALL_SIZE2BIN(s) \
custom_small_size2bin[(s-1) >> LG_TINY_MIN]
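/*
 * Worked example, assuming LG_TINY_MIN == 3: request sizes 17..24 all
 * map to table slot (s-1) >> 3 == 2, so a single uint8_t bin index
 * covers each 8-byte span of request sizes.
 */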
assert(opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT
|| opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT
|| (sizeof(const_small_size2bin) != ((small_maxclass-1) >>
LG_TINY_MIN) + 1));
custom_small_size2bin = (uint8_t *)
base_alloc(small_maxclass >> LG_TINY_MIN);
if (custom_small_size2bin == NULL)
return (true);
i = 1;
#ifdef JEMALLOC_TINY
/* Tiny. */
for (; i < (1U << LG_TINY_MIN); i += TINY_MIN) {
size = pow2_ceil(1U << LG_TINY_MIN);
binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
CUSTOM_SMALL_SIZE2BIN(i) = binind;
}
for (; i < qspace_min; i += TINY_MIN) {
size = pow2_ceil(i);
binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
CUSTOM_SMALL_SIZE2BIN(i) = binind;
}
#endif
/* Quantum-spaced. */
for (; i <= qspace_max; i += TINY_MIN) {
size = QUANTUM_CEILING(i);
binind = ntbins + (size >> LG_QUANTUM) - 1;
CUSTOM_SMALL_SIZE2BIN(i) = binind;
}
/* Cacheline-spaced. */
for (; i <= cspace_max; i += TINY_MIN) {
size = CACHELINE_CEILING(i);
binind = ntbins + nqbins + ((size - cspace_min) >>
LG_CACHELINE);
CUSTOM_SMALL_SIZE2BIN(i) = binind;
}
/* Sub-page. */
for (; i <= sspace_max; i += TINY_MIN) {
size = SUBPAGE_CEILING(i);
binind = ntbins + nqbins + ncbins + ((size - sspace_min) >>
LG_SUBPAGE);
CUSTOM_SMALL_SIZE2BIN(i) = binind;
}
small_size2bin = custom_small_size2bin;
#ifdef JEMALLOC_DEBUG
small_size2bin_validate();
#endif
return (false);
#undef CUSTOM_SMALL_SIZE2BIN
}
/*
* Calculate bin_info->run_size such that it meets the following constraints:
*
* *) bin_info->run_size >= min_run_size
* *) bin_info->run_size <= arena_maxclass
* *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
* *) bin_info->nregs <= RUN_MAXREGS
*
* bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
* calculated here, since these settings are all interdependent.
*/
static size_t
bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
{
size_t try_run_size, good_run_size;
uint32_t try_nregs, good_nregs;
uint32_t try_hdr_size, good_hdr_size;
uint32_t try_bitmap_offset, good_bitmap_offset;
#ifdef JEMALLOC_PROF
uint32_t try_ctx0_offset, good_ctx0_offset;
#endif
uint32_t try_reg0_offset, good_reg0_offset;
assert(min_run_size >= PAGE_SIZE);
assert(min_run_size <= arena_maxclass);
/*
* Calculate known-valid settings before entering the run_size
* expansion loop, so that the first part of the loop always copies
* valid settings.
*
* The do..while loop iteratively reduces the number of regions until
* the run header and the regions no longer overlap. A closed formula
* would be quite messy, since there is an interdependency between the
* header's mask length and the number of regions.
*/
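/*
 * Worked example (hypothetical sizes, assuming sizeof(arena_run_t) ==
 * 16, one 8-byte bitmap group, and reg_size == 64): with try_run_size
 * == 4096, try_nregs is seeded at (4096-16)/64 + 1 == 64, one high to
 * counteract the leading decrement below. The first iteration tests
 * 63 regions, leaving try_reg0_offset == 4096 - 63*64 == 64 >= 24, so
 * the header fits at once; smaller regions may need several
 * decrements before the header and region 0 stop overlapping.
 */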
try_run_size = min_run_size;
try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin_info->reg_size)
+ 1; /* Counteract try_nregs-- in loop. */
if (try_nregs > RUN_MAXREGS) {
try_nregs = RUN_MAXREGS
+ 1; /* Counteract try_nregs-- in loop. */
}
do {
try_nregs--;
try_hdr_size = sizeof(arena_run_t);
/* Pad to a long boundary. */
try_hdr_size = LONG_CEILING(try_hdr_size);
try_bitmap_offset = try_hdr_size;
/* Add space for bitmap. */
try_hdr_size += bitmap_size(try_nregs);
#ifdef JEMALLOC_PROF
if (opt_prof && prof_promote == false) {
/* Pad to a quantum boundary. */
try_hdr_size = QUANTUM_CEILING(try_hdr_size);
try_ctx0_offset = try_hdr_size;
/* Add space for one (prof_ctx_t *) per region. */
try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
} else
try_ctx0_offset = 0;
#endif
try_reg0_offset = try_run_size - (try_nregs *
bin_info->reg_size);
} while (try_hdr_size > try_reg0_offset);
/* run_size expansion loop. */
do {
/*
* Copy valid settings before trying more aggressive settings.
*/
good_run_size = try_run_size;
good_nregs = try_nregs;
good_hdr_size = try_hdr_size;
good_bitmap_offset = try_bitmap_offset;
#ifdef JEMALLOC_PROF
good_ctx0_offset = try_ctx0_offset;
#endif
good_reg0_offset = try_reg0_offset;
/* Try more aggressive settings. */
try_run_size += PAGE_SIZE;
try_nregs = ((try_run_size - sizeof(arena_run_t)) /
bin_info->reg_size)
+ 1; /* Counteract try_nregs-- in loop. */
if (try_nregs > RUN_MAXREGS) {
try_nregs = RUN_MAXREGS
+ 1; /* Counteract try_nregs-- in loop. */
}
do {
try_nregs--;
try_hdr_size = sizeof(arena_run_t);
/* Pad to a long boundary. */
try_hdr_size = LONG_CEILING(try_hdr_size);
try_bitmap_offset = try_hdr_size;
/* Add space for bitmap. */
try_hdr_size += bitmap_size(try_nregs);
#ifdef JEMALLOC_PROF
if (opt_prof && prof_promote == false) {
/* Pad to a quantum boundary. */
try_hdr_size = QUANTUM_CEILING(try_hdr_size);
try_ctx0_offset = try_hdr_size;
/*
* Add space for one (prof_ctx_t *) per region.
*/
try_hdr_size += try_nregs *
sizeof(prof_ctx_t *);
}
#endif
try_reg0_offset = try_run_size - (try_nregs *
bin_info->reg_size);
} while (try_hdr_size > try_reg0_offset);
} while (try_run_size <= arena_maxclass
&& RUN_MAX_OVRHD * (bin_info->reg_size << 3) > RUN_MAX_OVRHD_RELAX
&& (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
&& try_nregs < RUN_MAXREGS);
assert(good_hdr_size <= good_reg0_offset);
/* Copy final settings. */
bin_info->run_size = good_run_size;
bin_info->nregs = good_nregs;
bin_info->bitmap_offset = good_bitmap_offset;
#ifdef JEMALLOC_PROF
bin_info->ctx0_offset = good_ctx0_offset;
#endif
bin_info->reg0_offset = good_reg0_offset;
return (good_run_size);
}
static bool
bin_info_init(void)
{
arena_bin_info_t *bin_info;
unsigned i;
size_t prev_run_size;
arena_bin_info = base_alloc(sizeof(arena_bin_info_t) * nbins);
if (arena_bin_info == NULL)
return (true);
prev_run_size = PAGE_SIZE;
i = 0;
#ifdef JEMALLOC_TINY
/* (2^n)-spaced tiny bins. */
for (; i < ntbins; i++) {
bin_info = &arena_bin_info[i];
bin_info->reg_size = (1U << (LG_TINY_MIN + i));
prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
}
#endif
/* Quantum-spaced bins. */
for (; i < ntbins + nqbins; i++) {
bin_info = &arena_bin_info[i];
bin_info->reg_size = (i - ntbins + 1) << LG_QUANTUM;
prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
}
/* Cacheline-spaced bins. */
for (; i < ntbins + nqbins + ncbins; i++) {
bin_info = &arena_bin_info[i];
bin_info->reg_size = cspace_min + ((i - (ntbins + nqbins)) <<
LG_CACHELINE);
prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
}
/* Subpage-spaced bins. */
for (; i < nbins; i++) {
bin_info = &arena_bin_info[i];
bin_info->reg_size = sspace_min + ((i - (ntbins + nqbins +
ncbins)) << LG_SUBPAGE);
prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
}
return (false);
}
bool
arena_boot(void)
{
size_t header_size;
unsigned i;
/* Set variables according to the value of opt_lg_[qc]space_max. */
qspace_max = (1U << opt_lg_qspace_max);
cspace_min = CACHELINE_CEILING(qspace_max);
if (cspace_min == qspace_max)
cspace_min += CACHELINE;
cspace_max = (1U << opt_lg_cspace_max);
sspace_min = SUBPAGE_CEILING(cspace_max);
if (sspace_min == cspace_max)
sspace_min += SUBPAGE;
assert(sspace_min < PAGE_SIZE);
sspace_max = PAGE_SIZE - SUBPAGE;
#ifdef JEMALLOC_TINY
assert(LG_QUANTUM >= LG_TINY_MIN);
#endif
assert(ntbins <= LG_QUANTUM);
nqbins = qspace_max >> LG_QUANTUM;
ncbins = ((cspace_max - cspace_min) >> LG_CACHELINE) + 1;
nsbins = ((sspace_max - sspace_min) >> LG_SUBPAGE) + 1;
nbins = ntbins + nqbins + ncbins + nsbins;
/*
* The small_size2bin lookup table uses uint8_t to encode each bin
* index, so we cannot support more than 256 small size classes. This
* limit is hard to exceed (impossible with the default 16B quantum and
* 4KiB pages), and configurations that do exceed it are impractical,
* but we nonetheless guard against the case in order to avoid
* undefined behavior.
*
* Further constrain nbins to 255 if prof_promote is true, since all
* small size classes, plus a "not small" size class must be stored in
* 8 bits of arena_chunk_map_t's bits field.
*/
#ifdef JEMALLOC_PROF
if (opt_prof && prof_promote) {
if (nbins > 255) {
char line_buf[UMAX2S_BUFSIZE];
malloc_write("<jemalloc>: Too many small size classes (");
malloc_write(u2s(nbins, 10, line_buf));
malloc_write(" > max 255)\n");
abort();
}
} else
#endif
if (nbins > 256) {
char line_buf[UMAX2S_BUFSIZE];
malloc_write("<jemalloc>: Too many small size classes (");
malloc_write(u2s(nbins, 10, line_buf));
malloc_write(" > max 256)\n");
abort();
}
/*
* Compute the header size such that it is large enough to contain the
* page map. The page map is biased to omit entries for the header
* itself, so some iteration is necessary to compute the map bias.
*
* 1) Compute safe header_size and map_bias values that include enough
* space for an unbiased page map.
* 2) Refine map_bias based on (1) to omit the header pages in the page
* map. The resulting map_bias may be one too small.
* 3) Refine map_bias based on (2). The result will be >= the result
* from (2), and will always be correct.
*/
map_bias = 0;
for (i = 0; i < 3; i++) {
header_size = offsetof(arena_chunk_t, map)
+ (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
map_bias = (header_size >> PAGE_SHIFT) + ((header_size &
PAGE_MASK) != 0);
}
assert(map_bias > 0);
arena_maxclass = chunksize - (map_bias << PAGE_SHIFT);
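/*
 * Worked example (hypothetical sizes, assuming 4 KiB pages, 4 MiB
 * chunks (1024 pages), sizeof(arena_chunk_map_t) == 24, and a 48-byte
 * fixed header): pass 1 sizes the map for all 1024 pages (24624
 * bytes), giving map_bias == 7; pass 2 drops 7 map entries (24456
 * bytes), giving map_bias == 6; pass 3 re-checks with 1018 entries
 * (24480 bytes), which still fits in 6 pages, confirming the fixed
 * point.
 */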
if (small_size2bin_init())
return (true);
if (bin_info_init())
return (true);
return (false);
}
#define JEMALLOC_ATOMIC_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
malloc_mutex_t base_mtx;
/*
* Current pages that are being used for internal memory allocations. These
* pages are carved up in cacheline-size quanta, so that there is no chance of
* false cache line sharing.
*/
static void *base_pages;
static void *base_next_addr;
static void *base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t *base_nodes;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static bool base_pages_alloc(size_t minsize);
/******************************************************************************/
static bool
base_pages_alloc(size_t minsize)
{
size_t csize;
bool zero;
assert(minsize != 0);
csize = CHUNK_CEILING(minsize);
zero = false;
base_pages = chunk_alloc(csize, true, &zero);
if (base_pages == NULL)
return (true);
base_next_addr = base_pages;
base_past_addr = (void *)((uintptr_t)base_pages + csize);
return (false);
}
void *
base_alloc(size_t size)
{
void *ret;
size_t csize;
/* Round size up to nearest multiple of the cacheline size. */
csize = CACHELINE_CEILING(size);
malloc_mutex_lock(&base_mtx);
/* Make sure there's enough space for the allocation. */
if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
if (base_pages_alloc(csize)) {
malloc_mutex_unlock(&base_mtx);
return (NULL);
}
}
/* Allocate. */
ret = base_next_addr;
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
malloc_mutex_unlock(&base_mtx);
return (ret);
}
extent_node_t *
base_node_alloc(void)
{
extent_node_t *ret;
malloc_mutex_lock(&base_mtx);
if (base_nodes != NULL) {
ret = base_nodes;
base_nodes = *(extent_node_t **)ret;
malloc_mutex_unlock(&base_mtx);
} else {
malloc_mutex_unlock(&base_mtx);
ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
}
return (ret);
}
void
base_node_dealloc(extent_node_t *node)
{
malloc_mutex_lock(&base_mtx);
*(extent_node_t **)node = base_nodes;
base_nodes = node;
malloc_mutex_unlock(&base_mtx);
}
bool
base_boot(void)
{
base_nodes = NULL;
if (malloc_mutex_init(&base_mtx))
return (true);
return (false);
}
#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static size_t bits2groups(size_t nbits);
/******************************************************************************/
static size_t
bits2groups(size_t nbits)
{
return ((nbits >> LG_BITMAP_GROUP_NBITS) +
!!(nbits & BITMAP_GROUP_NBITS_MASK));
}
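/*
 * E.g., with 64-bit groups (LG_BITMAP_GROUP_NBITS == 6): nbits == 70
 * yields (70 >> 6) + !!(70 & 63) == 1 + 1 == 2 groups, while an exact
 * multiple such as nbits == 128 yields 2 + 0 == 2.
 */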
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
unsigned i;
size_t group_count;
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
/*
* Compute the number of groups necessary to store nbits bits, and
* progressively work upward through the levels until reaching a level
* that requires only one group.
*/
binfo->levels[0].group_offset = 0;
group_count = bits2groups(nbits);
for (i = 1; group_count > 1; i++) {
assert(i < BITMAP_MAX_LEVELS);
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
group_count = bits2groups(group_count);
}
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
binfo->nlevels = i;
binfo->nbits = nbits;
}
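/*
 * Worked example (assuming 64-bit groups): nbits == 1000 needs 16
 * level-0 groups; bits2groups(16) == 1, so a single level-1 group
 * summarizes them. The result is levels[0].group_offset == 0,
 * levels[1].group_offset == 16, levels[2].group_offset == 17 (one
 * past the end), and nlevels == 2.
 */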
size_t
bitmap_info_ngroups(const bitmap_info_t *binfo)
{
return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
}
size_t
bitmap_size(size_t nbits)
{
bitmap_info_t binfo;
bitmap_info_init(&binfo, nbits);
return (bitmap_info_ngroups(&binfo));
}
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
size_t extra;
unsigned i;
/*
* Bits are actually inverted with regard to the external bitmap
* interface, so the bitmap starts out with all 1 bits, except for
* trailing unused bits (if any). Note that each group uses bit 0 to
* correspond to the first logical bit in the group, so extra bits
* are the most significant bits of the last group.
*/
memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
LG_SIZEOF_BITMAP);
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
bitmap[binfo->levels[1].group_offset - 1] >>= extra;
for (i = 1; i < binfo->nlevels; i++) {
size_t group_count = binfo->levels[i].group_offset -
binfo->levels[i-1].group_offset;
extra = (BITMAP_GROUP_NBITS - (group_count &
BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
}
}
#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
#ifdef JEMALLOC_SWAP
bool opt_overcommit = true;
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;
#endif
#ifdef JEMALLOC_IVSALLOC
rtree_t *chunks_rtree;
#endif
/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/
/*
* If the caller specifies (*zero == false), it is still possible to receive
* zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
* takes advantage of this to avoid demanding zeroed chunks, but taking
* advantage of them if they are returned.
*/
void *
chunk_alloc(size_t size, bool base, bool *zero)
{
void *ret;
assert(size != 0);
assert((size & chunksize_mask) == 0);
#ifdef JEMALLOC_SWAP
if (swap_enabled) {
ret = chunk_alloc_swap(size, zero);
if (ret != NULL)
goto RETURN;
}
if (swap_enabled == false || opt_overcommit) {
#endif
#ifdef JEMALLOC_DSS
ret = chunk_alloc_dss(size, zero);
if (ret != NULL)
goto RETURN;
#endif
ret = chunk_alloc_mmap(size);
if (ret != NULL) {
*zero = true;
goto RETURN;
}
#ifdef JEMALLOC_SWAP
}
#endif
/* All strategies for allocation failed. */
ret = NULL;
RETURN:
#ifdef JEMALLOC_IVSALLOC
if (base == false && ret != NULL) {
if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
chunk_dealloc(ret, size);
return (NULL);
}
}
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
if (ret != NULL) {
# ifdef JEMALLOC_PROF
bool gdump;
# endif
malloc_mutex_lock(&chunks_mtx);
# ifdef JEMALLOC_STATS
stats_chunks.nchunks += (size / chunksize);
# endif
stats_chunks.curchunks += (size / chunksize);
if (stats_chunks.curchunks > stats_chunks.highchunks) {
stats_chunks.highchunks = stats_chunks.curchunks;
# ifdef JEMALLOC_PROF
gdump = true;
# endif
}
# ifdef JEMALLOC_PROF
else
gdump = false;
# endif
malloc_mutex_unlock(&chunks_mtx);
# ifdef JEMALLOC_PROF
if (opt_prof && opt_prof_gdump && gdump)
prof_gdump();
# endif
}
#endif
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
}
void
chunk_dealloc(void *chunk, size_t size)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
#ifdef JEMALLOC_IVSALLOC
rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
malloc_mutex_lock(&chunks_mtx);
stats_chunks.curchunks -= (size / chunksize);
malloc_mutex_unlock(&chunks_mtx);
#endif
#ifdef JEMALLOC_SWAP
if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
return;
#endif
#ifdef JEMALLOC_DSS
if (chunk_dealloc_dss(chunk, size) == false)
return;
#endif
chunk_dealloc_mmap(chunk, size);
}
bool
chunk_boot(void)
{
/* Set variables according to the value of opt_lg_chunk. */
chunksize = (ZU(1) << opt_lg_chunk);
assert(chunksize >= PAGE_SIZE);
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> PAGE_SHIFT);
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
if (malloc_mutex_init(&chunks_mtx))
return (true);
memset(&stats_chunks, 0, sizeof(chunk_stats_t));
#endif
#ifdef JEMALLOC_SWAP
if (chunk_swap_boot())
return (true);
#endif
if (chunk_mmap_boot())
return (true);
#ifdef JEMALLOC_DSS
if (chunk_dss_boot())
return (true);
#endif
#ifdef JEMALLOC_IVSALLOC
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
if (chunks_rtree == NULL)
return (true);
#endif
return (false);
}
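/*
 * Illustrative sketch (not part of the original source): chunks_rtree only
 * needs to discriminate chunk-aligned addresses, so its key width is the
 * pointer width in bits minus lg(chunksize).  Assuming LG_SIZEOF_PTR == 3
 * (64-bit pointers) and opt_lg_chunk == 22 (4 MiB chunks):
 *
 *     (ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk == 64 - 22 == 42
 *
 * i.e. the tree is keyed on the 42 high-order address bits.
 */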
#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_DSS
/******************************************************************************/
/* Data. */
malloc_mutex_t dss_mtx;
/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;
/*
* Trees of chunks that were previously allocated (trees differ only in node
* ordering). These are used when allocating chunks, in an attempt to re-use
* address space. Depending on function, different tree orderings are needed,
* which is why there are two trees with the same contents.
*/
static extent_tree_t dss_chunks_szad;
static extent_tree_t dss_chunks_ad;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void *chunk_recycle_dss(size_t size, bool *zero);
static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);
/******************************************************************************/
static void *
chunk_recycle_dss(size_t size, bool *zero)
{
extent_node_t *node, key;
key.addr = NULL;
key.size = size;
malloc_mutex_lock(&dss_mtx);
node = extent_tree_szad_nsearch(&dss_chunks_szad, &key);
if (node != NULL) {
void *ret = node->addr;
/* Remove node from the tree. */
extent_tree_szad_remove(&dss_chunks_szad, node);
if (node->size == size) {
extent_tree_ad_remove(&dss_chunks_ad, node);
base_node_dealloc(node);
} else {
/*
* Insert the remainder of node's address range as a
* smaller chunk. Its position within dss_chunks_ad
* does not change.
*/
assert(node->size > size);
node->addr = (void *)((uintptr_t)node->addr + size);
node->size -= size;
extent_tree_szad_insert(&dss_chunks_szad, node);
}
malloc_mutex_unlock(&dss_mtx);
if (*zero)
memset(ret, 0, size);
return (ret);
}
malloc_mutex_unlock(&dss_mtx);
return (NULL);
}
void *
chunk_alloc_dss(size_t size, bool *zero)
{
void *ret;
ret = chunk_recycle_dss(size, zero);
if (ret != NULL)
return (ret);
/*
* sbrk() uses a signed increment argument, so take care not to
* interpret a huge allocation request as a negative increment.
*/
if ((intptr_t)size < 0)
return (NULL);
malloc_mutex_lock(&dss_mtx);
if (dss_prev != (void *)-1) {
intptr_t incr;
/*
* The loop is necessary to recover from races with other
* threads that are using the DSS for something other than
* malloc.
*/
do {
/* Get the current end of the DSS. */
dss_max = sbrk(0);
/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.
*/
incr = (intptr_t)size
- (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
if (incr == (intptr_t)size)
ret = dss_max;
else {
ret = (void *)((intptr_t)dss_max + incr);
incr += size;
}
dss_prev = sbrk(incr);
if (dss_prev == dss_max) {
/* Success. */
dss_max = (void *)((intptr_t)dss_prev + incr);
malloc_mutex_unlock(&dss_mtx);
*zero = true;
return (ret);
}
} while (dss_prev != (void *)-1);
}
malloc_mutex_unlock(&dss_mtx);
return (NULL);
}
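/*
 * Illustrative sketch (not part of the original source): assuming
 * chunksize == 0x400000, size == chunksize, and a current break of
 * dss_max == 0x1000 (so CHUNK_ADDR2OFFSET(dss_max) == 0x1000):
 *
 *     incr = 0x400000 - 0x1000;       // 0x3ff000: pad to next boundary
 *     ret  = 0x1000 + incr;           // 0x400000: chunk-aligned result
 *     incr += 0x400000;               // pad plus the chunk itself
 *
 * so a single sbrk(incr) both aligns the break and reserves the chunk.
 */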
static extent_node_t *
chunk_dealloc_dss_record(void *chunk, size_t size)
{
extent_node_t *xnode, *node, *prev, key;
xnode = NULL;
while (true) {
key.addr = (void *)((uintptr_t)chunk + size);
node = extent_tree_ad_nsearch(&dss_chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) {
/*
* Coalesce chunk with the following address range.
* This does not change the position within
* dss_chunks_ad, so only remove/insert from/into
* dss_chunks_szad.
*/
extent_tree_szad_remove(&dss_chunks_szad, node);
node->addr = chunk;
node->size += size;
extent_tree_szad_insert(&dss_chunks_szad, node);
break;
} else if (xnode == NULL) {
/*
* It is possible that base_node_alloc() will cause a
* new base chunk to be allocated, so take care not to
* deadlock on dss_mtx, and recover if another thread
* deallocates an adjacent chunk while this one is busy
* allocating xnode.
*/
malloc_mutex_unlock(&dss_mtx);
xnode = base_node_alloc();
malloc_mutex_lock(&dss_mtx);
if (xnode == NULL)
return (NULL);
} else {
/* Coalescing forward failed, so insert a new node. */
node = xnode;
xnode = NULL;
node->addr = chunk;
node->size = size;
extent_tree_ad_insert(&dss_chunks_ad, node);
extent_tree_szad_insert(&dss_chunks_szad, node);
break;
}
}
	/* Discard xnode if it ended up unused due to a race. */
if (xnode != NULL)
base_node_dealloc(xnode);
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(&dss_chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within dss_chunks_ad, so only
* remove/insert node from/into dss_chunks_szad.
*/
extent_tree_szad_remove(&dss_chunks_szad, prev);
extent_tree_ad_remove(&dss_chunks_ad, prev);
extent_tree_szad_remove(&dss_chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
extent_tree_szad_insert(&dss_chunks_szad, node);
base_node_dealloc(prev);
}
return (node);
}
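/*
 * Illustrative sketch (not part of the original source): suppose
 * [0x400000, 0x800000) is already recorded and [0x800000, 0xc00000) is
 * deallocated.  The forward search at key.addr == 0xc00000 finds no
 * adjacent node, so a new node for [0x800000, 0xc00000) is inserted; the
 * backward pass then sees that the predecessor ends exactly at 0x800000
 * and merges the two, leaving one node covering [0x400000, 0xc00000).
 */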
bool
chunk_in_dss(void *chunk)
{
bool ret;
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max)
ret = true;
else
ret = false;
malloc_mutex_unlock(&dss_mtx);
return (ret);
}
bool
chunk_dealloc_dss(void *chunk, size_t size)
{
bool ret;
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max) {
extent_node_t *node;
/* Try to coalesce with other unused chunks. */
node = chunk_dealloc_dss_record(chunk, size);
if (node != NULL) {
chunk = node->addr;
size = node->size;
}
/* Get the current end of the DSS. */
dss_max = sbrk(0);
/*
* Try to shrink the DSS if this chunk is at the end of the
* DSS. The sbrk() call here is subject to a race condition
* with threads that use brk(2) or sbrk(2) directly, but the
* alternative would be to leak memory for the sake of poorly
* designed multi-threaded programs.
*/
if ((void *)((uintptr_t)chunk + size) == dss_max
&& (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
/* Success. */
dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);
if (node != NULL) {
extent_tree_szad_remove(&dss_chunks_szad, node);
extent_tree_ad_remove(&dss_chunks_ad, node);
base_node_dealloc(node);
}
} else
madvise(chunk, size, MADV_DONTNEED);
ret = false;
goto RETURN;
}
ret = true;
RETURN:
malloc_mutex_unlock(&dss_mtx);
return (ret);
}
bool
chunk_dss_boot(void)
{
if (malloc_mutex_init(&dss_mtx))
return (true);
dss_base = sbrk(0);
dss_prev = dss_base;
dss_max = dss_base;
extent_tree_szad_new(&dss_chunks_szad);
extent_tree_ad_new(&dss_chunks_ad);
return (false);
}
/******************************************************************************/
#endif /* JEMALLOC_DSS */
#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
/*
* Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
* potentially avoid some system calls.
*/
#ifndef NO_TLS
static __thread bool mmap_unaligned_tls
JEMALLOC_ATTR(tls_model("initial-exec"));
#define MMAP_UNALIGNED_GET() mmap_unaligned_tls
#define MMAP_UNALIGNED_SET(v) do { \
mmap_unaligned_tls = (v); \
} while (0)
#else
static pthread_key_t mmap_unaligned_tsd;
#define MMAP_UNALIGNED_GET() ((bool)pthread_getspecific(mmap_unaligned_tsd))
#define MMAP_UNALIGNED_SET(v) do { \
pthread_setspecific(mmap_unaligned_tsd, (void *)(v)); \
} while (0)
#endif
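/*
 * Illustrative sketch (not part of the original source): both variants
 * expose the same interface, so callers are oblivious to the mechanism:
 *
 *     if (MMAP_UNALIGNED_GET() == false) { ... try the fast path ... }
 *     MMAP_UNALIGNED_SET(true);
 *
 * With NO_TLS the flag round-trips through pthread_getspecific(), which
 * is why it is stored as a (void *) cast of the bool.
 */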
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void *pages_map(void *addr, size_t size, bool noreserve);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, bool unaligned,
bool noreserve);
static void *chunk_alloc_mmap_internal(size_t size, bool noreserve);
/******************************************************************************/
static void *
pages_map(void *addr, size_t size, bool noreserve)
{
void *ret;
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
int flags = MAP_PRIVATE | MAP_ANON;
#ifdef MAP_NORESERVE
if (noreserve)
flags |= MAP_NORESERVE;
#endif
ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
assert(ret != NULL);
if (ret == MAP_FAILED)
ret = NULL;
else if (addr != NULL && ret != addr) {
/*
* We succeeded in mapping memory, but not in the right place.
*/
if (munmap(ret, size) == -1) {
char buf[BUFERROR_BUF];
buferror(errno, buf, sizeof(buf));
malloc_write("<jemalloc>: Error in munmap(): ");
malloc_write(buf);
malloc_write("\n");
if (opt_abort)
abort();
}
ret = NULL;
}
assert(ret == NULL || (addr == NULL && ret != addr)
|| (addr != NULL && ret == addr));
return (ret);
}
static void
pages_unmap(void *addr, size_t size)
{
if (munmap(addr, size) == -1) {
char buf[BUFERROR_BUF];
buferror(errno, buf, sizeof(buf));
malloc_write("<jemalloc>: Error in munmap(): ");
malloc_write(buf);
malloc_write("\n");
if (opt_abort)
abort();
}
}
static void *
chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
{
void *ret;
size_t offset;
/* Beware size_t wrap-around. */
if (size + chunksize <= size)
return (NULL);
ret = pages_map(NULL, size + chunksize, noreserve);
if (ret == NULL)
return (NULL);
/* Clean up unneeded leading/trailing space. */
offset = CHUNK_ADDR2OFFSET(ret);
if (offset != 0) {
/* Note that mmap() returned an unaligned mapping. */
unaligned = true;
/* Leading space. */
pages_unmap(ret, chunksize - offset);
ret = (void *)((uintptr_t)ret +
(chunksize - offset));
/* Trailing space. */
pages_unmap((void *)((uintptr_t)ret + size),
offset);
} else {
/* Trailing space only. */
pages_unmap((void *)((uintptr_t)ret + size),
chunksize);
}
/*
* If mmap() returned an aligned mapping, reset mmap_unaligned so that
* the next chunk_alloc_mmap() execution tries the fast allocation
* method.
*/
if (unaligned == false)
MMAP_UNALIGNED_SET(false);
return (ret);
}
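/*
 * Illustrative sketch (not part of the original source): assuming
 * chunksize == 0x400000 and size == 0x400000, the slow path maps
 * 0x800000 bytes.  If mmap() returns ret == 0x10003000 (offset ==
 * 0x3000), the trim proceeds as:
 *
 *     pages_unmap(0x10003000, 0x3fd000);   // leading: chunksize - offset
 *     ret = (void *)0x10400000;            // chunk-aligned start
 *     pages_unmap(0x10800000, 0x3000);     // trailing: offset
 *
 * leaving exactly [0x10400000, 0x10800000) mapped.
 */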
static void *
chunk_alloc_mmap_internal(size_t size, bool noreserve)
{
void *ret;
/*
* Ideally, there would be a way to specify alignment to mmap() (like
* NetBSD has), but in the absence of such a feature, we have to work
* hard to efficiently create aligned mappings. The reliable, but
* slow method is to create a mapping that is over-sized, then trim the
* excess. However, that always results in at least one call to
* pages_unmap().
*
* A more optimistic approach is to try mapping precisely the right
* amount, then try to append another mapping if alignment is off. In
* practice, this works out well as long as the application is not
* interleaving mappings via direct mmap() calls. If we do run into a
* situation where there is an interleaved mapping and we are unable to
* extend an unaligned mapping, our best option is to switch to the
* slow method until mmap() returns another aligned mapping. This will
* tend to leave a gap in the memory map that is too small to cause
* later problems for the optimistic method.
*
* Another possible confounding factor is address space layout
* randomization (ASLR), which causes mmap(2) to disregard the
* requested address. mmap_unaligned tracks whether the previous
* chunk_alloc_mmap() execution received any unaligned or relocated
* mappings, and if so, the current execution will immediately fall
* back to the slow method. However, we keep track of whether the fast
* method would have succeeded, and if so, we make a note to try the
* fast method next time.
*/
if (MMAP_UNALIGNED_GET() == false) {
size_t offset;
ret = pages_map(NULL, size, noreserve);
if (ret == NULL)
return (NULL);
offset = CHUNK_ADDR2OFFSET(ret);
if (offset != 0) {
MMAP_UNALIGNED_SET(true);
/* Try to extend chunk boundary. */
if (pages_map((void *)((uintptr_t)ret + size),
chunksize - offset, noreserve) == NULL) {
/*
* Extension failed. Clean up, then revert to
* the reliable-but-expensive method.
*/
pages_unmap(ret, size);
ret = chunk_alloc_mmap_slow(size, true,
noreserve);
} else {
/* Clean up unneeded leading space. */
pages_unmap(ret, chunksize - offset);
ret = (void *)((uintptr_t)ret + (chunksize -
offset));
}
}
} else
ret = chunk_alloc_mmap_slow(size, false, noreserve);
return (ret);
}
void *
chunk_alloc_mmap(size_t size)
{
return (chunk_alloc_mmap_internal(size, false));
}
void *
chunk_alloc_mmap_noreserve(size_t size)
{
return (chunk_alloc_mmap_internal(size, true));
}
void
chunk_dealloc_mmap(void *chunk, size_t size)
{
pages_unmap(chunk, size);
}
bool
chunk_mmap_boot(void)
{
#ifdef NO_TLS
if (pthread_key_create(&mmap_unaligned_tsd, NULL) != 0) {
malloc_write("<jemalloc>: Error in pthread_key_create()\n");
return (true);
}
#endif
return (false);
}
#define JEMALLOC_CHUNK_SWAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_SWAP
/******************************************************************************/
/* Data. */
malloc_mutex_t swap_mtx;
bool swap_enabled;
bool swap_prezeroed;
size_t swap_nfds;
int *swap_fds;
#ifdef JEMALLOC_STATS
size_t swap_avail;
#endif
/* Base address of the mmap()ed file(s). */
static void *swap_base;
/* Current end of the space in use (<= swap_max). */
static void *swap_end;
/* Absolute upper limit on file-backed addresses. */
static void *swap_max;
/*
* Trees of chunks that were previously allocated (trees differ only in node
* ordering). These are used when allocating chunks, in an attempt to re-use
* address space. Depending on function, different tree orderings are needed,
* which is why there are two trees with the same contents.
*/
static extent_tree_t swap_chunks_szad;
static extent_tree_t swap_chunks_ad;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void *chunk_recycle_swap(size_t size, bool *zero);
static extent_node_t *chunk_dealloc_swap_record(void *chunk, size_t size);
/******************************************************************************/
static void *
chunk_recycle_swap(size_t size, bool *zero)
{
extent_node_t *node, key;
key.addr = NULL;
key.size = size;
malloc_mutex_lock(&swap_mtx);
node = extent_tree_szad_nsearch(&swap_chunks_szad, &key);
if (node != NULL) {
void *ret = node->addr;
/* Remove node from the tree. */
extent_tree_szad_remove(&swap_chunks_szad, node);
if (node->size == size) {
extent_tree_ad_remove(&swap_chunks_ad, node);
base_node_dealloc(node);
} else {
/*
* Insert the remainder of node's address range as a
* smaller chunk. Its position within swap_chunks_ad
* does not change.
*/
assert(node->size > size);
node->addr = (void *)((uintptr_t)node->addr + size);
node->size -= size;
extent_tree_szad_insert(&swap_chunks_szad, node);
}
#ifdef JEMALLOC_STATS
swap_avail -= size;
#endif
malloc_mutex_unlock(&swap_mtx);
if (*zero)
memset(ret, 0, size);
return (ret);
}
malloc_mutex_unlock(&swap_mtx);
return (NULL);
}
void *
chunk_alloc_swap(size_t size, bool *zero)
{
void *ret;
assert(swap_enabled);
ret = chunk_recycle_swap(size, zero);
if (ret != NULL)
return (ret);
malloc_mutex_lock(&swap_mtx);
if ((uintptr_t)swap_end + size <= (uintptr_t)swap_max) {
ret = swap_end;
swap_end = (void *)((uintptr_t)swap_end + size);
#ifdef JEMALLOC_STATS
swap_avail -= size;
#endif
malloc_mutex_unlock(&swap_mtx);
if (swap_prezeroed)
*zero = true;
else if (*zero)
memset(ret, 0, size);
} else {
malloc_mutex_unlock(&swap_mtx);
return (NULL);
}
return (ret);
}
static extent_node_t *
chunk_dealloc_swap_record(void *chunk, size_t size)
{
extent_node_t *xnode, *node, *prev, key;
xnode = NULL;
while (true) {
key.addr = (void *)((uintptr_t)chunk + size);
node = extent_tree_ad_nsearch(&swap_chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) {
/*
* Coalesce chunk with the following address range.
* This does not change the position within
* swap_chunks_ad, so only remove/insert from/into
* swap_chunks_szad.
*/
extent_tree_szad_remove(&swap_chunks_szad, node);
node->addr = chunk;
node->size += size;
extent_tree_szad_insert(&swap_chunks_szad, node);
break;
} else if (xnode == NULL) {
/*
* It is possible that base_node_alloc() will cause a
* new base chunk to be allocated, so take care not to
* deadlock on swap_mtx, and recover if another thread
* deallocates an adjacent chunk while this one is busy
* allocating xnode.
*/
malloc_mutex_unlock(&swap_mtx);
xnode = base_node_alloc();
malloc_mutex_lock(&swap_mtx);
if (xnode == NULL)
return (NULL);
} else {
/* Coalescing forward failed, so insert a new node. */
node = xnode;
xnode = NULL;
node->addr = chunk;
node->size = size;
extent_tree_ad_insert(&swap_chunks_ad, node);
extent_tree_szad_insert(&swap_chunks_szad, node);
break;
}
}
	/* Discard xnode if it ended up unused due to a race. */
if (xnode != NULL)
base_node_dealloc(xnode);
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(&swap_chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within swap_chunks_ad, so only
* remove/insert node from/into swap_chunks_szad.
*/
extent_tree_szad_remove(&swap_chunks_szad, prev);
extent_tree_ad_remove(&swap_chunks_ad, prev);
extent_tree_szad_remove(&swap_chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
extent_tree_szad_insert(&swap_chunks_szad, node);
base_node_dealloc(prev);
}
return (node);
}
bool
chunk_in_swap(void *chunk)
{
bool ret;
assert(swap_enabled);
malloc_mutex_lock(&swap_mtx);
if ((uintptr_t)chunk >= (uintptr_t)swap_base
&& (uintptr_t)chunk < (uintptr_t)swap_max)
ret = true;
else
ret = false;
malloc_mutex_unlock(&swap_mtx);
return (ret);
}
bool
chunk_dealloc_swap(void *chunk, size_t size)
{
bool ret;
assert(swap_enabled);
malloc_mutex_lock(&swap_mtx);
if ((uintptr_t)chunk >= (uintptr_t)swap_base
&& (uintptr_t)chunk < (uintptr_t)swap_max) {
extent_node_t *node;
/* Try to coalesce with other unused chunks. */
node = chunk_dealloc_swap_record(chunk, size);
if (node != NULL) {
chunk = node->addr;
size = node->size;
}
/*
* Try to shrink the in-use memory if this chunk is at the end
* of the in-use memory.
*/
if ((void *)((uintptr_t)chunk + size) == swap_end) {
swap_end = (void *)((uintptr_t)swap_end - size);
if (node != NULL) {
extent_tree_szad_remove(&swap_chunks_szad,
node);
extent_tree_ad_remove(&swap_chunks_ad, node);
base_node_dealloc(node);
}
} else
madvise(chunk, size, MADV_DONTNEED);
#ifdef JEMALLOC_STATS
swap_avail += size;
#endif
ret = false;
goto RETURN;
}
ret = true;
RETURN:
malloc_mutex_unlock(&swap_mtx);
return (ret);
}
bool
chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
{
bool ret;
unsigned i;
off_t off;
void *vaddr;
size_t cumsize, voff;
size_t sizes[nfds];
malloc_mutex_lock(&swap_mtx);
/* Get file sizes. */
for (i = 0, cumsize = 0; i < nfds; i++) {
off = lseek(fds[i], 0, SEEK_END);
if (off == ((off_t)-1)) {
ret = true;
goto RETURN;
}
if (PAGE_CEILING(off) != off) {
/* Truncate to a multiple of the page size. */
off &= ~PAGE_MASK;
if (ftruncate(fds[i], off) != 0) {
ret = true;
goto RETURN;
}
}
sizes[i] = off;
if (cumsize + off < cumsize) {
/*
* Cumulative file size is greater than the total
* address space. Bail out while it's still obvious
* what the problem is.
*/
ret = true;
goto RETURN;
}
cumsize += off;
}
/* Round down to a multiple of the chunk size. */
cumsize &= ~chunksize_mask;
if (cumsize == 0) {
ret = true;
goto RETURN;
}
/*
* Allocate a chunk-aligned region of anonymous memory, which will
* be the final location for the memory-mapped files.
*/
vaddr = chunk_alloc_mmap_noreserve(cumsize);
if (vaddr == NULL) {
ret = true;
goto RETURN;
}
/* Overlay the files onto the anonymous mapping. */
for (i = 0, voff = 0; i < nfds; i++) {
void *addr = mmap((void *)((uintptr_t)vaddr + voff), sizes[i],
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fds[i], 0);
if (addr == MAP_FAILED) {
char buf[BUFERROR_BUF];
buferror(errno, buf, sizeof(buf));
malloc_write(
"<jemalloc>: Error in mmap(..., MAP_FIXED, ...): ");
malloc_write(buf);
malloc_write("\n");
if (opt_abort)
abort();
if (munmap(vaddr, voff) == -1) {
buferror(errno, buf, sizeof(buf));
malloc_write("<jemalloc>: Error in munmap(): ");
malloc_write(buf);
malloc_write("\n");
}
ret = true;
goto RETURN;
}
assert(addr == (void *)((uintptr_t)vaddr + voff));
/*
* Tell the kernel that the mapping will be accessed randomly,
* and that it should not gratuitously sync pages to the
* filesystem.
*/
#ifdef MADV_RANDOM
madvise(addr, sizes[i], MADV_RANDOM);
#endif
#ifdef MADV_NOSYNC
madvise(addr, sizes[i], MADV_NOSYNC);
#endif
voff += sizes[i];
}
swap_prezeroed = prezeroed;
swap_base = vaddr;
swap_end = swap_base;
swap_max = (void *)((uintptr_t)vaddr + cumsize);
/* Copy the fds array for mallctl purposes. */
swap_fds = (int *)base_alloc(nfds * sizeof(int));
if (swap_fds == NULL) {
ret = true;
goto RETURN;
}
memcpy(swap_fds, fds, nfds * sizeof(int));
swap_nfds = nfds;
#ifdef JEMALLOC_STATS
swap_avail = cumsize;
#endif
swap_enabled = true;
ret = false;
RETURN:
malloc_mutex_unlock(&swap_mtx);
return (ret);
}
bool
chunk_swap_boot(void)
{
if (malloc_mutex_init(&swap_mtx))
return (true);
swap_enabled = false;
swap_prezeroed = false; /* swap.* mallctl's depend on this. */
swap_nfds = 0;
swap_fds = NULL;
#ifdef JEMALLOC_STATS
swap_avail = 0;
#endif
swap_base = NULL;
swap_end = NULL;
swap_max = NULL;
extent_tree_szad_new(&swap_chunks_szad);
extent_tree_ad_new(&swap_chunks_ad);
return (false);
}
/******************************************************************************/
#endif /* JEMALLOC_SWAP */
/*
*******************************************************************************
* Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
* hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
* functions are employed. The original cuckoo hashing algorithm was described
* in:
*
* Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms
* 51(2):122-144.
*
* Generalization of cuckoo hashing was discussed in:
*
* Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
* alternative to traditional hash tables. In Proceedings of the 7th
* Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
* January 2006.
*
* This implementation uses precisely two hash functions because that is the
* fewest that can work, and supporting multiple hashes is an implementation
* burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
* that shows approximate expected maximum load factors for various
* configurations:
*
* | #cells/bucket |
* #hashes | 1 | 2 | 4 | 8 |
* --------+-------+-------+-------+-------+
* 1 | 0.006 | 0.006 | 0.03 | 0.12 |
* 2 | 0.49 | 0.86 |>0.93< |>0.96< |
* 3 | 0.91 | 0.97 | 0.98 | 0.999 |
* 4 | 0.97 | 0.99 | 0.999 | |
*
* The number of cells per bucket is chosen such that a bucket fits in one cache
* line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
* respectively.
*
******************************************************************************/
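/*
 * Illustrative sketch (not part of the original source): each cell
 * (ckhc_t) holds a key pointer and a data pointer, so assuming 64-byte
 * cache lines:
 *
 *     64-bit: sizeof(ckhc_t) == 16  -->  64/16 == 4 cells/bucket: (4,2)
 *     32-bit: sizeof(ckhc_t) ==  8  -->  64/8  == 8 cells/bucket: (8,2)
 *
 * which correspond to the >0.93 and >0.96 load-factor entries in the
 * table above.
 */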
#define JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static bool ckh_grow(ckh_t *ckh);
static void ckh_shrink(ckh_t *ckh);
/******************************************************************************/
/*
* Search bucket for key and return the cell number if found; SIZE_T_MAX
* otherwise.
*/
JEMALLOC_INLINE size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
{
ckhc_t *cell;
unsigned i;
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
if (cell->key != NULL && ckh->keycomp(key, cell->key))
return ((bucket << LG_CKH_BUCKET_CELLS) + i);
}
return (SIZE_T_MAX);
}
/*
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/
JEMALLOC_INLINE size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
size_t hash1, hash2, bucket, cell;
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
/* Search primary bucket. */
bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
if (cell != SIZE_T_MAX)
return (cell);
/* Search secondary bucket. */
bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
return (cell);
}
JEMALLOC_INLINE bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
const void *data)
{
ckhc_t *cell;
unsigned offset, i;
/*
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
prn32(offset, LG_CKH_BUCKET_CELLS, ckh->prn_state, CKH_A, CKH_C);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
if (cell->key == NULL) {
cell->key = key;
cell->data = data;
ckh->count++;
return (false);
}
}
return (true);
}
/*
* No space is available in bucket. Randomly evict an item, then try to find an
* alternate location for that item. Iteratively repeat this
* eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle.
*/
JEMALLOC_INLINE bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
void const **argdata)
{
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
size_t hash1, hash2, bucket, tbucket;
unsigned i;
bucket = argbucket;
key = *argkey;
data = *argdata;
while (true) {
/*
* Choose a random item within the bucket to evict. This is
* critical to correct function, because without (eventually)
* evicting all items within a bucket during iteration, it
* would be possible to get stuck in an infinite loop if there
* were an item for which both hashes indicated the same
* bucket.
*/
prn32(i, LG_CKH_BUCKET_CELLS, ckh->prn_state, CKH_A, CKH_C);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);
/* Swap cell->{key,data} and {key,data} (evict). */
tkey = cell->key; tdata = cell->data;
cell->key = key; cell->data = data;
key = tkey; data = tdata;
#ifdef CKH_COUNT
ckh->nrelocs++;
#endif
/* Find the alternate bucket for the evicted item. */
ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
tbucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (tbucket == bucket) {
tbucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
/*
* It may be that (tbucket == bucket) still, if the
* item's hashes both indicate this bucket. However,
* we are guaranteed to eventually escape this bucket
* during iteration, assuming pseudo-random item
* selection (true randomness would make infinite
* looping a remote possibility). The reason we can
* never get trapped forever is that there are two
* cases:
*
* 1) This bucket == argbucket, so we will quickly
* detect an eviction cycle and terminate.
* 2) An item was evicted to this bucket from another,
* which means that at least one item in this bucket
* has hashes that indicate distinct buckets.
*/
}
/* Check for a cycle. */
if (tbucket == argbucket) {
*argkey = key;
*argdata = data;
return (true);
}
bucket = tbucket;
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
}
}
JEMALLOC_INLINE bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
{
size_t hash1, hash2, bucket;
const void *key = *argkey;
const void *data = *argdata;
ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
/* Try to insert in primary bucket. */
bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
/* Try to insert in secondary bucket. */
bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
/*
* Try to find a place for this item via iterative eviction/relocation.
*/
return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
}
/*
* Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new.
*/
JEMALLOC_INLINE bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
{
size_t count, i, nins;
const void *key, *data;
count = ckh->count;
ckh->count = 0;
for (i = nins = 0; nins < count; i++) {
if (aTab[i].key != NULL) {
key = aTab[i].key;
data = aTab[i].data;
if (ckh_try_insert(ckh, &key, &data)) {
ckh->count = count;
return (true);
}
nins++;
}
}
return (false);
}
static bool
ckh_grow(ckh_t *ckh)
{
bool ret;
ckhc_t *tab, *ttab;
size_t lg_curcells;
unsigned lg_prevbuckets;
#ifdef CKH_COUNT
ckh->ngrows++;
#endif
/*
* It is possible (though unlikely, given well behaved hashes) that the
* table will have to be doubled more than once in order to create a
* usable table.
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
while (true) {
size_t usize;
lg_curcells++;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE, NULL);
if (usize == 0) {
ret = true;
goto RETURN;
}
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (tab == NULL) {
ret = true;
goto RETURN;
}
/* Swap in new table. */
ttab = ckh->tab;
ckh->tab = tab;
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (ckh_rebuild(ckh, tab) == false) {
idalloc(tab);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloc(ckh->tab);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
ret = false;
RETURN:
return (ret);
}
static void
ckh_shrink(ckh_t *ckh)
{
ckhc_t *tab, *ttab;
size_t lg_curcells, usize;
unsigned lg_prevbuckets;
/*
* It is possible (though unlikely, given well behaved hashes) that the
* table rebuild will fail.
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE, NULL);
if (usize == 0)
return;
tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
* prevent this or future operations from proceeding.
*/
return;
}
/* Swap in new table. */
ttab = ckh->tab;
ckh->tab = tab;
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (ckh_rebuild(ckh, tab) == false) {
idalloc(tab);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
return;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloc(ckh->tab);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
ckh->nshrinkfails++;
#endif
}
bool
ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
{
bool ret;
size_t mincells, usize;
unsigned lg_mincells;
assert(minitems > 0);
assert(hash != NULL);
assert(keycomp != NULL);
#ifdef CKH_COUNT
ckh->ngrows = 0;
ckh->nshrinks = 0;
ckh->nshrinkfails = 0;
ckh->ninserts = 0;
ckh->nrelocs = 0;
#endif
ckh->prn_state = 42; /* Value doesn't really matter. */
ckh->count = 0;
	/*
	 * Find the minimum power of 2 that is large enough to fit minitems
	 * entries.  We are using (2+,2) cuckoo hashing, which has an expected
	 * maximum load factor of at least ~0.86, so 0.75 is a conservative
	 * load factor that will typically allow minitems to fit without ever
	 * growing the table.
	 */
assert(LG_CKH_BUCKET_CELLS > 0);
mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
for (lg_mincells = LG_CKH_BUCKET_CELLS;
(ZU(1) << lg_mincells) < mincells;
lg_mincells++)
; /* Do nothing. */
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->hash = hash;
ckh->keycomp = keycomp;
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE, NULL);
if (usize == 0) {
ret = true;
goto RETURN;
}
ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (ckh->tab == NULL) {
ret = true;
goto RETURN;
}
#ifdef JEMALLOC_DEBUG
ckh->magic = CKH_MAGIC;
#endif
ret = false;
RETURN:
return (ret);
}
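/*
 * Illustrative sketch (not part of the original source): for
 * minitems == 16 the sizing above computes
 *
 *     mincells = ((16 + (3 - (16 % 3))) / 3) << 2 == (18/3) << 2 == 24
 *
 * and the loop rounds up to the next power of two, 32 cells, for an
 * initial load factor of 16/32 == 0.5, comfortably under 0.75.
 */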
void
ckh_delete(ckh_t *ckh)
{
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
#ifdef CKH_VERBOSE
malloc_printf(
"%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
" nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
" nrelocs: %"PRIu64"\n", __func__, ckh,
(unsigned long long)ckh->ngrows,
(unsigned long long)ckh->nshrinks,
(unsigned long long)ckh->nshrinkfails,
(unsigned long long)ckh->ninserts,
(unsigned long long)ckh->nrelocs);
#endif
idalloc(ckh->tab);
#ifdef JEMALLOC_DEBUG
memset(ckh, 0x5a, sizeof(ckh_t));
#endif
}
size_t
ckh_count(ckh_t *ckh)
{
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
return (ckh->count);
}
bool
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
{
size_t i, ncells;
for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
if (ckh->tab[i].key != NULL) {
if (key != NULL)
*key = (void *)ckh->tab[i].key;
if (data != NULL)
*data = (void *)ckh->tab[i].data;
*tabind = i + 1;
return (false);
}
}
return (true);
}
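/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller enumerates all items by starting tabind at 0 and looping until
 * ckh_iter() returns true:
 *
 *     size_t tabind = 0;
 *     void *key, *data;
 *     while (ckh_iter(ckh, &tabind, &key, &data) == false) {
 *             // ... visit (key, data) ...
 *     }
 */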
bool
ckh_insert(ckh_t *ckh, const void *key, const void *data)
{
bool ret;
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
assert(ckh_search(ckh, key, NULL, NULL));
#ifdef CKH_COUNT
ckh->ninserts++;
#endif
while (ckh_try_insert(ckh, &key, &data)) {
if (ckh_grow(ckh)) {
ret = true;
goto RETURN;
}
}
ret = false;
RETURN:
return (ret);
}
bool
ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
size_t cell;
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
if (key != NULL)
*key = (void *)ckh->tab[cell].key;
if (data != NULL)
*data = (void *)ckh->tab[cell].data;
ckh->tab[cell].key = NULL;
ckh->tab[cell].data = NULL; /* Not necessary. */
ckh->count--;
/* Try to halve the table if it is less than 1/4 full. */
if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
ckh_shrink(ckh);
}
return (false);
}
return (true);
}
bool
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
size_t cell;
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
if (key != NULL)
*key = (void *)ckh->tab[cell].key;
if (data != NULL)
*data = (void *)ckh->tab[cell].data;
return (false);
}
return (true);
}
void
ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
{
size_t ret1, ret2;
uint64_t h;
assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
assert(hash1 != NULL);
assert(hash2 != NULL);
h = hash(key, strlen((const char *)key), 0x94122f335b332aeaLLU);
if (minbits <= 32) {
/*
* Avoid doing multiple hashes, since a single hash provides
* enough bits.
*/
ret1 = h & ZU(0xffffffffU);
ret2 = h >> 32;
} else {
ret1 = h;
ret2 = hash(key, strlen((const char *)key),
0x8432a476666bbc13U);
}
*hash1 = ret1;
*hash2 = ret2;
}
bool
ckh_string_keycomp(const void *k1, const void *k2)
{
assert(k1 != NULL);
assert(k2 != NULL);
return (strcmp((char *)k1, (char *)k2) ? false : true);
}
void
ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
size_t *hash2)
{
size_t ret1, ret2;
uint64_t h;
union {
const void *v;
uint64_t i;
} u;
assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
assert(hash1 != NULL);
assert(hash2 != NULL);
assert(sizeof(u.v) == sizeof(u.i));
#if (LG_SIZEOF_PTR != LG_SIZEOF_INT)
u.i = 0;
#endif
u.v = key;
h = hash(&u.i, sizeof(u.i), 0xd983396e68886082LLU);
if (minbits <= 32) {
/*
* Avoid doing multiple hashes, since a single hash provides
* enough bits.
*/
ret1 = h & ZU(0xffffffffU);
ret2 = h >> 32;
} else {
assert(SIZEOF_PTR == 8);
ret1 = h;
ret2 = hash(&u.i, sizeof(u.i), 0x5e2be9aff8709a5dLLU);
}
*hash1 = ret1;
*hash2 = ret2;
}
bool
ckh_pointer_keycomp(const void *k1, const void *k2)
{
return ((k1 == k2) ? true : false);
}
#define JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
/*
* ctl_mtx protects the following:
* - ctl_stats.*
* - opt_prof_active
* - swap_enabled
* - swap_prezeroed
*/
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
static uint64_t ctl_epoch;
static ctl_stats_t ctl_stats;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
#define CTL_PROTO(n) \
static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \
const ctl_node_t *n##_index(const size_t *mib, size_t miblen, \
size_t i);
#ifdef JEMALLOC_STATS
static bool ctl_arena_init(ctl_arena_stats_t *astats);
#endif
static void ctl_arena_clear(ctl_arena_stats_t *astats);
#ifdef JEMALLOC_STATS
static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats);
#endif
static void ctl_arena_refresh(arena_t *arena, unsigned i);
static void ctl_refresh(void);
static bool ctl_init(void);
static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
size_t *mibp, size_t *depthp);
CTL_PROTO(version)
CTL_PROTO(epoch)
#ifdef JEMALLOC_TCACHE
CTL_PROTO(tcache_flush)
#endif
CTL_PROTO(thread_arena)
#ifdef JEMALLOC_STATS
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
#endif
CTL_PROTO(config_debug)
CTL_PROTO(config_dss)
CTL_PROTO(config_dynamic_page_shift)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_swap)
CTL_PROTO(config_sysv)
CTL_PROTO(config_tcache)
CTL_PROTO(config_tiny)
CTL_PROTO(config_tls)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_lg_qspace_max)
CTL_PROTO(opt_lg_cspace_max)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_stats_print)
#ifdef JEMALLOC_FILL
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
#endif
#ifdef JEMALLOC_SYSV
CTL_PROTO(opt_sysv)
#endif
#ifdef JEMALLOC_XMALLOC
CTL_PROTO(opt_xmalloc)
#endif
#ifdef JEMALLOC_TCACHE
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_gc_sweep)
#endif
#ifdef JEMALLOC_PROF
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_lg_prof_bt_max)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(opt_lg_prof_tcmax)
#endif
#ifdef JEMALLOC_SWAP
CTL_PROTO(opt_overcommit)
#endif
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lrun_i_size)
INDEX_PROTO(arenas_lrun_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_cacheline)
CTL_PROTO(arenas_subpage)
CTL_PROTO(arenas_pagesize)
CTL_PROTO(arenas_chunksize)
#ifdef JEMALLOC_TINY
CTL_PROTO(arenas_tspace_min)
CTL_PROTO(arenas_tspace_max)
#endif
CTL_PROTO(arenas_qspace_min)
CTL_PROTO(arenas_qspace_max)
CTL_PROTO(arenas_cspace_min)
CTL_PROTO(arenas_cspace_max)
CTL_PROTO(arenas_sspace_min)
CTL_PROTO(arenas_sspace_max)
#ifdef JEMALLOC_TCACHE
CTL_PROTO(arenas_tcache_max)
#endif
CTL_PROTO(arenas_ntbins)
CTL_PROTO(arenas_nqbins)
CTL_PROTO(arenas_ncbins)
CTL_PROTO(arenas_nsbins)
CTL_PROTO(arenas_nbins)
#ifdef JEMALLOC_TCACHE
CTL_PROTO(arenas_nhbins)
#endif
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_purge)
#ifdef JEMALLOC_PROF
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_interval)
#endif
#ifdef JEMALLOC_STATS
CTL_PROTO(stats_chunks_current)
CTL_PROTO(stats_chunks_total)
CTL_PROTO(stats_chunks_high)
CTL_PROTO(stats_huge_allocated)
CTL_PROTO(stats_huge_nmalloc)
CTL_PROTO(stats_huge_ndalloc)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_allocated)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
#ifdef JEMALLOC_TCACHE
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
#endif
CTL_PROTO(stats_arenas_i_bins_j_nruns)
CTL_PROTO(stats_arenas_i_bins_j_nreruns)
CTL_PROTO(stats_arenas_i_bins_j_highruns)
CTL_PROTO(stats_arenas_i_bins_j_curruns)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_highruns)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
#endif
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
#ifdef JEMALLOC_STATS
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
#endif
INDEX_PROTO(stats_arenas_i)
#ifdef JEMALLOC_STATS
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_mapped)
#endif
#ifdef JEMALLOC_SWAP
# ifdef JEMALLOC_STATS
CTL_PROTO(swap_avail)
# endif
CTL_PROTO(swap_prezeroed)
CTL_PROTO(swap_nfds)
CTL_PROTO(swap_fds)
#endif
/******************************************************************************/
/* mallctl tree. */
/* Maximum tree depth. */
#define CTL_MAX_DEPTH 6
#define NAME(n) true, {.named = {n
#define CHILD(c) sizeof(c##_node) / sizeof(ctl_node_t), c##_node}}, NULL
#define CTL(c) 0, NULL}}, c##_ctl
/*
* Only handles internal indexed nodes, since there are currently no external
* ones.
*/
#define INDEX(i) false, {.indexed = {i##_index}}, NULL
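/*
 * Illustrative sketch (not part of the original source): after macro
 * expansion, a leaf entry such as {NAME("flush"), CTL(tcache_flush)}
 * becomes
 *
 *     {true, {.named = {"flush", 0, NULL}}, tcache_flush_ctl}
 *
 * i.e. a named node with no children and a terminal ctl function, while
 * CHILD(c) instead fills in the child count/array and leaves the ctl
 * function NULL.
 */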
#ifdef JEMALLOC_TCACHE
static const ctl_node_t tcache_node[] = {
{NAME("flush"), CTL(tcache_flush)}
};
#endif
static const ctl_node_t thread_node[] = {
{NAME("arena"), CTL(thread_arena)}
#ifdef JEMALLOC_STATS
,
{NAME("allocated"), CTL(thread_allocated)},
{NAME("allocatedp"), CTL(thread_allocatedp)},
{NAME("deallocated"), CTL(thread_deallocated)},
{NAME("deallocatedp"), CTL(thread_deallocatedp)}
#endif
};
static const ctl_node_t config_node[] = {
{NAME("debug"), CTL(config_debug)},
{NAME("dss"), CTL(config_dss)},
{NAME("dynamic_page_shift"), CTL(config_dynamic_page_shift)},
{NAME("fill"), CTL(config_fill)},
{NAME("lazy_lock"), CTL(config_lazy_lock)},
{NAME("prof"), CTL(config_prof)},
{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
{NAME("stats"), CTL(config_stats)},
{NAME("swap"), CTL(config_swap)},
{NAME("sysv"), CTL(config_sysv)},
{NAME("tcache"), CTL(config_tcache)},
{NAME("tiny"), CTL(config_tiny)},
{NAME("tls"), CTL(config_tls)},
{NAME("xmalloc"), CTL(config_xmalloc)}
};
static const ctl_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)},
{NAME("lg_qspace_max"), CTL(opt_lg_qspace_max)},
{NAME("lg_cspace_max"), CTL(opt_lg_cspace_max)},
{NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
{NAME("stats_print"), CTL(opt_stats_print)}
#ifdef JEMALLOC_FILL
,
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)}
#endif
#ifdef JEMALLOC_SYSV
,
{NAME("sysv"), CTL(opt_sysv)}
#endif
#ifdef JEMALLOC_XMALLOC
,
{NAME("xmalloc"), CTL(opt_xmalloc)}
#endif
#ifdef JEMALLOC_TCACHE
,
{NAME("tcache"), CTL(opt_tcache)},
{NAME("lg_tcache_gc_sweep"), CTL(opt_lg_tcache_gc_sweep)}
#endif
#ifdef JEMALLOC_PROF
,
{NAME("prof"), CTL(opt_prof)},
{NAME("prof_prefix"), CTL(opt_prof_prefix)},
{NAME("prof_active"), CTL(opt_prof_active)},
{NAME("lg_prof_bt_max"), CTL(opt_lg_prof_bt_max)},
{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
{NAME("prof_gdump"), CTL(opt_prof_gdump)},
{NAME("prof_leak"), CTL(opt_prof_leak)},
{NAME("prof_accum"), CTL(opt_prof_accum)},
{NAME("lg_prof_tcmax"), CTL(opt_lg_prof_tcmax)}
#endif
#ifdef JEMALLOC_SWAP
,
{NAME("overcommit"), CTL(opt_overcommit)}
#endif
};
static const ctl_node_t arenas_bin_i_node[] = {
{NAME("size"), CTL(arenas_bin_i_size)},
{NAME("nregs"), CTL(arenas_bin_i_nregs)},
{NAME("run_size"), CTL(arenas_bin_i_run_size)}
};
static const ctl_node_t super_arenas_bin_i_node[] = {
{NAME(""), CHILD(arenas_bin_i)}
};
static const ctl_node_t arenas_bin_node[] = {
{INDEX(arenas_bin_i)}
};
static const ctl_node_t arenas_lrun_i_node[] = {
{NAME("size"), CTL(arenas_lrun_i_size)}
};
static const ctl_node_t super_arenas_lrun_i_node[] = {
{NAME(""), CHILD(arenas_lrun_i)}
};
static const ctl_node_t arenas_lrun_node[] = {
{INDEX(arenas_lrun_i)}
};
static const ctl_node_t arenas_node[] = {
{NAME("narenas"), CTL(arenas_narenas)},
{NAME("initialized"), CTL(arenas_initialized)},
{NAME("quantum"), CTL(arenas_quantum)},
{NAME("cacheline"), CTL(arenas_cacheline)},
{NAME("subpage"), CTL(arenas_subpage)},
{NAME("pagesize"), CTL(arenas_pagesize)},
{NAME("chunksize"), CTL(arenas_chunksize)},
#ifdef JEMALLOC_TINY
{NAME("tspace_min"), CTL(arenas_tspace_min)},
{NAME("tspace_max"), CTL(arenas_tspace_max)},
#endif
{NAME("qspace_min"), CTL(arenas_qspace_min)},
{NAME("qspace_max"), CTL(arenas_qspace_max)},
{NAME("cspace_min"), CTL(arenas_cspace_min)},
{NAME("cspace_max"), CTL(arenas_cspace_max)},
{NAME("sspace_min"), CTL(arenas_sspace_min)},
{NAME("sspace_max"), CTL(arenas_sspace_max)},
#ifdef JEMALLOC_TCACHE
{NAME("tcache_max"), CTL(arenas_tcache_max)},
#endif
{NAME("ntbins"), CTL(arenas_ntbins)},
{NAME("nqbins"), CTL(arenas_nqbins)},
{NAME("ncbins"), CTL(arenas_ncbins)},
{NAME("nsbins"), CTL(arenas_nsbins)},
{NAME("nbins"), CTL(arenas_nbins)},
#ifdef JEMALLOC_TCACHE
{NAME("nhbins"), CTL(arenas_nhbins)},
#endif
{NAME("bin"), CHILD(arenas_bin)},
{NAME("nlruns"), CTL(arenas_nlruns)},
{NAME("lrun"), CHILD(arenas_lrun)},
{NAME("purge"), CTL(arenas_purge)}
};
#ifdef JEMALLOC_PROF
static const ctl_node_t prof_node[] = {
{NAME("active"), CTL(prof_active)},
{NAME("dump"), CTL(prof_dump)},
{NAME("interval"), CTL(prof_interval)}
};
#endif
#ifdef JEMALLOC_STATS
static const ctl_node_t stats_chunks_node[] = {
{NAME("current"), CTL(stats_chunks_current)},
{NAME("total"), CTL(stats_chunks_total)},
{NAME("high"), CTL(stats_chunks_high)}
};
static const ctl_node_t stats_huge_node[] = {
{NAME("allocated"), CTL(stats_huge_allocated)},
{NAME("nmalloc"), CTL(stats_huge_nmalloc)},
{NAME("ndalloc"), CTL(stats_huge_ndalloc)}
};
static const ctl_node_t stats_arenas_i_small_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
};
static const ctl_node_t stats_arenas_i_large_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
};
static const ctl_node_t stats_arenas_i_bins_j_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
#ifdef JEMALLOC_TCACHE
{NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
{NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
#endif
{NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
{NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
{NAME("highruns"), CTL(stats_arenas_i_bins_j_highruns)},
{NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
};
static const ctl_node_t super_stats_arenas_i_bins_j_node[] = {
{NAME(""), CHILD(stats_arenas_i_bins_j)}
};
static const ctl_node_t stats_arenas_i_bins_node[] = {
{INDEX(stats_arenas_i_bins_j)}
};
static const ctl_node_t stats_arenas_i_lruns_j_node[] = {
{NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
{NAME("highruns"), CTL(stats_arenas_i_lruns_j_highruns)},
{NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
};
static const ctl_node_t super_stats_arenas_i_lruns_j_node[] = {
{NAME(""), CHILD(stats_arenas_i_lruns_j)}
};
static const ctl_node_t stats_arenas_i_lruns_node[] = {
{INDEX(stats_arenas_i_lruns_j)}
};
#endif
static const ctl_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)}
#ifdef JEMALLOC_STATS
,
{NAME("mapped"), CTL(stats_arenas_i_mapped)},
{NAME("npurge"), CTL(stats_arenas_i_npurge)},
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
{NAME("purged"), CTL(stats_arenas_i_purged)},
{NAME("small"), CHILD(stats_arenas_i_small)},
{NAME("large"), CHILD(stats_arenas_i_large)},
{NAME("bins"), CHILD(stats_arenas_i_bins)},
{NAME("lruns"), CHILD(stats_arenas_i_lruns)}
#endif
};
static const ctl_node_t super_stats_arenas_i_node[] = {
{NAME(""), CHILD(stats_arenas_i)}
};
static const ctl_node_t stats_arenas_node[] = {
{INDEX(stats_arenas_i)}
};
static const ctl_node_t stats_node[] = {
#ifdef JEMALLOC_STATS
{NAME("cactive"), CTL(stats_cactive)},
{NAME("allocated"), CTL(stats_allocated)},
{NAME("active"), CTL(stats_active)},
{NAME("mapped"), CTL(stats_mapped)},
{NAME("chunks"), CHILD(stats_chunks)},
{NAME("huge"), CHILD(stats_huge)},
#endif
{NAME("arenas"), CHILD(stats_arenas)}
};
#ifdef JEMALLOC_SWAP
static const ctl_node_t swap_node[] = {
# ifdef JEMALLOC_STATS
{NAME("avail"), CTL(swap_avail)},
# endif
{NAME("prezeroed"), CTL(swap_prezeroed)},
{NAME("nfds"), CTL(swap_nfds)},
{NAME("fds"), CTL(swap_fds)}
};
#endif
static const ctl_node_t root_node[] = {
{NAME("version"), CTL(version)},
{NAME("epoch"), CTL(epoch)},
#ifdef JEMALLOC_TCACHE
{NAME("tcache"), CHILD(tcache)},
#endif
{NAME("thread"), CHILD(thread)},
{NAME("config"), CHILD(config)},
{NAME("opt"), CHILD(opt)},
{NAME("arenas"), CHILD(arenas)},
#ifdef JEMALLOC_PROF
{NAME("prof"), CHILD(prof)},
#endif
{NAME("stats"), CHILD(stats)}
#ifdef JEMALLOC_SWAP
,
{NAME("swap"), CHILD(swap)}
#endif
};
static const ctl_node_t super_root_node[] = {
{NAME(""), CHILD(root)}
};
#undef NAME
#undef CHILD
#undef CTL
#undef INDEX
/******************************************************************************/
#ifdef JEMALLOC_STATS
static bool
ctl_arena_init(ctl_arena_stats_t *astats)
{
if (astats->bstats == NULL) {
astats->bstats = (malloc_bin_stats_t *)base_alloc(nbins *
sizeof(malloc_bin_stats_t));
if (astats->bstats == NULL)
return (true);
}
if (astats->lstats == NULL) {
astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
sizeof(malloc_large_stats_t));
if (astats->lstats == NULL)
return (true);
}
return (false);
}
#endif
static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{
astats->pactive = 0;
astats->pdirty = 0;
#ifdef JEMALLOC_STATS
memset(&astats->astats, 0, sizeof(arena_stats_t));
astats->allocated_small = 0;
astats->nmalloc_small = 0;
astats->ndalloc_small = 0;
astats->nrequests_small = 0;
memset(astats->bstats, 0, nbins * sizeof(malloc_bin_stats_t));
memset(astats->lstats, 0, nlclasses * sizeof(malloc_large_stats_t));
#endif
}
#ifdef JEMALLOC_STATS
static void
ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
unsigned i;
arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty,
&cstats->astats, cstats->bstats, cstats->lstats);
for (i = 0; i < nbins; i++) {
cstats->allocated_small += cstats->bstats[i].allocated;
cstats->nmalloc_small += cstats->bstats[i].nmalloc;
cstats->ndalloc_small += cstats->bstats[i].ndalloc;
cstats->nrequests_small += cstats->bstats[i].nrequests;
}
}
static void
ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{
unsigned i;
sstats->pactive += astats->pactive;
sstats->pdirty += astats->pdirty;
sstats->astats.mapped += astats->astats.mapped;
sstats->astats.npurge += astats->astats.npurge;
sstats->astats.nmadvise += astats->astats.nmadvise;
sstats->astats.purged += astats->astats.purged;
sstats->allocated_small += astats->allocated_small;
sstats->nmalloc_small += astats->nmalloc_small;
sstats->ndalloc_small += astats->ndalloc_small;
sstats->nrequests_small += astats->nrequests_small;
sstats->astats.allocated_large += astats->astats.allocated_large;
sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
sstats->astats.nrequests_large += astats->astats.nrequests_large;
for (i = 0; i < nlclasses; i++) {
sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
sstats->lstats[i].highruns += astats->lstats[i].highruns;
sstats->lstats[i].curruns += astats->lstats[i].curruns;
}
for (i = 0; i < nbins; i++) {
sstats->bstats[i].allocated += astats->bstats[i].allocated;
sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
#ifdef JEMALLOC_TCACHE
sstats->bstats[i].nfills += astats->bstats[i].nfills;
sstats->bstats[i].nflushes += astats->bstats[i].nflushes;
#endif
sstats->bstats[i].nruns += astats->bstats[i].nruns;
sstats->bstats[i].reruns += astats->bstats[i].reruns;
sstats->bstats[i].highruns += astats->bstats[i].highruns;
sstats->bstats[i].curruns += astats->bstats[i].curruns;
}
}
#endif
static void
ctl_arena_refresh(arena_t *arena, unsigned i)
{
ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas];
ctl_arena_clear(astats);
sstats->nthreads += astats->nthreads;
#ifdef JEMALLOC_STATS
ctl_arena_stats_amerge(astats, arena);
/* Merge into sum stats as well. */
ctl_arena_stats_smerge(sstats, astats);
#else
astats->pactive += arena->nactive;
astats->pdirty += arena->ndirty;
/* Merge into sum stats as well. */
sstats->pactive += arena->nactive;
sstats->pdirty += arena->ndirty;
#endif
}
static void
ctl_refresh(void)
{
unsigned i;
arena_t *tarenas[narenas];
#ifdef JEMALLOC_STATS
malloc_mutex_lock(&chunks_mtx);
ctl_stats.chunks.current = stats_chunks.curchunks;
ctl_stats.chunks.total = stats_chunks.nchunks;
ctl_stats.chunks.high = stats_chunks.highchunks;
malloc_mutex_unlock(&chunks_mtx);
malloc_mutex_lock(&huge_mtx);
ctl_stats.huge.allocated = huge_allocated;
ctl_stats.huge.nmalloc = huge_nmalloc;
ctl_stats.huge.ndalloc = huge_ndalloc;
malloc_mutex_unlock(&huge_mtx);
#endif
	/*
	 * Clear sum stats, since ctl_arena_refresh() will merge into them.
	 */
ctl_stats.arenas[narenas].nthreads = 0;
ctl_arena_clear(&ctl_stats.arenas[narenas]);
malloc_mutex_lock(&arenas_lock);
memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
for (i = 0; i < narenas; i++) {
if (arenas[i] != NULL)
ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
else
ctl_stats.arenas[i].nthreads = 0;
}
malloc_mutex_unlock(&arenas_lock);
for (i = 0; i < narenas; i++) {
bool initialized = (tarenas[i] != NULL);
ctl_stats.arenas[i].initialized = initialized;
if (initialized)
ctl_arena_refresh(tarenas[i], i);
}
#ifdef JEMALLOC_STATS
ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
+ ctl_stats.arenas[narenas].astats.allocated_large
+ ctl_stats.huge.allocated;
ctl_stats.active = (ctl_stats.arenas[narenas].pactive << PAGE_SHIFT)
+ ctl_stats.huge.allocated;
ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
# ifdef JEMALLOC_SWAP
malloc_mutex_lock(&swap_mtx);
ctl_stats.swap_avail = swap_avail;
malloc_mutex_unlock(&swap_mtx);
# endif
#endif
ctl_epoch++;
}
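/*
* Illustrative note (not part of the original source): the derived totals
* above are byte counts. For example, with PAGE_SHIFT == 12 (4 KiB pages),
* a summed pactive of 10 pages plus 8388608 bytes of huge allocations
* yields:
*
*	active = (10 << 12) + 8388608 = 40960 + 8388608 = 8429568 bytes
*
* and mapped is similarly ctl_stats.chunks.current << opt_lg_chunk.
*/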
static bool
ctl_init(void)
{
bool ret;
malloc_mutex_lock(&ctl_mtx);
if (ctl_initialized == false) {
#ifdef JEMALLOC_STATS
unsigned i;
#endif
/*
* Allocate space for one extra arena stats element, which
* contains summed stats across all arenas.
*/
ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
(narenas + 1) * sizeof(ctl_arena_stats_t));
if (ctl_stats.arenas == NULL) {
ret = true;
goto RETURN;
}
memset(ctl_stats.arenas, 0, (narenas + 1) *
sizeof(ctl_arena_stats_t));
/*
* Initialize all stats structures, regardless of whether they
* ever get used. Lazy initialization would allow errors to
* cause inconsistent state to be viewable by the application.
*/
#ifdef JEMALLOC_STATS
for (i = 0; i <= narenas; i++) {
if (ctl_arena_init(&ctl_stats.arenas[i])) {
ret = true;
goto RETURN;
}
}
#endif
ctl_stats.arenas[narenas].initialized = true;
ctl_epoch = 0;
ctl_refresh();
ctl_initialized = true;
}
ret = false;
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
size_t *depthp)
{
int ret;
const char *elm, *tdot, *dot;
size_t elen, i, j;
const ctl_node_t *node;
elm = name;
/* Equivalent to strchrnul(). */
dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
if (elen == 0) {
ret = ENOENT;
goto RETURN;
}
node = super_root_node;
for (i = 0; i < *depthp; i++) {
assert(node->named);
assert(node->u.named.nchildren > 0);
if (node->u.named.children[0].named) {
const ctl_node_t *pnode = node;
/* Children are named. */
for (j = 0; j < node->u.named.nchildren; j++) {
const ctl_node_t *child =
&node->u.named.children[j];
if (strlen(child->u.named.name) == elen
&& strncmp(elm, child->u.named.name,
elen) == 0) {
node = child;
if (nodesp != NULL)
nodesp[i] = node;
mibp[i] = j;
break;
}
}
if (node == pnode) {
ret = ENOENT;
goto RETURN;
}
} else {
unsigned long index;
const ctl_node_t *inode;
/* Children are indexed. */
index = strtoul(elm, NULL, 10);
if (index == ULONG_MAX) {
ret = ENOENT;
goto RETURN;
}
inode = &node->u.named.children[0];
node = inode->u.indexed.index(mibp, *depthp,
index);
if (node == NULL) {
ret = ENOENT;
goto RETURN;
}
if (nodesp != NULL)
nodesp[i] = node;
mibp[i] = (size_t)index;
}
if (node->ctl != NULL) {
/* Terminal node. */
if (*dot != '\0') {
/*
* The name contains more elements than are
* in this path through the tree.
*/
ret = ENOENT;
goto RETURN;
}
/* Complete lookup successful. */
*depthp = i + 1;
break;
}
/* Update elm. */
if (*dot == '\0') {
/* No more elements. */
ret = ENOENT;
goto RETURN;
}
elm = &dot[1];
dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
strchr(elm, '\0');
elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
}
ret = 0;
RETURN:
return (ret);
}
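/*
* Worked example (illustrative): ctl_lookup() translates a dotted name into
* a MIB, one element per tree level. For "stats.arenas.0.pactive", the walk
* matches "stats" and "arenas" against named children, parses "0" with
* strtoul() for the indexed arenas node, then matches "pactive", leaving a
* 4-element MIB and *depthp == 4. The numeric MIB values depend on the
* ordering of children in the generated ctl tree.
*/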
int
ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
int ret;
size_t depth;
ctl_node_t const *nodes[CTL_MAX_DEPTH];
size_t mib[CTL_MAX_DEPTH];
if (ctl_initialized == false && ctl_init()) {
ret = EAGAIN;
goto RETURN;
}
depth = CTL_MAX_DEPTH;
ret = ctl_lookup(name, nodes, mib, &depth);
if (ret != 0)
goto RETURN;
if (nodes[depth-1]->ctl == NULL) {
/* The name refers to a partial path through the ctl tree. */
ret = ENOENT;
goto RETURN;
}
ret = nodes[depth-1]->ctl(mib, depth, oldp, oldlenp, newp, newlen);
RETURN:
return (ret);
}
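/*
* Usage sketch (illustrative, not part of this file): ctl_byname() backs
* the public mallctl() entry point (possibly name-mangled via JEMALLOC_P()
* in embedded builds). Assuming stats are enabled, a caller reads a value
* like so:
*
*	size_t allocated;
*	size_t sz = sizeof(allocated);
*	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
*		printf("%zu bytes allocated\n", allocated);
*
* Passing non-NULL newp/newlen instead writes through the same node,
* subject to the READ()/WRITE() size checks defined below.
*/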
int
ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{
int ret;
if (ctl_initialized == false && ctl_init()) {
ret = EAGAIN;
goto RETURN;
}
ret = ctl_lookup(name, NULL, mibp, miblenp);
RETURN:
return (ret);
}
int
ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
const ctl_node_t *node;
size_t i;
if (ctl_initialized == false && ctl_init()) {
ret = EAGAIN;
goto RETURN;
}
/* Iterate down the tree. */
node = super_root_node;
for (i = 0; i < miblen; i++) {
if (node->u.named.children[0].named) {
/* Children are named. */
if (node->u.named.nchildren <= mib[i]) {
ret = ENOENT;
goto RETURN;
}
node = &node->u.named.children[mib[i]];
} else {
const ctl_node_t *inode;
/* Indexed element. */
inode = &node->u.named.children[0];
node = inode->u.indexed.index(mib, miblen, mib[i]);
if (node == NULL) {
ret = ENOENT;
goto RETURN;
}
}
}
/* Call the ctl function. */
if (node->ctl == NULL) {
/* Partial MIB. */
ret = ENOENT;
goto RETURN;
}
ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
RETURN:
return (ret);
}
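/*
* Usage sketch (illustrative): repeated queries can skip name parsing by
* caching the MIB once, mirroring the public mallctlnametomib() and
* mallctlbymib() pair; use_count() below is a hypothetical consumer:
*
*	size_t mib[4];
*	size_t miblen = sizeof(mib) / sizeof(mib[0]);
*	uint64_t nmalloc;
*	size_t len = sizeof(nmalloc);
*
*	if (mallctlnametomib("stats.huge.nmalloc", mib, &miblen) == 0 &&
*	    mallctlbymib(mib, miblen, &nmalloc, &len, NULL, 0) == 0)
*		use_count(nmalloc);
*/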
bool
ctl_boot(void)
{
if (malloc_mutex_init(&ctl_mtx))
return (true);
ctl_initialized = false;
return (false);
}
/******************************************************************************/
/* *_ctl() functions. */
#define READONLY() do { \
if (newp != NULL || newlen != 0) { \
ret = EPERM; \
goto RETURN; \
} \
} while (0)
#define WRITEONLY() do { \
if (oldp != NULL || oldlenp != NULL) { \
ret = EPERM; \
goto RETURN; \
} \
} while (0)
#define VOID() do { \
READONLY(); \
WRITEONLY(); \
} while (0)
#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
? sizeof(t) : *oldlenp; \
memcpy(oldp, (void *)&v, copylen); \
ret = EINVAL; \
goto RETURN; \
} else \
*(t *)oldp = v; \
} \
} while (0)
#define WRITE(v, t) do { \
if (newp != NULL) { \
if (newlen != sizeof(t)) { \
ret = EINVAL; \
goto RETURN; \
} \
v = *(t *)newp; \
} \
} while (0)
#define CTL_RO_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
oldval = v; \
READ(oldval, t); \
\
ret = 0; \
RETURN: \
malloc_mutex_unlock(&ctl_mtx); \
return (ret); \
}
/*
* ctl_mtx is not acquired, under the assumption that no pertinent data will
* mutate during the call.
*/
#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
READONLY(); \
oldval = v; \
READ(oldval, t); \
\
ret = 0; \
RETURN: \
return (ret); \
}
#define CTL_RO_TRUE_GEN(n) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
int ret; \
bool oldval; \
\
READONLY(); \
oldval = true; \
READ(oldval, bool); \
\
ret = 0; \
RETURN: \
return (ret); \
}
#define CTL_RO_FALSE_GEN(n) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
int ret; \
bool oldval; \
\
READONLY(); \
oldval = false; \
READ(oldval, bool); \
\
ret = 0; \
RETURN: \
return (ret); \
}
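/*
* Illustrative expansion (not in the original source): an invocation such
* as CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned) produces, after
* substitution, a read-only handler equivalent to:
*
*	static int
*	arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
*	    size_t *oldlenp, void *newp, size_t newlen)
*	{
*		int ret;
*		unsigned oldval;
*
*		READONLY();
*		oldval = narenas;
*		READ(oldval, unsigned);
*
*		ret = 0;
*	RETURN:
*		return (ret);
*	}
*/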
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int
epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
uint64_t newval;
malloc_mutex_lock(&ctl_mtx);
newval = 0;
WRITE(newval, uint64_t);
if (newval != 0)
ctl_refresh();
READ(ctl_epoch, uint64_t);
ret = 0;
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
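/*
* Usage sketch (illustrative): mallctl statistics are snapshots taken when
* the epoch was last advanced, so consumers write any non-zero value to
* "epoch" before reading stats; the value read back is the post-refresh
* epoch:
*
*	uint64_t epoch = 1;
*	size_t sz = sizeof(epoch);
*	mallctl("epoch", &epoch, &sz, &epoch, sz);
*/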
#ifdef JEMALLOC_TCACHE
static int
tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
tcache_t *tcache;
VOID();
tcache = TCACHE_GET();
if (tcache == NULL) {
ret = 0;
goto RETURN;
}
tcache_destroy(tcache);
TCACHE_SET(NULL);
ret = 0;
RETURN:
return (ret);
}
#endif
static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
unsigned newind, oldind;
newind = oldind = choose_arena()->ind;
WRITE(newind, unsigned);
READ(oldind, unsigned);
if (newind != oldind) {
arena_t *arena;
if (newind >= narenas) {
/* New arena index is out of range. */
ret = EFAULT;
goto RETURN;
}
/* Initialize arena if necessary. */
malloc_mutex_lock(&arenas_lock);
if ((arena = arenas[newind]) == NULL)
arena = arenas_extend(newind);
arenas[oldind]->nthreads--;
arenas[newind]->nthreads++;
malloc_mutex_unlock(&arenas_lock);
if (arena == NULL) {
ret = EAGAIN;
goto RETURN;
}
/* Set new arena association. */
ARENA_SET(arena);
{
tcache_t *tcache = TCACHE_GET();
if (tcache != NULL)
tcache->arena = arena;
}
}
ret = 0;
RETURN:
return (ret);
}
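/*
* Usage sketch (illustrative): a thread can rebind itself to arena 0 via
* the read-write "thread.arena" node; the value read back is the previous
* association, and any thread cache is re-pointed at the new arena:
*
*	unsigned arena_ind = 0;
*	size_t sz = sizeof(arena_ind);
*	mallctl("thread.arena", &arena_ind, &sz, &arena_ind, sz);
*/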
#ifdef JEMALLOC_STATS
CTL_RO_NL_GEN(thread_allocated, ALLOCATED_GET(), uint64_t)
CTL_RO_NL_GEN(thread_allocatedp, ALLOCATEDP_GET(), uint64_t *)
CTL_RO_NL_GEN(thread_deallocated, DEALLOCATED_GET(), uint64_t)
CTL_RO_NL_GEN(thread_deallocatedp, DEALLOCATEDP_GET(), uint64_t *)
#endif
/******************************************************************************/
#ifdef JEMALLOC_DEBUG
CTL_RO_TRUE_GEN(config_debug)
#else
CTL_RO_FALSE_GEN(config_debug)
#endif
#ifdef JEMALLOC_DSS
CTL_RO_TRUE_GEN(config_dss)
#else
CTL_RO_FALSE_GEN(config_dss)
#endif
#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
CTL_RO_TRUE_GEN(config_dynamic_page_shift)
#else
CTL_RO_FALSE_GEN(config_dynamic_page_shift)
#endif
#ifdef JEMALLOC_FILL
CTL_RO_TRUE_GEN(config_fill)
#else
CTL_RO_FALSE_GEN(config_fill)
#endif
#ifdef JEMALLOC_LAZY_LOCK
CTL_RO_TRUE_GEN(config_lazy_lock)
#else
CTL_RO_FALSE_GEN(config_lazy_lock)
#endif
#ifdef JEMALLOC_PROF
CTL_RO_TRUE_GEN(config_prof)
#else
CTL_RO_FALSE_GEN(config_prof)
#endif
#ifdef JEMALLOC_PROF_LIBGCC
CTL_RO_TRUE_GEN(config_prof_libgcc)
#else
CTL_RO_FALSE_GEN(config_prof_libgcc)
#endif
#ifdef JEMALLOC_PROF_LIBUNWIND
CTL_RO_TRUE_GEN(config_prof_libunwind)
#else
CTL_RO_FALSE_GEN(config_prof_libunwind)
#endif
#ifdef JEMALLOC_STATS
CTL_RO_TRUE_GEN(config_stats)
#else
CTL_RO_FALSE_GEN(config_stats)
#endif
#ifdef JEMALLOC_SWAP
CTL_RO_TRUE_GEN(config_swap)
#else
CTL_RO_FALSE_GEN(config_swap)
#endif
#ifdef JEMALLOC_SYSV
CTL_RO_TRUE_GEN(config_sysv)
#else
CTL_RO_FALSE_GEN(config_sysv)
#endif
#ifdef JEMALLOC_TCACHE
CTL_RO_TRUE_GEN(config_tcache)
#else
CTL_RO_FALSE_GEN(config_tcache)
#endif
#ifdef JEMALLOC_TINY
CTL_RO_TRUE_GEN(config_tiny)
#else
CTL_RO_FALSE_GEN(config_tiny)
#endif
#ifdef JEMALLOC_TLS
CTL_RO_TRUE_GEN(config_tls)
#else
CTL_RO_FALSE_GEN(config_tls)
#endif
#ifdef JEMALLOC_XMALLOC
CTL_RO_TRUE_GEN(config_xmalloc)
#else
CTL_RO_FALSE_GEN(config_xmalloc)
#endif
/******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_lg_qspace_max, opt_lg_qspace_max, size_t)
CTL_RO_NL_GEN(opt_lg_cspace_max, opt_lg_cspace_max, size_t)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
#ifdef JEMALLOC_FILL
CTL_RO_NL_GEN(opt_junk, opt_junk, bool)
CTL_RO_NL_GEN(opt_zero, opt_zero, bool)
#endif
#ifdef JEMALLOC_SYSV
CTL_RO_NL_GEN(opt_sysv, opt_sysv, bool)
#endif
#ifdef JEMALLOC_XMALLOC
CTL_RO_NL_GEN(opt_xmalloc, opt_xmalloc, bool)
#endif
#ifdef JEMALLOC_TCACHE
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
CTL_RO_NL_GEN(opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep, ssize_t)
#endif
#ifdef JEMALLOC_PROF
CTL_RO_NL_GEN(opt_prof, opt_prof, bool)
CTL_RO_NL_GEN(opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_GEN(opt_prof_active, opt_prof_active, bool) /* Mutable. */
CTL_RO_NL_GEN(opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
CTL_RO_NL_GEN(opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_GEN(opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_GEN(opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_GEN(opt_prof_leak, opt_prof_leak, bool)
CTL_RO_NL_GEN(opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_GEN(opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
#endif
#ifdef JEMALLOC_SWAP
CTL_RO_NL_GEN(opt_overcommit, opt_overcommit, bool)
#endif
/******************************************************************************/
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
const ctl_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{
/* Valid bin indices are [0, nbins). */
if (i >= nbins)
return (NULL);
return (super_arenas_bin_i_node);
}
CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << PAGE_SHIFT), size_t)
const ctl_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{
/* Valid large size class indices are [0, nlclasses). */
if (i >= nlclasses)
return (NULL);
return (super_arenas_lrun_i_node);
}
CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned)
static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned nread, i;
malloc_mutex_lock(&ctl_mtx);
READONLY();
if (*oldlenp != narenas * sizeof(bool)) {
ret = EINVAL;
nread = (*oldlenp < narenas * sizeof(bool))
? (*oldlenp / sizeof(bool)) : narenas;
} else {
ret = 0;
nread = narenas;
}
for (i = 0; i < nread; i++)
((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_cacheline, CACHELINE, size_t)
CTL_RO_NL_GEN(arenas_subpage, SUBPAGE, size_t)
CTL_RO_NL_GEN(arenas_pagesize, PAGE_SIZE, size_t)
CTL_RO_NL_GEN(arenas_chunksize, chunksize, size_t)
#ifdef JEMALLOC_TINY
CTL_RO_NL_GEN(arenas_tspace_min, (1U << LG_TINY_MIN), size_t)
CTL_RO_NL_GEN(arenas_tspace_max, (qspace_min >> 1), size_t)
#endif
CTL_RO_NL_GEN(arenas_qspace_min, qspace_min, size_t)
CTL_RO_NL_GEN(arenas_qspace_max, qspace_max, size_t)
CTL_RO_NL_GEN(arenas_cspace_min, cspace_min, size_t)
CTL_RO_NL_GEN(arenas_cspace_max, cspace_max, size_t)
CTL_RO_NL_GEN(arenas_sspace_min, sspace_min, size_t)
CTL_RO_NL_GEN(arenas_sspace_max, sspace_max, size_t)
#ifdef JEMALLOC_TCACHE
CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
#endif
CTL_RO_NL_GEN(arenas_ntbins, ntbins, unsigned)
CTL_RO_NL_GEN(arenas_nqbins, nqbins, unsigned)
CTL_RO_NL_GEN(arenas_ncbins, ncbins, unsigned)
CTL_RO_NL_GEN(arenas_nsbins, nsbins, unsigned)
CTL_RO_NL_GEN(arenas_nbins, nbins, unsigned)
#ifdef JEMALLOC_TCACHE
CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
#endif
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
static int
arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
unsigned arena;
WRITEONLY();
arena = UINT_MAX;
WRITE(arena, unsigned);
if (newp != NULL && arena >= narenas) {
ret = EFAULT;
goto RETURN;
} else {
arena_t *tarenas[narenas];
malloc_mutex_lock(&arenas_lock);
memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
malloc_mutex_unlock(&arenas_lock);
if (arena == UINT_MAX) {
unsigned i;
for (i = 0; i < narenas; i++) {
if (tarenas[i] != NULL)
arena_purge_all(tarenas[i]);
}
} else {
assert(arena < narenas);
if (tarenas[arena] != NULL)
arena_purge_all(tarenas[arena]);
}
}
ret = 0;
RETURN:
return (ret);
}
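/*
* Usage sketch (illustrative): writing UINT_MAX purges dirty pages in all
* arenas, while writing a valid index purges a single arena:
*
*	unsigned arena_ind = UINT_MAX;
*	mallctl("arenas.purge", NULL, NULL, &arena_ind, sizeof(arena_ind));
*/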
/******************************************************************************/
#ifdef JEMALLOC_PROF
static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
bool oldval;
malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
oldval = opt_prof_active;
if (newp != NULL) {
/*
* The memory barriers will tend to make opt_prof_active
* propagate faster on systems with weak memory ordering.
*/
mb_write();
WRITE(opt_prof_active, bool);
mb_write();
}
READ(oldval, bool);
ret = 0;
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
const char *filename = NULL;
WRITEONLY();
WRITE(filename, const char *);
if (prof_mdump(filename)) {
ret = EFAULT;
goto RETURN;
}
ret = 0;
RETURN:
return (ret);
}
CTL_RO_NL_GEN(prof_interval, prof_interval, uint64_t)
#endif
/******************************************************************************/
#ifdef JEMALLOC_STATS
CTL_RO_GEN(stats_chunks_current, ctl_stats.chunks.current, size_t)
CTL_RO_GEN(stats_chunks_total, ctl_stats.chunks.total, uint64_t)
CTL_RO_GEN(stats_chunks_high, ctl_stats.chunks.high, size_t)
CTL_RO_GEN(stats_huge_allocated, huge_allocated, size_t)
CTL_RO_GEN(stats_huge_nmalloc, huge_nmalloc, uint64_t)
CTL_RO_GEN(stats_huge_ndalloc, huge_ndalloc, uint64_t)
CTL_RO_GEN(stats_arenas_i_small_allocated,
ctl_stats.arenas[mib[2]].allocated_small, size_t)
CTL_RO_GEN(stats_arenas_i_small_nmalloc,
ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
CTL_RO_GEN(stats_arenas_i_small_ndalloc,
ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
CTL_RO_GEN(stats_arenas_i_small_nrequests,
ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
CTL_RO_GEN(stats_arenas_i_large_allocated,
ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
CTL_RO_GEN(stats_arenas_i_large_nmalloc,
ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
CTL_RO_GEN(stats_arenas_i_large_ndalloc,
ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
CTL_RO_GEN(stats_arenas_i_large_nrequests,
ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
CTL_RO_GEN(stats_arenas_i_bins_j_allocated,
ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
CTL_RO_GEN(stats_arenas_i_bins_j_nmalloc,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_GEN(stats_arenas_i_bins_j_ndalloc,
ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_GEN(stats_arenas_i_bins_j_nrequests,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
#ifdef JEMALLOC_TCACHE
CTL_RO_GEN(stats_arenas_i_bins_j_nfills,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
CTL_RO_GEN(stats_arenas_i_bins_j_nflushes,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
#endif
CTL_RO_GEN(stats_arenas_i_bins_j_nruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
CTL_RO_GEN(stats_arenas_i_bins_j_nreruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
CTL_RO_GEN(stats_arenas_i_bins_j_highruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].highruns, size_t)
CTL_RO_GEN(stats_arenas_i_bins_j_curruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
const ctl_node_t *
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
{
/* Valid bin indices are [0, nbins). */
if (j >= nbins)
return (NULL);
return (super_stats_arenas_i_bins_j_node);
}
CTL_RO_GEN(stats_arenas_i_lruns_j_nmalloc,
ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
CTL_RO_GEN(stats_arenas_i_lruns_j_ndalloc,
ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
CTL_RO_GEN(stats_arenas_i_lruns_j_nrequests,
ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
CTL_RO_GEN(stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
CTL_RO_GEN(stats_arenas_i_lruns_j_highruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].highruns, size_t)
const ctl_node_t *
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
{
/* Valid large size class indices are [0, nlclasses). */
if (j >= nlclasses)
return (NULL);
return (super_stats_arenas_i_lruns_j_node);
}
#endif
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
#ifdef JEMALLOC_STATS
CTL_RO_GEN(stats_arenas_i_mapped, ctl_stats.arenas[mib[2]].astats.mapped,
size_t)
CTL_RO_GEN(stats_arenas_i_npurge, ctl_stats.arenas[mib[2]].astats.npurge,
uint64_t)
CTL_RO_GEN(stats_arenas_i_nmadvise, ctl_stats.arenas[mib[2]].astats.nmadvise,
uint64_t)
CTL_RO_GEN(stats_arenas_i_purged, ctl_stats.arenas[mib[2]].astats.purged,
uint64_t)
#endif
const ctl_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
const ctl_node_t *ret;
malloc_mutex_lock(&ctl_mtx);
if (ctl_stats.arenas[i].initialized == false) {
ret = NULL;
goto RETURN;
}
ret = super_stats_arenas_i_node;
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
#ifdef JEMALLOC_STATS
CTL_RO_GEN(stats_cactive, &stats_cactive, size_t *)
CTL_RO_GEN(stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_GEN(stats_active, ctl_stats.active, size_t)
CTL_RO_GEN(stats_mapped, ctl_stats.mapped, size_t)
#endif
/******************************************************************************/
#ifdef JEMALLOC_SWAP
# ifdef JEMALLOC_STATS
CTL_RO_GEN(swap_avail, ctl_stats.swap_avail, size_t)
# endif
static int
swap_prezeroed_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
malloc_mutex_lock(&ctl_mtx);
if (swap_enabled) {
READONLY();
} else {
/*
* swap_prezeroed isn't actually used by the swap code until it
* is set during a successful chunk_swap_enable() call. We
* use it here to store the value that we'll pass to
* chunk_swap_enable() in a swap.fds mallctl(). This is not
* very clean, but the obvious alternatives are even worse.
*/
WRITE(swap_prezeroed, bool);
}
READ(swap_prezeroed, bool);
ret = 0;
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
CTL_RO_GEN(swap_nfds, swap_nfds, size_t)
static int
swap_fds_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
malloc_mutex_lock(&ctl_mtx);
if (swap_enabled) {
READONLY();
} else if (newp != NULL) {
size_t nfds = newlen / sizeof(int);
{
int fds[nfds];
memcpy(fds, newp, nfds * sizeof(int));
if (chunk_swap_enable(fds, nfds, swap_prezeroed)) {
ret = EFAULT;
goto RETURN;
}
}
}
if (oldp != NULL && oldlenp != NULL) {
if (*oldlenp != swap_nfds * sizeof(int)) {
size_t copylen = (swap_nfds * sizeof(int) <= *oldlenp)
? swap_nfds * sizeof(int) : *oldlenp;
memcpy(oldp, swap_fds, copylen);
ret = EINVAL;
goto RETURN;
} else
memcpy(oldp, swap_fds, *oldlenp);
}
ret = 0;
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
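/*
* Usage sketch (illustrative): enabling swap-backed chunks is a two-step
* mallctl sequence performed before the fds are needed, matching the
* protocol described in swap_prezeroed_ctl() above; fd0 and fd1 below are
* hypothetical, pre-opened file descriptors:
*
*	bool prezeroed = false;
*	int fds[2] = { fd0, fd1 };
*	mallctl("swap.prezeroed", NULL, NULL, &prezeroed, sizeof(prezeroed));
*	mallctl("swap.fds", NULL, NULL, fds, sizeof(fds));
*/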
#endif
#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
static inline int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
int ret;
size_t a_size = a->size;
size_t b_size = b->size;
ret = (a_size > b_size) - (a_size < b_size);
if (ret == 0) {
uintptr_t a_addr = (uintptr_t)a->addr;
uintptr_t b_addr = (uintptr_t)b->addr;
ret = (a_addr > b_addr) - (a_addr < b_addr);
}
return (ret);
}
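/*
* Note (illustrative): the (a > b) - (a < b) idiom yields -1, 0, or 1
* without branching on overflow-prone subtraction. E.g. for a_size == 4096
* and b_size == 8192, it evaluates to 0 - 1 == -1, so a orders first.
*/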
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
extent_szad_comp)
#endif
static inline int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
{
uintptr_t a_addr = (uintptr_t)a->addr;
uintptr_t b_addr = (uintptr_t)b->addr;
return ((a_addr > b_addr) - (a_addr < b_addr));
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
extent_ad_comp)
#define JEMALLOC_HASH_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
#ifdef JEMALLOC_STATS
uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;
#endif
malloc_mutex_t huge_mtx;
/******************************************************************************/
/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;
void *
huge_malloc(size_t size, bool zero)
{
void *ret;
size_t csize;
extent_node_t *node;
/* Allocate one or more contiguous chunks for this request. */
csize = CHUNK_CEILING(size);
if (csize == 0) {
/* size is large enough to cause size_t wrap-around. */
return (NULL);
}
/* Allocate an extent node with which to track the chunk. */
node = base_node_alloc();
if (node == NULL)
return (NULL);
ret = chunk_alloc(csize, false, &zero);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
}
/* Insert node into huge. */
node->addr = ret;
node->size = csize;
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_add(csize);
huge_nmalloc++;
huge_allocated += csize;
#endif
malloc_mutex_unlock(&huge_mtx);
#ifdef JEMALLOC_FILL
if (zero == false) {
if (opt_junk)
memset(ret, 0xa5, csize);
else if (opt_zero)
memset(ret, 0, csize);
}
#endif
return (ret);
}
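/*
* Worked example (illustrative): CHUNK_CEILING() rounds up to a multiple of
* chunksize, so it wraps to 0 only when size is within chunksize of
* SIZE_MAX. With 4 MiB chunks on a 64-bit system:
*
*	CHUNK_CEILING(5 MiB) == 8 MiB		(two chunks)
*	CHUNK_CEILING(SIZE_MAX - 1) == 0	(wrap-around; rejected above)
*/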
/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
void *ret;
size_t alloc_size, chunk_size, offset;
extent_node_t *node;
/*
* This allocation requires alignment that is even larger than chunk
* alignment. This means that huge_malloc() isn't good enough.
*
* Allocate almost twice as many chunks as are demanded by the size or
* alignment, to ensure that the alignment can be achieved, then unmap
* leading and trailing chunks.
*/
assert(alignment > chunksize);
chunk_size = CHUNK_CEILING(size);
if (size >= alignment)
alloc_size = chunk_size + alignment - chunksize;
else
alloc_size = (alignment << 1) - chunksize;
/* Allocate an extent node with which to track the chunk. */
node = base_node_alloc();
if (node == NULL)
return (NULL);
ret = chunk_alloc(alloc_size, false, &zero);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
}
offset = (uintptr_t)ret & (alignment - 1);
assert((offset & chunksize_mask) == 0);
assert(offset < alloc_size);
if (offset == 0) {
/* Trim trailing space. */
chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
- chunk_size);
} else {
size_t trailsize;
/* Trim leading space. */
chunk_dealloc(ret, alignment - offset);
ret = (void *)((uintptr_t)ret + (alignment - offset));
trailsize = alloc_size - (alignment - offset) - chunk_size;
if (trailsize != 0) {
/* Trim trailing space. */
assert(trailsize < alloc_size);
chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
trailsize);
}
}
/* Insert node into huge. */
node->addr = ret;
node->size = chunk_size;
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_add(chunk_size);
huge_nmalloc++;
huge_allocated += chunk_size;
#endif
malloc_mutex_unlock(&huge_mtx);
#ifdef JEMALLOC_FILL
if (zero == false) {
if (opt_junk)
memset(ret, 0xa5, chunk_size);
else if (opt_zero)
memset(ret, 0, chunk_size);
}
#endif
return (ret);
}
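/*
* Worked example (illustrative): with chunksize == 4 MiB, a request for
* size == 4 MiB at alignment == 16 MiB takes the size < alignment branch:
* alloc_size = (16 MiB << 1) - 4 MiB = 28 MiB and chunk_size == 4 MiB. If
* chunk_alloc() returns an address at offset == 12 MiB within its 16 MiB
* alignment window, the leading 16 - 12 = 4 MiB are deallocated, ret
* advances to the next 16 MiB boundary, and trailsize = 28 - 4 - 4 =
* 20 MiB is trimmed from the tail, leaving one aligned 4 MiB chunk.
*/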
void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{
/*
* Avoid moving the allocation if the size class can be left the same.
*/
if (oldsize > arena_maxclass
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
assert(CHUNK_CEILING(oldsize) == oldsize);
#ifdef JEMALLOC_FILL
if (opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0x5a,
oldsize - size);
}
#endif
return (ptr);
}
/* Reallocation would require a move. */
return (NULL);
}
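/*
* Worked example (illustrative): with 4 MiB chunks, an 8 MiB huge
* allocation shrunk to size == 6 MiB (extra == 0) stays in place, since
* CHUNK_CEILING(6 MiB) == 8 MiB == oldsize; only the trailing 2 MiB are
* junk-filled when opt_junk is set. Growing to size == 9 MiB would need
* CHUNK_CEILING(9 MiB) == 12 MiB, so NULL is returned and huge_ralloc()
* must allocate and copy.
*/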
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero)
{
void *ret;
size_t copysize;
/* Try to avoid moving the allocation. */
ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
if (ret != NULL)
return (ret);
/*
* size and oldsize are different enough that we need to use a
* different size class. In that case, fall back to allocating new
* space and copying.
*/
if (alignment > chunksize)
ret = huge_palloc(size + extra, alignment, zero);
else
ret = huge_malloc(size + extra, zero);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment > chunksize)
ret = huge_palloc(size, alignment, zero);
else
ret = huge_malloc(size, zero);
if (ret == NULL)
return (NULL);
}
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
/*
* Use mremap(2) if this is a huge-->huge reallocation, and neither the
* source nor the destination are in swap or dss.
*/
#ifdef JEMALLOC_MREMAP_FIXED
if (oldsize >= chunksize
# ifdef JEMALLOC_SWAP
&& (swap_enabled == false || (chunk_in_swap(ptr) == false &&
chunk_in_swap(ret) == false))
# endif
# ifdef JEMALLOC_DSS
&& chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
# endif
) {
size_t newsize = huge_salloc(ret);
if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
ret) == MAP_FAILED) {
/*
* Assuming no chunk management bugs in the allocator,
* the only documented way an error can occur here is
* if the application changed the map type for a
* portion of the old allocation. This is firmly in
* undefined behavior territory, so write a diagnostic
* message, and optionally abort.
*/
char buf[BUFERROR_BUF];
buferror(errno, buf, sizeof(buf));
malloc_write("<jemalloc>: Error in mremap(): ");
malloc_write(buf);
malloc_write("\n");
if (opt_abort)
abort();
memcpy(ret, ptr, copysize);
idalloc(ptr);
} else
huge_dalloc(ptr, false);
} else
#endif
{
memcpy(ret, ptr, copysize);
idalloc(ptr);
}
return (ret);
}
void
huge_dalloc(void *ptr, bool unmap)
{
extent_node_t *node, key;
malloc_mutex_lock(&huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = ptr;
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
extent_tree_ad_remove(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_sub(node->size);
huge_ndalloc++;
huge_allocated -= node->size;
#endif
malloc_mutex_unlock(&huge_mtx);
if (unmap) {
/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
if (opt_junk)
memset(node->addr, 0x5a, node->size);
#endif
#endif
chunk_dealloc(node->addr, node->size);
}
base_node_dealloc(node);
}
size_t
huge_salloc(const void *ptr)
{
size_t ret;
extent_node_t *node, key;
malloc_mutex_lock(&huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
ret = node->size;
malloc_mutex_unlock(&huge_mtx);
return (ret);
}
#ifdef JEMALLOC_PROF
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
prof_ctx_t *ret;
extent_node_t *node, key;
malloc_mutex_lock(&huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
ret = node->prof_ctx;
malloc_mutex_unlock(&huge_mtx);
return (ret);
}
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
extent_node_t *node, key;
malloc_mutex_lock(&huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
node->prof_ctx = ctx;
malloc_mutex_unlock(&huge_mtx);
}
#endif
bool
huge_boot(void)
{
/* Initialize chunks data. */
if (malloc_mutex_init(&huge_mtx))
return (true);
extent_tree_ad_new(&huge);
#ifdef JEMALLOC_STATS
huge_nmalloc = 0;
huge_ndalloc = 0;
huge_allocated = 0;
#endif
return (false);
}