Commit a78e148b authored by antirez

jemalloc source added

parent 07486df6
#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;
pthread_key_t arenas_tsd;
#ifndef NO_TLS
__thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif
#ifdef JEMALLOC_STATS
# ifndef NO_TLS
__thread thread_allocated_t thread_allocated_tls;
# else
pthread_key_t thread_allocated_tsd;
# endif
#endif
/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;
/* Used to let the initializing thread recursively allocate. */
static pthread_t malloc_initializer = (unsigned long)0;
/* Used to avoid initialization races. */
static malloc_mutex_t init_lock =
#ifdef JEMALLOC_OSSPIN
0
#else
MALLOC_MUTEX_INITIALIZER
#endif
;
#ifdef DYNAMIC_PAGE_SHIFT
size_t pagesize;
size_t pagesize_mask;
size_t lg_pagesize;
#endif
unsigned ncpus;
/* Runtime configuration options. */
const char *JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# endif
#else
bool opt_abort = false;
# ifdef JEMALLOC_FILL
bool opt_junk = false;
# endif
#endif
#ifdef JEMALLOC_SYSV
bool opt_sysv = false;
#endif
#ifdef JEMALLOC_XMALLOC
bool opt_xmalloc = false;
#endif
#ifdef JEMALLOC_FILL
bool opt_zero = false;
#endif
size_t opt_narenas = 0;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void wrtmessage(void *cbopaque, const char *s);
static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static void arenas_cleanup(void *arg);
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void thread_allocated_cleanup(void *arg);
#endif
static bool malloc_conf_next(char const **opts_p, char const **k_p,
size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
/******************************************************************************/
/* malloc_message() setup. */
#ifdef JEMALLOC_HAVE_ATTR
JEMALLOC_ATTR(visibility("hidden"))
#else
static
#endif
void
wrtmessage(void *cbopaque, const char *s)
{
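/*
* When JEMALLOC_CC_SILENCE is defined, write(2)'s return value is captured
* (and, on failure, reassigned from errno) purely to silence compiler
* warnings about ignoring the result; the message itself is emitted on a
* best-effort basis either way.
*/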
#ifdef JEMALLOC_CC_SILENCE
int result =
#endif
write(STDERR_FILENO, s, strlen(s));
#ifdef JEMALLOC_CC_SILENCE
if (result < 0)
result = errno;
#endif
}
void (*JEMALLOC_P(malloc_message))(void *, const char *s)
JEMALLOC_ATTR(visibility("default")) = wrtmessage;
/******************************************************************************/
/*
* Begin miscellaneous support functions.
*/
/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
arena_t *ret;
/* Allocate enough space for trailing bins. */
ret = (arena_t *)base_alloc(offsetof(arena_t, bins)
+ (sizeof(arena_bin_t) * nbins));
if (ret != NULL && arena_new(ret, ind) == false) {
arenas[ind] = ret;
return (ret);
}
/* Only reached if there is an OOM error. */
/*
* OOM here is quite inconvenient to propagate, since dealing with it
* would require a check for failure in the fast path. Instead, punt
* by using arenas[0]. In practice, this is an extremely unlikely
* failure.
*/
malloc_write("<jemalloc>: Error initializing arena\n");
if (opt_abort)
abort();
return (arenas[0]);
}
/*
* Choose an arena based on a per-thread value (slow-path code only, called
* only by choose_arena()).
*/
arena_t *
choose_arena_hard(void)
{
arena_t *ret;
if (narenas > 1) {
unsigned i, choose, first_null;
choose = 0;
first_null = narenas;
malloc_mutex_lock(&arenas_lock);
assert(arenas[0] != NULL);
for (i = 1; i < narenas; i++) {
if (arenas[i] != NULL) {
/*
* Choose the first arena that has the lowest
* number of threads assigned to it.
*/
if (arenas[i]->nthreads <
arenas[choose]->nthreads)
choose = i;
} else if (first_null == narenas) {
/*
* Record the index of the first uninitialized
* arena, in case all extant arenas are in use.
*
* NB: It is possible for there to be
* discontinuities in terms of initialized
* versus uninitialized arenas, due to the
* "thread.arena" mallctl.
*/
first_null = i;
}
}
if (arenas[choose]->nthreads == 0 || first_null == narenas) {
/*
* Use an unloaded arena, or the least loaded arena if
* all arenas are already initialized.
*/
ret = arenas[choose];
} else {
/* Initialize a new arena. */
ret = arenas_extend(first_null);
}
ret->nthreads++;
malloc_mutex_unlock(&arenas_lock);
} else {
ret = arenas[0];
malloc_mutex_lock(&arenas_lock);
ret->nthreads++;
malloc_mutex_unlock(&arenas_lock);
}
ARENA_SET(ret);
return (ret);
}
/*
* glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
* provide a wrapper.
*/
int
buferror(int errnum, char *buf, size_t buflen)
{
#ifdef _GNU_SOURCE
char *b = strerror_r(errnum, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
buf[buflen-1] = '\0';
}
return (0);
#else
return (strerror_r(errnum, buf, buflen));
#endif
}
static void
stats_print_atexit(void)
{
#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS))
unsigned i;
/*
* Merge stats from extant threads. This is racy, since individual
* threads do not lock when recording tcache stats events. As a
* consequence, the final stats may be slightly out of date by the time
* they are reported, if other threads continue to allocate.
*/
for (i = 0; i < narenas; i++) {
arena_t *arena = arenas[i];
if (arena != NULL) {
tcache_t *tcache;
/*
* tcache_stats_merge() locks bins, so if any code is
* introduced that acquires both arena and bin locks in
* the opposite order, deadlocks may result.
*/
malloc_mutex_lock(&arena->lock);
ql_foreach(tcache, &arena->tcache_ql, link) {
tcache_stats_merge(tcache, arena);
}
malloc_mutex_unlock(&arena->lock);
}
}
#endif
JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
}
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
thread_allocated_t *
thread_allocated_get_hard(void)
{
thread_allocated_t *thread_allocated = (thread_allocated_t *)
imalloc(sizeof(thread_allocated_t));
if (thread_allocated == NULL) {
static thread_allocated_t static_thread_allocated = {0, 0};
malloc_write("<jemalloc>: Error allocating TSD;"
" mallctl(\"thread.{de,}allocated[p]\", ...)"
" will be inaccurate\n");
if (opt_abort)
abort();
return (&static_thread_allocated);
}
pthread_setspecific(thread_allocated_tsd, thread_allocated);
thread_allocated->allocated = 0;
thread_allocated->deallocated = 0;
return (thread_allocated);
}
#endif
/*
* End miscellaneous support functions.
*/
/******************************************************************************/
/*
* Begin initialization functions.
*/
static unsigned
malloc_ncpus(void)
{
unsigned ret;
long result;
result = sysconf(_SC_NPROCESSORS_ONLN);
if (result == -1) {
/* Error. */
ret = 1;
} else
ret = (unsigned)result;
return (ret);
}
static void
arenas_cleanup(void *arg)
{
arena_t *arena = (arena_t *)arg;
malloc_mutex_lock(&arenas_lock);
arena->nthreads--;
malloc_mutex_unlock(&arenas_lock);
}
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void
thread_allocated_cleanup(void *arg)
{
uint64_t *allocated = (uint64_t *)arg;
if (allocated != NULL)
idalloc(allocated);
}
#endif
/*
* FreeBSD's pthreads implementation calls malloc(3), so the malloc
* implementation has to take pains to avoid infinite recursion during
* initialization.
*/
static inline bool
malloc_init(void)
{
if (malloc_initialized == false)
return (malloc_init_hard());
return (false);
}
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
char const **v_p, size_t *vlen_p)
{
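/*
* Parse the next "key:value" pair from the conf string, e.g. (hypothetical
* input) "abort:true,narenas:4".  Keys are runs of [A-Za-z0-9_], ':'
* separates key from value, and ',' separates pairs.  On success the key and
* value are returned as pointer/length pairs into the original string (they
* are not NUL-terminated), and *opts_p is advanced past the consumed pair.
* Returns true if no pair could be parsed (end of input or malformed string).
*/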
bool accept;
const char *opts = *opts_p;
*k_p = opts;
for (accept = false; accept == false;) {
switch (*opts) {
case 'A': case 'B': case 'C': case 'D': case 'E':
case 'F': case 'G': case 'H': case 'I': case 'J':
case 'K': case 'L': case 'M': case 'N': case 'O':
case 'P': case 'Q': case 'R': case 'S': case 'T':
case 'U': case 'V': case 'W': case 'X': case 'Y':
case 'Z':
case 'a': case 'b': case 'c': case 'd': case 'e':
case 'f': case 'g': case 'h': case 'i': case 'j':
case 'k': case 'l': case 'm': case 'n': case 'o':
case 'p': case 'q': case 'r': case 's': case 't':
case 'u': case 'v': case 'w': case 'x': case 'y':
case 'z':
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
case '_':
opts++;
break;
case ':':
opts++;
*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
*v_p = opts;
accept = true;
break;
case '\0':
if (opts != *opts_p) {
malloc_write("<jemalloc>: Conf string "
"ends with key\n");
}
return (true);
default:
malloc_write("<jemalloc>: Malformed conf "
"string\n");
return (true);
}
}
for (accept = false; accept == false;) {
switch (*opts) {
case ',':
opts++;
/*
* Look ahead one character here, because the
* next time this function is called, it will
* assume that end of input has been cleanly
* reached if no input remains, but we have
* optimistically already consumed the comma if
* one exists.
*/
if (*opts == '\0') {
malloc_write("<jemalloc>: Conf string "
"ends with comma\n");
}
*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
accept = true;
break;
case '\0':
*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
accept = true;
break;
default:
opts++;
break;
}
}
*opts_p = opts;
return (false);
}
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
size_t vlen)
{
char buf[PATH_MAX + 1];
malloc_write("<jemalloc>: ");
malloc_write(msg);
malloc_write(": ");
/* Clamp lengths so that "key:value\0" cannot overflow buf. */
if (klen > PATH_MAX - 1)
klen = PATH_MAX - 1;
if (vlen > PATH_MAX - 1 - klen)
vlen = PATH_MAX - 1 - klen;
memcpy(buf, k, klen);
memcpy(&buf[klen], ":", 1);
memcpy(&buf[klen+1], v, vlen);
buf[klen+1+vlen] = '\0';
malloc_write(buf);
malloc_write("\n");
}
static void
malloc_conf_init(void)
{
unsigned i;
char buf[PATH_MAX + 1];
const char *opts, *k, *v;
size_t klen, vlen;
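/*
* Process up to three configuration sources, in order: the compiled-in
* malloc_conf string, the name that the /etc/malloc.conf symbolic link
* points to, and the MALLOC_CONF environment variable (names possibly
* prefix-mangled).  Each source is applied in turn to the same opt_*
* variables, so later sources override earlier ones.
*/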
for (i = 0; i < 3; i++) {
/* Get runtime configuration. */
switch (i) {
case 0:
if (JEMALLOC_P(malloc_conf) != NULL) {
/*
* Use options that were compiled into the
* program.
*/
opts = JEMALLOC_P(malloc_conf);
} else {
/* No configuration specified. */
buf[0] = '\0';
opts = buf;
}
break;
case 1: {
int linklen;
const char *linkname =
#ifdef JEMALLOC_PREFIX
"/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
"/etc/malloc.conf"
#endif
;
if ((linklen = readlink(linkname, buf,
sizeof(buf) - 1)) != -1) {
/*
* Use the contents of the "/etc/malloc.conf"
* symbolic link's name.
*/
buf[linklen] = '\0';
opts = buf;
} else {
/* No configuration specified. */
buf[0] = '\0';
opts = buf;
}
break;
}
case 2: {
const char *envname =
#ifdef JEMALLOC_PREFIX
JEMALLOC_CPREFIX"MALLOC_CONF"
#else
"MALLOC_CONF"
#endif
;
if ((opts = getenv(envname)) != NULL) {
/*
* Do nothing; opts is already initialized to
* the value of the MALLOC_CONF environment
* variable.
*/
} else {
/* No configuration specified. */
buf[0] = '\0';
opts = buf;
}
break;
}
default:
/* NOTREACHED */
assert(false);
buf[0] = '\0';
opts = buf;
}
while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
&vlen) == false) {
#define CONF_HANDLE_BOOL(n) \
if (sizeof(#n)-1 == klen && strncmp(#n, k, \
klen) == 0) { \
if (strncmp("true", v, vlen) == 0 && \
vlen == sizeof("true")-1) \
opt_##n = true; \
else if (strncmp("false", v, vlen) == \
0 && vlen == sizeof("false")-1) \
opt_##n = false; \
else { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} \
continue; \
}
#define CONF_HANDLE_SIZE_T(n, min, max) \
if (sizeof(#n)-1 == klen && strncmp(#n, k, \
klen) == 0) { \
unsigned long ul; \
char *end; \
\
errno = 0; \
ul = strtoul(v, &end, 0); \
if (errno != 0 || (uintptr_t)end - \
(uintptr_t)v != vlen) { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} else if (ul < min || ul > max) { \
malloc_conf_error( \
"Out-of-range conf value", \
k, klen, v, vlen); \
} else \
opt_##n = ul; \
continue; \
}
#define CONF_HANDLE_SSIZE_T(n, min, max) \
if (sizeof(#n)-1 == klen && strncmp(#n, k, \
klen) == 0) { \
long l; \
char *end; \
\
errno = 0; \
l = strtol(v, &end, 0); \
if (errno != 0 || (uintptr_t)end - \
(uintptr_t)v != vlen) { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} else if (l < (ssize_t)min || l > \
(ssize_t)max) { \
malloc_conf_error( \
"Out-of-range conf value", \
k, klen, v, vlen); \
} else \
opt_##n = l; \
continue; \
}
#define CONF_HANDLE_CHAR_P(n, d) \
if (sizeof(#n)-1 == klen && strncmp(#n, k, \
klen) == 0) { \
size_t cpylen = (vlen <= \
sizeof(opt_##n)-1) ? vlen : \
sizeof(opt_##n)-1; \
strncpy(opt_##n, v, cpylen); \
opt_##n[cpylen] = '\0'; \
continue; \
}
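/*
* Each CONF_HANDLE_* macro above expands to an if-statement that matches a
* single option name, parses and range-checks its value, assigns the
* corresponding opt_* variable, and then continues to the next key/value
* pair.  Only pairs that fall through every handler reach the
* "Invalid conf pair" error below.
*/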
CONF_HANDLE_BOOL(abort)
CONF_HANDLE_SIZE_T(lg_qspace_max, LG_QUANTUM,
PAGE_SHIFT-1)
CONF_HANDLE_SIZE_T(lg_cspace_max, LG_QUANTUM,
PAGE_SHIFT-1)
/*
* Chunks always require at least one header page,
* plus one data page.
*/
CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
(sizeof(size_t) << 3) - 1)
CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
(sizeof(size_t) << 3) - 1)
CONF_HANDLE_BOOL(stats_print)
#ifdef JEMALLOC_FILL
CONF_HANDLE_BOOL(junk)
CONF_HANDLE_BOOL(zero)
#endif
#ifdef JEMALLOC_SYSV
CONF_HANDLE_BOOL(sysv)
#endif
#ifdef JEMALLOC_XMALLOC
CONF_HANDLE_BOOL(xmalloc)
#endif
#ifdef JEMALLOC_TCACHE
CONF_HANDLE_BOOL(tcache)
CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
(sizeof(size_t) << 3) - 1)
CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
(sizeof(size_t) << 3) - 1)
#endif
#ifdef JEMALLOC_PROF
CONF_HANDLE_BOOL(prof)
CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0, LG_PROF_BT_MAX)
CONF_HANDLE_BOOL(prof_active)
CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
(sizeof(uint64_t) << 3) - 1)
CONF_HANDLE_BOOL(prof_accum)
CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
(sizeof(size_t) << 3) - 1)
CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
(sizeof(uint64_t) << 3) - 1)
CONF_HANDLE_BOOL(prof_gdump)
CONF_HANDLE_BOOL(prof_leak)
#endif
#ifdef JEMALLOC_SWAP
CONF_HANDLE_BOOL(overcommit)
#endif
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
}
/* Validate configuration of options that are inter-related. */
if (opt_lg_qspace_max+1 >= opt_lg_cspace_max) {
malloc_write("<jemalloc>: Invalid lg_[qc]space_max "
"relationship; restoring defaults\n");
opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
}
}
}
static bool
malloc_init_hard(void)
{
arena_t *init_arenas[1];
malloc_mutex_lock(&init_lock);
if (malloc_initialized || malloc_initializer == pthread_self()) {
/*
* Another thread initialized the allocator before this one
* acquired init_lock, or this thread is the initializing
* thread, and it is recursively allocating.
*/
malloc_mutex_unlock(&init_lock);
return (false);
}
if (malloc_initializer != (unsigned long)0) {
/* Busy-wait until the initializing thread completes. */
do {
malloc_mutex_unlock(&init_lock);
CPU_SPINWAIT;
malloc_mutex_lock(&init_lock);
} while (malloc_initialized == false);
malloc_mutex_unlock(&init_lock);
return (false);
}
#ifdef DYNAMIC_PAGE_SHIFT
/* Get page size. */
{
long result;
result = sysconf(_SC_PAGESIZE);
assert(result != -1);
pagesize = (unsigned)result;
/*
* We assume that pagesize is a power of 2 when calculating
* pagesize_mask and lg_pagesize.
*/
assert(((result - 1) & result) == 0);
pagesize_mask = result - 1;
lg_pagesize = ffs((int)result) - 1;
}
#endif
#ifdef JEMALLOC_PROF
prof_boot0();
#endif
malloc_conf_init();
/* Register fork handlers. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
jemalloc_postfork) != 0) {
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
if (opt_abort)
abort();
}
if (ctl_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
if (opt_stats_print) {
/* Print statistics at exit. */
if (atexit(stats_print_atexit) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
if (opt_abort)
abort();
}
}
if (chunk_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
if (base_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
#ifdef JEMALLOC_PROF
prof_boot1();
#endif
if (arena_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
#ifdef JEMALLOC_TCACHE
if (tcache_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
#endif
if (huge_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
/* Initialize allocation counters before any allocations can occur. */
if (pthread_key_create(&thread_allocated_tsd, thread_allocated_cleanup)
!= 0) {
malloc_mutex_unlock(&init_lock);
return (true);
}
#endif
/*
* Create enough scaffolding to allow recursive allocation in
* malloc_ncpus().
*/
narenas = 1;
arenas = init_arenas;
memset(arenas, 0, sizeof(arena_t *) * narenas);
/*
* Initialize one arena here. The rest are lazily created in
* choose_arena_hard().
*/
arenas_extend(0);
if (arenas[0] == NULL) {
malloc_mutex_unlock(&init_lock);
return (true);
}
/*
* Assign the initial arena to the initial thread, in order to avoid
* spurious creation of an extra arena if the application switches to
* threaded mode.
*/
ARENA_SET(arenas[0]);
arenas[0]->nthreads++;
if (malloc_mutex_init(&arenas_lock)) {
malloc_mutex_unlock(&init_lock);
return (true);
}
if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
malloc_mutex_unlock(&init_lock);
return (true);
}
#ifdef JEMALLOC_PROF
if (prof_boot2()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
#endif
/* Get number of CPUs. */
malloc_initializer = pthread_self();
malloc_mutex_unlock(&init_lock);
ncpus = malloc_ncpus();
malloc_mutex_lock(&init_lock);
if (opt_narenas == 0) {
/*
* For SMP systems, create more than one arena per CPU by
* default.
*/
if (ncpus > 1)
opt_narenas = ncpus << 2;
else
opt_narenas = 1;
}
narenas = opt_narenas;
/*
* Make sure that the arenas array can be allocated. In practice, this
* limit is enough to allow the allocator to function, but the ctl
* machinery will fail to allocate memory at far lower limits.
*/
if (narenas > chunksize / sizeof(arena_t *)) {
char buf[UMAX2S_BUFSIZE];
narenas = chunksize / sizeof(arena_t *);
malloc_write("<jemalloc>: Reducing narenas to limit (");
malloc_write(u2s(narenas, 10, buf));
malloc_write(")\n");
}
/* Allocate and initialize arenas. */
arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
if (arenas == NULL) {
malloc_mutex_unlock(&init_lock);
return (true);
}
/*
* Zero the array. In practice, this should always be pre-zeroed,
* since it was just mmap()ed, but let's be sure.
*/
memset(arenas, 0, sizeof(arena_t *) * narenas);
/* Copy the pointer to the one arena that was already initialized. */
arenas[0] = init_arenas[0];
#ifdef JEMALLOC_ZONE
/* Register the custom zone. */
malloc_zone_register(create_zone());
/*
* Convert the default szone to an "overlay zone" that is capable of
* deallocating szone-allocated objects, but allocating new objects
* from jemalloc.
*/
szone2ozone(malloc_default_zone());
#endif
malloc_initialized = true;
malloc_mutex_unlock(&init_lock);
return (false);
}
#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{
if (malloc_init_hard())
abort();
}
#endif
/*
* End initialization functions.
*/
/******************************************************************************/
/*
* Begin malloc(3)-compatible functions.
*/
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(malloc)(size_t size)
{
void *ret;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
size_t usize
# ifdef JEMALLOC_CC_SILENCE
= 0
# endif
;
#endif
#ifdef JEMALLOC_PROF
prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
= NULL
# endif
;
#endif
if (malloc_init()) {
ret = NULL;
goto OOM;
}
if (size == 0) {
#ifdef JEMALLOC_SYSV
if (opt_sysv == false)
#endif
size = 1;
#ifdef JEMALLOC_SYSV
else {
# ifdef JEMALLOC_XMALLOC
if (opt_xmalloc) {
malloc_write("<jemalloc>: Error in malloc(): "
"invalid size 0\n");
abort();
}
# endif
ret = NULL;
goto RETURN;
}
#endif
}
#ifdef JEMALLOC_PROF
if (opt_prof) {
usize = s2u(size);
if ((cnt = prof_alloc_prep(usize)) == NULL) {
ret = NULL;
goto OOM;
}
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
small_maxclass) {
ret = imalloc(small_maxclass+1);
if (ret != NULL)
arena_prof_promoted(ret, usize);
} else
ret = imalloc(size);
} else
#endif
{
#ifdef JEMALLOC_STATS
usize = s2u(size);
#endif
ret = imalloc(size);
}
OOM:
if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
if (opt_xmalloc) {
malloc_write("<jemalloc>: Error in malloc(): "
"out of memory\n");
abort();
}
#endif
errno = ENOMEM;
}
#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_PROF
if (opt_prof && ret != NULL)
prof_malloc(ret, usize, cnt);
#endif
#ifdef JEMALLOC_STATS
if (ret != NULL) {
assert(usize == isalloc(ret));
ALLOCATED_ADD(usize, 0);
}
#endif
return (ret);
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
{
int ret;
size_t usize
#ifdef JEMALLOC_CC_SILENCE
= 0
#endif
;
void *result;
#ifdef JEMALLOC_PROF
prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
= NULL
# endif
;
#endif
if (malloc_init())
result = NULL;
else {
if (size == 0) {
#ifdef JEMALLOC_SYSV
if (opt_sysv == false)
#endif
size = 1;
#ifdef JEMALLOC_SYSV
else {
# ifdef JEMALLOC_XMALLOC
if (opt_xmalloc) {
malloc_write("<jemalloc>: Error in "
"posix_memalign(): invalid size "
"0\n");
abort();
}
# endif
result = NULL;
*memptr = NULL;
ret = 0;
goto RETURN;
}
#endif
}
/* Make sure that alignment is a large enough power of 2. */
if (((alignment - 1) & alignment) != 0
|| alignment < sizeof(void *)) {
#ifdef JEMALLOC_XMALLOC
if (opt_xmalloc) {
malloc_write("<jemalloc>: Error in "
"posix_memalign(): invalid alignment\n");
abort();
}
#endif
result = NULL;
ret = EINVAL;
goto RETURN;
}
usize = sa2u(size, alignment, NULL);
if (usize == 0) {
result = NULL;
ret = ENOMEM;
goto RETURN;
}
#ifdef JEMALLOC_PROF
if (opt_prof) {
if ((cnt = prof_alloc_prep(usize)) == NULL) {
result = NULL;
ret = EINVAL;
} else {
if (prof_promote && (uintptr_t)cnt !=
(uintptr_t)1U && usize <= small_maxclass) {
assert(sa2u(small_maxclass+1,
alignment, NULL) != 0);
result = ipalloc(sa2u(small_maxclass+1,
alignment, NULL), alignment, false);
if (result != NULL) {
arena_prof_promoted(result,
usize);
}
} else {
result = ipalloc(usize, alignment,
false);
}
}
} else
#endif
result = ipalloc(usize, alignment, false);
}
if (result == NULL) {
#ifdef JEMALLOC_XMALLOC
if (opt_xmalloc) {
malloc_write("<jemalloc>: Error in posix_memalign(): "
"out of memory\n");
abort();
}
#endif
ret = ENOMEM;
goto RETURN;
}
*memptr = result;
ret = 0;
RETURN:
#ifdef JEMALLOC_STATS
if (result != NULL) {
assert(usize == isalloc(result));
ALLOCATED_ADD(usize, 0);
}
#endif
#ifdef JEMALLOC_PROF
if (opt_prof && result != NULL)
prof_malloc(result, usize, cnt);
#endif
return (ret);
}
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(calloc)(size_t num, size_t size)
{
void *ret;
size_t num_size;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
size_t usize
# ifdef JEMALLOC_CC_SILENCE
= 0
# endif
;
#endif
#ifdef JEMALLOC_PROF
prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
= NULL
# endif
;
#endif
if (malloc_init()) {
num_size = 0;
ret = NULL;
goto RETURN;
}
num_size = num * size;
if (num_size == 0) {
#ifdef JEMALLOC_SYSV
if ((opt_sysv == false) && ((num == 0) || (size == 0)))
#endif
num_size = 1;
#ifdef JEMALLOC_SYSV
else {
ret = NULL;
goto RETURN;
}
#endif
/*
* Try to avoid division here. We know that it isn't possible to
* overflow during multiplication if neither operand uses any of the
* most significant half of the bits in a size_t.
*/
} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
&& (num_size / size != num)) {
/* size_t overflow. */
ret = NULL;
goto RETURN;
}
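/*
* Worked example of the check above, assuming a 64-bit size_t: the mask is
* SIZE_T_MAX << 32, i.e. the high 32 bits.  If neither num nor size has any
* of those bits set, both operands are < 2^32 and their product fits in 64
* bits, so the division can be skipped.  Only when at least one operand is
* "large" is num_size / size != num used to detect actual overflow.
*/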
#ifdef JEMALLOC_PROF
if (opt_prof) {
usize = s2u(num_size);
if ((cnt = prof_alloc_prep(usize)) == NULL) {
ret = NULL;
goto RETURN;
}
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
<= small_maxclass) {
ret = icalloc(small_maxclass+1);
if (ret != NULL)
arena_prof_promoted(ret, usize);
} else
ret = icalloc(num_size);
} else
#endif
{
#ifdef JEMALLOC_STATS
usize = s2u(num_size);
#endif
ret = icalloc(num_size);
}
RETURN:
if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
if (opt_xmalloc) {
malloc_write("<jemalloc>: Error in calloc(): out of "
"memory\n");
abort();
}
#endif
errno = ENOMEM;
}
#ifdef JEMALLOC_PROF
if (opt_prof && ret != NULL)
prof_malloc(ret, usize, cnt);
#endif
#ifdef JEMALLOC_STATS
if (ret != NULL) {
assert(usize == isalloc(ret));
ALLOCATED_ADD(usize, 0);
}
#endif
return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(realloc)(void *ptr, size_t size)
{
void *ret;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
size_t usize
# ifdef JEMALLOC_CC_SILENCE
= 0
# endif
;
size_t old_size = 0;
#endif
#ifdef JEMALLOC_PROF
prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
= NULL
# endif
;
prof_ctx_t *old_ctx
# ifdef JEMALLOC_CC_SILENCE
= NULL
# endif
;
#endif
if (size == 0) {
#ifdef JEMALLOC_SYSV
if (opt_sysv == false)
#endif
size = 1;
#ifdef JEMALLOC_SYSV
else {
if (ptr != NULL) {
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
old_size = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
if (opt_prof) {
old_ctx = prof_ctx_get(ptr);
cnt = NULL;
}
#endif
idalloc(ptr);
}
#ifdef JEMALLOC_PROF
else if (opt_prof) {
old_ctx = NULL;
cnt = NULL;
}
#endif
ret = NULL;
goto RETURN;
}
#endif
}
if (ptr != NULL) {
assert(malloc_initialized || malloc_initializer ==
pthread_self());
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
old_size = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
if (opt_prof) {
usize = s2u(size);
old_ctx = prof_ctx_get(ptr);
if ((cnt = prof_alloc_prep(usize)) == NULL) {
ret = NULL;
goto OOM;
}
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
usize <= small_maxclass) {
ret = iralloc(ptr, small_maxclass+1, 0, 0,
false, false);
if (ret != NULL)
arena_prof_promoted(ret, usize);
} else
ret = iralloc(ptr, size, 0, 0, false, false);
} else
#endif
{
#ifdef JEMALLOC_STATS
usize = s2u(size);
#endif
ret = iralloc(ptr, size, 0, 0, false, false);
}
#ifdef JEMALLOC_PROF
OOM:
#endif
if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
if (opt_xmalloc) {
malloc_write("<jemalloc>: Error in realloc(): "
"out of memory\n");
abort();
}
#endif
errno = ENOMEM;
}
} else {
#ifdef JEMALLOC_PROF
if (opt_prof)
old_ctx = NULL;
#endif
if (malloc_init()) {
#ifdef JEMALLOC_PROF
if (opt_prof)
cnt = NULL;
#endif
ret = NULL;
} else {
#ifdef JEMALLOC_PROF
if (opt_prof) {
usize = s2u(size);
if ((cnt = prof_alloc_prep(usize)) == NULL)
ret = NULL;
else {
if (prof_promote && (uintptr_t)cnt !=
(uintptr_t)1U && usize <=
small_maxclass) {
ret = imalloc(small_maxclass+1);
if (ret != NULL) {
arena_prof_promoted(ret,
usize);
}
} else
ret = imalloc(size);
}
} else
#endif
{
#ifdef JEMALLOC_STATS
usize = s2u(size);
#endif
ret = imalloc(size);
}
}
if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
if (opt_xmalloc) {
malloc_write("<jemalloc>: Error in realloc(): "
"out of memory\n");
abort();
}
#endif
errno = ENOMEM;
}
}
#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_PROF
if (opt_prof)
prof_realloc(ret, usize, cnt, old_size, old_ctx);
#endif
#ifdef JEMALLOC_STATS
if (ret != NULL) {
assert(usize == isalloc(ret));
ALLOCATED_ADD(usize, old_size);
}
#endif
return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(free)(void *ptr)
{
if (ptr != NULL) {
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
size_t usize;
#endif
assert(malloc_initialized || malloc_initializer ==
pthread_self());
#ifdef JEMALLOC_STATS
usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
if (opt_prof) {
# ifndef JEMALLOC_STATS
usize = isalloc(ptr);
# endif
prof_free(ptr, usize);
}
#endif
#ifdef JEMALLOC_STATS
ALLOCATED_ADD(0, usize);
#endif
idalloc(ptr);
}
}
/*
* End malloc(3)-compatible functions.
*/
/******************************************************************************/
/*
* Begin non-standard override functions.
*
* These overrides are omitted if the JEMALLOC_PREFIX is defined, since the
* entire point is to avoid accidental mixed allocator usage.
*/
#ifndef JEMALLOC_PREFIX
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(memalign)(size_t alignment, size_t size)
{
void *ret;
#ifdef JEMALLOC_CC_SILENCE
int result =
#endif
JEMALLOC_P(posix_memalign)(&ret, alignment, size);
#ifdef JEMALLOC_CC_SILENCE
if (result != 0)
return (NULL);
#endif
return (ret);
}
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(valloc)(size_t size)
{
void *ret;
#ifdef JEMALLOC_CC_SILENCE
int result =
#endif
JEMALLOC_P(posix_memalign)(&ret, PAGE_SIZE, size);
#ifdef JEMALLOC_CC_SILENCE
if (result != 0)
return (NULL);
#endif
return (ret);
}
#endif
#endif /* JEMALLOC_PREFIX */
/*
* End non-standard override functions.
*/
/******************************************************************************/
/*
* Begin non-standard functions.
*/
JEMALLOC_ATTR(visibility("default"))
size_t
JEMALLOC_P(malloc_usable_size)(const void *ptr)
{
size_t ret;
assert(malloc_initialized || malloc_initializer == pthread_self());
#ifdef JEMALLOC_IVSALLOC
ret = ivsalloc(ptr);
#else
assert(ptr != NULL);
ret = isalloc(ptr);
#endif
return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
void *cbopaque, const char *opts)
{
stats_print(write_cb, cbopaque, opts);
}
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
if (malloc_init())
return (EAGAIN);
return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
{
if (malloc_init())
return (EAGAIN);
return (ctl_nametomib(name, mibp, miblenp));
}
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
if (malloc_init())
return (EAGAIN);
return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{
assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
NULL)));
if (alignment != 0)
return (ipalloc(usize, alignment, zero));
else if (zero)
return (icalloc(usize));
else
return (imalloc(usize));
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
{
void *p;
size_t usize;
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO;
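/*
* The low ALLOCM_LG_ALIGN_MASK bits of flags encode lg(alignment).  The
* "& (SIZE_T_MAX-1)" trick maps the lg(alignment) == 0 case (1 << 0 == 1)
* to 0, which the rest of the code treats as "no alignment requested".
*/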
#ifdef JEMALLOC_PROF
prof_thr_cnt_t *cnt;
#endif
assert(ptr != NULL);
assert(size != 0);
if (malloc_init())
goto OOM;
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment,
NULL);
if (usize == 0)
goto OOM;
#ifdef JEMALLOC_PROF
if (opt_prof) {
if ((cnt = prof_alloc_prep(usize)) == NULL)
goto OOM;
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
small_maxclass) {
size_t usize_promoted = (alignment == 0) ?
s2u(small_maxclass+1) : sa2u(small_maxclass+1,
alignment, NULL);
assert(usize_promoted != 0);
p = iallocm(usize_promoted, alignment, zero);
if (p == NULL)
goto OOM;
arena_prof_promoted(p, usize);
} else {
p = iallocm(usize, alignment, zero);
if (p == NULL)
goto OOM;
}
if (rsize != NULL)
*rsize = usize;
} else
#endif
{
p = iallocm(usize, alignment, zero);
if (p == NULL)
goto OOM;
#ifndef JEMALLOC_STATS
if (rsize != NULL)
#endif
{
#ifdef JEMALLOC_STATS
if (rsize != NULL)
#endif
*rsize = usize;
}
}
*ptr = p;
#ifdef JEMALLOC_STATS
assert(usize == isalloc(p));
ALLOCATED_ADD(usize, 0);
#endif
return (ALLOCM_SUCCESS);
OOM:
#ifdef JEMALLOC_XMALLOC
if (opt_xmalloc) {
malloc_write("<jemalloc>: Error in allocm(): "
"out of memory\n");
abort();
}
#endif
*ptr = NULL;
return (ALLOCM_ERR_OOM);
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
int flags)
{
void *p, *q;
size_t usize;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
size_t old_size;
#endif
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO;
bool no_move = flags & ALLOCM_NO_MOVE;
#ifdef JEMALLOC_PROF
prof_thr_cnt_t *cnt;
prof_ctx_t *old_ctx;
#endif
assert(ptr != NULL);
assert(*ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || malloc_initializer == pthread_self());
p = *ptr;
#ifdef JEMALLOC_PROF
if (opt_prof) {
/*
* usize isn't knowable before iralloc() returns when extra is
* non-zero. Therefore, compute its maximum possible value and
* use that in prof_alloc_prep() to decide whether to capture a
* backtrace. prof_realloc() will use the actual usize to
* decide whether to sample.
*/
size_t max_usize = (alignment == 0) ? s2u(size+extra) :
sa2u(size+extra, alignment, NULL);
old_size = isalloc(p);
old_ctx = prof_ctx_get(p);
if ((cnt = prof_alloc_prep(max_usize)) == NULL)
goto OOM;
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && max_usize
<= small_maxclass) {
q = iralloc(p, small_maxclass+1, (small_maxclass+1 >=
size+extra) ? 0 : size+extra - (small_maxclass+1),
alignment, zero, no_move);
if (q == NULL)
goto ERR;
usize = isalloc(q);
arena_prof_promoted(q, usize);
} else {
q = iralloc(p, size, extra, alignment, zero, no_move);
if (q == NULL)
goto ERR;
usize = isalloc(q);
}
prof_realloc(q, usize, cnt, old_size, old_ctx);
if (rsize != NULL)
*rsize = usize;
} else
#endif
{
#ifdef JEMALLOC_STATS
old_size = isalloc(p);
#endif
q = iralloc(p, size, extra, alignment, zero, no_move);
if (q == NULL)
goto ERR;
#ifndef JEMALLOC_STATS
if (rsize != NULL)
#endif
{
usize = isalloc(q);
#ifdef JEMALLOC_STATS
if (rsize != NULL)
#endif
*rsize = usize;
}
}
*ptr = q;
#ifdef JEMALLOC_STATS
ALLOCATED_ADD(usize, old_size);
#endif
return (ALLOCM_SUCCESS);
ERR:
if (no_move)
return (ALLOCM_ERR_NOT_MOVED);
#ifdef JEMALLOC_PROF
OOM:
#endif
#ifdef JEMALLOC_XMALLOC
if (opt_xmalloc) {
malloc_write("<jemalloc>: Error in rallocm(): "
"out of memory\n");
abort();
}
#endif
return (ALLOCM_ERR_OOM);
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
{
size_t sz;
assert(malloc_initialized || malloc_initializer == pthread_self());
#ifdef JEMALLOC_IVSALLOC
sz = ivsalloc(ptr);
#else
assert(ptr != NULL);
sz = isalloc(ptr);
#endif
assert(rsize != NULL);
*rsize = sz;
return (ALLOCM_SUCCESS);
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(dallocm)(void *ptr, int flags)
{
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
size_t usize;
#endif
assert(ptr != NULL);
assert(malloc_initialized || malloc_initializer == pthread_self());
#ifdef JEMALLOC_STATS
usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
if (opt_prof) {
# ifndef JEMALLOC_STATS
usize = isalloc(ptr);
# endif
prof_free(ptr, usize);
}
#endif
#ifdef JEMALLOC_STATS
ALLOCATED_ADD(0, usize);
#endif
idalloc(ptr);
return (ALLOCM_SUCCESS);
}
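/*
* Illustrative (non-normative) sketch of using the experimental *allocm()
* API defined above, assuming the unprefixed names produced by JEMALLOC_P()
* when no JEMALLOC_PREFIX is configured:
*
*	void *p;
*	size_t rsize;
*
*	if (allocm(&p, &rsize, 4096, ALLOCM_ZERO) == ALLOCM_SUCCESS) {
*		(use p; rsize reports the actual usable size)
*		dallocm(p, 0);
*	}
*/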
/*
* End non-standard functions.
*/
/******************************************************************************/
/*
* The following functions are used by threading libraries for protection of
* malloc during fork().
*/
void
jemalloc_prefork(void)
{
unsigned i;
/* Acquire all mutexes in a safe order. */
malloc_mutex_lock(&arenas_lock);
for (i = 0; i < narenas; i++) {
if (arenas[i] != NULL)
malloc_mutex_lock(&arenas[i]->lock);
}
malloc_mutex_lock(&base_mtx);
malloc_mutex_lock(&huge_mtx);
#ifdef JEMALLOC_DSS
malloc_mutex_lock(&dss_mtx);
#endif
#ifdef JEMALLOC_SWAP
malloc_mutex_lock(&swap_mtx);
#endif
}
void
jemalloc_postfork(void)
{
unsigned i;
/* Release all mutexes, now that fork() has completed. */
#ifdef JEMALLOC_SWAP
malloc_mutex_unlock(&swap_mtx);
#endif
#ifdef JEMALLOC_DSS
malloc_mutex_unlock(&dss_mtx);
#endif
malloc_mutex_unlock(&huge_mtx);
malloc_mutex_unlock(&base_mtx);
for (i = 0; i < narenas; i++) {
if (arenas[i] != NULL)
malloc_mutex_unlock(&arenas[i]->lock);
}
malloc_mutex_unlock(&arenas_lock);
}
/******************************************************************************/
#define JEMALLOC_MB_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
#ifdef JEMALLOC_LAZY_LOCK
static void pthread_create_once(void);
#endif
/******************************************************************************/
/*
* We intercept pthread_create() calls in order to toggle isthreaded if the
* process goes multi-threaded.
*/
#ifdef JEMALLOC_LAZY_LOCK
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
static void
pthread_create_once(void)
{
pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
if (pthread_create_fptr == NULL) {
malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
"\"pthread_create\")\n");
abort();
}
isthreaded = true;
}
JEMALLOC_ATTR(visibility("default"))
int
pthread_create(pthread_t *__restrict thread,
const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
void *__restrict arg)
{
static pthread_once_t once_control = PTHREAD_ONCE_INIT;
pthread_once(&once_control, pthread_create_once);
return (pthread_create_fptr(thread, attr, start_routine, arg));
}
#endif
/******************************************************************************/
bool
malloc_mutex_init(malloc_mutex_t *mutex)
{
#ifdef JEMALLOC_OSSPIN
*mutex = 0;
#else
pthread_mutexattr_t attr;
if (pthread_mutexattr_init(&attr) != 0)
return (true);
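/*
* Prefer adaptive mutexes where the platform provides them: they spin
* briefly under contention before sleeping, which suits the allocator's
* typically short critical sections.
*/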
#ifdef PTHREAD_MUTEX_ADAPTIVE_NP
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
#else
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
#endif
if (pthread_mutex_init(mutex, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
return (true);
}
pthread_mutexattr_destroy(&attr);
#endif
return (false);
}
void
malloc_mutex_destroy(malloc_mutex_t *mutex)
{
#ifndef JEMALLOC_OSSPIN
if (pthread_mutex_destroy(mutex) != 0) {
malloc_write("<jemalloc>: Error in pthread_mutex_destroy()\n");
abort();
}
#endif
}
#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_PROF
/******************************************************************************/
#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif
#ifdef JEMALLOC_PROF_LIBGCC
#include <unwind.h>
#endif
/******************************************************************************/
/* Data. */
bool opt_prof = false;
bool opt_prof_active = true;
size_t opt_lg_prof_bt_max = LG_PROF_BT_MAX_DEFAULT;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_leak = false;
bool opt_prof_accum = true;
ssize_t opt_lg_prof_tcmax = LG_PROF_TCMAX_DEFAULT;
char opt_prof_prefix[PATH_MAX + 1];
uint64_t prof_interval;
bool prof_promote;
unsigned prof_bt_max;
#ifndef NO_TLS
__thread prof_tdata_t *prof_tdata_tls
JEMALLOC_ATTR(tls_model("initial-exec"));
#endif
pthread_key_t prof_tdata_tsd;
/*
* Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data
* structure that knows about all backtraces currently captured.
*/
static ckh_t bt2ctx;
static malloc_mutex_t bt2ctx_mtx;
static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
static uint64_t prof_dump_mseq;
static uint64_t prof_dump_useq;
/*
* This buffer is rather large for stack allocation, so use a single buffer for
* all profile dumps. The buffer is implicitly protected by bt2ctx_mtx, since
* it must be locked anyway during dumping.
*/
static char prof_dump_buf[PROF_DUMP_BUF_SIZE];
static unsigned prof_dump_buf_end;
static int prof_dump_fd;
/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;
static malloc_mutex_t enq_mtx;
static bool enq;
static bool enq_idump;
static bool enq_gdump;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static prof_bt_t *bt_dup(prof_bt_t *bt);
static void bt_destroy(prof_bt_t *bt);
#ifdef JEMALLOC_PROF_LIBGCC
static _Unwind_Reason_Code prof_unwind_init_callback(
struct _Unwind_Context *context, void *arg);
static _Unwind_Reason_Code prof_unwind_callback(
struct _Unwind_Context *context, void *arg);
#endif
static bool prof_flush(bool propagate_err);
static bool prof_write(const char *s, bool propagate_err);
static void prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all,
size_t *leak_nctx);
static void prof_ctx_destroy(prof_ctx_t *ctx);
static void prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt);
static bool prof_dump_ctx(prof_ctx_t *ctx, prof_bt_t *bt,
bool propagate_err);
static bool prof_dump_maps(bool propagate_err);
static bool prof_dump(const char *filename, bool leakcheck,
bool propagate_err);
static void prof_dump_filename(char *filename, char v, int64_t vseq);
static void prof_fdump(void);
static void prof_bt_hash(const void *key, unsigned minbits, size_t *hash1,
size_t *hash2);
static bool prof_bt_keycomp(const void *k1, const void *k2);
static void prof_tdata_cleanup(void *arg);
/******************************************************************************/
void
bt_init(prof_bt_t *bt, void **vec)
{
bt->vec = vec;
bt->len = 0;
}
static void
bt_destroy(prof_bt_t *bt)
{
idalloc(bt);
}
static prof_bt_t *
bt_dup(prof_bt_t *bt)
{
prof_bt_t *ret;
/*
* Create a single allocation that has space for vec immediately
* following the prof_bt_t structure. The backtraces that get
* stored in the backtrace caches are copied from stack-allocated
* temporary variables, so size is known at creation time. Making this
* a contiguous object improves cache locality.
*/
ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) +
(bt->len * sizeof(void *)));
if (ret == NULL)
return (NULL);
ret->vec = (void **)((uintptr_t)ret +
QUANTUM_CEILING(sizeof(prof_bt_t)));
memcpy(ret->vec, bt->vec, bt->len * sizeof(void *));
ret->len = bt->len;
return (ret);
}
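/*
* prof_enter()/prof_leave() bracket operations on the global bt2ctx table.
* While enq is set, prof_idump() and prof_gdump() requests are queued
* (enq_idump/enq_gdump) rather than executed, since dumping also needs
* bt2ctx_mtx; prof_leave() then performs any deferred dumps.
*/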
static inline void
prof_enter(void)
{
malloc_mutex_lock(&enq_mtx);
enq = true;
malloc_mutex_unlock(&enq_mtx);
malloc_mutex_lock(&bt2ctx_mtx);
}
static inline void
prof_leave(void)
{
bool idump, gdump;
malloc_mutex_unlock(&bt2ctx_mtx);
malloc_mutex_lock(&enq_mtx);
enq = false;
idump = enq_idump;
enq_idump = false;
gdump = enq_gdump;
enq_gdump = false;
malloc_mutex_unlock(&enq_mtx);
if (idump)
prof_idump();
if (gdump)
prof_gdump();
}
#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
{
unw_context_t uc;
unw_cursor_t cursor;
unsigned i;
int err;
assert(bt->len == 0);
assert(bt->vec != NULL);
assert(max <= (1U << opt_lg_prof_bt_max));
unw_getcontext(&uc);
unw_init_local(&cursor, &uc);
/* Throw away (nignore+1) stack frames, if that many exist. */
for (i = 0; i < nignore + 1; i++) {
err = unw_step(&cursor);
if (err <= 0)
return;
}
/*
* Iterate over stack frames until there are no more, or until no space
* remains in bt.
*/
for (i = 0; i < max; i++) {
unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]);
bt->len++;
err = unw_step(&cursor);
if (err <= 0)
break;
}
}
#endif
#ifdef JEMALLOC_PROF_LIBGCC
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
{
return (_URC_NO_REASON);
}
static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg)
{
prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
if (data->nignore > 0)
data->nignore--;
else {
data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context);
data->bt->len++;
if (data->bt->len == data->max)
return (_URC_END_OF_STACK);
}
return (_URC_NO_REASON);
}
void
prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
{
prof_unwind_data_t data = {bt, nignore, max};
_Unwind_Backtrace(prof_unwind_callback, &data);
}
#endif
#ifdef JEMALLOC_PROF_GCC
void
prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
{
#define BT_FRAME(i) \
if ((i) < nignore + max) { \
void *p; \
if (__builtin_frame_address(i) == 0) \
return; \
p = __builtin_return_address(i); \
if (p == NULL) \
return; \
if (i >= nignore) { \
bt->vec[(i) - nignore] = p; \
bt->len = (i) - nignore + 1; \
} \
} else \
return;
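/*
* BT_FRAME() is expanded once per frame index because
* __builtin_frame_address() and __builtin_return_address() require
* compile-time constant arguments, so the stack walk cannot be written as a
* runtime loop; hence the manual unrolling below.
*/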
assert(nignore <= 3);
assert(max <= (1U << opt_lg_prof_bt_max));
BT_FRAME(0)
BT_FRAME(1)
BT_FRAME(2)
BT_FRAME(3)
BT_FRAME(4)
BT_FRAME(5)
BT_FRAME(6)
BT_FRAME(7)
BT_FRAME(8)
BT_FRAME(9)
BT_FRAME(10)
BT_FRAME(11)
BT_FRAME(12)
BT_FRAME(13)
BT_FRAME(14)
BT_FRAME(15)
BT_FRAME(16)
BT_FRAME(17)
BT_FRAME(18)
BT_FRAME(19)
BT_FRAME(20)
BT_FRAME(21)
BT_FRAME(22)
BT_FRAME(23)
BT_FRAME(24)
BT_FRAME(25)
BT_FRAME(26)
BT_FRAME(27)
BT_FRAME(28)
BT_FRAME(29)
BT_FRAME(30)
BT_FRAME(31)
BT_FRAME(32)
BT_FRAME(33)
BT_FRAME(34)
BT_FRAME(35)
BT_FRAME(36)
BT_FRAME(37)
BT_FRAME(38)
BT_FRAME(39)
BT_FRAME(40)
BT_FRAME(41)
BT_FRAME(42)
BT_FRAME(43)
BT_FRAME(44)
BT_FRAME(45)
BT_FRAME(46)
BT_FRAME(47)
BT_FRAME(48)
BT_FRAME(49)
BT_FRAME(50)
BT_FRAME(51)
BT_FRAME(52)
BT_FRAME(53)
BT_FRAME(54)
BT_FRAME(55)
BT_FRAME(56)
BT_FRAME(57)
BT_FRAME(58)
BT_FRAME(59)
BT_FRAME(60)
BT_FRAME(61)
BT_FRAME(62)
BT_FRAME(63)
BT_FRAME(64)
BT_FRAME(65)
BT_FRAME(66)
BT_FRAME(67)
BT_FRAME(68)
BT_FRAME(69)
BT_FRAME(70)
BT_FRAME(71)
BT_FRAME(72)
BT_FRAME(73)
BT_FRAME(74)
BT_FRAME(75)
BT_FRAME(76)
BT_FRAME(77)
BT_FRAME(78)
BT_FRAME(79)
BT_FRAME(80)
BT_FRAME(81)
BT_FRAME(82)
BT_FRAME(83)
BT_FRAME(84)
BT_FRAME(85)
BT_FRAME(86)
BT_FRAME(87)
BT_FRAME(88)
BT_FRAME(89)
BT_FRAME(90)
BT_FRAME(91)
BT_FRAME(92)
BT_FRAME(93)
BT_FRAME(94)
BT_FRAME(95)
BT_FRAME(96)
BT_FRAME(97)
BT_FRAME(98)
BT_FRAME(99)
BT_FRAME(100)
BT_FRAME(101)
BT_FRAME(102)
BT_FRAME(103)
BT_FRAME(104)
BT_FRAME(105)
BT_FRAME(106)
BT_FRAME(107)
BT_FRAME(108)
BT_FRAME(109)
BT_FRAME(110)
BT_FRAME(111)
BT_FRAME(112)
BT_FRAME(113)
BT_FRAME(114)
BT_FRAME(115)
BT_FRAME(116)
BT_FRAME(117)
BT_FRAME(118)
BT_FRAME(119)
BT_FRAME(120)
BT_FRAME(121)
BT_FRAME(122)
BT_FRAME(123)
BT_FRAME(124)
BT_FRAME(125)
BT_FRAME(126)
BT_FRAME(127)
/* Extras to compensate for nignore. */
BT_FRAME(128)
BT_FRAME(129)
BT_FRAME(130)
#undef BT_FRAME
}
#endif
prof_thr_cnt_t *
prof_lookup(prof_bt_t *bt)
{
union {
prof_thr_cnt_t *p;
void *v;
} ret;
prof_tdata_t *prof_tdata;
prof_tdata = PROF_TCACHE_GET();
if (prof_tdata == NULL) {
prof_tdata = prof_tdata_init();
if (prof_tdata == NULL)
return (NULL);
}
if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
union {
prof_bt_t *p;
void *v;
} btkey;
union {
prof_ctx_t *p;
void *v;
} ctx;
bool new_ctx;
/*
* This thread's cache lacks bt. Look for it in the global
* cache.
*/
prof_enter();
if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
/* bt has never been seen before. Insert it. */
ctx.v = imalloc(sizeof(prof_ctx_t));
if (ctx.v == NULL) {
prof_leave();
return (NULL);
}
btkey.p = bt_dup(bt);
if (btkey.v == NULL) {
prof_leave();
idalloc(ctx.v);
return (NULL);
}
ctx.p->bt = btkey.p;
if (malloc_mutex_init(&ctx.p->lock)) {
prof_leave();
idalloc(btkey.v);
idalloc(ctx.v);
return (NULL);
}
memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t));
ql_new(&ctx.p->cnts_ql);
if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
/* OOM. */
prof_leave();
malloc_mutex_destroy(&ctx.p->lock);
idalloc(btkey.v);
idalloc(ctx.v);
return (NULL);
}
/*
* Artificially raise curobjs, in order to avoid a race
* condition with prof_ctx_merge()/prof_ctx_destroy().
*/
ctx.p->cnt_merged.curobjs++;
new_ctx = true;
} else
new_ctx = false;
prof_leave();
/* Link a prof_thr_cnt_t into ctx for this thread. */
if (opt_lg_prof_tcmax >= 0 && ckh_count(&prof_tdata->bt2cnt)
== (ZU(1) << opt_lg_prof_tcmax)) {
assert(ckh_count(&prof_tdata->bt2cnt) > 0);
/*
* Flush the least recently used cnt in order to keep
* bt2cnt from becoming too large.
*/
ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
assert(ret.v != NULL);
ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt, NULL,
NULL);
ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
prof_ctx_merge(ret.p->ctx, ret.p);
/* ret can now be re-used. */
} else {
assert(opt_lg_prof_tcmax < 0 ||
ckh_count(&prof_tdata->bt2cnt) < (ZU(1) <<
opt_lg_prof_tcmax));
/* Allocate and partially initialize a new cnt. */
ret.v = imalloc(sizeof(prof_thr_cnt_t));
if (ret.p == NULL) {
if (new_ctx) {
malloc_mutex_lock(&ctx.p->lock);
ctx.p->cnt_merged.curobjs--;
malloc_mutex_unlock(&ctx.p->lock);
}
return (NULL);
}
ql_elm_new(ret.p, cnts_link);
ql_elm_new(ret.p, lru_link);
}
/* Finish initializing ret. */
ret.p->ctx = ctx.p;
ret.p->epoch = 0;
memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
if (new_ctx) {
malloc_mutex_lock(&ctx.p->lock);
ctx.p->cnt_merged.curobjs--;
malloc_mutex_unlock(&ctx.p->lock);
}
idalloc(ret.v);
return (NULL);
}
ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
malloc_mutex_lock(&ctx.p->lock);
ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
if (new_ctx)
ctx.p->cnt_merged.curobjs--;
malloc_mutex_unlock(&ctx.p->lock);
} else {
/* Move ret to the front of the LRU. */
ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
}
return (ret.p);
}
static bool
prof_flush(bool propagate_err)
{
bool ret = false;
ssize_t err;
err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
if (err == -1) {
if (propagate_err == false) {
malloc_write("<jemalloc>: write() failed during heap "
"profile flush\n");
if (opt_abort)
abort();
}
ret = true;
}
prof_dump_buf_end = 0;
return (ret);
}
static bool
prof_write(const char *s, bool propagate_err)
{
unsigned i, slen, n;
i = 0;
slen = strlen(s);
while (i < slen) {
/* Flush the buffer if it is full. */
if (prof_dump_buf_end == PROF_DUMP_BUF_SIZE)
if (prof_flush(propagate_err) && propagate_err)
return (true);
if (prof_dump_buf_end + slen <= PROF_DUMP_BUF_SIZE) {
/* Finish writing. */
n = slen - i;
} else {
/* Write as much of s as will fit. */
n = PROF_DUMP_BUF_SIZE - prof_dump_buf_end;
}
memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
prof_dump_buf_end += n;
i += n;
}
return (false);
}
static void
prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
{
prof_thr_cnt_t *thr_cnt;
prof_cnt_t tcnt;
malloc_mutex_lock(&ctx->lock);
memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
volatile unsigned *epoch = &thr_cnt->epoch;
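/*
* Seqlock-style read: writers are expected to bump epoch to an odd
* value before updating cnts and to an even value afterward, so a
* snapshot taken while epoch is even and unchanged across the copy
* is consistent without taking any lock.
*/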
while (true) {
unsigned epoch0 = *epoch;
/* Make sure epoch is even. */
if (epoch0 & 1U)
continue;
memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t));
/* Terminate if epoch didn't change while reading. */
if (*epoch == epoch0)
break;
}
ctx->cnt_summed.curobjs += tcnt.curobjs;
ctx->cnt_summed.curbytes += tcnt.curbytes;
if (opt_prof_accum) {
ctx->cnt_summed.accumobjs += tcnt.accumobjs;
ctx->cnt_summed.accumbytes += tcnt.accumbytes;
}
}
if (ctx->cnt_summed.curobjs != 0)
(*leak_nctx)++;
/* Add to cnt_all. */
cnt_all->curobjs += ctx->cnt_summed.curobjs;
cnt_all->curbytes += ctx->cnt_summed.curbytes;
if (opt_prof_accum) {
cnt_all->accumobjs += ctx->cnt_summed.accumobjs;
cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
}
malloc_mutex_unlock(&ctx->lock);
}
static void
prof_ctx_destroy(prof_ctx_t *ctx)
{
/*
* Check that ctx is still unused by any thread cache before destroying
* it. prof_lookup() interlocks bt2ctx_mtx and ctx->lock in order to
* avoid a race condition with this function, and prof_ctx_merge()
* artificially raises ctx->cnt_merged.curobjs in order to avoid a race
* between the main body of prof_ctx_merge() and entry into this
* function.
*/
prof_enter();
malloc_mutex_lock(&ctx->lock);
if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 1) {
assert(ctx->cnt_merged.curbytes == 0);
assert(ctx->cnt_merged.accumobjs == 0);
assert(ctx->cnt_merged.accumbytes == 0);
/* Remove ctx from bt2ctx. */
ckh_remove(&bt2ctx, ctx->bt, NULL, NULL);
prof_leave();
/* Destroy ctx. */
malloc_mutex_unlock(&ctx->lock);
bt_destroy(ctx->bt);
malloc_mutex_destroy(&ctx->lock);
idalloc(ctx);
} else {
/* Compensate for increment in prof_ctx_merge(). */
ctx->cnt_merged.curobjs--;
malloc_mutex_unlock(&ctx->lock);
prof_leave();
}
}
static void
prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
{
bool destroy;
/* Merge cnt stats and detach from ctx. */
malloc_mutex_lock(&ctx->lock);
ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
ql_remove(&ctx->cnts_ql, cnt, cnts_link);
if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
ctx->cnt_merged.curobjs == 0) {
/*
* Artificially raise ctx->cnt_merged.curobjs in order to keep
* another thread from winning the race to destroy ctx while
* this one has ctx->lock dropped. Without this, it would be
* possible for another thread to:
*
* 1) Sample an allocation associated with ctx.
* 2) Deallocate the sampled object.
* 3) Successfully prof_ctx_destroy(ctx).
*
* The result would be that ctx no longer exists by the time
* this thread accesses it in prof_ctx_destroy().
*/
ctx->cnt_merged.curobjs++;
destroy = true;
} else
destroy = false;
malloc_mutex_unlock(&ctx->lock);
if (destroy)
prof_ctx_destroy(ctx);
}
static bool
prof_dump_ctx(prof_ctx_t *ctx, prof_bt_t *bt, bool propagate_err)
{
char buf[UMAX2S_BUFSIZE];
unsigned i;
if (opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) {
assert(ctx->cnt_summed.curbytes == 0);
assert(ctx->cnt_summed.accumobjs == 0);
assert(ctx->cnt_summed.accumbytes == 0);
return (false);
}
if (prof_write(u2s(ctx->cnt_summed.curobjs, 10, buf), propagate_err)
|| prof_write(": ", propagate_err)
|| prof_write(u2s(ctx->cnt_summed.curbytes, 10, buf),
propagate_err)
|| prof_write(" [", propagate_err)
|| prof_write(u2s(ctx->cnt_summed.accumobjs, 10, buf),
propagate_err)
|| prof_write(": ", propagate_err)
|| prof_write(u2s(ctx->cnt_summed.accumbytes, 10, buf),
propagate_err)
|| prof_write("] @", propagate_err))
return (true);
for (i = 0; i < bt->len; i++) {
if (prof_write(" 0x", propagate_err)
|| prof_write(u2s((uintptr_t)bt->vec[i], 16, buf),
propagate_err))
return (true);
}
if (prof_write("\n", propagate_err))
return (true);
return (false);
}
static bool
prof_dump_maps(bool propagate_err)
{
int mfd;
char buf[UMAX2S_BUFSIZE];
char *s;
unsigned i, slen;
/* /proc/<pid>/maps\0 */
char mpath[6 + UMAX2S_BUFSIZE
+ 5 + 1];
i = 0;
s = "/proc/";
slen = strlen(s);
memcpy(&mpath[i], s, slen);
i += slen;
s = u2s(getpid(), 10, buf);
slen = strlen(s);
memcpy(&mpath[i], s, slen);
i += slen;
s = "/maps";
slen = strlen(s);
memcpy(&mpath[i], s, slen);
i += slen;
mpath[i] = '\0';
mfd = open(mpath, O_RDONLY);
if (mfd != -1) {
ssize_t nread;
if (prof_write("\nMAPPED_LIBRARIES:\n", propagate_err) &&
propagate_err)
return (true);
nread = 0;
do {
prof_dump_buf_end += nread;
if (prof_dump_buf_end == PROF_DUMP_BUF_SIZE) {
/* Make space in prof_dump_buf before read(). */
if (prof_flush(propagate_err) && propagate_err) {
close(mfd);
return (true);
}
}
nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
PROF_DUMP_BUF_SIZE - prof_dump_buf_end);
} while (nread > 0);
close(mfd);
} else
return (true);
return (false);
}
static bool
prof_dump(const char *filename, bool leakcheck, bool propagate_err)
{
prof_cnt_t cnt_all;
size_t tabind;
union {
prof_bt_t *p;
void *v;
} bt;
union {
prof_ctx_t *p;
void *v;
} ctx;
char buf[UMAX2S_BUFSIZE];
size_t leak_nctx;
prof_enter();
prof_dump_fd = creat(filename, 0644);
if (prof_dump_fd == -1) {
if (propagate_err == false) {
malloc_write("<jemalloc>: creat(\"");
malloc_write(filename);
malloc_write("\", 0644) failed\n");
if (opt_abort)
abort();
}
goto ERROR;
}
/* Merge per thread profile stats, and sum them in cnt_all. */
memset(&cnt_all, 0, sizeof(prof_cnt_t));
leak_nctx = 0;
for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx);
/* Dump profile header. */
if (prof_write("heap profile: ", propagate_err)
|| prof_write(u2s(cnt_all.curobjs, 10, buf), propagate_err)
|| prof_write(": ", propagate_err)
|| prof_write(u2s(cnt_all.curbytes, 10, buf), propagate_err)
|| prof_write(" [", propagate_err)
|| prof_write(u2s(cnt_all.accumobjs, 10, buf), propagate_err)
|| prof_write(": ", propagate_err)
|| prof_write(u2s(cnt_all.accumbytes, 10, buf), propagate_err))
goto ERROR;
if (opt_lg_prof_sample == 0) {
if (prof_write("] @ heapprofile\n", propagate_err))
goto ERROR;
} else {
if (prof_write("] @ heap_v2/", propagate_err)
|| prof_write(u2s((uint64_t)1U << opt_lg_prof_sample, 10,
buf), propagate_err)
|| prof_write("\n", propagate_err))
goto ERROR;
}
/* Dump per ctx profile stats. */
for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v)
== false;) {
if (prof_dump_ctx(ctx.p, bt.p, propagate_err))
goto ERROR;
}
/* Dump /proc/<pid>/maps if possible. */
if (prof_dump_maps(propagate_err))
goto ERROR;
if (prof_flush(propagate_err))
goto ERROR;
close(prof_dump_fd);
prof_leave();
if (leakcheck && cnt_all.curbytes != 0) {
malloc_write("<jemalloc>: Leak summary: ");
malloc_write(u2s(cnt_all.curbytes, 10, buf));
malloc_write((cnt_all.curbytes != 1) ? " bytes, " : " byte, ");
malloc_write(u2s(cnt_all.curobjs, 10, buf));
malloc_write((cnt_all.curobjs != 1) ? " objects, " :
" object, ");
malloc_write(u2s(leak_nctx, 10, buf));
malloc_write((leak_nctx != 1) ? " contexts\n" : " context\n");
malloc_write("<jemalloc>: Run pprof on \"");
malloc_write(filename);
malloc_write("\" for leak detail\n");
}
return (false);
ERROR:
prof_leave();
return (true);
}
/* Buffer size sufficient for any "<prefix>.<pid>.<seq>.v<vseq>.heap" filename. */
#define DUMP_FILENAME_BUFSIZE (PATH_MAX + UMAX2S_BUFSIZE \
+ 1 \
+ UMAX2S_BUFSIZE \
+ 2 \
+ UMAX2S_BUFSIZE \
+ 5 + 1)
static void
prof_dump_filename(char *filename, char v, int64_t vseq)
{
char buf[UMAX2S_BUFSIZE];
char *s;
unsigned i, slen;
/*
* Construct a filename of the form:
*
* <prefix>.<pid>.<seq>.v<vseq>.heap\0
*/
i = 0;
s = opt_prof_prefix;
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
s = ".";
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
s = u2s(getpid(), 10, buf);
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
s = ".";
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
s = u2s(prof_dump_seq, 10, buf);
prof_dump_seq++;
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
s = ".";
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
filename[i] = v;
i++;
if (vseq != 0xffffffffffffffffLLU) {
s = u2s(vseq, 10, buf);
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
}
s = ".heap";
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
filename[i] = '\0';
}
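/* Final ('f') profile dump, registered with atexit() from prof_boot2(). */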
static void
prof_fdump(void)
{
char filename[DUMP_FILENAME_BUFSIZE];
if (prof_booted == false)
return;
if (opt_prof_prefix[0] != '\0') {
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'f', 0xffffffffffffffffLLU);
malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(filename, opt_prof_leak, false);
}
}
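/*
* Interval-triggered ('i') profile dump. If dumping is temporarily disabled
* (enq), record the request in enq_idump so that it can be performed later.
*/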
void
prof_idump(void)
{
char filename[DUMP_FILENAME_BUFSIZE];
if (prof_booted == false)
return;
malloc_mutex_lock(&enq_mtx);
if (enq) {
enq_idump = true;
malloc_mutex_unlock(&enq_mtx);
return;
}
malloc_mutex_unlock(&enq_mtx);
if (opt_prof_prefix[0] != '\0') {
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'i', prof_dump_iseq);
prof_dump_iseq++;
malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(filename, false, false);
}
}
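/*
* Dump a profile on explicit request. If no filename is supplied, generate an
* 'm' (manual) filename; errors are propagated to the caller rather than
* printed.
*/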
bool
prof_mdump(const char *filename)
{
char filename_buf[DUMP_FILENAME_BUFSIZE];
if (opt_prof == false || prof_booted == false)
return (true);
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
if (opt_prof_prefix[0] == '\0')
return (true);
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
prof_dump_mseq++;
malloc_mutex_unlock(&prof_dump_seq_mtx);
filename = filename_buf;
}
return (prof_dump(filename, false, true));
}
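/*
* High-water-mark ('u') profile dump. If dumping is temporarily disabled
* (enq), record the request in enq_gdump so that it can be performed later.
*/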
void
prof_gdump(void)
{
char filename[DUMP_FILENAME_BUFSIZE];
if (prof_booted == false)
return;
malloc_mutex_lock(&enq_mtx);
if (enq) {
enq_gdump = true;
malloc_mutex_unlock(&enq_mtx);
return;
}
malloc_mutex_unlock(&enq_mtx);
if (opt_prof_prefix[0] != '\0') {
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'u', prof_dump_useq);
prof_dump_useq++;
malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(filename, false, false);
}
}
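/*
* Compute the pair of hashes for a backtrace key, as required by the ckh
* cuckoo hash tables (bt2ctx and the per thread bt2cnt caches).
*/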
static void
prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
{
size_t ret1, ret2;
uint64_t h;
prof_bt_t *bt = (prof_bt_t *)key;
assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
assert(hash1 != NULL);
assert(hash2 != NULL);
h = hash(bt->vec, bt->len * sizeof(void *), 0x94122f335b332aeaLLU);
if (minbits <= 32) {
/*
* Avoid doing multiple hashes, since a single hash provides
* enough bits.
*/
ret1 = h & ZU(0xffffffffU);
ret2 = h >> 32;
} else {
ret1 = h;
ret2 = hash(bt->vec, bt->len * sizeof(void *),
0x8432a476666bbc13U);
}
*hash1 = ret1;
*hash2 = ret2;
}
static bool
prof_bt_keycomp(const void *k1, const void *k2)
{
const prof_bt_t *bt1 = (prof_bt_t *)k1;
const prof_bt_t *bt2 = (prof_bt_t *)k2;
if (bt1->len != bt2->len)
return (false);
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
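/*
* Lazily create this thread's profiling state: the bt2cnt cache, its LRU
* list, and a backtrace vector of prof_bt_max entries.
*/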
prof_tdata_t *
prof_tdata_init(void)
{
prof_tdata_t *prof_tdata;
/* Initialize an empty cache for this thread. */
prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
if (prof_tdata == NULL)
return (NULL);
if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS,
prof_bt_hash, prof_bt_keycomp)) {
idalloc(prof_tdata);
return (NULL);
}
ql_new(&prof_tdata->lru_ql);
prof_tdata->vec = imalloc(sizeof(void *) * prof_bt_max);
if (prof_tdata->vec == NULL) {
ckh_delete(&prof_tdata->bt2cnt);
idalloc(prof_tdata);
return (NULL);
}
prof_tdata->prn_state = 0;
prof_tdata->threshold = 0;
prof_tdata->accum = 0;
PROF_TCACHE_SET(prof_tdata);
return (prof_tdata);
}
static void
prof_tdata_cleanup(void *arg)
{
prof_tdata_t *prof_tdata;
prof_tdata = PROF_TCACHE_GET();
if (prof_tdata != NULL) {
prof_thr_cnt_t *cnt;
/*
* Delete the hash table. All of its contents can still be
* iterated over via the LRU.
*/
ckh_delete(&prof_tdata->bt2cnt);
/*
* Iteratively merge cnt's into the global stats and delete
* them.
*/
while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
prof_ctx_merge(cnt->ctx, cnt);
ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
idalloc(cnt);
}
idalloc(prof_tdata->vec);
idalloc(prof_tdata);
PROF_TCACHE_SET(NULL);
}
}
void
prof_boot0(void)
{
memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
sizeof(PROF_PREFIX_DEFAULT));
}
void
prof_boot1(void)
{
/*
* opt_prof and prof_promote must be in their final state before any
* arenas are initialized, so this function must be executed early.
*/
if (opt_prof_leak && opt_prof == false) {
/*
* Enable opt_prof, but in such a way that profiles are never
* automatically dumped.
*/
opt_prof = true;
opt_prof_gdump = false;
prof_interval = 0;
} else if (opt_prof) {
if (opt_lg_prof_interval >= 0) {
prof_interval = (((uint64_t)1U) <<
opt_lg_prof_interval);
} else
prof_interval = 0;
}
prof_promote = (opt_prof && opt_lg_prof_sample > PAGE_SHIFT);
}
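/*
* Final stage of profiling bootstrap: create the global bt2ctx table and its
* mutex, the thread data TSD key, the dump sequence/enqueue mutexes, and
* register prof_fdump() with atexit().
*/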
bool
prof_boot2(void)
{
if (opt_prof) {
if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp))
return (true);
if (malloc_mutex_init(&bt2ctx_mtx))
return (true);
if (pthread_key_create(&prof_tdata_tsd, prof_tdata_cleanup)
!= 0) {
malloc_write(
"<jemalloc>: Error in pthread_key_create()\n");
abort();
}
prof_bt_max = (1U << opt_lg_prof_bt_max);
if (malloc_mutex_init(&prof_dump_seq_mtx))
return (true);
if (malloc_mutex_init(&enq_mtx))
return (true);
enq = false;
enq_idump = false;
enq_gdump = false;
if (atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
if (opt_abort)
abort();
}
}
#ifdef JEMALLOC_PROF_LIBGCC
/*
* Cause the backtracing machinery to allocate its internal state
* before enabling profiling.
*/
_Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif
prof_booted = true;
return (false);
}
/******************************************************************************/
#endif /* JEMALLOC_PROF */
#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_internal.h"
rtree_t *
rtree_new(unsigned bits)
{
rtree_t *ret;
unsigned bits_per_level, height, i;
bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
height = bits / bits_per_level;
if (height * bits_per_level != bits)
height++;
assert(height * bits_per_level >= bits);
ret = (rtree_t*)base_alloc(offsetof(rtree_t, level2bits) +
(sizeof(unsigned) * height));
if (ret == NULL)
return (NULL);
memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
height));
if (malloc_mutex_init(&ret->mutex)) {
/* Leak the rtree. */
return (NULL);
}
ret->height = height;
if (bits_per_level * height > bits)
ret->level2bits[0] = bits % bits_per_level;
else
ret->level2bits[0] = bits_per_level;
for (i = 1; i < height; i++)
ret->level2bits[i] = bits_per_level;
ret->root = (void**)base_alloc(sizeof(void *) << ret->level2bits[0]);
if (ret->root == NULL) {
/*
* We leak the rtree here, since there's no generic base
* deallocation.
*/
return (NULL);
}
memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
return (ret);
}
#define JEMALLOC_STATS_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define CTL_GET(n, v, t) do { \
size_t sz = sizeof(t); \
xmallctl(n, v, &sz, NULL, 0); \
} while (0)
#define CTL_I_GET(n, v, t) do { \
size_t mib[6]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
mib[2] = i; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
#define CTL_J_GET(n, v, t) do { \
size_t mib[6]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
mib[2] = j; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
#define CTL_IJ_GET(n, v, t) do { \
size_t mib[6]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
mib[2] = i; \
mib[4] = j; \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
/******************************************************************************/
/* Data. */
bool opt_stats_print = false;
#ifdef JEMALLOC_STATS
size_t stats_cactive = 0;
#endif
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
#ifdef JEMALLOC_STATS
static void malloc_vcprintf(void (*write_cb)(void *, const char *),
void *cbopaque, const char *format, va_list ap);
static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
#endif
/******************************************************************************/
/*
* We don't want to depend on vsnprintf() for production builds, since that can
* cause unnecessary bloat for static binaries. u2s() provides minimal integer
* printing functionality, so that malloc_printf() use can be limited to
* JEMALLOC_STATS code.
*/
char *
u2s(uint64_t x, unsigned base, char *s)
{
unsigned i;
i = UMAX2S_BUFSIZE - 1;
s[i] = '\0';
switch (base) {
case 10:
do {
i--;
s[i] = "0123456789"[x % (uint64_t)10];
x /= (uint64_t)10;
} while (x > 0);
break;
case 16:
do {
i--;
s[i] = "0123456789abcdef"[x & 0xf];
x >>= 4;
} while (x > 0);
break;
default:
do {
i--;
s[i] = "0123456789abcdefghijklmnopqrstuvwxyz"[x %
(uint64_t)base];
x /= (uint64_t)base;
} while (x > 0);
}
return (&s[i]);
}
#ifdef JEMALLOC_STATS
static void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap)
{
char buf[4096];
if (write_cb == NULL) {
/*
* The caller did not provide an alternate write_cb callback
* function, so use the default one. malloc_write() is an
* inline function, so use malloc_message() directly here.
*/
write_cb = JEMALLOC_P(malloc_message);
cbopaque = NULL;
}
vsnprintf(buf, sizeof(buf), format, ap);
write_cb(cbopaque, buf);
}
/*
* Print to a callback function in such a way as to (hopefully) avoid memory
* allocation.
*/
JEMALLOC_ATTR(format(printf, 3, 4))
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(write_cb, cbopaque, format, ap);
va_end(ap);
}
/*
* Print to stderr in such a way as to (hopefully) avoid memory allocation.
*/
JEMALLOC_ATTR(format(printf, 1, 2))
void
malloc_printf(const char *format, ...)
{
va_list ap;
va_start(ap, format);
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
}
#endif
#ifdef JEMALLOC_STATS
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
{
size_t pagesize;
bool config_tcache;
unsigned nbins, j, gap_start;
CTL_GET("arenas.pagesize", &pagesize, size_t);
CTL_GET("config.tcache", &config_tcache, bool);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
"bins: bin size regs pgs allocated nmalloc"
" ndalloc nrequests nfills nflushes"
" newruns reruns maxruns curruns\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"bins: bin size regs pgs allocated nmalloc"
" ndalloc newruns reruns maxruns"
" curruns\n");
}
CTL_GET("arenas.nbins", &nbins, unsigned);
for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
uint64_t nruns;
CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
if (nruns == 0) {
if (gap_start == UINT_MAX)
gap_start = j;
} else {
unsigned ntbins_, nqbins, ncbins, nsbins;
size_t reg_size, run_size, allocated;
uint32_t nregs;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t reruns;
size_t highruns, curruns;
if (gap_start != UINT_MAX) {
if (j > gap_start + 1) {
/* Gap of more than one size class. */
malloc_cprintf(write_cb, cbopaque,
"[%u..%u]\n", gap_start,
j - 1);
} else {
/* Gap of one size class. */
malloc_cprintf(write_cb, cbopaque,
"[%u]\n", gap_start);
}
gap_start = UINT_MAX;
}
CTL_GET("arenas.ntbins", &ntbins_, unsigned);
CTL_GET("arenas.nqbins", &nqbins, unsigned);
CTL_GET("arenas.ncbins", &ncbins, unsigned);
CTL_GET("arenas.nsbins", &nsbins, unsigned);
CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
CTL_IJ_GET("stats.arenas.0.bins.0.allocated",
&allocated, size_t);
CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc",
&nmalloc, uint64_t);
CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc",
&ndalloc, uint64_t);
if (config_tcache) {
CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
&nrequests, uint64_t);
CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
&nfills, uint64_t);
CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
&nflushes, uint64_t);
}
CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
uint64_t);
CTL_IJ_GET("stats.arenas.0.bins.0.highruns", &highruns,
size_t);
CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
size_t);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
"%13u %1s %5zu %4u %3zu %12zu %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu %12zu\n",
j,
j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
"Q" : j < ntbins_ + nqbins + ncbins ? "C" :
"S",
reg_size, nregs, run_size / pagesize,
allocated, nmalloc, ndalloc, nrequests,
nfills, nflushes, nruns, reruns, highruns,
curruns);
} else {
malloc_cprintf(write_cb, cbopaque,
"%13u %1s %5zu %4u %3zu %12zu %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu %12zu\n",
j,
j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
"Q" : j < ntbins_ + nqbins + ncbins ? "C" :
"S",
reg_size, nregs, run_size / pagesize,
allocated, nmalloc, ndalloc, nruns, reruns,
highruns, curruns);
}
}
}
if (gap_start != UINT_MAX) {
if (j > gap_start + 1) {
/* Gap of more than one size class. */
malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
gap_start, j - 1);
} else {
/* Gap of one size class. */
malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
}
}
}
static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
{
size_t pagesize, nlruns, j;
ssize_t gap_start;
CTL_GET("arenas.pagesize", &pagesize, size_t);
malloc_cprintf(write_cb, cbopaque,
"large: size pages nmalloc ndalloc nrequests"
" maxruns curruns\n");
CTL_GET("arenas.nlruns", &nlruns, size_t);
for (j = 0, gap_start = -1; j < nlruns; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t run_size, highruns, curruns;
CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
uint64_t);
CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc,
uint64_t);
CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
uint64_t);
if (nrequests == 0) {
if (gap_start == -1)
gap_start = j;
} else {
CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
CTL_IJ_GET("stats.arenas.0.lruns.0.highruns", &highruns,
size_t);
CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
size_t);
if (gap_start != -1) {
malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
j - gap_start);
gap_start = -1;
}
malloc_cprintf(write_cb, cbopaque,
"%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12zu %12zu\n",
run_size, run_size / pagesize, nmalloc, ndalloc,
nrequests, highruns, curruns);
}
}
if (gap_start != -1)
malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
}
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
{
unsigned nthreads;
size_t pagesize, pactive, pdirty, mapped;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests;
CTL_GET("arenas.pagesize", &pagesize, size_t);
CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
malloc_cprintf(write_cb, cbopaque,
"assigned threads: %u\n", nthreads);
CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t);
CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
" %"PRIu64" madvise%s, %"PRIu64" purged\n",
pactive, pdirty, npurge, npurge == 1 ? "" : "s",
nmadvise, nmadvise == 1 ? "" : "s", purged);
malloc_cprintf(write_cb, cbopaque,
" allocated nmalloc ndalloc nrequests\n");
CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t);
CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t);
CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
malloc_cprintf(write_cb, cbopaque,
"large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
malloc_cprintf(write_cb, cbopaque,
"total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
small_allocated + large_allocated,
small_nmalloc + large_nmalloc,
small_ndalloc + large_ndalloc,
small_nrequests + large_nrequests);
malloc_cprintf(write_cb, cbopaque, "active: %12zu\n",
pactive * pagesize);
CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
stats_arena_bins_print(write_cb, cbopaque, i);
stats_arena_lruns_print(write_cb, cbopaque, i);
}
#endif
void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
int err;
uint64_t epoch;
size_t u64sz;
char s[UMAX2S_BUFSIZE];
bool general = true;
bool merged = true;
bool unmerged = true;
bool bins = true;
bool large = true;
/*
* Refresh stats, in case mallctl() was called by the application.
*
* Check for OOM here, since refreshing the ctl cache can trigger
* allocation. In practice, none of the subsequent mallctl()-related
* calls in this function will cause OOM if this one succeeds.
*/
epoch = 1;
u64sz = sizeof(uint64_t);
err = JEMALLOC_P(mallctl)("epoch", &epoch, &u64sz, &epoch,
sizeof(uint64_t));
if (err != 0) {
if (err == EAGAIN) {
malloc_write("<jemalloc>: Memory allocation failure in "
"mallctl(\"epoch\", ...)\n");
return;
}
malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
"...)\n");
abort();
}
if (write_cb == NULL) {
/*
* The caller did not provide an alternate write_cb callback
* function, so use the default one. malloc_write() is an
* inline function, so use malloc_message() directly here.
*/
write_cb = JEMALLOC_P(malloc_message);
cbopaque = NULL;
}
if (opts != NULL) {
unsigned i;
for (i = 0; opts[i] != '\0'; i++) {
switch (opts[i]) {
case 'g':
general = false;
break;
case 'm':
merged = false;
break;
case 'a':
unmerged = false;
break;
case 'b':
bins = false;
break;
case 'l':
large = false;
break;
default:;
}
}
}
write_cb(cbopaque, "___ Begin jemalloc statistics ___\n");
if (general) {
int err;
const char *cpv;
bool bv;
unsigned uv;
ssize_t ssv;
size_t sv, bsz, ssz, sssz, cpsz;
bsz = sizeof(bool);
ssz = sizeof(size_t);
sssz = sizeof(ssize_t);
cpsz = sizeof(const char *);
CTL_GET("version", &cpv, const char *);
write_cb(cbopaque, "Version: ");
write_cb(cbopaque, cpv);
write_cb(cbopaque, "\n");
CTL_GET("config.debug", &bv, bool);
write_cb(cbopaque, "Assertions ");
write_cb(cbopaque, bv ? "enabled" : "disabled");
write_cb(cbopaque, "\n");
#define OPT_WRITE_BOOL(n) \
if ((err = JEMALLOC_P(mallctl)("opt."#n, &bv, &bsz, \
NULL, 0)) == 0) { \
write_cb(cbopaque, " opt."#n": "); \
write_cb(cbopaque, bv ? "true" : "false"); \
write_cb(cbopaque, "\n"); \
}
#define OPT_WRITE_SIZE_T(n) \
if ((err = JEMALLOC_P(mallctl)("opt."#n, &sv, &ssz, \
NULL, 0)) == 0) { \
write_cb(cbopaque, " opt."#n": "); \
write_cb(cbopaque, u2s(sv, 10, s)); \
write_cb(cbopaque, "\n"); \
}
#define OPT_WRITE_SSIZE_T(n) \
if ((err = JEMALLOC_P(mallctl)("opt."#n, &ssv, &sssz, \
NULL, 0)) == 0) { \
if (ssv >= 0) { \
write_cb(cbopaque, " opt."#n": "); \
write_cb(cbopaque, u2s(ssv, 10, s)); \
} else { \
write_cb(cbopaque, " opt."#n": -"); \
write_cb(cbopaque, u2s(-ssv, 10, s)); \
} \
write_cb(cbopaque, "\n"); \
}
#define OPT_WRITE_CHAR_P(n) \
if ((err = JEMALLOC_P(mallctl)("opt."#n, &cpv, &cpsz, \
NULL, 0)) == 0) { \
write_cb(cbopaque, " opt."#n": \""); \
write_cb(cbopaque, cpv); \
write_cb(cbopaque, "\"\n"); \
}
write_cb(cbopaque, "Run-time option settings:\n");
OPT_WRITE_BOOL(abort)
OPT_WRITE_SIZE_T(lg_qspace_max)
OPT_WRITE_SIZE_T(lg_cspace_max)
OPT_WRITE_SIZE_T(lg_chunk)
OPT_WRITE_SIZE_T(narenas)
OPT_WRITE_SSIZE_T(lg_dirty_mult)
OPT_WRITE_BOOL(stats_print)
OPT_WRITE_BOOL(junk)
OPT_WRITE_BOOL(zero)
OPT_WRITE_BOOL(sysv)
OPT_WRITE_BOOL(xmalloc)
OPT_WRITE_BOOL(tcache)
OPT_WRITE_SSIZE_T(lg_tcache_gc_sweep)
OPT_WRITE_SSIZE_T(lg_tcache_max)
OPT_WRITE_BOOL(prof)
OPT_WRITE_CHAR_P(prof_prefix)
OPT_WRITE_SIZE_T(lg_prof_bt_max)
OPT_WRITE_BOOL(prof_active)
OPT_WRITE_SSIZE_T(lg_prof_sample)
OPT_WRITE_BOOL(prof_accum)
OPT_WRITE_SSIZE_T(lg_prof_tcmax)
OPT_WRITE_SSIZE_T(lg_prof_interval)
OPT_WRITE_BOOL(prof_gdump)
OPT_WRITE_BOOL(prof_leak)
OPT_WRITE_BOOL(overcommit)
#undef OPT_WRITE_BOOL
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
write_cb(cbopaque, "CPUs: ");
write_cb(cbopaque, u2s(ncpus, 10, s));
write_cb(cbopaque, "\n");
CTL_GET("arenas.narenas", &uv, unsigned);
write_cb(cbopaque, "Max arenas: ");
write_cb(cbopaque, u2s(uv, 10, s));
write_cb(cbopaque, "\n");
write_cb(cbopaque, "Pointer size: ");
write_cb(cbopaque, u2s(sizeof(void *), 10, s));
write_cb(cbopaque, "\n");
CTL_GET("arenas.quantum", &sv, size_t);
write_cb(cbopaque, "Quantum size: ");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "\n");
CTL_GET("arenas.cacheline", &sv, size_t);
write_cb(cbopaque, "Cacheline size (assumed): ");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "\n");
CTL_GET("arenas.subpage", &sv, size_t);
write_cb(cbopaque, "Subpage spacing: ");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "\n");
if ((err = JEMALLOC_P(mallctl)("arenas.tspace_min", &sv, &ssz,
NULL, 0)) == 0) {
write_cb(cbopaque, "Tiny 2^n-spaced sizes: [");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "..");
CTL_GET("arenas.tspace_max", &sv, size_t);
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "]\n");
}
CTL_GET("arenas.qspace_min", &sv, size_t);
write_cb(cbopaque, "Quantum-spaced sizes: [");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "..");
CTL_GET("arenas.qspace_max", &sv, size_t);
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "]\n");
CTL_GET("arenas.cspace_min", &sv, size_t);
write_cb(cbopaque, "Cacheline-spaced sizes: [");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "..");
CTL_GET("arenas.cspace_max", &sv, size_t);
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "]\n");
CTL_GET("arenas.sspace_min", &sv, size_t);
write_cb(cbopaque, "Subpage-spaced sizes: [");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "..");
CTL_GET("arenas.sspace_max", &sv, size_t);
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "]\n");
CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
if (ssv >= 0) {
write_cb(cbopaque,
"Min active:dirty page ratio per arena: ");
write_cb(cbopaque, u2s((1U << ssv), 10, s));
write_cb(cbopaque, ":1\n");
} else {
write_cb(cbopaque,
"Min active:dirty page ratio per arena: N/A\n");
}
if ((err = JEMALLOC_P(mallctl)("arenas.tcache_max", &sv,
&ssz, NULL, 0)) == 0) {
write_cb(cbopaque,
"Maximum thread-cached size class: ");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "\n");
}
if ((err = JEMALLOC_P(mallctl)("opt.lg_tcache_gc_sweep", &ssv,
&ssz, NULL, 0)) == 0) {
size_t tcache_gc_sweep = (1U << ssv);
bool tcache_enabled;
CTL_GET("opt.tcache", &tcache_enabled, bool);
write_cb(cbopaque, "Thread cache GC sweep interval: ");
write_cb(cbopaque, tcache_enabled && ssv >= 0 ?
u2s(tcache_gc_sweep, 10, s) : "N/A");
write_cb(cbopaque, "\n");
}
if ((err = JEMALLOC_P(mallctl)("opt.prof", &bv, &bsz, NULL, 0))
== 0 && bv) {
CTL_GET("opt.lg_prof_bt_max", &sv, size_t);
write_cb(cbopaque, "Maximum profile backtrace depth: ");
write_cb(cbopaque, u2s((1U << sv), 10, s));
write_cb(cbopaque, "\n");
CTL_GET("opt.lg_prof_tcmax", &ssv, ssize_t);
write_cb(cbopaque,
"Maximum per thread backtrace cache: ");
if (ssv >= 0) {
write_cb(cbopaque, u2s((1U << ssv), 10, s));
write_cb(cbopaque, " (2^");
write_cb(cbopaque, u2s(ssv, 10, s));
write_cb(cbopaque, ")\n");
} else
write_cb(cbopaque, "N/A\n");
CTL_GET("opt.lg_prof_sample", &sv, size_t);
write_cb(cbopaque, "Average profile sample interval: ");
write_cb(cbopaque, u2s((((uint64_t)1U) << sv), 10, s));
write_cb(cbopaque, " (2^");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, ")\n");
CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
write_cb(cbopaque, "Average profile dump interval: ");
if (ssv >= 0) {
write_cb(cbopaque, u2s((((uint64_t)1U) << ssv),
10, s));
write_cb(cbopaque, " (2^");
write_cb(cbopaque, u2s(ssv, 10, s));
write_cb(cbopaque, ")\n");
} else
write_cb(cbopaque, "N/A\n");
}
CTL_GET("arenas.chunksize", &sv, size_t);
write_cb(cbopaque, "Chunk size: ");
write_cb(cbopaque, u2s(sv, 10, s));
CTL_GET("opt.lg_chunk", &sv, size_t);
write_cb(cbopaque, " (2^");
write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, ")\n");
}
#ifdef JEMALLOC_STATS
{
int err;
size_t sszp, ssz;
size_t *cactive;
size_t allocated, active, mapped;
size_t chunks_current, chunks_high, swap_avail;
uint64_t chunks_total;
size_t huge_allocated;
uint64_t huge_nmalloc, huge_ndalloc;
sszp = sizeof(size_t *);
ssz = sizeof(size_t);
CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);
CTL_GET("stats.active", &active, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
"Allocated: %zu, active: %zu, mapped: %zu\n",
allocated, active, mapped);
malloc_cprintf(write_cb, cbopaque,
"Current active ceiling: %zu\n", atomic_read_z(cactive));
/* Print chunk stats. */
CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
CTL_GET("stats.chunks.high", &chunks_high, size_t);
CTL_GET("stats.chunks.current", &chunks_current, size_t);
if ((err = JEMALLOC_P(mallctl)("swap.avail", &swap_avail, &ssz,
NULL, 0)) == 0) {
size_t lg_chunk;
malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
"highchunks curchunks swap_avail\n");
CTL_GET("opt.lg_chunk", &lg_chunk, size_t);
malloc_cprintf(write_cb, cbopaque,
" %13"PRIu64"%13zu%13zu%13zu\n",
chunks_total, chunks_high, chunks_current,
swap_avail << lg_chunk);
} else {
malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
"highchunks curchunks\n");
malloc_cprintf(write_cb, cbopaque,
" %13"PRIu64"%13zu%13zu\n",
chunks_total, chunks_high, chunks_current);
}
/* Print huge stats. */
CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
malloc_cprintf(write_cb, cbopaque,
"huge: nmalloc ndalloc allocated\n");
malloc_cprintf(write_cb, cbopaque,
" %12"PRIu64" %12"PRIu64" %12zu\n",
huge_nmalloc, huge_ndalloc, huge_allocated);
if (merged) {
unsigned narenas;
CTL_GET("arenas.narenas", &narenas, unsigned);
{
bool initialized[narenas];
size_t isz;
unsigned i, ninitialized;
isz = sizeof(initialized);
xmallctl("arenas.initialized", initialized,
&isz, NULL, 0);
for (i = ninitialized = 0; i < narenas; i++) {
if (initialized[i])
ninitialized++;
}
if (ninitialized > 1) {
/* Print merged arena stats. */
malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n");
stats_arena_print(write_cb, cbopaque,
narenas);
}
}
}
if (unmerged) {
unsigned narenas;
/* Print stats for each arena. */
CTL_GET("arenas.narenas", &narenas, unsigned);
{
bool initialized[narenas];
size_t isz;
unsigned i;
isz = sizeof(initialized);
xmallctl("arenas.initialized", initialized,
&isz, NULL, 0);
for (i = 0; i < narenas; i++) {
if (initialized[i]) {
malloc_cprintf(write_cb,
cbopaque,
"\narenas[%u]:\n", i);
stats_arena_print(write_cb,
cbopaque, i);
}
}
}
}
}
#endif /* #ifdef JEMALLOC_STATS */
write_cb(cbopaque, "--- End jemalloc statistics ---\n");
}
#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_TCACHE
/******************************************************************************/
/* Data. */
bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
ssize_t opt_lg_tcache_gc_sweep = LG_TCACHE_GC_SWEEP_DEFAULT;
tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */
/* Map of thread-specific caches. */
#ifndef NO_TLS
__thread tcache_t *tcache_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif
/*
* Same contents as tcache, but initialized such that the TSD destructor is
* called when a thread exits, so that the cache can be cleaned up.
*/
pthread_key_t tcache_tsd;
size_t nhbins;
size_t tcache_maxclass;
unsigned tcache_gc_incr;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void tcache_thread_cleanup(void *arg);
/******************************************************************************/
void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
void *ret;
arena_tcache_fill_small(tcache->arena, tbin, binind
#ifdef JEMALLOC_PROF
, tcache->prof_accumbytes
#endif
);
#ifdef JEMALLOC_PROF
tcache->prof_accumbytes = 0;
#endif
ret = tcache_alloc_easy(tbin);
return (ret);
}
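/*
* Flush all but "rem" objects from a small-bin tcache bin back to the owning
* arena bins, merging the bin's request stats into arena stats along the way.
* Objects owned by other arenas are deferred to subsequent passes.
*/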
void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache_t *tcache
#endif
)
{
void *ptr;
unsigned i, nflush, ndeferred;
#ifdef JEMALLOC_STATS
bool merged_stats = false;
#endif
assert(binind < nbins);
assert(rem <= tbin->ncached);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena bin associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
arena_t *arena = chunk->arena;
arena_bin_t *bin = &arena->bins[binind];
#ifdef JEMALLOC_PROF
if (arena == tcache->arena) {
malloc_mutex_lock(&arena->lock);
arena_prof_accum(arena, tcache->prof_accumbytes);
malloc_mutex_unlock(&arena->lock);
tcache->prof_accumbytes = 0;
}
#endif
malloc_mutex_lock(&bin->lock);
#ifdef JEMALLOC_STATS
if (arena == tcache->arena) {
assert(merged_stats == false);
merged_stats = true;
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
#endif
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> PAGE_SHIFT;
arena_chunk_map_t *mapelm =
&chunk->map[pageind-map_bias];
arena_dalloc_bin(arena, chunk, ptr, mapelm);
} else {
/*
* This object was allocated via a different
* arena bin than the one that is currently
* locked. Stash the object, so that it can be
* handled in a future pass.
*/
tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(&bin->lock);
}
#ifdef JEMALLOC_STATS
if (merged_stats == false) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_bin_t *bin = &tcache->arena->bins[binind];
malloc_mutex_lock(&bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&bin->lock);
}
#endif
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
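/*
* Flush all but "rem" objects from a large tcache bin back to the owning
* arenas, merging large-allocation stats along the way. Objects owned by
* other arenas are deferred to subsequent passes.
*/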
void
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache_t *tcache
#endif
)
{
void *ptr;
unsigned i, nflush, ndeferred;
#ifdef JEMALLOC_STATS
bool merged_stats = false;
#endif
assert(binind < nhbins);
assert(rem <= tbin->ncached);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
arena_t *arena = chunk->arena;
malloc_mutex_lock(&arena->lock);
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
if (arena == tcache->arena) {
#endif
#ifdef JEMALLOC_PROF
arena_prof_accum(arena, tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
#endif
#ifdef JEMALLOC_STATS
merged_stats = true;
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - nbins].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
#endif
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
}
#endif
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena)
arena_dalloc_large(arena, chunk, ptr);
else {
/*
* This object was allocated via a different
* arena than the one that is currently locked.
* Stash the object, so that it can be handled
* in a future pass.
*/
tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(&arena->lock);
}
#ifdef JEMALLOC_STATS
if (merged_stats == false) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - nbins].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&arena->lock);
}
#endif
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
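/*
* Allocate and initialize a tcache bound to "arena". The per-bin pointer
* stacks are laid out immediately after the tcache_t, the total size is
* rounded up to a multiple of the cacheline size, and the result is made the
* current thread's cache via TCACHE_SET().
*/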
tcache_t *
tcache_create(arena_t *arena)
{
tcache_t *tcache;
size_t size, stack_offset;
unsigned i;
size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
/* Naturally align the pointer stacks. */
size = PTR_CEILING(size);
stack_offset = size;
size += stack_nelms * sizeof(void *);
/*
* Round up to the nearest multiple of the cacheline size, in order to
* avoid the possibility of false cacheline sharing.
*
* That this works relies on the same logic as in ipalloc(), but we
* cannot directly call ipalloc() here due to tcache bootstrapping
* issues.
*/
size = (size + CACHELINE_MASK) & (-CACHELINE);
if (size <= small_maxclass)
tcache = (tcache_t *)arena_malloc_small(arena, size, true);
else if (size <= tcache_maxclass)
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
else
tcache = (tcache_t *)icalloc(size);
if (tcache == NULL)
return (NULL);
#ifdef JEMALLOC_STATS
/* Link into list of extant tcaches. */
malloc_mutex_lock(&arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&arena->lock);
#endif
tcache->arena = arena;
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
for (i = 0; i < nhbins; i++) {
tcache->tbins[i].lg_fill_div = 1;
tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
(uintptr_t)stack_offset);
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
}
TCACHE_SET(tcache);
return (tcache);
}
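/*
* Flush every bin, merge any remaining stats and profiling accumulation into
* the owning arena, then deallocate the tcache itself via the path
* appropriate to its size class.
*/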
void
tcache_destroy(tcache_t *tcache)
{
unsigned i;
size_t tcache_size;
#ifdef JEMALLOC_STATS
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(&tcache->arena->lock);
ql_remove(&tcache->arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&tcache->arena->lock);
tcache_stats_merge(tcache, tcache->arena);
#endif
for (i = 0; i < nbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
tcache_bin_flush_small(tbin, i, 0
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
#ifdef JEMALLOC_STATS
if (tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
}
#endif
}
for (; i < nhbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
tcache_bin_flush_large(tbin, i, 0
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
#ifdef JEMALLOC_STATS
if (tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - nbins].nrequests +=
tbin->tstats.nrequests;
malloc_mutex_unlock(&arena->lock);
}
#endif
}
#ifdef JEMALLOC_PROF
if (tcache->prof_accumbytes > 0) {
malloc_mutex_lock(&tcache->arena->lock);
arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
malloc_mutex_unlock(&tcache->arena->lock);
}
#endif
tcache_size = arena_salloc(tcache);
if (tcache_size <= small_maxclass) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;
size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
PAGE_SHIFT;
arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapelm->bits >> PAGE_SHIFT)) <<
PAGE_SHIFT));
arena_bin_t *bin = run->bin;
malloc_mutex_lock(&bin->lock);
arena_dalloc_bin(arena, chunk, tcache, mapelm);
malloc_mutex_unlock(&bin->lock);
} else if (tcache_size <= tcache_maxclass) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;
malloc_mutex_lock(&arena->lock);
arena_dalloc_large(arena, chunk, tcache);
malloc_mutex_unlock(&arena->lock);
} else
idalloc(tcache);
}
static void
tcache_thread_cleanup(void *arg)
{
tcache_t *tcache = (tcache_t *)arg;
if (tcache == (void *)(uintptr_t)1) {
/*
* The previous time this destructor was called, we set the key
* to 1 so that other destructors wouldn't cause re-creation of
* the tcache. This time, do nothing, so that the destructor
* will not be called again.
*/
} else if (tcache == (void *)(uintptr_t)2) {
/*
* Another destructor called an allocator function after this
* destructor was called. Reset tcache to 1 in order to
* receive another callback.
*/
TCACHE_SET((uintptr_t)1);
} else if (tcache != NULL) {
assert(tcache != (void *)(uintptr_t)1);
tcache_destroy(tcache);
TCACHE_SET((uintptr_t)1);
}
}
#ifdef JEMALLOC_STATS
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
unsigned i;
/* Merge and reset tcache stats. */
for (i = 0; i < nbins; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i];
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
tbin->tstats.nrequests = 0;
}
for (; i < nhbins; i++) {
malloc_large_stats_t *lstats = &arena->stats.lstats[i - nbins];
tcache_bin_t *tbin = &tcache->tbins[i];
arena->stats.nrequests_large += tbin->tstats.nrequests;
lstats->nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
}
#endif
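/*
* One-time tcache initialization: clamp opt_lg_tcache_max, size
* tcache_bin_info and the per-bin stacks, compute the incremental GC
* threshold, and create the TSD key whose destructor tears down thread
* caches on thread exit.
*/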
bool
tcache_boot(void)
{
if (opt_tcache) {
unsigned i;
/*
* If necessary, clamp opt_lg_tcache_max, now that
* small_maxclass and arena_maxclass are known.
*/
if (opt_lg_tcache_max < 0 || (1U <<
opt_lg_tcache_max) < small_maxclass)
tcache_maxclass = small_maxclass;
else if ((1U << opt_lg_tcache_max) > arena_maxclass)
tcache_maxclass = arena_maxclass;
else
tcache_maxclass = (1U << opt_lg_tcache_max);
nhbins = nbins + (tcache_maxclass >> PAGE_SHIFT);
/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL)
return (true);
stack_nelms = 0;
for (i = 0; i < nbins; i++) {
if ((arena_bin_info[i].nregs << 1) <=
TCACHE_NSLOTS_SMALL_MAX) {
tcache_bin_info[i].ncached_max =
(arena_bin_info[i].nregs << 1);
} else {
tcache_bin_info[i].ncached_max =
TCACHE_NSLOTS_SMALL_MAX;
}
stack_nelms += tcache_bin_info[i].ncached_max;
}
for (; i < nhbins; i++) {
tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
stack_nelms += tcache_bin_info[i].ncached_max;
}
/* Compute incremental GC event threshold. */
if (opt_lg_tcache_gc_sweep >= 0) {
tcache_gc_incr = ((1U << opt_lg_tcache_gc_sweep) /
nbins) + (((1U << opt_lg_tcache_gc_sweep) % nbins ==
0) ? 0 : 1);
} else
tcache_gc_incr = 0;
if (pthread_key_create(&tcache_tsd, tcache_thread_cleanup) !=
0) {
malloc_write(
"<jemalloc>: Error in pthread_key_create()\n");
abort();
}
}
return (false);
}
/******************************************************************************/
#endif /* JEMALLOC_TCACHE */
#include "jemalloc/internal/jemalloc_internal.h"
#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
#endif
/******************************************************************************/
/* Data. */
static malloc_zone_t zone, szone;
static struct malloc_introspection_t zone_introspect, ozone_introspect;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static size_t zone_size(malloc_zone_t *zone, void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
#if (JEMALLOC_ZONE_VERSION >= 6)
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
size_t size);
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
size_t size);
#endif
static void *zone_destroy(malloc_zone_t *zone);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
static size_t ozone_size(malloc_zone_t *zone, void *ptr);
static void ozone_free(malloc_zone_t *zone, void *ptr);
static void *ozone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
static unsigned ozone_batch_malloc(malloc_zone_t *zone, size_t size,
void **results, unsigned num_requested);
static void ozone_batch_free(malloc_zone_t *zone, void **to_be_freed,
unsigned num);
#if (JEMALLOC_ZONE_VERSION >= 6)
static void ozone_free_definite_size(malloc_zone_t *zone, void *ptr,
size_t size);
#endif
static void ozone_force_lock(malloc_zone_t *zone);
static void ozone_force_unlock(malloc_zone_t *zone);
/******************************************************************************/
/*
* Functions.
*/
static size_t
zone_size(malloc_zone_t *zone, void *ptr)
{
/*
* There appear to be places within Darwin (such as setenv(3)) that
* cause calls to this function with pointers that *no* zone owns. If
* we knew that all pointers were owned by *some* zone, we could split
* our zone into two parts, and use one as the default allocator and
* the other as the default deallocator/reallocator. Since that will
* not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size.
*/
return (ivsalloc(ptr));
}
static void *
zone_malloc(malloc_zone_t *zone, size_t size)
{
return (JEMALLOC_P(malloc)(size));
}
static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
{
return (JEMALLOC_P(calloc)(num, size));
}
static void *
zone_valloc(malloc_zone_t *zone, size_t size)
{
void *ret = NULL; /* Assignment avoids useless compiler warning. */
JEMALLOC_P(posix_memalign)(&ret, PAGE_SIZE, size);
return (ret);
}
static void
zone_free(malloc_zone_t *zone, void *ptr)
{
JEMALLOC_P(free)(ptr);
}
static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
return (JEMALLOC_P(realloc)(ptr, size));
}
#if (JEMALLOC_ZONE_VERSION >= 6)
static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
void *ret = NULL; /* Assignment avoids useless compiler warning. */
JEMALLOC_P(posix_memalign)(&ret, alignment, size);
return (ret);
}
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
assert(ivsalloc(ptr) == size);
JEMALLOC_P(free)(ptr);
}
#endif
static void *
zone_destroy(malloc_zone_t *zone)
{
/* This function should never be called. */
assert(false);
return (NULL);
}
static size_t
zone_good_size(malloc_zone_t *zone, size_t size)
{
size_t ret;
void *p;
/*
* Actually create an object of the appropriate size, then find out
* how large it could have been without moving up to the next size
* class.
*/
p = JEMALLOC_P(malloc)(size);
if (p != NULL) {
ret = isalloc(p);
JEMALLOC_P(free)(p);
} else
ret = size;
return (ret);
}
static void
zone_force_lock(malloc_zone_t *zone)
{
if (isthreaded)
jemalloc_prefork();
}
static void
zone_force_unlock(malloc_zone_t *zone)
{
if (isthreaded)
jemalloc_postfork();
}
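/*
* Fill in the jemalloc malloc_zone_t and its introspection structure for use
* with the Darwin malloc zone interface, and return it.
*/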
malloc_zone_t *
create_zone(void)
{
zone.size = (void *)zone_size;
zone.malloc = (void *)zone_malloc;
zone.calloc = (void *)zone_calloc;
zone.valloc = (void *)zone_valloc;
zone.free = (void *)zone_free;
zone.realloc = (void *)zone_realloc;
zone.destroy = (void *)zone_destroy;
zone.zone_name = "jemalloc_zone";
zone.batch_malloc = NULL;
zone.batch_free = NULL;
zone.introspect = &zone_introspect;
zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 6)
zone.memalign = zone_memalign;
zone.free_definite_size = zone_free_definite_size;
#endif
zone_introspect.enumerator = NULL;
zone_introspect.good_size = (void *)zone_good_size;
zone_introspect.check = NULL;
zone_introspect.print = NULL;
zone_introspect.log = NULL;
zone_introspect.force_lock = (void *)zone_force_lock;
zone_introspect.force_unlock = (void *)zone_force_unlock;
zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
zone_introspect.zone_locked = NULL;
#endif
return (&zone);
}
static size_t
ozone_size(malloc_zone_t *zone, void *ptr)
{
size_t ret;
ret = ivsalloc(ptr);
if (ret == 0)
ret = szone.size(zone, ptr);
return (ret);
}
static void
ozone_free(malloc_zone_t *zone, void *ptr)
{
if (ivsalloc(ptr) != 0)
JEMALLOC_P(free)(ptr);
else {
size_t size = szone.size(zone, ptr);
if (size != 0)
(szone.free)(zone, ptr);
}
}
static void *
ozone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
size_t oldsize;
if (ptr == NULL)
return (JEMALLOC_P(malloc)(size));
oldsize = ivsalloc(ptr);
if (oldsize != 0)
return (JEMALLOC_P(realloc)(ptr, size));
else {
oldsize = szone.size(zone, ptr);
if (oldsize == 0)
return (JEMALLOC_P(malloc)(size));
else {
void *ret = JEMALLOC_P(malloc)(size);
if (ret != NULL) {
memcpy(ret, ptr, (oldsize < size) ? oldsize :
size);
(szone.free)(zone, ptr);
}
return (ret);
}
}
}
static unsigned
ozone_batch_malloc(malloc_zone_t *zone, size_t size, void **results,
unsigned num_requested)
{
/* Don't bother implementing this interface, since it isn't required. */
return (0);
}
static void
ozone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num)
{
unsigned i;
for (i = 0; i < num; i++)
ozone_free(zone, to_be_freed[i]);
}
#if (JEMALLOC_ZONE_VERSION >= 6)
static void
ozone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
if (ivsalloc(ptr) != 0) {
assert(ivsalloc(ptr) == size);
JEMALLOC_P(free)(ptr);
} else {
assert(size == szone.size(zone, ptr));
szone.free_definite_size(zone, ptr, size);
}
}
#endif
static void
ozone_force_lock(malloc_zone_t *zone)
{
/* jemalloc locking is taken care of by the normal jemalloc zone. */
szone.introspect->force_lock(zone);
}
static void
ozone_force_unlock(malloc_zone_t *zone)
{
/* jemalloc locking is taken care of by the normal jemalloc zone. */
szone.introspect->force_unlock(zone);
}
/*
* Overlay the default scalable zone (szone) such that existing allocations are
* drained, and further allocations come from jemalloc. This is necessary
* because Core Foundation directly accesses and uses the szone before the
* jemalloc library is even loaded.
*/
void
szone2ozone(malloc_zone_t *zone)
{
/*
* Stash a copy of the original szone so that we can call its
* functions as needed. Note that, internally, the szone stores its
* bookkeeping data structures immediately following the malloc_zone_t
* header, so when calling szone functions, we need to pass a pointer
* to the original zone structure.
*/
memcpy(&szone, zone, sizeof(malloc_zone_t));
zone->size = (void *)ozone_size;
zone->malloc = (void *)zone_malloc;
zone->calloc = (void *)zone_calloc;
zone->valloc = (void *)zone_valloc;
zone->free = (void *)ozone_free;
zone->realloc = (void *)ozone_realloc;
zone->destroy = (void *)zone_destroy;
zone->zone_name = "jemalloc_ozone";
zone->batch_malloc = ozone_batch_malloc;
zone->batch_free = ozone_batch_free;
zone->introspect = &ozone_introspect;
zone->version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 6)
zone->memalign = zone_memalign;
zone->free_definite_size = ozone_free_definite_size;
#endif
ozone_introspect.enumerator = NULL;
ozone_introspect.good_size = (void *)zone_good_size;
ozone_introspect.check = NULL;
ozone_introspect.print = NULL;
ozone_introspect.log = NULL;
ozone_introspect.force_lock = (void *)ozone_force_lock;
ozone_introspect.force_unlock = (void *)ozone_force_unlock;
ozone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
ozone_introspect.zone_locked = NULL;
#endif
}
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#define JEMALLOC_MANGLE
#include "jemalloc_test.h"
void *
thread_start(void *arg)
{
int err;
void *p;
uint64_t a0, a1, d0, d1;
uint64_t *ap0, *ap1, *dp0, *dp1;
size_t sz, usize;
sz = sizeof(a0);
if ((err = JEMALLOC_P(mallctl)("thread.allocated", &a0, &sz, NULL,
0))) {
if (err == ENOENT) {
#ifdef JEMALLOC_STATS
assert(false);
#endif
goto RETURN;
}
fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
strerror(err));
exit(1);
}
sz = sizeof(ap0);
if ((err = JEMALLOC_P(mallctl)("thread.allocatedp", &ap0, &sz, NULL,
0))) {
if (err == ENOENT) {
#ifdef JEMALLOC_STATS
assert(false);
#endif
goto RETURN;
}
fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
strerror(err));
exit(1);
}
assert(*ap0 == a0);
sz = sizeof(d0);
if ((err = JEMALLOC_P(mallctl)("thread.deallocated", &d0, &sz, NULL,
0))) {
if (err == ENOENT) {
#ifdef JEMALLOC_STATS
assert(false);
#endif
goto RETURN;
}
fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
strerror(err));
exit(1);
}
sz = sizeof(dp0);
if ((err = JEMALLOC_P(mallctl)("thread.deallocatedp", &dp0, &sz, NULL,
0))) {
if (err == ENOENT) {
#ifdef JEMALLOC_STATS
assert(false);
#endif
goto RETURN;
}
fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
strerror(err));
exit(1);
}
assert(*dp0 == d0);
p = JEMALLOC_P(malloc)(1);
if (p == NULL) {
fprintf(stderr, "%s(): Error in malloc()\n", __func__);
exit(1);
}
sz = sizeof(a1);
JEMALLOC_P(mallctl)("thread.allocated", &a1, &sz, NULL, 0);
sz = sizeof(ap1);
JEMALLOC_P(mallctl)("thread.allocatedp", &ap1, &sz, NULL, 0);
assert(*ap1 == a1);
assert(ap0 == ap1);
usize = JEMALLOC_P(malloc_usable_size)(p);
assert(a0 + usize <= a1);
JEMALLOC_P(free)(p);
sz = sizeof(d1);
JEMALLOC_P(mallctl)("thread.deallocated", &d1, &sz, NULL, 0);
sz = sizeof(dp1);
JEMALLOC_P(mallctl)("thread.deallocatedp", &dp1, &sz, NULL, 0);
assert(*dp1 == d1);
assert(dp0 == dp1);
assert(d0 + usize <= d1);
RETURN:
return (NULL);
}
int
main(void)
{
int ret = 0;
pthread_t thread;
fprintf(stderr, "Test begin\n");
thread_start(NULL);
if (pthread_create(&thread, NULL, thread_start, NULL)
!= 0) {
fprintf(stderr, "%s(): Error in pthread_create()\n", __func__);
ret = 1;
goto RETURN;
}
/* thread_start() always returns NULL; don't write its result through &ret. */
pthread_join(thread, NULL);
thread_start(NULL);
if (pthread_create(&thread, NULL, thread_start, NULL)
!= 0) {
fprintf(stderr, "%s(): Error in pthread_create()\n", __func__);
ret = 1;
goto RETURN;
}
pthread_join(thread, NULL);
thread_start(NULL);
RETURN:
fprintf(stderr, "Test end\n");
return (ret);
}
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#define JEMALLOC_MANGLE
#include "jemalloc_test.h"
#define CHUNK 0x400000
/* #define MAXALIGN ((size_t)0x80000000000LLU) */
#define MAXALIGN ((size_t)0x2000000LLU)
#define NITER 4
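/*
* Exercise allocm()/sallocm()/dallocm(): basic and zeroed allocation,
* expected failures for oversized size/alignment combinations, and a sweep
* over power-of-two alignments up to MAXALIGN.
*/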
int
main(void)
{
int r;
void *p;
size_t sz, alignment, total, tsz;
unsigned i;
void *ps[NITER];
fprintf(stderr, "Test begin\n");
sz = 0;
r = JEMALLOC_P(allocm)(&p, &sz, 42, 0);
if (r != ALLOCM_SUCCESS) {
fprintf(stderr, "Unexpected allocm() error\n");
abort();
}
if (sz < 42)
fprintf(stderr, "Real size smaller than expected\n");
if (JEMALLOC_P(dallocm)(p, 0) != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected dallocm() error\n");
r = JEMALLOC_P(allocm)(&p, NULL, 42, 0);
if (r != ALLOCM_SUCCESS) {
fprintf(stderr, "Unexpected allocm() error\n");
abort();
}
if (JEMALLOC_P(dallocm)(p, 0) != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected dallocm() error\n");
r = JEMALLOC_P(allocm)(&p, NULL, 42, ALLOCM_ZERO);
if (r != ALLOCM_SUCCESS) {
fprintf(stderr, "Unexpected allocm() error\n");
abort();
}
if (JEMALLOC_P(dallocm)(p, 0) != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected dallocm() error\n");
#if LG_SIZEOF_PTR == 3
alignment = 0x8000000000000000LLU;
sz = 0x8000000000000000LLU;
#else
alignment = 0x80000000LU;
sz = 0x80000000LU;
#endif
r = JEMALLOC_P(allocm)(&p, NULL, sz, ALLOCM_ALIGN(alignment));
if (r == ALLOCM_SUCCESS) {
fprintf(stderr,
"Expected error for allocm(&p, %zu, 0x%x)\n",
sz, ALLOCM_ALIGN(alignment));
}
#if LG_SIZEOF_PTR == 3
alignment = 0x4000000000000000LLU;
sz = 0x8400000000000001LLU;
#else
alignment = 0x40000000LU;
sz = 0x84000001LU;
#endif
r = JEMALLOC_P(allocm)(&p, NULL, sz, ALLOCM_ALIGN(alignment));
if (r == ALLOCM_SUCCESS) {
fprintf(stderr,
"Expected error for allocm(&p, %zu, 0x%x)\n",
sz, ALLOCM_ALIGN(alignment));
}
alignment = 0x10LLU;
#if LG_SIZEOF_PTR == 3
sz = 0xfffffffffffffff0LLU;
#else
sz = 0xfffffff0LU;
#endif
r = JEMALLOC_P(allocm)(&p, NULL, sz, ALLOCM_ALIGN(alignment));
if (r == ALLOCM_SUCCESS) {
fprintf(stderr,
"Expected error for allocm(&p, %zu, 0x%x)\n",
sz, ALLOCM_ALIGN(alignment));
}
for (i = 0; i < NITER; i++)
ps[i] = NULL;
for (alignment = 8;
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
fprintf(stderr, "Alignment: %zu\n", alignment);
for (sz = 1;
sz < 3 * alignment && sz < (1U << 31);
sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
for (i = 0; i < NITER; i++) {
r = JEMALLOC_P(allocm)(&ps[i], NULL, sz,
ALLOCM_ALIGN(alignment) | ALLOCM_ZERO);
if (r != ALLOCM_SUCCESS) {
fprintf(stderr,
"Error for size %zu (0x%zx): %d\n",
sz, sz, r);
exit(1);
}
/* Check the pointer just returned in ps[i], not the stale p. */
if ((uintptr_t)ps[i] & (alignment-1)) {
fprintf(stderr,
"%p inadequately aligned for"
" alignment: %zu\n", ps[i], alignment);
}
JEMALLOC_P(sallocm)(ps[i], &tsz, 0);
total += tsz;
if (total >= (MAXALIGN << 1))
break;
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
JEMALLOC_P(dallocm)(ps[i], 0);
ps[i] = NULL;
}
}
}
}
fprintf(stderr, "Test end\n");
return (0);
}
Test begin
Alignment: 8
Alignment: 16
Alignment: 32
Alignment: 64
Alignment: 128
Alignment: 256
Alignment: 512
Alignment: 1024
Alignment: 2048
Alignment: 4096
Alignment: 8192
Alignment: 16384
Alignment: 32768
Alignment: 65536
Alignment: 131072
Alignment: 262144
Alignment: 524288
Alignment: 1048576
Alignment: 2097152
Alignment: 4194304
Alignment: 8388608
Alignment: 16777216
Alignment: 33554432
Test end
#define JEMALLOC_MANGLE
#include "jemalloc_test.h"
/*
* Avoid using the assert() from jemalloc_internal.h, since it requires
* internal libjemalloc functionality.
*/
#include <assert.h>
/*
* Directly include the bitmap code, since it isn't exposed outside
* libjemalloc.
*/
#include "../src/bitmap.c"
#if (LG_BITMAP_MAXBITS > 12)
# define MAXBITS 4500
#else
# define MAXBITS (1U << LG_BITMAP_MAXBITS)
#endif
static void
test_bitmap_size(void)
{
size_t i, prev_size;
prev_size = 0;
for (i = 1; i <= MAXBITS; i++) {
size_t size = bitmap_size(i);
assert(size >= prev_size);
prev_size = size;
}
}
static void
test_bitmap_init(void)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
bitmap_t bitmap[bitmap_info_ngroups(&binfo)];
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++)
assert(bitmap_get(bitmap, &binfo, j) == false);
}
}
}
static void
test_bitmap_set(void)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
bitmap_t bitmap[bitmap_info_ngroups(&binfo)];
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
assert(bitmap_full(bitmap, &binfo));
}
}
}
static void
test_bitmap_unset(void)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
bitmap_t bitmap[bitmap_info_ngroups(&binfo)];
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
assert(bitmap_full(bitmap, &binfo));
for (j = 0; j < i; j++)
bitmap_unset(bitmap, &binfo, j);
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
assert(bitmap_full(bitmap, &binfo));
}
}
}
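/*
 * bitmap_sfu() ("set first unset") sets the lowest unset bit and returns its
 * index; the test below exercises it from both ends of the bitmap.
 */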
static void
test_bitmap_sfu(void)
{
size_t i;
for (i = 1; i <= MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
ssize_t j;
bitmap_t bitmap[bitmap_info_ngroups(&binfo)];
bitmap_init(bitmap, &binfo);
/* Iteratively set bits starting at the beginning. */
for (j = 0; j < i; j++)
assert(bitmap_sfu(bitmap, &binfo) == j);
assert(bitmap_full(bitmap, &binfo));
/*
* Iteratively unset bits starting at the end, and
* verify that bitmap_sfu() reaches the unset bits.
*/
for (j = i - 1; j >= 0; j--) {
bitmap_unset(bitmap, &binfo, j);
assert(bitmap_sfu(bitmap, &binfo) == j);
bitmap_unset(bitmap, &binfo, j);
}
assert(bitmap_get(bitmap, &binfo, 0) == false);
/*
* Iteratively set bits starting at the beginning, and
* verify that bitmap_sfu() looks past them.
*/
for (j = 1; j < i; j++) {
bitmap_set(bitmap, &binfo, j - 1);
assert(bitmap_sfu(bitmap, &binfo) == j);
bitmap_unset(bitmap, &binfo, j);
}
assert(bitmap_sfu(bitmap, &binfo) == i - 1);
assert(bitmap_full(bitmap, &binfo));
}
}
}
int
main(void)
{
fprintf(stderr, "Test begin\n");
test_bitmap_size();
test_bitmap_init();
test_bitmap_set();
test_bitmap_unset();
test_bitmap_sfu();
fprintf(stderr, "Test end\n");
return (0);
}
/*
* This header should be included by tests, rather than directly including
* jemalloc/jemalloc.h, because --with-install-suffix may cause the header to
* have a different name.
*/
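/*
 * For example (illustrative only; the actual name is produced at configure
 * time): with no install suffix the include below becomes
 *
 *   #include "jemalloc/jemalloc.h"
 *
 * whereas --with-install-suffix=_foo would yield
 *
 *   #include "jemalloc/jemalloc_foo.h"
 */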
#include "jemalloc/jemalloc@install_suffix@.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#define JEMALLOC_MANGLE
#include "jemalloc_test.h"
int
main(void)
{
int ret, err;
size_t sz, lg_chunk, chunksize, i;
char *p, *q;
fprintf(stderr, "Test begin\n");
sz = sizeof(lg_chunk);
if ((err = JEMALLOC_P(mallctl)("opt.lg_chunk", &lg_chunk, &sz, NULL,
0))) {
assert(err != ENOENT);
fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
strerror(err));
ret = 1;
goto RETURN;
}
chunksize = ((size_t)1U) << lg_chunk;
p = (char *)malloc(chunksize);
if (p == NULL) {
fprintf(stderr, "malloc(%zu) --> %p\n", chunksize, p);
ret = 1;
goto RETURN;
}
memset(p, 'a', chunksize);
q = (char *)realloc(p, chunksize * 2);
if (q == NULL) {
fprintf(stderr, "realloc(%p, %zu) --> %p\n", p, chunksize * 2,
q);
ret = 1;
goto RETURN;
}
for (i = 0; i < chunksize; i++) {
assert(q[i] == 'a');
}
p = q;
q = (char *)realloc(p, chunksize);
if (q == NULL) {
fprintf(stderr, "realloc(%p, %zu) --> %p\n", p, chunksize, q);
ret = 1;
goto RETURN;
}
for (i = 0; i < chunksize; i++) {
assert(q[i] == 'a');
}
free(q);
ret = 0;
RETURN:
fprintf(stderr, "Test end\n");
return (ret);
}
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#define JEMALLOC_MANGLE
#include "jemalloc_test.h"
#define CHUNK 0x400000
/* #define MAXALIGN ((size_t)0x80000000000LLU) */
#define MAXALIGN ((size_t)0x2000000LLU)
#define NITER 4
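/*
 * MAXALIGN is smaller than the commented-out value above, presumably to keep
 * the test's address-space and time requirements modest; NITER bounds how
 * many aligned objects are kept live per size in the sweep below.
 */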
int
main(void)
{
size_t alignment, size, total;
unsigned i;
int err;
void *p, *ps[NITER];
fprintf(stderr, "Test begin\n");
/* Test error conditions. */
for (alignment = 0; alignment < sizeof(void *); alignment++) {
err = JEMALLOC_P(posix_memalign)(&p, alignment, 1);
if (err != EINVAL) {
fprintf(stderr,
"Expected error for invalid alignment %zu\n",
alignment);
}
}
for (alignment = sizeof(size_t); alignment < MAXALIGN;
alignment <<= 1) {
err = JEMALLOC_P(posix_memalign)(&p, alignment + 1, 1);
if (err == 0) {
fprintf(stderr,
"Expected error for invalid alignment %zu\n",
alignment + 1);
}
}
#if LG_SIZEOF_PTR == 3
alignment = 0x8000000000000000LLU;
size = 0x8000000000000000LLU;
#else
alignment = 0x80000000LU;
size = 0x80000000LU;
#endif
err = JEMALLOC_P(posix_memalign)(&p, alignment, size);
if (err == 0) {
fprintf(stderr,
"Expected error for posix_memalign(&p, %zu, %zu)\n",
alignment, size);
}
#if LG_SIZEOF_PTR == 3
alignment = 0x4000000000000000LLU;
size = 0x8400000000000001LLU;
#else
alignment = 0x40000000LU;
size = 0x84000001LU;
#endif
err = JEMALLOC_P(posix_memalign)(&p, alignment, size);
if (err == 0) {
fprintf(stderr,
"Expected error for posix_memalign(&p, %zu, %zu)\n",
alignment, size);
}
alignment = 0x10LLU;
#if LG_SIZEOF_PTR == 3
size = 0xfffffffffffffff0LLU;
#else
size = 0xfffffff0LU;
#endif
err = JEMALLOC_P(posix_memalign)(&p, alignment, size);
if (err == 0) {
fprintf(stderr,
"Expected error for posix_memalign(&p, %zu, %zu)\n",
alignment, size);
}
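	/*
	 * Sweep over power-of-two alignments up to MAXALIGN, allocating
	 * batches of NITER objects of increasing size with posix_memalign(),
	 * accumulating their usable sizes until a cap is reached and freeing
	 * each batch before moving on.
	 */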
for (i = 0; i < NITER; i++)
ps[i] = NULL;
for (alignment = 8;
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
fprintf(stderr, "Alignment: %zu\n", alignment);
for (size = 1;
size < 3 * alignment && size < (1U << 31);
size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
for (i = 0; i < NITER; i++) {
err = JEMALLOC_P(posix_memalign)(&ps[i],
alignment, size);
if (err) {
fprintf(stderr,
"Error for size %zu (0x%zx): %s\n",
size, size, strerror(err));
exit(1);
}
total += JEMALLOC_P(malloc_usable_size)(ps[i]);
if (total >= (MAXALIGN << 1))
break;
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
JEMALLOC_P(free)(ps[i]);
ps[i] = NULL;
}
}
}
}
fprintf(stderr, "Test end\n");
return (0);
}
Test begin
Alignment: 8
Alignment: 16
Alignment: 32
Alignment: 64
Alignment: 128
Alignment: 256
Alignment: 512
Alignment: 1024
Alignment: 2048
Alignment: 4096
Alignment: 8192
Alignment: 16384
Alignment: 32768
Alignment: 65536
Alignment: 131072
Alignment: 262144
Alignment: 524288
Alignment: 1048576
Alignment: 2097152
Alignment: 4194304
Alignment: 8388608
Alignment: 16777216
Alignment: 33554432
Test end
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define JEMALLOC_MANGLE
#include "jemalloc_test.h"
int
main(void)
{
void *p, *q;
size_t sz, tsz;
int r;
fprintf(stderr, "Test begin\n");
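	/*
	 * Allocate an object with allocm(), then exercise rallocm(): resize
	 * attempts under ALLOCM_NO_MOVE (the object must not move, and growth
	 * may fail with ALLOCM_ERR_NOT_MOVED), followed by growing resizes
	 * with moves allowed, checking the reported size at each step.
	 */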
r = JEMALLOC_P(allocm)(&p, &sz, 42, 0);
if (r != ALLOCM_SUCCESS) {
fprintf(stderr, "Unexpected allocm() error\n");
abort();
}
q = p;
r = JEMALLOC_P(rallocm)(&q, &tsz, sz, 0, ALLOCM_NO_MOVE);
if (r != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected rallocm() error\n");
if (q != p)
fprintf(stderr, "Unexpected object move\n");
if (tsz != sz) {
fprintf(stderr, "Unexpected size change: %zu --> %zu\n",
sz, tsz);
}
q = p;
r = JEMALLOC_P(rallocm)(&q, &tsz, sz, 5, ALLOCM_NO_MOVE);
if (r != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected rallocm() error\n");
if (q != p)
fprintf(stderr, "Unexpected object move\n");
if (tsz != sz) {
fprintf(stderr, "Unexpected size change: %zu --> %zu\n",
sz, tsz);
}
q = p;
r = JEMALLOC_P(rallocm)(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE);
if (r != ALLOCM_ERR_NOT_MOVED)
fprintf(stderr, "Unexpected rallocm() result\n");
if (q != p)
fprintf(stderr, "Unexpected object move\n");
if (tsz != sz) {
fprintf(stderr, "Unexpected size change: %zu --> %zu\n",
sz, tsz);
}
q = p;
r = JEMALLOC_P(rallocm)(&q, &tsz, sz + 5, 0, 0);
if (r != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected rallocm() error\n");
if (q == p)
fprintf(stderr, "Expected object move\n");
if (tsz == sz) {
fprintf(stderr, "Expected size change: %zu --> %zu\n",
sz, tsz);
}
p = q;
sz = tsz;
r = JEMALLOC_P(rallocm)(&q, &tsz, 8192, 0, 0);
if (r != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected rallocm() error\n");
if (q == p)
fprintf(stderr, "Expected object move\n");
if (tsz == sz) {
fprintf(stderr, "Expected size change: %zu --> %zu\n",
sz, tsz);
}
p = q;
sz = tsz;
r = JEMALLOC_P(rallocm)(&q, &tsz, 16384, 0, 0);
if (r != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected rallocm() error\n");
if (tsz == sz) {
fprintf(stderr, "Expected size change: %zu --> %zu\n",
sz, tsz);
}
p = q;
sz = tsz;
r = JEMALLOC_P(rallocm)(&q, &tsz, 8192, 0, ALLOCM_NO_MOVE);
if (r != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected rallocm() error\n");
if (q != p)
fprintf(stderr, "Unexpected object move\n");
if (tsz == sz) {
fprintf(stderr, "Expected size change: %zu --> %zu\n",
sz, tsz);
}
sz = tsz;
r = JEMALLOC_P(rallocm)(&q, &tsz, 16384, 0, ALLOCM_NO_MOVE);
if (r != ALLOCM_SUCCESS)
fprintf(stderr, "Unexpected rallocm() error\n");
if (q != p)
fprintf(stderr, "Unexpected object move\n");
if (tsz == sz) {
fprintf(stderr, "Expected size change: %zu --> %zu\n",
sz, tsz);
}
sz = tsz;
JEMALLOC_P(dallocm)(p, 0);
fprintf(stderr, "Test end\n");
return (0);
}