Commit 699cdb0f authored by antirez

Jemalloc updated to 3.0.0.

Full changelog here:

http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git;a=blob_plain;f=ChangeLog;hb=master

Notable improvements from the point of view of Redis:

1) Bug fixes.
2) Support for Valgrind (see the note below).
3) Support for OS X Lion and FreeBSD.
parent 81b89a5c
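
A note on item 2 (not part of the upstream changelog): the Valgrind integration is a compile-time feature (it shows up in the diff below as the JEMALLOC_VALGRIND define and the config_valgrind constant) that is then switched on at run time through jemalloc's option string. A minimal sketch, assuming a build that uses the je_ symbol prefix as Redis's bundled copy does:

    /* Hypothetical application-side toggle; the variable name follows the je_ prefix. */
    const char *je_malloc_conf = "valgrind:true,redzone:true";

The "valgrind" and "redzone" option names correspond to the opt_valgrind and opt_redzone variables declared in the headers below.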
#ifdef JEMALLOC_DSS
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
@@ -10,16 +9,12 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/*
* Protects sbrk() calls. This avoids malloc races among threads, though it
* does not protect against races with threads that call sbrk() directly.
*/
extern malloc_mutex_t dss_mtx;
void *chunk_alloc_dss(size_t size, bool *zero);
void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
bool chunk_in_dss(void *chunk);
bool chunk_dealloc_dss(void *chunk, size_t size);
bool chunk_dss_boot(void);
void chunk_dss_prefork(void);
void chunk_dss_postfork_parent(void);
void chunk_dss_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
@@ -27,4 +22,3 @@ bool chunk_dss_boot(void);
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_DSS */
@@ -9,11 +9,10 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *chunk_alloc_mmap(size_t size);
void *chunk_alloc_mmap_noreserve(size_t size);
void chunk_dealloc_mmap(void *chunk, size_t size);
void pages_purge(void *addr, size_t length);
bool chunk_mmap_boot(void);
void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
bool chunk_dealloc_mmap(void *chunk, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
......
@@ -30,11 +30,6 @@ struct ckhc_s {
};
struct ckh_s {
#ifdef JEMALLOC_DEBUG
#define CKH_MAGIC 0x3af2489d
uint32_t magic;
#endif
#ifdef CKH_COUNT
/* Counters used to get an idea of performance. */
uint64_t ngrows;
@@ -47,7 +42,7 @@ struct ckh_s {
/* Used for pseudo-random number generation. */
#define CKH_A 1103515241
#define CKH_C 12347
uint32_t prn_state;
uint32_t prng_state;
/* Total number of items. */
size_t count;
......
@@ -2,6 +2,8 @@
#ifdef JEMALLOC_H_TYPES
typedef struct ctl_node_s ctl_node_t;
typedef struct ctl_named_node_s ctl_named_node_t;
typedef struct ctl_indexed_node_s ctl_indexed_node_t;
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
typedef struct ctl_stats_s ctl_stats_t;
@@ -11,20 +13,21 @@ typedef struct ctl_stats_s ctl_stats_t;
struct ctl_node_s {
bool named;
union {
struct {
const char *name;
/* If (nchildren == 0), this is a terminal node. */
unsigned nchildren;
const ctl_node_t *children;
} named;
struct {
const ctl_node_t *(*index)(const size_t *, size_t,
size_t);
} indexed;
} u;
int (*ctl)(const size_t *, size_t, void *, size_t *, void *,
size_t);
};
struct ctl_named_node_s {
struct ctl_node_s node;
const char *name;
/* If (nchildren == 0), this is a terminal node. */
unsigned nchildren;
const ctl_node_t *children;
int (*ctl)(const size_t *, size_t, void *, size_t *,
void *, size_t);
};
struct ctl_indexed_node_s {
struct ctl_node_s node;
const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
};
struct ctl_arena_stats_s {
@@ -32,7 +35,6 @@ struct ctl_arena_stats_s {
unsigned nthreads;
size_t pactive;
size_t pdirty;
#ifdef JEMALLOC_STATS
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
@@ -41,13 +43,11 @@ struct ctl_arena_stats_s {
uint64_t ndalloc_small;
uint64_t nrequests_small;
malloc_bin_stats_t *bstats; /* nbins elements. */
malloc_bin_stats_t bstats[NBINS];
malloc_large_stats_t *lstats; /* nlclasses elements. */
#endif
};
struct ctl_stats_s {
#ifdef JEMALLOC_STATS
size_t allocated;
size_t active;
size_t mapped;
@@ -61,11 +61,7 @@ struct ctl_stats_s {
uint64_t nmalloc; /* huge_nmalloc */
uint64_t ndalloc; /* huge_ndalloc */
} huge;
#endif
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
#ifdef JEMALLOC_SWAP
size_t swap_avail;
#endif
};
#endif /* JEMALLOC_H_STRUCTS */
@@ -81,27 +77,25 @@ int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
bool ctl_boot(void);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (JEMALLOC_P(mallctl)(name, oldp, oldlenp, newp, newlen) \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_write("<jemalloc>: Failure in xmallctl(\""); \
malloc_write(name); \
malloc_write("\", ...)\n"); \
malloc_printf( \
"<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
name); \
abort(); \
} \
} while (0)
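/*
 * Illustrative use of the wrapper above (hypothetical call site, not part
 * of this diff): the x-prefixed macros are meant for internal callers that
 * cannot reasonably recover from a mallctl failure, so they abort instead
 * of returning an error:
 *
 *     uint64_t epoch = 1;
 *     size_t sz = sizeof(epoch);
 *     xmallctl("epoch", &epoch, &sz, &epoch, sz);
 */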
#define xmallctlnametomib(name, mibp, miblenp) do { \
if (JEMALLOC_P(mallctlnametomib)(name, mibp, miblenp) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlnametomib(\""); \
malloc_write(name); \
malloc_write("\", ...)\n"); \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
abort(); \
} \
} while (0)
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (JEMALLOC_P(mallctlbymib)(mib, miblen, oldp, oldlenp, newp, \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlbymib()\n"); \
......
@@ -9,18 +9,14 @@ typedef struct extent_node_s extent_node_t;
/* Tree of extents. */
struct extent_node_s {
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) link_szad;
#endif
/* Linkage for the address-ordered tree. */
rb_node(extent_node_t) link_ad;
#ifdef JEMALLOC_PROF
/* Profile counters, used for huge objects. */
prof_ctx_t *prof_ctx;
#endif
/* Pointer to the extent that this tree node is responsible for. */
void *addr;
@@ -34,9 +30,7 @@ typedef rb_tree(extent_node_t) extent_tree_t;
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
#endif
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
......
@@ -26,7 +26,7 @@ uint64_t hash(const void *key, size_t len, uint64_t seed);
JEMALLOC_INLINE uint64_t
hash(const void *key, size_t len, uint64_t seed)
{
const uint64_t m = 0xc6a4a7935bd1e995LLU;
const uint64_t m = UINT64_C(0xc6a4a7935bd1e995);
const int r = 47;
uint64_t h = seed ^ (len * m);
const uint64_t *data = (const uint64_t *)key;
@@ -48,14 +48,14 @@ hash(const void *key, size_t len, uint64_t seed)
data2 = (const unsigned char *)data;
switch(len & 7) {
case 7: h ^= ((uint64_t)(data2[6])) << 48;
case 6: h ^= ((uint64_t)(data2[5])) << 40;
case 5: h ^= ((uint64_t)(data2[4])) << 32;
case 4: h ^= ((uint64_t)(data2[3])) << 24;
case 3: h ^= ((uint64_t)(data2[2])) << 16;
case 2: h ^= ((uint64_t)(data2[1])) << 8;
case 1: h ^= ((uint64_t)(data2[0]));
h *= m;
case 7: h ^= ((uint64_t)(data2[6])) << 48;
case 6: h ^= ((uint64_t)(data2[5])) << 40;
case 5: h ^= ((uint64_t)(data2[4])) << 32;
case 4: h ^= ((uint64_t)(data2[3])) << 24;
case 3: h ^= ((uint64_t)(data2[2])) << 16;
case 2: h ^= ((uint64_t)(data2[1])) << 8;
case 1: h ^= ((uint64_t)(data2[0]));
h *= m;
}
h ^= h >> r;
......
@@ -9,12 +9,10 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_STATS
/* Huge allocation statistics. */
extern uint64_t huge_nmalloc;
extern uint64_t huge_ndalloc;
extern size_t huge_allocated;
#endif
/* Protects chunk-related data structures. */
extern malloc_mutex_t huge_mtx;
@@ -27,11 +25,12 @@ void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero);
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
#ifdef JEMALLOC_PROF
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
#endif
bool huge_boot(void);
void huge_prefork(void);
void huge_postfork_parent(void);
void huge_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
......
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/time.h>
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# define ENOENT ERROR_PATH_NOT_FOUND
# define EINVAL ERROR_BAD_ARGUMENTS
# define EAGAIN ERROR_OUTOFMEMORY
# define EPERM ERROR_WRITE_FAULT
# define EFAULT ERROR_INVALID_ADDRESS
# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
# undef ERANGE
# define ERANGE ERROR_INVALID_DATA
#else
# include <sys/param.h>
# include <sys/mman.h>
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# include <sys/uio.h>
# include <pthread.h>
# include <errno.h>
#endif
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
@@ -25,16 +41,156 @@
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
# pragma warning(disable: 4996)
#else
# include <unistd.h>
#endif
#include <fcntl.h>
#include <pthread.h>
#include <math.h>
#define JEMALLOC_MANGLE
#define JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif
#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif
#include "jemalloc/internal/private_namespace.h"
#ifdef JEMALLOC_CC_SILENCE
#define UNUSED JEMALLOC_ATTR(unused)
#else
#define UNUSED
#endif
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
true
#else
false
#endif
;
static const bool config_dss =
#ifdef JEMALLOC_DSS
true
#else
false
#endif
;
static const bool config_fill =
#ifdef JEMALLOC_FILL
true
#else
false
#endif
;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
true
#else
false
#endif
;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
#else
false
#endif
;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
true
#else
false
#endif
;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
true
#else
false
#endif
;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
true
#else
false
#endif
;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
true
#else
false
#endif
;
static const bool config_stats =
#ifdef JEMALLOC_STATS
true
#else
false
#endif
;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
true
#else
false
#endif
;
static const bool config_tls =
#ifdef JEMALLOC_TLS
true
#else
false
#endif
;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
true
#else
false
#endif
;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
true
#else
false
#endif
;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
true
#else
false
#endif
;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
true
#else
false
#endif
;
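/*
 * Illustrative note (not part of this patch): these config_* constants are
 * what replaces the old #ifdef JEMALLOC_STATS / JEMALLOC_PROF / ... blocks
 * seen elsewhere in this diff.  Feature-specific code becomes an ordinary
 * conditional, for example:
 *
 *     if (config_stats)
 *         bin->stats.nmalloc++;
 *
 * Every branch is parsed and type-checked in all build configurations,
 * while constant-false branches are removed by dead-code elimination.
 */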
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif
@@ -46,48 +202,11 @@
#include <malloc/malloc.h>
#endif
#ifdef JEMALLOC_LAZY_LOCK
#include <dlfcn.h>
#endif
#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
/*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
# ifdef JEMALLOC_DEBUG
# define assert(e) do { \
if (!(e)) { \
char line_buf[UMAX2S_BUFSIZE]; \
malloc_write("<jemalloc>: "); \
malloc_write(__FILE__); \
malloc_write(":"); \
malloc_write(u2s(__LINE__, 10, line_buf)); \
malloc_write(": Failed assertion: "); \
malloc_write("\""); \
malloc_write(#e); \
malloc_write("\"\n"); \
abort(); \
} \
} while (0)
# else
# define assert(e)
# endif
#endif
#ifdef JEMALLOC_DEBUG
# define dassert(e) assert(e)
#else
# define dassert(e)
#endif
/*
* jemalloc can conceptually be broken into components (arena, tcache, etc.),
* but there are circular dependencies that cannot be broken without
@@ -119,38 +238,56 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
#else
# define JEMALLOC_ENABLE_INLINE
# define JEMALLOC_INLINE static inline
# ifdef _MSC_VER
# define inline _inline
# endif
#endif
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)
/* Minimum alignment of allocations is 2^LG_QUANTUM bytes. */
#ifdef __i386__
# define LG_QUANTUM 4
#endif
#ifdef __ia64__
# define LG_QUANTUM 4
#endif
#ifdef __alpha__
# define LG_QUANTUM 4
#endif
#ifdef __sparc64__
# define LG_QUANTUM 4
#endif
#if (defined(__amd64__) || defined(__x86_64__))
# define LG_QUANTUM 4
#endif
#ifdef __arm__
# define LG_QUANTUM 3
#endif
#ifdef __mips__
# define LG_QUANTUM 3
#endif
#ifdef __powerpc__
# define LG_QUANTUM 4
#endif
#ifdef __s390x__
# define LG_QUANTUM 4
/*
* Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
# define LG_QUANTUM 4
# endif
# ifdef __ia64__
# define LG_QUANTUM 4
# endif
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
# ifdef __sparc64__
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
# define LG_QUANTUM 4
# endif
# ifdef __arm__
# define LG_QUANTUM 3
# endif
# ifdef __mips__
# define LG_QUANTUM 3
# endif
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
# ifdef __s390x__
# define LG_QUANTUM 4
# endif
# ifdef __SH4__
# define LG_QUANTUM 4
# endif
# ifdef __tile__
# define LG_QUANTUM 4
# endif
# ifndef LG_QUANTUM
# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
# endif
#endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
@@ -164,67 +301,149 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
#define LONG_MASK (LONG - 1)
/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)
/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
* In addition, this controls the spacing of cacheline-spaced size classes.
*
* CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
* only handle raw constants.
*/
#define LG_CACHELINE 6
#define CACHELINE ((size_t)(1U << LG_CACHELINE))
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)
/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/*
* Page size. STATIC_PAGE_SHIFT is determined by the configure script. If
* DYNAMIC_PAGE_SHIFT is enabled, only use the STATIC_PAGE_* macros where
* compile-time values are required for the purposes of defining data
* structures.
*/
#define STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
#ifdef PAGE_SHIFT
# undef PAGE_SHIFT
#endif
#ifdef PAGE_SIZE
# undef PAGE_SIZE
#endif
/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#ifdef DYNAMIC_PAGE_SHIFT
# define PAGE_SHIFT lg_pagesize
# define PAGE_SIZE pagesize
# define PAGE_MASK pagesize_mask
#else
# define PAGE_SHIFT STATIC_PAGE_SHIFT
# define PAGE_SIZE STATIC_PAGE_SIZE
# define PAGE_MASK STATIC_PAGE_MASK
#endif
#define LG_PAGE STATIC_PAGE_SHIFT
#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
((void *)((uintptr_t)(a) & (-(alignment))))
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
((size_t)((uintptr_t)(a) & (alignment - 1)))
/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + (alignment - 1)) & (-(alignment)))
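/*
 * Worked example (illustrative): with a power-of-two alignment of 64,
 *
 *     ALIGNMENT_CEILING(100, 64)                == 128
 *     ALIGNMENT_ADDR2BASE((void *)0x1234, 64)   == (void *)0x1200
 *     ALIGNMENT_ADDR2OFFSET((void *)0x1234, 64) == 0x34
 *
 * i.e. sizes are rounded up and addresses are rounded down to the
 * requested alignment.
 */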
/* Declare a variable length array */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
# include <malloc.h>
# define alloca _alloca
# else
# include <alloca.h>
# endif
# define VARIABLE_ARRAY(type, name, count) \
type *name = alloca(sizeof(type) * count)
#else
# define VARIABLE_ARRAY(type, name, count) type name[count]
#endif
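/*
 * Usage sketch (illustrative, hypothetical variable names): under C99 this
 * expands to a true variable-length array, otherwise to alloca()-backed
 * storage, so either way the buffer lives on the stack for the enclosing
 * scope:
 *
 *     VARIABLE_ARRAY(void *, ptrs, nitems);
 *     for (i = 0; i < nitems; i++)
 *         ptrs[i] = NULL;
 */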
#ifdef JEMALLOC_VALGRIND
/*
* The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
* so that when Valgrind reports errors, there are no extra stack frames
* in the backtraces.
*
* The size that is reported to valgrind must be consistent through a chain of
* malloc..realloc..realloc calls. Request size isn't recorded anywhere in
* jemalloc, so it is critical that all callers of these macros provide usize
* rather than request size. As a result, buffer overflow detection is
* technically weakened for the standard API, though it is generally accepted
* practice to consider any extra bytes reported by malloc_usable_size() as
* usable space.
*/
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
if (config_valgrind && opt_valgrind && cond) \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
old_rzsize, zero) do { \
if (config_valgrind && opt_valgrind) { \
size_t rzsize = p2rz(ptr); \
\
if (ptr == old_ptr) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
usize, rzsize); \
if (zero && old_usize < usize) { \
VALGRIND_MAKE_MEM_DEFINED( \
(void *)((uintptr_t)ptr + \
old_usize), usize - old_usize); \
} \
} else { \
if (old_ptr != NULL) { \
VALGRIND_FREELIKE_BLOCK(old_ptr, \
old_rzsize); \
} \
if (ptr != NULL) { \
size_t copy_size = (old_usize < usize) \
? old_usize : usize; \
size_t tail_size = usize - copy_size; \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
rzsize, false); \
if (copy_size > 0) { \
VALGRIND_MAKE_MEM_DEFINED(ptr, \
copy_size); \
} \
if (zero && tail_size > 0) { \
VALGRIND_MAKE_MEM_DEFINED( \
(void *)((uintptr_t)ptr + \
copy_size), tail_size); \
} \
} \
} \
} \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
if (config_valgrind && opt_valgrind) \
VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
#else
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
old_rzsize, zero)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize)
#endif
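/*
 * Illustrative call site (hypothetical, not from this diff): per the comment
 * above, callers report the usable size rather than the request size, e.g.
 * on the allocation path
 *
 *     JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
 *
 * and on the deallocation path
 *
 *     JEMALLOC_VALGRIND_FREE(ptr, rzsize);
 *
 * where rzsize was captured via p2rz(ptr) before the memory was released.
 */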
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
@@ -235,21 +454,22 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
@@ -260,66 +480,37 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#ifdef JEMALLOC_STATS
typedef struct {
uint64_t allocated;
uint64_t deallocated;
} thread_allocated_t;
#endif
/*
* The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
* argument.
*/
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0})
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
#ifdef JEMALLOC_FILL
extern bool opt_junk;
#endif
#ifdef JEMALLOC_SYSV
extern bool opt_sysv;
#endif
#ifdef JEMALLOC_XMALLOC
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_valgrind;
extern bool opt_xmalloc;
#endif
#ifdef JEMALLOC_FILL
extern bool opt_zero;
#endif
extern size_t opt_narenas;
#ifdef DYNAMIC_PAGE_SHIFT
extern size_t pagesize;
extern size_t pagesize_mask;
extern size_t lg_pagesize;
#endif
/* Number of CPUs. */
extern unsigned ncpus;
extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
extern pthread_key_t arenas_tsd;
#ifndef NO_TLS
/*
* Map of pthread_self() --> arenas[???], used for selecting an arena to use
* for allocations.
*/
extern __thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
# define ARENA_GET() arenas_tls
# define ARENA_SET(v) do { \
arenas_tls = (v); \
pthread_setspecific(arenas_tsd, (void *)(v)); \
} while (0)
#else
# define ARENA_GET() ((arena_t *)pthread_getspecific(arenas_tsd))
# define ARENA_SET(v) do { \
pthread_setspecific(arenas_tsd, (void *)(v)); \
} while (0)
#endif
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
@@ -327,45 +518,22 @@ extern __thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
extern arena_t **arenas;
extern unsigned narenas;
#ifdef JEMALLOC_STATS
# ifndef NO_TLS
extern __thread thread_allocated_t thread_allocated_tls;
# define ALLOCATED_GET() (thread_allocated_tls.allocated)
# define ALLOCATEDP_GET() (&thread_allocated_tls.allocated)
# define DEALLOCATED_GET() (thread_allocated_tls.deallocated)
# define DEALLOCATEDP_GET() (&thread_allocated_tls.deallocated)
# define ALLOCATED_ADD(a, d) do { \
thread_allocated_tls.allocated += a; \
thread_allocated_tls.deallocated += d; \
} while (0)
# else
extern pthread_key_t thread_allocated_tsd;
thread_allocated_t *thread_allocated_get_hard(void);
# define ALLOCATED_GET() (thread_allocated_get()->allocated)
# define ALLOCATEDP_GET() (&thread_allocated_get()->allocated)
# define DEALLOCATED_GET() (thread_allocated_get()->deallocated)
# define DEALLOCATEDP_GET() (&thread_allocated_get()->deallocated)
# define ALLOCATED_ADD(a, d) do { \
thread_allocated_t *thread_allocated = thread_allocated_get(); \
thread_allocated->allocated += (a); \
thread_allocated->deallocated += (d); \
} while (0)
# endif
#endif
arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg);
arena_t *choose_arena_hard(void);
int buferror(int errnum, char *buf, size_t buflen);
void jemalloc_prefork(void);
void jemalloc_postfork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
@@ -376,21 +544,22 @@ void jemalloc_postfork(void);
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
@@ -398,34 +567,20 @@ void jemalloc_postfork(void);
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
size_t pow2_ceil(size_t x);
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment, size_t *run_size_p);
void malloc_write(const char *s);
arena_t *choose_arena(void);
# if (defined(JEMALLOC_STATS) && defined(NO_TLS))
thread_allocated_t *thread_allocated_get(void);
# endif
size_t sa2u(size_t size, size_t alignment);
arena_t *choose_arena(arena_t *arena);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
x |= x >> 32;
#endif
x++;
return (x);
}
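/*
 * Example (illustrative): pow2_ceil(33) decrements to 32, the shifts smear
 * the high bit down to give 63, and the final increment yields 64.  Powers
 * of two are returned unchanged, e.g. pow2_ceil(32) == 32.
 */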
/*
* Map of pthread_self() --> arenas[???], used for selecting an arena to use
* for allocations.
*/
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)
/*
* Compute usable size that would result from allocating an object with the
@@ -435,7 +590,7 @@ JEMALLOC_INLINE size_t
s2u(size_t size)
{
if (size <= small_maxclass)
if (size <= SMALL_MAXCLASS)
return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
if (size <= arena_maxclass)
return (PAGE_CEILING(size));
@@ -447,10 +602,12 @@ s2u(size_t size)
* specified size and alignment.
*/
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
sa2u(size_t size, size_t alignment)
{
size_t usize;
assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
/*
* Round size up to the nearest multiple of alignment.
*
@@ -464,12 +621,8 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
* 96 | 1100000 | 32
* 144 | 10100000 | 32
* 192 | 11000000 | 64
*
* Depending on runtime settings, it is possible that arena_malloc()
* will further round up to a power of two, but that never causes
* correctness issues.
*/
usize = (size + (alignment - 1)) & (-alignment);
usize = ALIGNMENT_CEILING(size, alignment);
/*
* (usize < size) protects against the combination of maximal
* alignment and size greater than maximal alignment.
@@ -479,8 +632,8 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
return (0);
}
if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
if (usize <= small_maxclass)
if (usize <= arena_maxclass && alignment <= PAGE) {
if (usize <= SMALL_MAXCLASS)
return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
return (PAGE_CEILING(usize));
} else {
@@ -494,7 +647,7 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
usize = PAGE_CEILING(size);
/*
* (usize < size) protects against very large sizes within
* PAGE_SIZE of SIZE_T_MAX.
* PAGE of SIZE_T_MAX.
*
* (usize + alignment < usize) protects against the
* combination of maximal alignment and usize large enough
@@ -512,93 +665,63 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
/*
* Calculate the size of the over-size run that arena_palloc()
* would need to allocate in order to guarantee the alignment.
* If the run wouldn't fit within a chunk, round up to a huge
* allocation size.
*/
if (usize >= alignment)
run_size = usize + alignment - PAGE_SIZE;
else {
/*
* It is possible that (alignment << 1) will cause
* overflow, but it doesn't matter because we also
* subtract PAGE_SIZE, which in the case of overflow
* leaves us with a very large run_size. That causes
* the first conditional below to fail, which means
* that the bogus run_size value never gets used for
* anything important.
*/
run_size = (alignment << 1) - PAGE_SIZE;
}
if (run_size_p != NULL)
*run_size_p = run_size;
run_size = usize + alignment - PAGE;
if (run_size <= arena_maxclass)
return (PAGE_CEILING(usize));
return (CHUNK_CEILING(usize));
}
}
/*
* Wrapper around malloc_message() that avoids the need for
* JEMALLOC_P(malloc_message)(...) throughout the code.
*/
JEMALLOC_INLINE void
malloc_write(const char *s)
{
JEMALLOC_P(malloc_message)(NULL, s);
}
/*
* Choose an arena based on a per-thread value (fast-path code, calls slow-path
* code if necessary).
*/
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(void)
choose_arena(arena_t *arena)
{
arena_t *ret;
ret = ARENA_GET();
if (ret == NULL) {
if (arena != NULL)
return (arena);
if ((ret = *arenas_tsd_get()) == NULL) {
ret = choose_arena_hard();
assert(ret != NULL);
}
return (ret);
}
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
JEMALLOC_INLINE thread_allocated_t *
thread_allocated_get(void)
{
thread_allocated_t *thread_allocated = (thread_allocated_t *)
pthread_getspecific(thread_allocated_tsd);
if (thread_allocated == NULL)
return (thread_allocated_get_hard());
return (thread_allocated);
}
#endif
#endif
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
* Include arena.h twice in order to resolve circular dependencies with
* tcache.h.
*/
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
void *imalloc(size_t size);
void *icalloc(size_t size);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr);
# ifdef JEMALLOC_IVSALLOC
size_t ivsalloc(const void *ptr);
# endif
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idalloc(void *ptr);
void iqalloc(void *ptr);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@@ -609,7 +732,7 @@ imalloc(size_t size)
assert(size != 0);
if (size <= arena_maxclass)
return (arena_malloc(size, false));
return (arena_malloc(NULL, size, false, true));
else
return (huge_malloc(size, false));
}
@@ -619,7 +742,7 @@ icalloc(size_t size)
{
if (size <= arena_maxclass)
return (arena_malloc(size, true));
return (arena_malloc(NULL, size, true, true));
else
return (huge_malloc(size, true));
}
@@ -630,75 +753,80 @@ ipalloc(size_t usize, size_t alignment, bool zero)
void *ret;
assert(usize != 0);
assert(usize == sa2u(usize, alignment, NULL));
assert(usize == sa2u(usize, alignment));
if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
ret = arena_malloc(usize, zero);
if (usize <= arena_maxclass && alignment <= PAGE)
ret = arena_malloc(NULL, usize, zero, true);
else {
size_t run_size
#ifdef JEMALLOC_CC_SILENCE
= 0
#endif
;
/*
* Ideally we would only ever call sa2u() once per aligned
* allocation request, and the caller of this function has
* already done so once. However, it's rather burdensome to
* require every caller to pass in run_size, especially given
* that it's only relevant to large allocations. Therefore,
* just call it again here in order to get run_size.
*/
sa2u(usize, alignment, &run_size);
if (run_size <= arena_maxclass) {
ret = arena_palloc(choose_arena(), usize, run_size,
alignment, zero);
if (usize <= arena_maxclass) {
ret = arena_palloc(choose_arena(NULL), usize, alignment,
zero);
} else if (alignment <= chunksize)
ret = huge_malloc(usize, zero);
else
ret = huge_palloc(usize, alignment, zero);
}
assert(((uintptr_t)ret & (alignment - 1)) == 0);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
return (ret);
}
/*
* Typical usage:
* void *ptr = [...]
* size_t sz = isalloc(ptr, config_prof);
*/
JEMALLOC_INLINE size_t
isalloc(const void *ptr)
isalloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || demote == false);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
dassert(chunk->arena->magic == ARENA_MAGIC);
#ifdef JEMALLOC_PROF
ret = arena_salloc_demote(ptr);
#else
ret = arena_salloc(ptr);
#endif
} else
if (chunk != ptr)
ret = arena_salloc(ptr, demote);
else
ret = huge_salloc(ptr);
return (ret);
}
#ifdef JEMALLOC_IVSALLOC
JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
ivsalloc(const void *ptr, bool demote)
{
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
return (0);
return (isalloc(ptr));
return (isalloc(ptr, demote));
}
JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
size_t ret;
if (usize <= SMALL_MAXCLASS) {
size_t binind = SMALL_SIZE2BIN(usize);
ret = arena_bin_info[binind].redzone_size;
} else
ret = 0;
return (ret);
}
JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
size_t usize = isalloc(ptr, false);
return (u2rz(usize));
}
#endif
JEMALLOC_INLINE void
idalloc(void *ptr)
@@ -709,11 +837,21 @@ idalloc(void *ptr)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
arena_dalloc(chunk->arena, chunk, ptr);
arena_dalloc(chunk->arena, chunk, ptr, true);
else
huge_dalloc(ptr, true);
}
JEMALLOC_INLINE void
iqalloc(void *ptr)
{
if (config_fill && opt_quarantine)
quarantine(ptr);
else
idalloc(ptr);
}
JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool no_move)
@@ -724,19 +862,19 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
assert(ptr != NULL);
assert(size != 0);
oldsize = isalloc(ptr);
oldsize = isalloc(ptr, config_prof);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
size_t usize, copysize;
/*
* Existing object alignment is inadquate; allocate new space
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
if (no_move)
return (NULL);
usize = sa2u(size + extra, alignment, NULL);
usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
ret = ipalloc(usize, alignment, zero);
@@ -744,7 +882,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
if (extra == 0)
return (NULL);
/* Try again, without extra this time. */
usize = sa2u(size, alignment, NULL);
usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
ret = ipalloc(usize, alignment, zero);
@@ -758,7 +896,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
idalloc(ptr);
iqalloc(ptr);
return (ret);
}
@@ -773,16 +911,21 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
} else {
if (size + extra <= arena_maxclass) {
return (arena_ralloc(ptr, oldsize, size, extra,
alignment, zero));
alignment, zero, true));
} else {
return (huge_ralloc(ptr, oldsize, size, extra,
alignment, zero));
}
}
}
malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */
@@ -54,7 +54,7 @@ mb_write(void)
);
#endif
}
#elif (defined(__amd64_) || defined(__x86_64__))
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
mb_write(void)
{
@@ -87,6 +87,13 @@ mb_write(void)
: "memory" /* Clobbers. */
);
}
#elif defined(__tile__)
JEMALLOC_INLINE void
mb_write(void)
{
__sync_synchronize();
}
#else
/*
* This is much slower than a simple memory barrier, but the semantics of mutex
......
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifdef JEMALLOC_OSSPIN
typedef OSSpinLock malloc_mutex_t;
#else
typedef pthread_mutex_t malloc_mutex_t;
#endif
typedef struct malloc_mutex_s malloc_mutex_t;
#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
# define MALLOC_MUTEX_INITIALIZER PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER {0}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
#else
# define MALLOC_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
# endif
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct malloc_mutex_s {
#ifdef _WIN32
CRITICAL_SECTION lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
pthread_mutex_t lock;
malloc_mutex_t *postponed_next;
#else
pthread_mutex_t lock;
#endif
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
@@ -24,11 +44,15 @@ typedef pthread_mutex_t malloc_mutex_t;
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
# undef isthreaded /* Undo private_namespace.h definition. */
# define isthreaded true
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex);
void malloc_mutex_destroy(malloc_mutex_t *mutex);
void malloc_mutex_prefork(malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
bool mutex_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
@@ -36,7 +60,6 @@ void malloc_mutex_destroy(malloc_mutex_t *mutex);
#ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(malloc_mutex_t *mutex);
bool malloc_mutex_trylock(malloc_mutex_t *mutex);
void malloc_mutex_unlock(malloc_mutex_t *mutex);
#endif
@@ -46,37 +69,27 @@ malloc_mutex_lock(malloc_mutex_t *mutex)
{
if (isthreaded) {
#ifdef JEMALLOC_OSSPIN
OSSpinLockLock(mutex);
#ifdef _WIN32
EnterCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mutex->lock);
#else
pthread_mutex_lock(mutex);
pthread_mutex_lock(&mutex->lock);
#endif
}
}
JEMALLOC_INLINE bool
malloc_mutex_trylock(malloc_mutex_t *mutex)
{
if (isthreaded) {
#ifdef JEMALLOC_OSSPIN
return (OSSpinLockTry(mutex) == false);
#else
return (pthread_mutex_trylock(mutex) != 0);
#endif
} else
return (false);
}
JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex)
{
if (isthreaded) {
#ifdef JEMALLOC_OSSPIN
OSSpinLockUnlock(mutex);
#ifdef _WIN32
LeaveCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mutex->lock);
#else
pthread_mutex_unlock(mutex);
pthread_mutex_unlock(&mutex->lock);
#endif
}
}
......
#define a0calloc JEMALLOC_N(a0calloc)
#define a0free JEMALLOC_N(a0free)
#define a0malloc JEMALLOC_N(a0malloc)
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
#define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_new JEMALLOC_N(arena_new)
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
#define arena_prefork JEMALLOC_N(arena_prefork)
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
#define arena_purge_all JEMALLOC_N(arena_purge_all)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arena_salloc_demote JEMALLOC_N(arena_salloc_demote)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas JEMALLOC_N(arenas)
#define arenas_bin_i_index JEMALLOC_N(arenas_bin_i_index)
#define arenas_booted JEMALLOC_N(arenas_booted)
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
#define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_initialized JEMALLOC_N(arenas_initialized)
#define arenas_lock JEMALLOC_N(arenas_lock)
#define arenas_lrun_i_index JEMALLOC_N(arenas_lrun_i_index)
#define arenas_tls JEMALLOC_N(arenas_tls)
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
#define atomic_add_u JEMALLOC_N(atomic_add_u)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
#define atomic_add_z JEMALLOC_N(atomic_add_z)
#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
#define base_alloc JEMALLOC_N(base_alloc)
#define base_boot JEMALLOC_N(base_boot)
#define base_calloc JEMALLOC_N(base_calloc)
#define base_node_alloc JEMALLOC_N(base_node_alloc)
#define base_node_dealloc JEMALLOC_N(base_node_dealloc)
#define base_postfork_child JEMALLOC_N(base_postfork_child)
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
#define base_prefork JEMALLOC_N(base_prefork)
#define bitmap_full JEMALLOC_N(bitmap_full)
#define bitmap_get JEMALLOC_N(bitmap_get)
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
@@ -47,19 +95,19 @@
#define chunk_alloc JEMALLOC_N(chunk_alloc)
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
#define chunk_alloc_mmap_noreserve JEMALLOC_N(chunk_alloc_mmap_noreserve)
#define chunk_alloc_swap JEMALLOC_N(chunk_alloc_swap)
#define chunk_boot JEMALLOC_N(chunk_boot)
#define chunk_dealloc JEMALLOC_N(chunk_dealloc)
#define chunk_dealloc_dss JEMALLOC_N(chunk_dealloc_dss)
#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
#define chunk_dealloc_swap JEMALLOC_N(chunk_dealloc_swap)
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_in_swap JEMALLOC_N(chunk_in_swap)
#define chunk_mmap_boot JEMALLOC_N(chunk_mmap_boot)
#define chunk_swap_boot JEMALLOC_N(chunk_swap_boot)
#define chunk_swap_enable JEMALLOC_N(chunk_swap_enable)
#define chunk_npages JEMALLOC_N(chunk_npages)
#define chunks_mtx JEMALLOC_N(chunks_mtx)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize)
#define chunksize_mask JEMALLOC_N(chunksize_mask)
#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
#define ckh_count JEMALLOC_N(ckh_count)
#define ckh_delete JEMALLOC_N(ckh_delete)
@@ -77,7 +125,6 @@
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
#define ckh_try_insert JEMALLOC_N(ckh_try_insert)
#define create_zone JEMALLOC_N(create_zone)
#define ctl_boot JEMALLOC_N(ctl_boot)
#define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname)
@@ -115,10 +162,17 @@
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
#define hash JEMALLOC_N(hash)
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_mtx JEMALLOC_N(huge_mtx)
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
#define huge_palloc JEMALLOC_N(huge_palloc)
#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
#define huge_prefork JEMALLOC_N(huge_prefork)
#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
#define huge_ralloc JEMALLOC_N(huge_ralloc)
@@ -129,21 +183,63 @@
#define idalloc JEMALLOC_N(idalloc)
#define imalloc JEMALLOC_N(imalloc)
#define ipalloc JEMALLOC_N(ipalloc)
#define iqalloc JEMALLOC_N(iqalloc)
#define iralloc JEMALLOC_N(iralloc)
#define isalloc JEMALLOC_N(isalloc)
#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define jemalloc_darwin_init JEMALLOC_N(jemalloc_darwin_init)
#define jemalloc_postfork JEMALLOC_N(jemalloc_postfork)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
#define malloc_mutex_destroy JEMALLOC_N(malloc_mutex_destroy)
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
#define malloc_mutex_trylock JEMALLOC_N(malloc_mutex_trylock)
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
#define malloc_printf JEMALLOC_N(malloc_printf)
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup)
#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
#define malloc_write JEMALLOC_N(malloc_write)
#define map_bias JEMALLOC_N(map_bias)
#define mb_write JEMALLOC_N(mb_write)
#define mutex_boot JEMALLOC_N(mutex_boot)
#define narenas JEMALLOC_N(narenas)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define opt_abort JEMALLOC_N(opt_abort)
#define opt_junk JEMALLOC_N(opt_junk)
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
#define opt_narenas JEMALLOC_N(opt_narenas)
#define opt_prof JEMALLOC_N(opt_prof)
#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
#define opt_prof_active JEMALLOC_N(opt_prof_active)
#define opt_prof_final JEMALLOC_N(opt_prof_final)
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
#define opt_quarantine JEMALLOC_N(opt_quarantine)
#define opt_redzone JEMALLOC_N(opt_redzone)
#define opt_stats_print JEMALLOC_N(opt_stats_print)
#define opt_tcache JEMALLOC_N(opt_tcache)
#define opt_utrace JEMALLOC_N(opt_utrace)
#define opt_valgrind JEMALLOC_N(opt_valgrind)
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
#define p2rz JEMALLOC_N(p2rz)
#define pages_purge JEMALLOC_N(pages_purge)
#define pow2_ceil JEMALLOC_N(pow2_ceil)
#define prof_backtrace JEMALLOC_N(prof_backtrace)
#define prof_boot0 JEMALLOC_N(prof_boot0)
@@ -154,14 +250,31 @@
#define prof_free JEMALLOC_N(prof_free)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_idump JEMALLOC_N(prof_idump)
#define prof_interval JEMALLOC_N(prof_interval)
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_promote JEMALLOC_N(prof_promote)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define pthread_create JEMALLOC_N(pthread_create)
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
#define quarantine JEMALLOC_N(quarantine)
#define quarantine_boot JEMALLOC_N(quarantine_boot)
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
#define register_zone JEMALLOC_N(register_zone)
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new)
@@ -171,25 +284,56 @@
#define stats_arenas_i_bins_j_index JEMALLOC_N(stats_arenas_i_bins_j_index)
#define stats_arenas_i_index JEMALLOC_N(stats_arenas_i_index)
#define stats_arenas_i_lruns_j_index JEMALLOC_N(stats_arenas_i_lruns_j_index)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
#define stats_chunks JEMALLOC_N(stats_chunks)
#define stats_print JEMALLOC_N(stats_print)
#define szone2ozone JEMALLOC_N(szone2ozone)
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
#define tcache_boot JEMALLOC_N(tcache_boot)
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
#define tcache_boot0 JEMALLOC_N(tcache_boot0)
#define tcache_boot1 JEMALLOC_N(tcache_boot1)
#define tcache_booted JEMALLOC_N(tcache_booted)
#define tcache_create JEMALLOC_N(tcache_create)
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
#define tcache_destroy JEMALLOC_N(tcache_destroy)
#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
#define tcache_flush JEMALLOC_N(tcache_flush)
#define tcache_get JEMALLOC_N(tcache_get)
#define tcache_initialized JEMALLOC_N(tcache_initialized)
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
#define tcache_salloc JEMALLOC_N(tcache_salloc)
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define thread_allocated_get JEMALLOC_N(thread_allocated_get)
#define thread_allocated_get_hard JEMALLOC_N(thread_allocated_get_hard)
#define u2s JEMALLOC_N(u2s)
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
#define tcache_tls JEMALLOC_N(tcache_tls)
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
#define u2rz JEMALLOC_N(u2rz)
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* Simple linear congruential pseudo-random number generator:
*
 * prng(x) = (a*x + c) % m
*
* where the following constants ensure maximal period:
*
* a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
* c == Odd number (relatively prime to 2^n).
* m == 2^32
*
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
 * proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*
* Macro parameters:
* uint32_t r : Result.
* unsigned lg_range : (0..32], number of least significant bits to return.
* uint32_t state : Seed value.
* const uint32_t a, c : See above discussion.
*/
#define prng32(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 32); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (32 - lg_range); \
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 64); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (64 - lg_range); \
} while (false)
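/*
 * Illustrative standalone sketch (not part of jemalloc) showing how prng32()
 * is intended to be used.  The seed and the a/c constants below are arbitrary
 * values chosen only to satisfy the constraints documented above; any
 * conforming pair works.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define prng32(r, lg_range, state, a, c) do {				\
	assert(lg_range > 0);						\
	assert(lg_range <= 32);						\
									\
	r = (state * (a)) + (c);					\
	state = r;							\
	r >>= (32 - lg_range);						\
} while (false)

int
main(void)
{
	uint32_t state = 0x9e3779b9;	/* arbitrary per-caller seed */
	uint32_t r;
	int i;

	for (i = 0; i < 4; i++) {
		/* Draw 8 bits; they come from the upper end of the state. */
		prng32(r, 8, state, 1103515245U, 12345U);
		printf("%u\n", (unsigned)r);
	}
	return (0);
}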
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#ifdef JEMALLOC_PROF
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
......@@ -10,28 +9,41 @@ typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#define PROF_PREFIX_DEFAULT "jeprof"
#define LG_PROF_BT_MAX_DEFAULT 7
#define LG_PROF_SAMPLE_DEFAULT 0
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
#define LG_PROF_TCMAX_DEFAULT -1
/*
* Hard limit on stack backtrace depth. Note that the version of
* prof_backtrace() that is based on __builtin_return_address() necessarily has
* a hard-coded number of backtrace frame handlers.
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
* is based on __builtin_return_address() necessarily has a hard-coded number
* of backtrace frame handlers, and should be kept in sync with this setting.
*/
#if (defined(JEMALLOC_PROF_LIBGCC) || defined(JEMALLOC_PROF_LIBUNWIND))
# define LG_PROF_BT_MAX ((ZU(1) << (LG_SIZEOF_PTR+3)) - 1)
#else
# define LG_PROF_BT_MAX 7 /* >= LG_PROF_BT_MAX_DEFAULT */
#endif
#define PROF_BT_MAX (1U << LG_PROF_BT_MAX)
#define PROF_BT_MAX 128
/* Maximum number of backtraces to store in each per thread LRU cache. */
#define PROF_TCMAX 1024
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUF_SIZE 65536
#define PROF_DUMP_BUFSIZE 65536
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all ctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
......@@ -109,8 +121,18 @@ struct prof_ctx_s {
/* Associated backtrace. */
prof_bt_t *bt;
/* Protects cnt_merged and cnts_ql. */
malloc_mutex_t lock;
/* Protects nlimbo, cnt_merged, and cnts_ql. */
malloc_mutex_t *lock;
/*
* Number of threads that currently cause this ctx to be in a state of
* limbo due to one of:
* - Initializing per thread counters associated with this ctx.
* - Preparing to destroy this ctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* ctx.
*/
unsigned nlimbo;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
......@@ -145,9 +167,14 @@ struct prof_tdata_s {
void **vec;
/* Sampling state. */
uint64_t prn_state;
uint64_t prng_state;
uint64_t threshold;
uint64_t accum;
/* State used to avoid dumping while operating on prof internals. */
bool enq;
bool enq_idump;
bool enq_gdump;
};
#endif /* JEMALLOC_H_STRUCTS */
......@@ -162,13 +189,12 @@ extern bool opt_prof;
* to notice state changes.
*/
extern bool opt_prof_active;
extern size_t opt_lg_prof_bt_max; /* Maximum backtrace depth. */
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_accum; /* Report cumulative bytes. */
extern ssize_t opt_lg_prof_tcmax; /* lg(max per thread backtrace cache) */
extern char opt_prof_prefix[PATH_MAX + 1];
/*
......@@ -186,39 +212,14 @@ extern uint64_t prof_interval;
*/
extern bool prof_promote;
/* (1U << opt_lg_prof_bt_max). */
extern unsigned prof_bt_max;
/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
#ifndef NO_TLS
extern __thread prof_tdata_t *prof_tdata_tls
JEMALLOC_ATTR(tls_model("initial-exec"));
# define PROF_TCACHE_GET() prof_tdata_tls
# define PROF_TCACHE_SET(v) do { \
prof_tdata_tls = (v); \
pthread_setspecific(prof_tdata_tsd, (void *)(v)); \
} while (0)
#else
# define PROF_TCACHE_GET() \
((prof_tdata_t *)pthread_getspecific(prof_tdata_tsd))
# define PROF_TCACHE_SET(v) do { \
pthread_setspecific(prof_tdata_tsd, (void *)(v)); \
} while (0)
#endif
/*
* Same contents as b2cnt_tls, but initialized such that the TSD destructor is
* called when a thread exits, so that prof_tdata_tls contents can be merged,
* unlinked, and deallocated.
*/
extern pthread_key_t prof_tdata_tsd;
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max);
void prof_backtrace(prof_bt_t *bt, unsigned nignore);
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
prof_tdata_t *prof_tdata_init(void);
void prof_tdata_cleanup(void *arg);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);
......@@ -233,13 +234,13 @@ bool prof_boot2(void);
\
assert(size == s2u(size)); \
\
prof_tdata = PROF_TCACHE_GET(); \
if (prof_tdata == NULL) { \
prof_tdata = prof_tdata_init(); \
if (prof_tdata == NULL) { \
prof_tdata = prof_tdata_get(); \
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
if (prof_tdata != NULL) \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
else \
ret = NULL; \
break; \
} \
break; \
} \
\
if (opt_prof_active == false) { \
......@@ -249,13 +250,13 @@ bool prof_boot2(void);
/* Don't bother with sampling logic, since sampling */\
/* interval is 1. */\
bt_init(&bt, prof_tdata->vec); \
prof_backtrace(&bt, nignore, prof_bt_max); \
prof_backtrace(&bt, nignore); \
ret = prof_lookup(&bt); \
} else { \
if (prof_tdata->threshold == 0) { \
/* Initialize. Seed the prng differently for */\
/* each thread. */\
prof_tdata->prn_state = \
prof_tdata->prng_state = \
(uint64_t)(uintptr_t)&size; \
prof_sample_threshold_update(prof_tdata); \
} \
......@@ -272,7 +273,7 @@ bool prof_boot2(void);
if (size >= prof_tdata->threshold - \
prof_tdata->accum) { \
bt_init(&bt, prof_tdata->vec); \
prof_backtrace(&bt, nignore, prof_bt_max); \
prof_backtrace(&bt, nignore); \
ret = prof_lookup(&bt); \
} else \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
......@@ -280,6 +281,9 @@ bool prof_boot2(void);
} while (0)
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)
prof_tdata_t *prof_tdata_get(void);
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
prof_ctx_t *prof_ctx_get(const void *ptr);
void prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
......@@ -291,12 +295,35 @@ void prof_free(const void *ptr, size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
malloc_tsd_externs(prof_tdata, prof_tdata_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
prof_tdata_cleanup)
JEMALLOC_INLINE prof_tdata_t *
prof_tdata_get(void)
{
prof_tdata_t *prof_tdata;
cassert(config_prof);
prof_tdata = *prof_tdata_tsd_get();
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) {
if (prof_tdata == NULL)
prof_tdata = prof_tdata_init();
}
return (prof_tdata);
}
JEMALLOC_INLINE void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
{
uint64_t r;
double u;
cassert(config_prof);
/*
* Compute sample threshold as a geometrically distributed random
* variable with mean (2^opt_lg_prof_sample).
......@@ -315,8 +342,8 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
* pp 500
* (http://cg.scs.carleton.ca/~luc/rnbookindex.html)
*/
prn64(r, 53, prof_tdata->prn_state,
(uint64_t)6364136223846793005LLU, (uint64_t)1442695040888963407LLU);
prng64(r, 53, prof_tdata->prng_state,
UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
u = (double)r * (1.0/9007199254740992.0L);
prof_tdata->threshold = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
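/*
 * Illustrative standalone sketch (not jemalloc code) of the same inverse-CDF
 * step: draw u uniformly from the top 53 bits of the LCG, then pick the next
 * sampling threshold so that thresholds are geometrically distributed with
 * mean ~2^lg_sample.  The LCG constants mirror the code above; the seed and a
 * trailing + 1 (assumed here, to keep the threshold nonzero) are this
 * sketch's own choices.
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t state = 0x123456789abcdef0ULL;	/* per-thread seed */
	unsigned lg_sample = 19;	/* LG_PROF_SAMPLE_DEFAULT */
	uint64_t r, threshold;
	double u;

	/* prng64(r, 53, state, a, c) expanded inline. */
	r = (state * UINT64_C(6364136223846793005)) +
	    UINT64_C(1442695040888963407);
	state = r;
	r >>= (64 - 53);

	/* u in [0, 1); u == 0 only with probability 2^-53. */
	u = (double)r * (1.0 / 9007199254740992.0);
	threshold = (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_sample)))) + 1;
	printf("next sample after ~%llu bytes\n",
	    (unsigned long long)threshold);
	return (0);
}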
......@@ -329,13 +356,12 @@ prof_ctx_get(const void *ptr)
prof_ctx_t *ret;
arena_chunk_t *chunk;
cassert(config_prof);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
dassert(chunk->arena->magic == ARENA_MAGIC);
ret = arena_prof_ctx_get(ptr);
} else
ret = huge_prof_ctx_get(ptr);
......@@ -348,13 +374,12 @@ prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
arena_chunk_t *chunk;
cassert(config_prof);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
dassert(chunk->arena->magic == ARENA_MAGIC);
arena_prof_ctx_set(ptr, ctx);
} else
huge_prof_ctx_set(ptr, ctx);
......@@ -365,11 +390,13 @@ prof_sample_accum_update(size_t size)
{
prof_tdata_t *prof_tdata;
cassert(config_prof);
/* Sampling logic is unnecessary if the interval is 1. */
assert(opt_lg_prof_sample != 0);
prof_tdata = PROF_TCACHE_GET();
assert(prof_tdata != NULL);
prof_tdata = *prof_tdata_tsd_get();
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (true);
/* Take care to avoid integer overflow. */
if (size >= prof_tdata->threshold - prof_tdata->accum) {
......@@ -391,8 +418,9 @@ JEMALLOC_INLINE void
prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
{
cassert(config_prof);
assert(ptr != NULL);
assert(size == isalloc(ptr));
assert(size == isalloc(ptr, true));
if (opt_lg_prof_sample != 0) {
if (prof_sample_accum_update(size)) {
......@@ -437,10 +465,11 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
{
prof_thr_cnt_t *told_cnt;
cassert(config_prof);
assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
if (ptr != NULL) {
assert(size == isalloc(ptr));
assert(size == isalloc(ptr, true));
if (opt_lg_prof_sample != 0) {
if (prof_sample_accum_update(size)) {
/*
......@@ -463,10 +492,10 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
* It's too late to propagate OOM for this realloc(),
* so operate directly on old_cnt->ctx->cnt_merged.
*/
malloc_mutex_lock(&old_ctx->lock);
malloc_mutex_lock(old_ctx->lock);
old_ctx->cnt_merged.curobjs--;
old_ctx->cnt_merged.curbytes -= old_size;
malloc_mutex_unlock(&old_ctx->lock);
malloc_mutex_unlock(old_ctx->lock);
told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
}
} else
......@@ -510,9 +539,12 @@ prof_free(const void *ptr, size_t size)
{
prof_ctx_t *ctx = prof_ctx_get(ptr);
cassert(config_prof);
if ((uintptr_t)ctx > (uintptr_t)1) {
assert(size == isalloc(ptr));
prof_thr_cnt_t *tcnt = prof_lookup(ctx->bt);
prof_thr_cnt_t *tcnt;
assert(size == isalloc(ptr, true));
tcnt = prof_lookup(ctx->bt);
if (tcnt != NULL) {
tcnt->epoch++;
......@@ -533,10 +565,10 @@ prof_free(const void *ptr, size_t size)
* OOM during free() cannot be propagated, so operate
* directly on cnt->ctx->cnt_merged.
*/
malloc_mutex_lock(&ctx->lock);
malloc_mutex_lock(ctx->lock);
ctx->cnt_merged.curobjs--;
ctx->cnt_merged.curbytes -= size;
malloc_mutex_unlock(&ctx->lock);
malloc_mutex_unlock(ctx->lock);
}
}
}
......@@ -544,4 +576,3 @@ prof_free(const void *ptr, size_t size)
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_PROF */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/* Default per thread quarantine size if valgrind is enabled. */
#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void quarantine(void *ptr);
bool quarantine_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
......@@ -223,88 +223,88 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
* The following API is generated:
*
* static void
* ex_new(ex_t *extree);
* ex_new(ex_t *tree);
* Description: Initialize a red-black tree structure.
* Args:
* extree: Pointer to an uninitialized red-black tree object.
* tree: Pointer to an uninitialized red-black tree object.
*
* static ex_node_t *
* ex_first(ex_t *extree);
* ex_first(ex_t *tree);
* static ex_node_t *
* ex_last(ex_t *extree);
* Description: Get the first/last node in extree.
* ex_last(ex_t *tree);
* Description: Get the first/last node in tree.
* Args:
* extree: Pointer to an initialized red-black tree object.
* Ret: First/last node in extree, or NULL if extree is empty.
* tree: Pointer to an initialized red-black tree object.
* Ret: First/last node in tree, or NULL if tree is empty.
*
* static ex_node_t *
* ex_next(ex_t *extree, ex_node_t *node);
* ex_next(ex_t *tree, ex_node_t *node);
* static ex_node_t *
* ex_prev(ex_t *extree, ex_node_t *node);
* ex_prev(ex_t *tree, ex_node_t *node);
* Description: Get node's successor/predecessor.
* Args:
* extree: Pointer to an initialized red-black tree object.
* node : A node in extree.
* Ret: node's successor/predecessor in extree, or NULL if node is
* tree: Pointer to an initialized red-black tree object.
* node: A node in tree.
* Ret: node's successor/predecessor in tree, or NULL if node is
* last/first.
*
* static ex_node_t *
* ex_search(ex_t *extree, ex_node_t *key);
* ex_search(ex_t *tree, ex_node_t *key);
* Description: Search for node that matches key.
* Args:
* extree: Pointer to an initialized red-black tree object.
* key : Search key.
* Ret: Node in extree that matches key, or NULL if no match.
* tree: Pointer to an initialized red-black tree object.
* key : Search key.
* Ret: Node in tree that matches key, or NULL if no match.
*
* static ex_node_t *
* ex_nsearch(ex_t *extree, ex_node_t *key);
* ex_nsearch(ex_t *tree, ex_node_t *key);
* static ex_node_t *
* ex_psearch(ex_t *extree, ex_node_t *key);
* ex_psearch(ex_t *tree, ex_node_t *key);
* Description: Search for node that matches key. If no match is found,
* return what would be key's successor/predecessor, were
* key in extree.
* key in tree.
* Args:
* extree: Pointer to an initialized red-black tree object.
* key : Search key.
* Ret: Node in extree that matches key, or if no match, hypothetical
* node's successor/predecessor (NULL if no successor/predecessor).
* tree: Pointer to an initialized red-black tree object.
* key : Search key.
* Ret: Node in tree that matches key, or if no match, hypothetical node's
* successor/predecessor (NULL if no successor/predecessor).
*
* static void
* ex_insert(ex_t *extree, ex_node_t *node);
* Description: Insert node into extree.
* ex_insert(ex_t *tree, ex_node_t *node);
* Description: Insert node into tree.
* Args:
* extree: Pointer to an initialized red-black tree object.
* node : Node to be inserted into extree.
* tree: Pointer to an initialized red-black tree object.
* node: Node to be inserted into tree.
*
* static void
* ex_remove(ex_t *extree, ex_node_t *node);
* Description: Remove node from extree.
* ex_remove(ex_t *tree, ex_node_t *node);
* Description: Remove node from tree.
* Args:
* extree: Pointer to an initialized red-black tree object.
* node : Node in extree to be removed.
* tree: Pointer to an initialized red-black tree object.
* node: Node in tree to be removed.
*
* static ex_node_t *
* ex_iter(ex_t *extree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
* ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
* ex_node_t *, void *), void *arg);
* static ex_node_t *
* ex_reverse_iter(ex_t *extree, ex_node_t *start, ex_node *(*cb)(ex_t *,
* ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *,
* ex_node_t *, void *), void *arg);
* Description: Iterate forward/backward over extree, starting at node.
* If extree is modified, iteration must be immediately
* Description: Iterate forward/backward over tree, starting at node. If
* tree is modified, iteration must be immediately
* terminated by the callback function that causes the
* modification.
* Args:
* extree: Pointer to an initialized red-black tree object.
* start : Node at which to start iteration, or NULL to start at
* first/last node.
* cb : Callback function, which is called for each node during
* iteration. Under normal circumstances the callback function
* should return NULL, which causes iteration to continue. If a
* callback function returns non-NULL, iteration is immediately
* terminated and the non-NULL return value is returned by the
* iterator. This is useful for re-starting iteration after
* modifying extree.
* arg : Opaque pointer passed to cb().
* tree : Pointer to an initialized red-black tree object.
* start: Node at which to start iteration, or NULL to start at
* first/last node.
* cb : Callback function, which is called for each node during
* iteration. Under normal circumstances the callback function
* should return NULL, which causes iteration to continue. If a
* callback function returns non-NULL, iteration is immediately
* terminated and the non-NULL return value is returned by the
* iterator. This is useful for re-starting iteration after
* modifying tree.
* arg : Opaque pointer passed to cb().
* Ret: NULL if iteration completed, or the non-NULL callback return value
* that caused termination of the iteration.
*/
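/*
 * Illustrative usage sketch built only on the generated functions documented
 * above.  It assumes ex_t/ex_node_t and their comparison function were
 * instantiated elsewhere (e.g. via the rb_gen() machinery in this header),
 * so it is a fragment rather than a standalone program.
 */
static void
ex_usage(ex_t *tree, ex_node_t *nodes, size_t nnodes)
{
	size_t i;
	ex_node_t *node;

	ex_new(tree);
	for (i = 0; i < nnodes; i++)
		ex_insert(tree, &nodes[i]);

	/* In-order traversal via first/next. */
	for (node = ex_first(tree); node != NULL; node = ex_next(tree, node)) {
		/* ... visit node ... */
	}

	/* Drain the tree; restart from the root after each removal. */
	while ((node = ex_first(tree)) != NULL)
		ex_remove(tree, node);
}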
......
#!/bin/sh
# The following limits are chosen such that they cover all supported platforms.
# Range of quanta.
lg_qmin=3
lg_qmax=4
# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)].
lg_tmin=3
# Range of page sizes.
lg_pmin=12
lg_pmax=16
pow2() {
e=$1
pow2_result=1
while [ ${e} -gt 0 ] ; do
pow2_result=$((${pow2_result} + ${pow2_result}))
e=$((${e} - 1))
done
}
cat <<EOF
/* This file was automatically generated by size_classes.sh. */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
EOF
lg_q=${lg_qmin}
while [ ${lg_q} -le ${lg_qmax} ] ; do
lg_t=${lg_tmin}
while [ ${lg_t} -le ${lg_q} ] ; do
lg_p=${lg_pmin}
while [ ${lg_p} -le ${lg_pmax} ] ; do
echo "#if (LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})"
echo "#define SIZE_CLASSES_DEFINED"
pow2 ${lg_q}; q=${pow2_result}
pow2 ${lg_t}; t=${pow2_result}
pow2 ${lg_p}; p=${pow2_result}
bin=0
psz=0
sz=${t}
delta=$((${sz} - ${psz}))
echo "/* SIZE_CLASS(bin, delta, sz) */"
echo "#define SIZE_CLASSES \\"
# Tiny size classes.
while [ ${sz} -lt ${q} ] ; do
echo " SIZE_CLASS(${bin}, ${delta}, ${sz}) \\"
bin=$((${bin} + 1))
psz=${sz}
sz=$((${sz} + ${sz}))
delta=$((${sz} - ${psz}))
done
# Quantum-multiple size classes. For each doubling of sz, as many as 4
# size classes exist. Their spacing is the greater of:
# - q
# - sz/4, where sz is a power of 2
while [ ${sz} -lt ${p} ] ; do
if [ ${sz} -ge $((${q} * 4)) ] ; then
i=$((${sz} / 4))
else
i=${q}
fi
next_2pow=$((${sz} * 2))
while [ ${sz} -lt $next_2pow ] ; do
echo " SIZE_CLASS(${bin}, ${delta}, ${sz}) \\"
bin=$((${bin} + 1))
psz=${sz}
sz=$((${sz} + ${i}))
delta=$((${sz} - ${psz}))
done
done
echo
echo "#define NBINS ${bin}"
echo "#define SMALL_MAXCLASS ${psz}"
echo "#endif"
echo
lg_p=$((${lg_p} + 1))
done
lg_t=$((${lg_t} + 1))
done
lg_q=$((${lg_q} + 1))
done
cat <<EOF
#ifndef SIZE_CLASSES_DEFINED
# error "No size class definitions match configuration"
#endif
#undef SIZE_CLASSES_DEFINED
/*
* The small_size2bin lookup table uses uint8_t to encode each bin index, so we
* cannot support more than 256 small size classes. Further constrain NBINS to
* 255 to support prof_promote, since all small size classes, plus a "not
* small" size class must be stored in 8 bits of arena_chunk_map_t's bits
* field.
*/
#if (NBINS > 255)
# error "Too many small size classes"
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
EOF
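/*
 * Illustrative excerpt of what the script above generates for one assumed
 * configuration (LG_TINY_MIN == 3, LG_QUANTUM == 4, LG_PAGE == 12, i.e.
 * 8-byte tiny minimum, 16-byte quantum, 4 KiB pages): class spacing doubles
 * at each power of two, giving 28 small bins and SMALL_MAXCLASS 3584.
 */
#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0,   8,    8) \
    SIZE_CLASS(1,   8,   16) \
    SIZE_CLASS(2,  16,   32) \
    SIZE_CLASS(3,  16,   48) \
    SIZE_CLASS(4,  16,   64) \
    /* ... intermediate classes elided ... */ \
    SIZE_CLASS(27, 512, 3584) \

#define NBINS 28
#define SMALL_MAXCLASS 3584
#endif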
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#define UMAX2S_BUFSIZE 65
#ifdef JEMALLOC_STATS
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct arena_stats_s arena_stats_t;
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
typedef struct chunk_stats_s chunk_stats_t;
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#ifdef JEMALLOC_STATS
#ifdef JEMALLOC_TCACHE
struct tcache_bin_stats_s {
/*
* Number of allocation requests that corresponded to the size of this
......@@ -27,7 +18,6 @@ struct tcache_bin_stats_s {
*/
uint64_t nrequests;
};
#endif
struct malloc_bin_stats_s {
/*
......@@ -52,13 +42,11 @@ struct malloc_bin_stats_s {
*/
uint64_t nrequests;
#ifdef JEMALLOC_TCACHE
/* Number of tcache fills from this bin. */
uint64_t nfills;
/* Number of tcache flushes to this bin. */
uint64_t nflushes;
#endif
/* Total number of runs created for this bin's size class. */
uint64_t nruns;
......@@ -69,9 +57,6 @@ struct malloc_bin_stats_s {
*/
uint64_t reruns;
/* High-water mark for this bin. */
size_t highruns;
/* Current number of runs in this bin. */
size_t curruns;
};
......@@ -93,9 +78,6 @@ struct malloc_large_stats_s {
*/
uint64_t nrequests;
/* High-water mark for this size class. */
size_t highruns;
/* Current number of runs of this size class. */
size_t curruns;
};
......@@ -127,14 +109,10 @@ struct arena_stats_s {
*/
malloc_large_stats_t *lstats;
};
#endif /* JEMALLOC_STATS */
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
struct chunk_stats_s {
# ifdef JEMALLOC_STATS
/* Number of chunks that were allocated. */
uint64_t nchunks;
# endif
/* High-water mark for number of chunks allocated. */
size_t highchunks;
......@@ -146,7 +124,6 @@ struct chunk_stats_s {
*/
size_t curchunks;
};
#endif /* JEMALLOC_STATS */
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
......@@ -154,24 +131,14 @@ struct chunk_stats_s {
extern bool opt_stats_print;
#ifdef JEMALLOC_STATS
extern size_t stats_cactive;
#endif
char *u2s(uint64_t x, unsigned base, char *s);
#ifdef JEMALLOC_STATS
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_printf(const char *format, ...)
JEMALLOC_ATTR(format(printf, 1, 2));
#endif
void stats_print(void (*write)(void *, const char *), void *cbopaque,
const char *opts);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifdef JEMALLOC_STATS
#ifndef JEMALLOC_ENABLE_INLINE
size_t stats_cactive_get(void);
......@@ -202,6 +169,5 @@ stats_cactive_sub(size_t size)
}
#endif
#endif /* JEMALLOC_STATS */
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#ifdef JEMALLOC_TCACHE
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
......@@ -6,6 +5,16 @@ typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
/*
* tcache pointers close to NULL are used to encode state information that is
* used for two purposes: preventing thread caching on a per thread basis and
* cleaning up during thread shutdown.
*/
#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
/*
* Absolute maximum number of cache slots for each small bin in the thread
* cache. This is an additional constraint beyond that imposed as: twice the
......@@ -22,17 +31,26 @@ typedef struct tcache_s tcache_t;
#define LG_TCACHE_MAXCLASS_DEFAULT 15
/*
* (1U << opt_lg_tcache_gc_sweep) is the approximate number of allocation
* events between full GC sweeps (-1: disabled). Integer rounding may cause
* the actual number to be slightly higher, since GC is performed
* incrementally.
* TCACHE_GC_SWEEP is the approximate number of allocation events between
* full GC sweeps. Integer rounding may cause the actual number to be
* slightly higher, since GC is performed incrementally.
*/
#define LG_TCACHE_GC_SWEEP_DEFAULT 13
#define TCACHE_GC_SWEEP 8192
/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
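/*
 * Worked example (NBINS assumed to be 28, as on a 64-bit system with 4 KiB
 * pages and a 16-byte quantum): TCACHE_GC_SWEEP / NBINS == 8192 / 28 == 292,
 * so TCACHE_GC_INCR == 293.  One bin is garbage-collected every 293
 * allocation/deallocation events, and a full sweep over all 28 bins takes
 * 28 * 293 == 8204 events, slightly above TCACHE_GC_SWEEP due to rounding.
 */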
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
typedef enum {
tcache_enabled_false = 0, /* Enable cast to/from bool. */
tcache_enabled_true = 1,
tcache_enabled_default = 2
} tcache_enabled_t;
/*
* Read-only information associated with each element of tcache_t's tbins array
* is stored separately, mainly to reduce memory usage.
......@@ -42,9 +60,7 @@ struct tcache_bin_info_s {
};
struct tcache_bin_s {
# ifdef JEMALLOC_STATS
tcache_bin_stats_t tstats;
# endif
int low_water; /* Min # cached since last GC. */
unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
unsigned ncached; /* # of cached objects. */
......@@ -52,12 +68,8 @@ struct tcache_bin_s {
};
struct tcache_s {
# ifdef JEMALLOC_STATS
ql_elm(tcache_t) link; /* Used for aggregating stats. */
# endif
# ifdef JEMALLOC_PROF
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */
# endif
arena_t *arena; /* This thread's arena. */
unsigned ev_cnt; /* Event count since incremental GC. */
unsigned next_gc_bin; /* Next bin to GC. */
......@@ -76,29 +88,11 @@ struct tcache_s {
extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;
extern ssize_t opt_lg_tcache_gc_sweep;
extern tcache_bin_info_t *tcache_bin_info;
/* Map of thread-specific caches. */
#ifndef NO_TLS
extern __thread tcache_t *tcache_tls
JEMALLOC_ATTR(tls_model("initial-exec"));
# define TCACHE_GET() tcache_tls
# define TCACHE_SET(v) do { \
tcache_tls = (tcache_t *)(v); \
pthread_setspecific(tcache_tsd, (void *)(v)); \
} while (0)
#else
# define TCACHE_GET() ((tcache_t *)pthread_getspecific(tcache_tsd))
# define TCACHE_SET(v) do { \
pthread_setspecific(tcache_tsd, (void *)(v)); \
} while (0)
#endif
extern pthread_key_t tcache_tsd;
/*
* Number of tcache bins. There are nbins small-object bins, plus 0 or more
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
* large-object bins.
*/
extern size_t nhbins;
......@@ -106,68 +100,159 @@ extern size_t nhbins;
/* Maximum cached size class. */
extern size_t tcache_maxclass;
/* Number of tcache allocation/deallocation events between incremental GCs. */
extern unsigned tcache_gc_incr;
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache_t *tcache
#endif
);
void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache_t *tcache
#endif
);
tcache_t *tcache_create(arena_t *arena);
size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tcache_t *tcache);
void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
size_t binind);
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache);
void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache);
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void tcache_arena_dissociate(tcache_t *tcache);
tcache_t *tcache_create(arena_t *arena);
void tcache_destroy(tcache_t *tcache);
#ifdef JEMALLOC_STATS
void tcache_thread_cleanup(void *arg);
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
#endif
bool tcache_boot(void);
bool tcache_boot0(void);
bool tcache_boot1(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *)
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)
void tcache_event(tcache_t *tcache);
tcache_t *tcache_get(void);
void tcache_flush(void);
bool tcache_enabled_get(void);
tcache_t *tcache_get(bool create);
void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin);
void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void tcache_dalloc_small(tcache_t *tcache, void *ptr);
void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Map of thread-specific caches. */
malloc_tsd_externs(tcache, tcache_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, tcache, tcache_t *, NULL,
tcache_thread_cleanup)
/* Per thread flag that allows thread caches to be disabled. */
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
malloc_tsd_funcs(JEMALLOC_INLINE, tcache_enabled, tcache_enabled_t,
tcache_enabled_default, malloc_tsd_no_cleanup)
JEMALLOC_INLINE void
tcache_flush(void)
{
tcache_t *tcache;
cassert(config_tcache);
tcache = *tcache_tsd_get();
if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
return;
tcache_destroy(tcache);
tcache = NULL;
tcache_tsd_set(&tcache);
}
JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
tcache_enabled_t tcache_enabled;
cassert(config_tcache);
tcache_enabled = *tcache_enabled_tsd_get();
if (tcache_enabled == tcache_enabled_default) {
tcache_enabled = (tcache_enabled_t)opt_tcache;
tcache_enabled_tsd_set(&tcache_enabled);
}
return ((bool)tcache_enabled);
}
JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
tcache_enabled_t tcache_enabled;
tcache_t *tcache;
cassert(config_tcache);
tcache_enabled = (tcache_enabled_t)enabled;
tcache_enabled_tsd_set(&tcache_enabled);
tcache = *tcache_tsd_get();
if (enabled) {
if (tcache == TCACHE_STATE_DISABLED) {
tcache = NULL;
tcache_tsd_set(&tcache);
}
} else /* disabled */ {
if (tcache > TCACHE_STATE_MAX) {
tcache_destroy(tcache);
tcache = NULL;
}
if (tcache == NULL) {
tcache = TCACHE_STATE_DISABLED;
tcache_tsd_set(&tcache);
}
}
}
JEMALLOC_INLINE tcache_t *
tcache_get(void)
tcache_get(bool create)
{
tcache_t *tcache;
if ((isthreaded & opt_tcache) == false)
if (config_tcache == false)
return (NULL);
if (config_lazy_lock && isthreaded == false)
return (NULL);
tcache = TCACHE_GET();
if ((uintptr_t)tcache <= (uintptr_t)2) {
tcache = *tcache_tsd_get();
if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
if (tcache == TCACHE_STATE_DISABLED)
return (NULL);
if (tcache == NULL) {
tcache = tcache_create(choose_arena());
if (tcache == NULL)
return (NULL);
} else {
if (tcache == (void *)(uintptr_t)1) {
if (create == false) {
/*
* Make a note that an allocator function was
* called after the tcache_thread_cleanup() was
* called.
* Creating a tcache here would cause
* allocation as a side effect of free().
* Ordinarily that would be okay since
* tcache_create() failure is a soft failure
* that doesn't propagate. However, if TLS
* data are freed via free() as in glibc,
* subtle corruption could result from setting
* a TLS variable after its backing memory is
* freed.
*/
TCACHE_SET((uintptr_t)2);
return (NULL);
}
if (tcache_enabled_get() == false) {
tcache_enabled_set(false); /* Memoize. */
return (NULL);
}
return (tcache_create(choose_arena(NULL)));
}
if (tcache == TCACHE_STATE_PURGATORY) {
/*
* Make a note that an allocator function was called
* after tcache_thread_cleanup() was called.
*/
tcache = TCACHE_STATE_REINCARNATED;
tcache_tsd_set(&tcache);
return (NULL);
}
if (tcache == TCACHE_STATE_REINCARNATED)
return (NULL);
not_reached();
}
return (tcache);
......@@ -177,60 +262,13 @@ JEMALLOC_INLINE void
tcache_event(tcache_t *tcache)
{
if (tcache_gc_incr == 0)
if (TCACHE_GC_INCR == 0)
return;
tcache->ev_cnt++;
assert(tcache->ev_cnt <= tcache_gc_incr);
if (tcache->ev_cnt == tcache_gc_incr) {
size_t binind = tcache->next_gc_bin;
tcache_bin_t *tbin = &tcache->tbins[binind];
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
if (tbin->low_water > 0) {
/*
* Flush (ceiling) 3/4 of the objects below the low
* water mark.
*/
if (binind < nbins) {
tcache_bin_flush_small(tbin, binind,
tbin->ncached - tbin->low_water +
(tbin->low_water >> 2)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
} else {
tcache_bin_flush_large(tbin, binind,
tbin->ncached - tbin->low_water +
(tbin->low_water >> 2)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
}
/*
* Reduce fill count by 2X. Limit lg_fill_div such that
* the fill count is always at least 1.
*/
if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1))
>= 1)
tbin->lg_fill_div++;
} else if (tbin->low_water < 0) {
/*
* Increase fill count by 2X. Make sure lg_fill_div
* stays greater than 0.
*/
if (tbin->lg_fill_div > 1)
tbin->lg_fill_div--;
}
tbin->low_water = tbin->ncached;
tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins)
tcache->next_gc_bin = 0;
tcache->ev_cnt = 0;
}
assert(tcache->ev_cnt <= TCACHE_GC_INCR);
if (tcache->ev_cnt == TCACHE_GC_INCR)
tcache_event_hard(tcache);
}
JEMALLOC_INLINE void *
......@@ -257,7 +295,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
tcache_bin_t *tbin;
binind = SMALL_SIZE2BIN(size);
assert(binind < nbins);
assert(binind < NBINS);
tbin = &tcache->tbins[binind];
ret = tcache_alloc_easy(tbin);
if (ret == NULL) {
......@@ -265,24 +303,29 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
if (ret == NULL)
return (NULL);
}
assert(arena_salloc(ret) == arena_bin_info[binind].reg_size);
assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);
if (zero == false) {
#ifdef JEMALLOC_FILL
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
#endif
} else
if (config_fill) {
if (opt_junk) {
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
} else if (opt_zero)
memset(ret, 0, size);
}
} else {
if (config_fill && opt_junk) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
#ifdef JEMALLOC_STATS
tbin->tstats.nrequests++;
#endif
#ifdef JEMALLOC_PROF
tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
#endif
if (config_stats)
tbin->tstats.nrequests++;
if (config_prof)
tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
tcache_event(tcache);
return (ret);
}
......@@ -296,7 +339,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
size = PAGE_CEILING(size);
assert(size <= tcache_maxclass);
binind = nbins + (size >> PAGE_SHIFT) - 1;
binind = NBINS + (size >> LG_PAGE) - 1;
assert(binind < nhbins);
tbin = &tcache->tbins[binind];
ret = tcache_alloc_easy(tbin);
......@@ -309,28 +352,30 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
if (ret == NULL)
return (NULL);
} else {
#ifdef JEMALLOC_PROF
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
PAGE_SHIFT);
chunk->map[pageind-map_bias].bits &= ~CHUNK_MAP_CLASS_MASK;
#endif
if (config_prof && prof_promote && size == PAGE) {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(ret);
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
LG_PAGE);
arena_mapbits_large_binind_set(chunk, pageind,
BININD_INVALID);
}
if (zero == false) {
#ifdef JEMALLOC_FILL
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
#endif
} else
if (config_fill) {
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
}
} else {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
#ifdef JEMALLOC_STATS
tbin->tstats.nrequests++;
#endif
#ifdef JEMALLOC_PROF
tcache->prof_accumbytes += size;
#endif
if (config_stats)
tbin->tstats.nrequests++;
if (config_prof)
tcache->prof_accumbytes += size;
}
tcache_event(tcache);
......@@ -338,45 +383,21 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
}
JEMALLOC_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr)
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
arena_t *arena;
arena_chunk_t *chunk;
arena_run_t *run;
arena_bin_t *bin;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
size_t pageind, binind;
arena_chunk_map_t *mapelm;
assert(arena_salloc(ptr) <= small_maxclass);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = chunk->arena;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
mapelm = &chunk->map[pageind-map_bias];
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
(mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
dassert(run->magic == ARENA_RUN_MAGIC);
bin = run->bin;
binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
sizeof(arena_bin_t);
assert(binind < nbins);
#ifdef JEMALLOC_FILL
if (opt_junk)
memset(ptr, 0x5a, arena_bin_info[binind].reg_size);
#endif
assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
if (config_fill && opt_junk)
arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
if (tbin->ncached == tbin_info->ncached_max) {
tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
1)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
1), tcache);
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->avail[tbin->ncached] = ptr;
......@@ -388,35 +409,24 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr)
JEMALLOC_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
arena_t *arena;
arena_chunk_t *chunk;
size_t pageind, binind;
size_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
assert((size & PAGE_MASK) == 0);
assert(arena_salloc(ptr) > small_maxclass);
assert(arena_salloc(ptr) <= tcache_maxclass);
assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
assert(tcache_salloc(ptr) <= tcache_maxclass);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = chunk->arena;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
binind = nbins + (size >> PAGE_SHIFT) - 1;
binind = NBINS + (size >> LG_PAGE) - 1;
#ifdef JEMALLOC_FILL
if (opt_junk)
if (config_fill && opt_junk)
memset(ptr, 0x5a, size);
#endif
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
if (tbin->ncached == tbin_info->ncached_max) {
tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
1)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
1), tcache);
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->avail[tbin->ncached] = ptr;
......@@ -428,4 +438,3 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_TCACHE */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/* Maximum number of malloc_tsd users with cleanup functions. */
#define MALLOC_TSD_CLEANUPS_MAX 8
typedef bool (*malloc_tsd_cleanup_t)(void);
/*
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
* are four macros that support (at least) three use cases: file-private,
* library-private, and library-private inlined. Following is an example
* library-private tsd variable:
*
* In example.h:
* typedef struct {
* int x;
* int y;
* } example_t;
* #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
* malloc_tsd_protos(, example, example_t *)
* malloc_tsd_externs(example, example_t *)
* In example.c:
* malloc_tsd_data(, example, example_t *, EX_INITIALIZER)
* malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER,
* example_tsd_cleanup)
*
* The result is a set of generated functions, e.g.:
*
* bool example_tsd_boot(void) {...}
* example_t **example_tsd_get() {...}
* void example_tsd_set(example_t **val) {...}
*
* Note that all of the functions deal in terms of (a_type *) rather than
* (a_type) so that it is possible to support non-pointer types (unlike
* pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is
* cast to (void *). This means that the cleanup function needs to cast *and*
* dereference the function argument, e.g.:
*
* void
* example_tsd_cleanup(void *arg)
* {
* example_t *example = *(example_t **)arg;
*
* [...]
* if ([want the cleanup function to be called again]) {
* example_tsd_set(&example);
* }
* }
*
* If example_tsd_set() is called within example_tsd_cleanup(), it will be
* called again. This is similar to how pthreads TSD destruction works, except
* that pthreads only calls the cleanup function again if the value was set to
* non-NULL.
*/
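/*
 * Illustrative sketch of a second instantiation using a non-pointer value
 * type, which the comment above notes is possible.  The "counter" name and
 * its initializer are hypothetical, not part of jemalloc.
 */
/* In counter.h: */
#define COUNTER_INITIALIZER 0
malloc_tsd_protos(, counter, unsigned)
malloc_tsd_externs(counter, unsigned)
/* In counter.c: */
malloc_tsd_data(, counter, unsigned, COUNTER_INITIALIZER)
malloc_tsd_funcs(, counter, unsigned, COUNTER_INITIALIZER,
    malloc_tsd_no_cleanup)

/* Usage, after counter_tsd_boot() has been called once at startup: */
static void
counter_bump(void)
{
	unsigned *c = counter_tsd_get();

	/*
	 * The getter returns a pointer to this thread's slot, so writes
	 * through it persist without a separate counter_tsd_set() call.
	 */
	(*c)++;
}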
/* malloc_tsd_protos(). */
#define malloc_tsd_protos(a_attr, a_name, a_type) \
a_attr bool \
a_name##_tsd_boot(void); \
a_attr a_type * \
a_name##_tsd_get(void); \
a_attr void \
a_name##_tsd_set(a_type *val);
/* malloc_tsd_externs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##_tls; \
extern __thread bool a_name##_initialized; \
extern bool a_name##_booted;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##_tls; \
extern pthread_key_t a_name##_tsd; \
extern bool a_name##_booted;
#elif (defined(_WIN32))
#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##_tsd; \
extern bool a_name##_booted;
#else
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##_tsd; \
extern bool a_name##_booted;
#endif
/* malloc_tsd_data(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##_tls = a_initializer; \
a_attr __thread bool JEMALLOC_TLS_MODEL \
a_name##_initialized = false; \
a_attr bool a_name##_booted = false;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##_tls = a_initializer; \
a_attr pthread_key_t a_name##_tsd; \
a_attr bool a_name##_booted = false;
#elif (defined(_WIN32))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr DWORD a_name##_tsd; \
a_attr bool a_name##_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##_tsd; \
a_attr bool a_name##_booted = false;
#endif
/* malloc_tsd_funcs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_cleanup_wrapper(void) \
{ \
\
if (a_name##_initialized) { \
a_name##_initialized = false; \
a_cleanup(&a_name##_tls); \
} \
return (a_name##_initialized); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
&a_name##_tsd_cleanup_wrapper); \
} \
a_name##_booted = true; \
return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
\
assert(a_name##_booted); \
return (&a_name##_tls); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
\
assert(a_name##_booted); \
a_name##_tls = (*val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
a_name##_initialized = true; \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
if (a_cleanup != malloc_tsd_no_cleanup) { \
if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \
return (true); \
} \
a_name##_booted = true; \
return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
\
assert(a_name##_booted); \
return (&a_name##_tls); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
\
assert(a_name##_booted); \
a_name##_tls = (*val); \
if (a_cleanup != malloc_tsd_no_cleanup) { \
if (pthread_setspecific(a_name##_tsd, \
(void *)(&a_name##_tls))) { \
malloc_write("<jemalloc>: Error" \
" setting TSD for "#a_name"\n"); \
if (opt_abort) \
abort(); \
} \
} \
}
#elif (defined(_WIN32))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Data structure. */ \
typedef struct { \
bool initialized; \
a_type val; \
} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_cleanup_wrapper(void) \
{ \
a_name##_tsd_wrapper_t *wrapper; \
\
wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \
if (wrapper == NULL) \
return (false); \
if (a_cleanup != malloc_tsd_no_cleanup && \
wrapper->initialized) { \
a_type val = wrapper->val; \
a_type tsd_static_data = a_initializer; \
wrapper->initialized = false; \
wrapper->val = tsd_static_data; \
a_cleanup(&val); \
if (wrapper->initialized) { \
/* Trigger another cleanup round. */ \
return (true); \
} \
} \
malloc_tsd_dalloc(wrapper); \
return (false); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
a_name##_tsd = TlsAlloc(); \
if (a_name##_tsd == TLS_OUT_OF_INDEXES) \
return (true); \
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
&a_name##_tsd_cleanup_wrapper); \
} \
a_name##_booted = true; \
return (false); \
} \
/* Get/set. */ \
a_attr a_name##_tsd_wrapper_t * \
a_name##_tsd_get_wrapper(void) \
{ \
a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
TlsGetValue(a_name##_tsd); \
\
if (wrapper == NULL) { \
wrapper = (a_name##_tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} else { \
static a_type tsd_static_data = a_initializer; \
wrapper->initialized = false; \
wrapper->val = tsd_static_data; \
} \
if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
" TSD for "#a_name"\n"); \
abort(); \
} \
} \
return (wrapper); \
} \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
a_name##_tsd_wrapper_t *wrapper; \
\
assert(a_name##_booted); \
wrapper = a_name##_tsd_get_wrapper(); \
return (&wrapper->val); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
a_name##_tsd_wrapper_t *wrapper; \
\
assert(a_name##_booted); \
wrapper = a_name##_tsd_get_wrapper(); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
}
#else
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Data structure. */ \
typedef struct { \
bool initialized; \
a_type val; \
} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr void \
a_name##_tsd_cleanup_wrapper(void *arg) \
{ \
a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\
\
if (a_cleanup != malloc_tsd_no_cleanup && \
wrapper->initialized) { \
wrapper->initialized = false; \
a_cleanup(&wrapper->val); \
if (wrapper->initialized) { \
/* Trigger another cleanup round. */ \
if (pthread_setspecific(a_name##_tsd, \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error" \
" setting TSD for "#a_name"\n"); \
if (opt_abort) \
abort(); \
} \
return; \
} \
} \
malloc_tsd_dalloc(wrapper); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
if (pthread_key_create(&a_name##_tsd, \
a_name##_tsd_cleanup_wrapper) != 0) \
return (true); \
a_name##_booted = true; \
return (false); \
} \
/* Get/set. */ \
a_attr a_name##_tsd_wrapper_t * \
a_name##_tsd_get_wrapper(void) \
{ \
a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
pthread_getspecific(a_name##_tsd); \
\
if (wrapper == NULL) { \
wrapper = (a_name##_tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} else { \
static a_type tsd_static_data = a_initializer; \
wrapper->initialized = false; \
wrapper->val = tsd_static_data; \
} \
if (pthread_setspecific(a_name##_tsd, \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
" TSD for "#a_name"\n"); \
abort(); \
} \
} \
return (wrapper); \
} \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
a_name##_tsd_wrapper_t *wrapper; \
\
assert(a_name##_booted); \
wrapper = a_name##_tsd_get_wrapper(); \
return (&wrapper->val); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
a_name##_tsd_wrapper_t *wrapper; \
\
assert(a_name##_booted); \
wrapper = a_name##_tsd_get_wrapper(); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
}
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *);
void malloc_tsd_cleanup_register(bool (*f)(void));
void malloc_tsd_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
/*
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
* large enough for all possible uses within jemalloc.
*/
#define MALLOC_PRINTF_BUFSIZE 4096
/*
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
*/
#define JEMALLOC_CONCAT(...) __VA_ARGS__
/*
* Silence compiler warnings due to uninitialized values. This is used
* wherever the compiler fails to recognize that the variable is never used
* uninitialized.
*/
#ifdef JEMALLOC_CC_SILENCE
# define JEMALLOC_CC_SILENCE_INIT(v) = v
#else
# define JEMALLOC_CC_SILENCE_INIT(v)
#endif
/*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
#define assert(e) do { \
if (config_debug && !(e)) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
if ((c) == false) \
assert(false); \
} while (0)
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#ifndef not_implemented
#define not_implemented() do { \
if (config_debug) { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#define assert_not_implemented(e) do { \
if (config_debug && !(e)) \
not_implemented(); \
} while (0)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int buferror(char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base);
void malloc_write(const char *s);
/*
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
int malloc_vsnprintf(char *str, size_t size, const char *format,
va_list ap);
int malloc_snprintf(char *str, size_t size, const char *format, ...)
JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap);
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_printf(const char *format, ...)
JEMALLOC_ATTR(format(printf, 1, 2));
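/*
 * Illustrative usage sketch (hypothetical caller, not jemalloc code): format
 * into a small stack buffer without dragging in floating point, then emit it
 * through malloc_write().
 */
static void
report_nchunks(unsigned nchunks)
{
	char buf[128];

	malloc_snprintf(buf, sizeof(buf),
	    "<jemalloc>: %u chunks currently allocated\n", nchunks);
	malloc_write(buf);
}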
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
size_t pow2_ceil(size_t x);
void malloc_write(const char *s);
void set_errno(int errnum);
int get_errno(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
x |= x >> 32;
#endif
x++;
return (x);
}
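/*
 * Illustrative sanity checks (not jemalloc code) for pow2_ceil(): powers of
 * two map to themselves, everything else rounds up.
 */
static void
pow2_ceil_examples(void)
{

	assert(pow2_ceil(1) == 1);
	assert(pow2_ceil(13) == 16);
	assert(pow2_ceil(4096) == 4096);
	assert(pow2_ceil(4097) == 8192);
}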
/* Sets error code */
JEMALLOC_INLINE void
set_errno(int errnum)
{
#ifdef _WIN32
SetLastError(errnum);
#else
errno = errnum;
#endif
}
/* Get last error code */
JEMALLOC_INLINE int
get_errno(void)
{
#ifdef _WIN32
return (GetLastError());
#else
return (errno);
#endif
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/