Commit 1f72ec7d authored by flowly, committed by GitHub

Merge pull request #1 from antirez/unstable

update to upstream
parents dfc98dcc f917e0da
@@ -21,21 +21,27 @@ struct ctl_named_node_s {
	/* If (nchildren == 0), this is a terminal node. */
	unsigned		nchildren;
	const ctl_node_t	*children;
-	int			(*ctl)(const size_t *, size_t, void *, size_t *,
-			    void *, size_t);
+	int			(*ctl)(tsd_t *, const size_t *, size_t, void *,
+			    size_t *, void *, size_t);
};

struct ctl_indexed_node_s {
	struct ctl_node_s	node;
-	const ctl_named_node_t	*(*index)(const size_t *, size_t, size_t);
+	const ctl_named_node_t	*(*index)(tsdn_t *, const size_t *, size_t,
+			    size_t);
};

struct ctl_arena_stats_s {
	bool			initialized;
	unsigned		nthreads;
	const char		*dss;
+	ssize_t			lg_dirty_mult;
+	ssize_t			decay_time;
	size_t			pactive;
	size_t			pdirty;
+
+	/* The remainder are only populated if config_stats is true. */
+
	arena_stats_t		astats;

	/* Aggregate stats for small size classes, based on bin stats. */
@@ -46,22 +52,16 @@ struct ctl_arena_stats_s {
	malloc_bin_stats_t	bstats[NBINS];
	malloc_large_stats_t	*lstats;	/* nlclasses elements. */
+	malloc_huge_stats_t	*hstats;	/* nhclasses elements. */
};
struct ctl_stats_s {
	size_t			allocated;
	size_t			active;
+	size_t			metadata;
+	size_t			resident;
	size_t			mapped;
-	struct {
-		size_t		current;	/* stats_chunks.curchunks */
-		uint64_t	total;		/* stats_chunks.nchunks */
-		size_t		high;		/* stats_chunks.highchunks */
-	} chunks;
-	struct {
-		size_t		allocated;	/* huge_allocated */
-		uint64_t	nmalloc;	/* huge_nmalloc */
-		uint64_t	ndalloc;	/* huge_ndalloc */
-	} huge;
+	size_t			retained;
	unsigned		narenas;
	ctl_arena_stats_t	*arenas;	/* (narenas + 1) elements. */
};
@@ -70,16 +70,17 @@ struct ctl_stats_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

-int	ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
-    size_t newlen);
-int	ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
-int	ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
-    void *newp, size_t newlen);
+int	ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen);
+int	ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
+    size_t *miblenp);
+int	ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen);
bool	ctl_boot(void);
-void	ctl_prefork(void);
-void	ctl_postfork_parent(void);
-void	ctl_postfork_child(void);
+void	ctl_prefork(tsdn_t *tsdn);
+void	ctl_postfork_parent(tsdn_t *tsdn);
+void	ctl_postfork_child(tsdn_t *tsdn);

#define	xmallctl(name, oldp, oldlenp, newp, newlen) do {		\
	if (je_mallctl(name, oldp, oldlenp, newp, newlen)		\
......
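For orientation, here is a minimal sketch of how this ctl machinery is reached from application code, via the public mallctl() wrapper that the xmallctl() macro above builds on, rather than the internal ctl_*() functions themselves. It assumes a standard jemalloc build; with a configured function prefix (as in Redis's bundled copy) the symbol is je_mallctl instead. "arenas.narenas" is a stock mallctl node name.

/*
 * Sketch: read a value through the public mallctl interface.
 * Assumes <jemalloc/jemalloc.h> from a standard (unprefixed) build.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned narenas;
	size_t sz = sizeof(narenas);

	/* Read-only query: no new value, so newp = NULL and newlen = 0. */
	if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) != 0)
		return (1);
	printf("narenas: %u\n", narenas);
	return (0);
}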
@@ -7,25 +7,67 @@ typedef struct extent_node_s extent_node_t;
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

-/* Tree of extents. */
+/* Tree of extents.  Use accessor functions for en_* fields. */
struct extent_node_s {
-	/* Linkage for the size/address-ordered tree. */
-	rb_node(extent_node_t)	link_szad;
-
-	/* Linkage for the address-ordered tree. */
-	rb_node(extent_node_t)	link_ad;
+	/* Arena from which this extent came, if any. */
+	arena_t			*en_arena;
+
+	/* Pointer to the extent that this tree node is responsible for. */
+	void			*en_addr;
+
+	/* Total region size. */
+	size_t			en_size;
/*
* Serial number (potentially non-unique).
*
* In principle serial numbers can wrap around on 32-bit systems if
* JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
* back on address comparison for equal serial numbers, stable (if
* imperfect) ordering is maintained.
*
* Serial numbers may not be unique even in the absence of wrap-around,
* e.g. when splitting an extent and assigning the same serial number to
* both resulting adjacent extents.
*/
size_t en_sn;
/*
* The zeroed flag is used by chunk recycling code to track whether
* memory is zero-filled.
*/
bool en_zeroed;
/*
* True if physical memory is committed to the extent, whether
* explicitly or implicitly as on a system that overcommits and
* satisfies physical memory needs on demand via soft page faults.
*/
bool en_committed;
/*
* The achunk flag is used to validate that huge allocation lookups
* don't return arena chunks.
*/
bool en_achunk;
	/* Profile counters, used for huge objects. */
-	prof_ctx_t		*prof_ctx;
-
-	/* Pointer to the extent that this tree node is responsible for. */
-	void			*addr;
-
-	/* Total region size. */
-	size_t			size;
-
-	/* True if zero-filled; used by chunk recycling code. */
-	bool			zeroed;
+	prof_tctx_t		*en_prof_tctx;
+
+	/* Linkage for arena's runs_dirty and chunks_cache rings. */
+	arena_runs_dirty_link_t	rd;
+	qr(extent_node_t)	cc_link;
+
+	union {
+		/* Linkage for the size/sn/address-ordered tree. */
+		rb_node(extent_node_t)	szsnad_link;
+
+		/* Linkage for arena's achunks, huge, and node_cache lists. */
+		ql_elm(extent_node_t)	ql_link;
+	};
+
+	/* Linkage for the address-ordered tree. */
+	rb_node(extent_node_t)	ad_link;
};
typedef rb_tree(extent_node_t) extent_tree_t;
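The serial-number comment above implies the ordering used by the szsnad tree: size first, then serial number, then address as the final tie-breaker so that equal (or wrapped) serial numbers still yield a stable order. A hedged sketch of such a comparator follows, written against the accessors declared later in this header and the usual (-1, 0, 1) contract the rb macros expect; it is an illustration, not the exact function jemalloc ships.

/*
 * Illustrative size/sn/address comparator; hypothetical helper, the in-tree
 * comparator lives in the arena/chunk code.
 */
static int
extent_szsnad_comp_sketch(const extent_node_t *a, const extent_node_t *b)
{
	int ret;
	size_t a_size = extent_node_size_get(a);
	size_t b_size = extent_node_size_get(b);

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0) {
		size_t a_sn = extent_node_sn_get(a);
		size_t b_sn = extent_node_sn_get(b);

		ret = (a_sn > b_sn) - (a_sn < b_sn);
		if (ret == 0) {
			/* Fall back on address for a stable total order. */
			uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
			uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);

			ret = (a_addr > b_addr) - (a_addr < b_addr);
		}
	}
	return (ret);
}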
@@ -33,7 +75,7 @@ typedef rb_tree(extent_node_t) extent_tree_t;
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

-rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
+rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)

@@ -41,6 +83,188 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *extent_node_arena_get(const extent_node_t *node);
void *extent_node_addr_get(const extent_node_t *node);
size_t extent_node_size_get(const extent_node_t *node);
size_t extent_node_sn_get(const extent_node_t *node);
bool extent_node_zeroed_get(const extent_node_t *node);
bool extent_node_committed_get(const extent_node_t *node);
bool extent_node_achunk_get(const extent_node_t *node);
prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
void extent_node_arena_set(extent_node_t *node, arena_t *arena);
void extent_node_addr_set(extent_node_t *node, void *addr);
void extent_node_size_set(extent_node_t *node, size_t size);
void extent_node_sn_set(extent_node_t *node, size_t sn);
void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void extent_node_committed_set(extent_node_t *node, bool committed);
void extent_node_achunk_set(extent_node_t *node, bool achunk);
void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
size_t size, size_t sn, bool zeroed, bool committed);
void extent_node_dirty_linkage_init(extent_node_t *node);
void extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
void extent_node_dirty_remove(extent_node_t *node);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
JEMALLOC_INLINE arena_t *
extent_node_arena_get(const extent_node_t *node)
{
return (node->en_arena);
}
JEMALLOC_INLINE void *
extent_node_addr_get(const extent_node_t *node)
{
return (node->en_addr);
}
JEMALLOC_INLINE size_t
extent_node_size_get(const extent_node_t *node)
{
return (node->en_size);
}
JEMALLOC_INLINE size_t
extent_node_sn_get(const extent_node_t *node)
{
return (node->en_sn);
}
JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
{
return (node->en_zeroed);
}
JEMALLOC_INLINE bool
extent_node_committed_get(const extent_node_t *node)
{
assert(!node->en_achunk);
return (node->en_committed);
}
JEMALLOC_INLINE bool
extent_node_achunk_get(const extent_node_t *node)
{
return (node->en_achunk);
}
JEMALLOC_INLINE prof_tctx_t *
extent_node_prof_tctx_get(const extent_node_t *node)
{
return (node->en_prof_tctx);
}
JEMALLOC_INLINE void
extent_node_arena_set(extent_node_t *node, arena_t *arena)
{
node->en_arena = arena;
}
JEMALLOC_INLINE void
extent_node_addr_set(extent_node_t *node, void *addr)
{
node->en_addr = addr;
}
JEMALLOC_INLINE void
extent_node_size_set(extent_node_t *node, size_t size)
{
node->en_size = size;
}
JEMALLOC_INLINE void
extent_node_sn_set(extent_node_t *node, size_t sn)
{
node->en_sn = sn;
}
JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{
node->en_zeroed = zeroed;
}
JEMALLOC_INLINE void
extent_node_committed_set(extent_node_t *node, bool committed)
{
node->en_committed = committed;
}
JEMALLOC_INLINE void
extent_node_achunk_set(extent_node_t *node, bool achunk)
{
node->en_achunk = achunk;
}
JEMALLOC_INLINE void
extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
{
node->en_prof_tctx = tctx;
}
JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
size_t sn, bool zeroed, bool committed)
{
extent_node_arena_set(node, arena);
extent_node_addr_set(node, addr);
extent_node_size_set(node, size);
extent_node_sn_set(node, sn);
extent_node_zeroed_set(node, zeroed);
extent_node_committed_set(node, committed);
extent_node_achunk_set(node, false);
if (config_prof)
extent_node_prof_tctx_set(node, NULL);
}
JEMALLOC_INLINE void
extent_node_dirty_linkage_init(extent_node_t *node)
{
qr_new(&node->rd, rd_link);
qr_new(node, cc_link);
}
JEMALLOC_INLINE void
extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
{
qr_meld(runs_dirty, &node->rd, rd_link);
qr_meld(chunks_dirty, node, cc_link);
}
JEMALLOC_INLINE void
extent_node_dirty_remove(extent_node_t *node)
{
qr_remove(&node->rd, rd_link);
qr_remove(node, cc_link);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
/*
* The following hash function is based on MurmurHash3, placed into the public
* domain by Austin Appleby. See https://github.com/aappleby/smhasher for
* details.
*/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
@@ -14,56 +19,338 @@
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
-uint64_t	hash(const void *key, size_t len, uint64_t seed);
+uint32_t	hash_x86_32(const void *key, int len, uint32_t seed);
void hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2]);
void hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2]);
void hash(const void *key, size_t len, const uint32_t seed,
size_t r_hash[2]);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
-/*
- * The following hash function is based on MurmurHash64A(), placed into the
- * public domain by Austin Appleby.  See http://murmurhash.googlepages.com/ for
- * details.
- */
-JEMALLOC_INLINE uint64_t
-hash(const void *key, size_t len, uint64_t seed)
-{
-	const uint64_t m = UINT64_C(0xc6a4a7935bd1e995);
-	const int r = 47;
-	uint64_t h = seed ^ (len * m);
-	const uint64_t *data = (const uint64_t *)key;
-	const uint64_t *end = data + (len/8);
-	const unsigned char *data2;
-
-	assert(((uintptr_t)key & 0x7) == 0);
-
-	while(data != end) {
-		uint64_t k = *data++;
-
-		k *= m;
-		k ^= k >> r;
-		k *= m;
-
-		h ^= k;
-		h *= m;
-	}
-
-	data2 = (const unsigned char *)data;
-	switch(len & 7) {
-	case 7: h ^= ((uint64_t)(data2[6])) << 48;
-	case 6: h ^= ((uint64_t)(data2[5])) << 40;
-	case 5: h ^= ((uint64_t)(data2[4])) << 32;
-	case 4: h ^= ((uint64_t)(data2[3])) << 24;
-	case 3: h ^= ((uint64_t)(data2[2])) << 16;
-	case 2: h ^= ((uint64_t)(data2[1])) << 8;
-	case 1: h ^= ((uint64_t)(data2[0]));
-		h *= m;
-	}
-
-	h ^= h >> r;
-	h *= m;
-	h ^= h >> r;
-
-	return (h);
-}
+/******************************************************************************/
+/* Internal implementation. */
+JEMALLOC_INLINE uint32_t
+hash_rotl_32(uint32_t x, int8_t r)
+{
+
+	return ((x << r) | (x >> (32 - r)));
+}
+
+JEMALLOC_INLINE uint64_t
+hash_rotl_64(uint64_t x, int8_t r)
+{
+
+	return ((x << r) | (x >> (64 - r)));
+}
+
+JEMALLOC_INLINE uint32_t
+hash_get_block_32(const uint32_t *p, int i)
+{
+
+	/* Handle unaligned read. */
+	if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
+		uint32_t ret;
+
+		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
+		return (ret);
+	}
+
+	return (p[i]);
+}
+
+JEMALLOC_INLINE uint64_t
+hash_get_block_64(const uint64_t *p, int i)
+{
+
+	/* Handle unaligned read. */
+	if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
+		uint64_t ret;
+
+		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
+		return (ret);
+	}
+
+	return (p[i]);
+}
+
+JEMALLOC_INLINE uint32_t
+hash_fmix_32(uint32_t h)
+{
+
+	h ^= h >> 16;
+	h *= 0x85ebca6b;
+	h ^= h >> 13;
+	h *= 0xc2b2ae35;
+	h ^= h >> 16;
+
+	return (h);
+}
JEMALLOC_INLINE uint64_t
hash_fmix_64(uint64_t k)
{
k ^= k >> 33;
k *= KQU(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= KQU(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return (k);
}
JEMALLOC_INLINE uint32_t
hash_x86_32(const void *key, int len, uint32_t seed)
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 4;
uint32_t h1 = seed;
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i);
k1 *= c1;
k1 = hash_rotl_32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = hash_rotl_32(h1, 13);
h1 = h1*5 + 0xe6546b64;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
uint32_t k1 = 0;
switch (len & 3) {
case 3: k1 ^= tail[2] << 16;
case 2: k1 ^= tail[1] << 8;
case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len;
h1 = hash_fmix_32(h1);
return (h1);
}
UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2])
{
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
uint32_t h1 = seed;
uint32_t h2 = seed;
uint32_t h3 = seed;
uint32_t h4 = seed;
const uint32_t c1 = 0x239b961b;
const uint32_t c2 = 0xab0e9789;
const uint32_t c3 = 0x38b34ae5;
const uint32_t c4 = 0xa1e38b93;
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
h1 = hash_rotl_32(h1, 19); h1 += h2;
h1 = h1*5 + 0x561ccd1b;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
h2 = hash_rotl_32(h2, 17); h2 += h3;
h2 = h2*5 + 0x0bcaa747;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
h3 = hash_rotl_32(h3, 15); h3 += h4;
h3 = h3*5 + 0x96cd1c35;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
h4 = hash_rotl_32(h4, 13); h4 += h1;
h4 = h4*5 + 0x32ac3b17;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
uint32_t k1 = 0;
uint32_t k2 = 0;
uint32_t k3 = 0;
uint32_t k4 = 0;
switch (len & 15) {
case 15: k4 ^= tail[14] << 16;
case 14: k4 ^= tail[13] << 8;
case 13: k4 ^= tail[12] << 0;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
case 12: k3 ^= tail[11] << 24;
case 11: k3 ^= tail[10] << 16;
case 10: k3 ^= tail[ 9] << 8;
case 9: k3 ^= tail[ 8] << 0;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
case 8: k2 ^= tail[ 7] << 24;
case 7: k2 ^= tail[ 6] << 16;
case 6: k2 ^= tail[ 5] << 8;
case 5: k2 ^= tail[ 4] << 0;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
case 4: k1 ^= tail[ 3] << 24;
case 3: k1 ^= tail[ 2] << 16;
case 2: k1 ^= tail[ 1] << 8;
case 1: k1 ^= tail[ 0] << 0;
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
h1 = hash_fmix_32(h1);
h2 = hash_fmix_32(h2);
h3 = hash_fmix_32(h3);
h4 = hash_fmix_32(h4);
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
r_out[0] = (((uint64_t) h2) << 32) | h1;
r_out[1] = (((uint64_t) h4) << 32) | h3;
}
UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2])
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
uint64_t h1 = seed;
uint64_t h2 = seed;
const uint64_t c1 = KQU(0x87c37b91114253d5);
const uint64_t c2 = KQU(0x4cf5ad432745937f);
/* body */
{
const uint64_t *blocks = (const uint64_t *) (data);
int i;
for (i = 0; i < nblocks; i++) {
uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
h1 = hash_rotl_64(h1, 27); h1 += h2;
h1 = h1*5 + 0x52dce729;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
h2 = hash_rotl_64(h2, 31); h2 += h1;
h2 = h2*5 + 0x38495ab5;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
uint64_t k1 = 0;
uint64_t k2 = 0;
switch (len & 15) {
case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len; h2 ^= len;
h1 += h2;
h2 += h1;
h1 = hash_fmix_64(h1);
h2 = hash_fmix_64(h2);
h1 += h2;
h2 += h1;
r_out[0] = h1;
r_out[1] = h2;
}
/******************************************************************************/
/* API. */
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
#else
{
uint64_t hashes[2];
hash_x86_128(key, (int)len, seed, hashes);
r_hash[0] = (size_t)hashes[0];
r_hash[1] = (size_t)hashes[1];
}
#endif
}
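As a quick illustration of the public face of this header, here is a hedged sketch of calling the API-level hash() wrapper above, in the way jemalloc's own consumers (such as ckh) use it: two size_t output words, one 32-bit seed. It assumes compilation inside the jemalloc internal build, where jemalloc/internal/jemalloc_internal.h supplies these inlines; it is not a standalone program, and the seed value and helper name are arbitrary.

/*
 * Sketch: hash a string to a bucket index using the hash() API above.
 * Assumes the jemalloc internal headers are available (e.g. in a unit test).
 */
#include "jemalloc/internal/jemalloc_internal.h"

static size_t
hash_string_to_bucket(const char *s, size_t nbuckets)
{
	size_t r_hash[2];

	/* 0x94122f33 is an arbitrary fixed seed for this illustration. */
	hash(s, strlen(s), 0x94122f33U, r_hash);

	/* Either output word is usable; fold it onto the table size. */
	return (r_hash[0] % nbuckets);
}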
#endif

#endif /* JEMALLOC_H_INLINES */
......
@@ -9,28 +9,23 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

-/* Huge allocation statistics. */
-extern uint64_t		huge_nmalloc;
-extern uint64_t		huge_ndalloc;
-extern size_t		huge_allocated;
-
-/* Protects chunk-related data structures. */
-extern malloc_mutex_t	huge_mtx;
-
-void	*huge_malloc(size_t size, bool zero);
-void	*huge_palloc(size_t size, size_t alignment, bool zero);
-void	*huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
-    size_t extra);
-void	*huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache_dalloc);
-void	huge_dalloc(void *ptr, bool unmap);
-size_t	huge_salloc(const void *ptr);
-prof_ctx_t	*huge_prof_ctx_get(const void *ptr);
-void	huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
-bool	huge_boot(void);
-void	huge_prefork(void);
-void	huge_postfork_parent(void);
-void	huge_postfork_child(void);
+void	*huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
+void	*huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
+    size_t alignment, bool zero);
+bool	huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
+    size_t usize_min, size_t usize_max, bool zero);
+void	*huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
+    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
+#ifdef JEMALLOC_JET
+typedef void (huge_dalloc_junk_t)(void *, size_t);
+extern huge_dalloc_junk_t *huge_dalloc_junk;
+#endif
+void	huge_dalloc(tsdn_t *tsdn, void *ptr);
+arena_t	*huge_aalloc(const void *ptr);
+size_t	huge_salloc(tsdn_t *tsdn, const void *ptr);
+prof_tctx_t	*huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
+void	huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
+void	huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
......
#ifndef JEMALLOC_INTERNAL_H
#define	JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# define ENOENT ERROR_PATH_NOT_FOUND
# define EINVAL ERROR_BAD_ARGUMENTS
# define EAGAIN ERROR_OUTOFMEMORY
# define EPERM ERROR_WRITE_FAULT
# define EFAULT ERROR_INVALID_ADDRESS
# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
# undef ERANGE
# define ERANGE ERROR_INVALID_DATA
#else
# include <sys/param.h>
# include <sys/mman.h>
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# include <sys/uio.h>
# include <pthread.h>
# include <errno.h>
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
# pragma warning(disable: 4996)
#else
# include <unistd.h>
#endif
#include <fcntl.h>
-#define JEMALLOC_NO_DEMANGLE
-#include "../jemalloc@install_suffix@.h"
+#include "jemalloc_internal_defs.h"
+#include "jemalloc/internal/jemalloc_internal_decls.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

-#ifdef JEMALLOC_VALGRIND
-#include <valgrind/valgrind.h>
-#include <valgrind/memcheck.h>
-#endif
-
-#include "jemalloc/internal/private_namespace.h"
-
-#ifdef JEMALLOC_CC_SILENCE
-#define UNUSED JEMALLOC_ATTR(unused)
-#else
-#define UNUSED
-#endif
+#define JEMALLOC_NO_DEMANGLE
+#ifdef JEMALLOC_JET
+#  define JEMALLOC_N(n) jet_##n
+#  include "jemalloc/internal/public_namespace.h"
+#  define JEMALLOC_NO_RENAME
+#  include "../jemalloc@install_suffix@.h"
+#  undef JEMALLOC_NO_RENAME
+#else
+#  define JEMALLOC_N(n) @private_namespace@##n
+#  include "../jemalloc@install_suffix@.h"
+#endif
+#include "jemalloc/internal/private_namespace.h"
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
@@ -81,7 +28,7 @@ static const bool config_debug =
    false
#endif
    ;
-static const bool config_dss =
+static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
@@ -102,6 +49,7 @@ static const bool config_lazy_lock =
    false
#endif
    ;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
@@ -123,8 +71,8 @@ static const bool config_prof_libunwind =
    false
#endif
    ;
-static const bool config_mremap =
-#ifdef JEMALLOC_MREMAP
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
    true
#else
    false
@@ -186,6 +134,17 @@ static const bool config_ivsalloc =
    false
#endif
    ;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
true
#else
false
#endif
;
#ifdef JEMALLOC_C11ATOMICS
#include <stdatomic.h>
#endif
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
@@ -202,7 +161,10 @@ static const bool config_ivsalloc =
#include <malloc/malloc.h>
#endif

+#include "jemalloc/internal/ph.h"
+#ifndef __PGI
#define RB_COMPACT
+#endif
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
@@ -223,32 +185,48 @@ static const bool config_ivsalloc =
/******************************************************************************/
#define	JEMALLOC_H_TYPES
-#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)
-
-#define	ZU(z)	((size_t)z)
-
-#ifndef __DECONST
-#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
-#endif
-
-#ifdef JEMALLOC_DEBUG
-   /* Disable inlining to make debugging easier. */
-#  define JEMALLOC_INLINE
-#  define inline
-#else
-#  define JEMALLOC_ENABLE_INLINE
-#  define JEMALLOC_INLINE static inline
-#  ifdef _MSC_VER
-#    define inline _inline
-#  endif
-#endif
+#include "jemalloc/internal/jemalloc_internal_macros.h"
+
+/* Page size index type. */
+typedef unsigned pszind_t;
+
+/* Size class index type. */
+typedef unsigned szind_t;
+
+/*
+ * Flags bits:
+ *
+ * a: arena
+ * t: tcache
+ * 0: unused
+ * z: zero
+ * n: alignment
+ *
+ * aaaaaaaa aaaatttt tttttttt 0znnnnnn
+ */
+#define	MALLOCX_ARENA_MASK	((int)~0xfffff)
+#define	MALLOCX_ARENA_MAX	0xffe
+#define	MALLOCX_TCACHE_MASK	((int)~0xfff000ffU)
+#define	MALLOCX_TCACHE_MAX	0xffd
+#define	MALLOCX_LG_ALIGN_MASK	((int)0x3f)
+/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
+#define	MALLOCX_ALIGN_GET_SPECIFIED(flags)				\
+    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
+#define	MALLOCX_ALIGN_GET(flags)					\
+    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
+#define	MALLOCX_ZERO_GET(flags)						\
+    ((bool)(flags & MALLOCX_ZERO))
+#define	MALLOCX_TCACHE_GET(flags)					\
+    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
+#define	MALLOCX_ARENA_GET(flags)					\
+    (((unsigned)(((unsigned)flags) >> 20)) - 1)
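The flag layout documented above is what the public MALLOCX_* constructors encode and what the decode macros here pull apart. A small caller-side sketch follows, using only documented public macros from <jemalloc/jemalloc.h>; it assumes a stock jemalloc build that exports mallocx()/sdallocx().

/*
 * Sketch: build mallocx() flags matching the "0znnnnnn" low bits above.
 */
#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* 64-byte aligned (n = 6 in the low bits), zero-filled (z bit set). */
	void *p = mallocx(100, MALLOCX_LG_ALIGN(6) | MALLOCX_ZERO);

	if (p == NULL)
		return (1);
	/* ((uintptr_t)p & 63) == 0 and the first 100 bytes read as zero. */
	sdallocx(p, 100, MALLOCX_LG_ALIGN(6));
	return (0);
}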
/* Smallest size class to support. */
-#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
- * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
@@ -261,7 +239,7 @@ static const bool config_ivsalloc =
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
-#  ifdef __sparc64__
+#  if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
@@ -270,16 +248,25 @@ static const bool config_ivsalloc =
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
+#  ifdef __aarch64__
+#    define LG_QUANTUM		4
+#  endif
#  ifdef __hppa__
#    define LG_QUANTUM		4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
+#  ifdef __or1k__
+#    define LG_QUANTUM		3
+#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
-#  ifdef __s390x__
+#  ifdef __riscv__
+#    define LG_QUANTUM		4
+#  endif
+#  ifdef __s390__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
@@ -288,8 +275,12 @@ static const bool config_ivsalloc =
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
+#  ifdef __le32__
+#    define LG_QUANTUM		4
+#  endif
#  ifndef LG_QUANTUM
-#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
+#    error "Unknown minimum alignment for architecture; specify via "
+	 "--with-lg-quantum"
#  endif
#endif
@@ -329,21 +320,24 @@ static const bool config_ivsalloc =
#define	CACHELINE_CEILING(s)						\
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

-/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
+/* Page size.  LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
-#define	LG_PAGE		STATIC_PAGE_SHIFT
-#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
+#define	PAGE		((size_t)(1U << LG_PAGE))
#define	PAGE_MASK	((size_t)(PAGE - 1))

+/* Return the page base address for the page containing address a. */
+#define	PAGE_ADDR2BASE(a)						\
+    ((void *)((uintptr_t)(a) & ~PAGE_MASK))
+
/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
    (((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
-    ((void *)((uintptr_t)(a) & (-(alignment))))
+    ((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
@@ -351,101 +345,39 @@ static const bool config_ivsalloc =
/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
-    (((s) + (alignment - 1)) & (-(alignment)))
+    (((s) + (alignment - 1)) & ((~(alignment)) + 1))
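The new ((~(alignment)) + 1) form computes the same two's-complement mask as -(alignment) without relying on unsigned negation. A tiny worked example follows; the macros are copied locally so the snippet compiles on its own, which is an assumption of the sketch rather than how in-tree code includes them.

/*
 * Worked example of the alignment arithmetic above, with local macro copies.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define	ALIGNMENT_CEILING(s, alignment)					\
    (((s) + (alignment - 1)) & ((~(alignment)) + 1))
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
    ((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))

int
main(void)
{
	/* (~64 + 1) acts as a -64 mask, so sizes round up to multiples of 64. */
	assert(ALIGNMENT_CEILING((size_t)100, (size_t)64) == 128);
	assert(ALIGNMENT_CEILING((size_t)128, (size_t)64) == 128);

	/* Addresses round down to the enclosing 4096-byte boundary. */
	assert(ALIGNMENT_ADDR2BASE((void *)(uintptr_t)0x12345, (size_t)0x1000)
	    == (void *)(uintptr_t)0x12000);
	return (0);
}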
-/* Declare a variable length array */
+/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
+#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
+#    else
+#      include <stdlib.h>
+#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
-	type *name = alloca(sizeof(type) * count)
+	type *name = alloca(sizeof(type) * (count))
-#else
-#  define VARIABLE_ARRAY(type, name, count) type name[count]
-#endif
#ifdef JEMALLOC_VALGRIND
/*
* The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
* so that when Valgrind reports errors, there are no extra stack frames
* in the backtraces.
*
* The size that is reported to valgrind must be consistent through a chain of
* malloc..realloc..realloc calls. Request size isn't recorded anywhere in
* jemalloc, so it is critical that all callers of these macros provide usize
* rather than request size. As a result, buffer overflow detection is
* technically weakened for the standard API, though it is generally accepted
* practice to consider any extra bytes reported by malloc_usable_size() as
* usable space.
*/
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
if (config_valgrind && opt_valgrind && cond) \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
old_rzsize, zero) do { \
if (config_valgrind && opt_valgrind) { \
size_t rzsize = p2rz(ptr); \
\
if (ptr == old_ptr) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
usize, rzsize); \
if (zero && old_usize < usize) { \
VALGRIND_MAKE_MEM_DEFINED( \
(void *)((uintptr_t)ptr + \
old_usize), usize - old_usize); \
} \
} else { \
if (old_ptr != NULL) { \
VALGRIND_FREELIKE_BLOCK(old_ptr, \
old_rzsize); \
} \
if (ptr != NULL) { \
size_t copy_size = (old_usize < usize) \
? old_usize : usize; \
size_t tail_size = usize - copy_size; \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
rzsize, false); \
if (copy_size > 0) { \
VALGRIND_MAKE_MEM_DEFINED(ptr, \
copy_size); \
} \
if (zero && tail_size > 0) { \
VALGRIND_MAKE_MEM_DEFINED( \
(void *)((uintptr_t)ptr + \
copy_size), tail_size); \
} \
} \
} \
} \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
if (config_valgrind && opt_valgrind) \
VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
#else #else
#define RUNNING_ON_VALGRIND ((unsigned)0) # define VARIABLE_ARRAY(type, name, count) type name[(count)]
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
old_rzsize, zero)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize)
#endif
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h" #include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h" #include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h" #include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h" #include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h" #include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h" #include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h" #include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h" #include "jemalloc/internal/mb.h"
...@@ -453,9 +385,10 @@ static const bool config_ivsalloc = ...@@ -453,9 +385,10 @@ static const bool config_ivsalloc =
#include "jemalloc/internal/arena.h" #include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h" #include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h" #include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h" #include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h" #include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h" #include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h" #include "jemalloc/internal/quarantine.h"
@@ -465,167 +398,433 @@ static const bool config_ivsalloc =
/******************************************************************************/
#define	JEMALLOC_H_STRUCTS
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h" #include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h" #include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h" #include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h" #include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h" #include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h" #include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h" #include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/bitmap.h"
#define JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/extent.h" #include "jemalloc/internal/extent.h"
#define JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/arena.h" #include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/base.h" #include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h" #include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h" #include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h" #include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h" #include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h" #include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h" #include "jemalloc/internal/prof.h"
-typedef struct {
-	uint64_t	allocated;
-	uint64_t	deallocated;
-} thread_allocated_t;
-/*
- * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
- * argument.
- */
-#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})
+#include "jemalloc/internal/tsd.h"

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define	JEMALLOC_H_EXTERNS
extern bool	opt_abort;
-extern bool	opt_junk;
+extern const char	*opt_junk;
+extern bool	opt_junk_alloc;
+extern bool	opt_junk_free;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
-extern bool	opt_valgrind;
extern bool	opt_xmalloc;
extern bool	opt_zero;
-extern size_t	opt_narenas;
+extern unsigned	opt_narenas;
+
+extern bool	in_valgrind;

/* Number of CPUs. */
extern unsigned	ncpus;

-/* Protects arenas initialization (arenas, arenas_total). */
-extern malloc_mutex_t	arenas_lock;
+/* Number of arenas used for automatic multiplexing of threads and arenas. */
+extern unsigned	narenas_auto;
+
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
+ *
+ * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
+ * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
+ * takes some action to create them and allocate from them.
 */
extern arena_t	**arenas;
-extern unsigned	narenas_total;
-extern unsigned	narenas_auto; /* Read-only after initialization. */
-
-arena_t	*arenas_extend(unsigned ind);
-void	arenas_cleanup(void *arg);
-arena_t	*choose_arena_hard(void);
+
+/*
+ * pind2sz_tab encodes the same information as could be computed by
+ * pind2sz_compute().
*/
extern size_t const pind2sz_tab[NPSIZES];
/*
* index2size_tab encodes the same information as could be computed (at
* unacceptable cost in some code paths) by index2size_compute().
*/
extern size_t const index2size_tab[NSIZES];
/*
* size2index_tab is a compact lookup table that rounds request sizes up to
* size classes. In order to reduce cache footprint, the table is compressed,
* and all accesses are via size2index().
*/
extern uint8_t const size2index_tab[];
arena_t *a0get(void);
void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
unsigned narenas_total_get(void);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void thread_allocated_cleanup(tsd_t *tsd);
void thread_deallocated_cleanup(tsd_t *tsd);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_tdata_cleanup(tsd_t *tsd);
void narenas_tdata_cleanup(tsd_t *tsd);
void arenas_tdata_bypass_cleanup(tsd_t *tsd);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h" #include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h" #include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h" #include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h" #include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h" #include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h" #include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h" #include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h" #include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h" #include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h" #include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h" #include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h" #include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h" #include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h" #include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h" #include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h" #include "jemalloc/internal/prof.h"
#include "jemalloc/internal/tsd.h"
#undef JEMALLOC_H_EXTERNS #undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define	JEMALLOC_H_INLINES
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h" #include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h" #include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h" #include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h" #include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h" #include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h" #include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h" #include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h" #include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h" #include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h" #include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h" #include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h" #include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE #ifndef JEMALLOC_ENABLE_INLINE
-malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
+pszind_t	psz2ind(size_t psz);
size_t pind2sz_compute(pszind_t pind);
size_t pind2sz_lookup(pszind_t pind);
size_t pind2sz(pszind_t pind);
size_t psz2u(size_t psz);
szind_t size2index_compute(size_t size);
szind_t size2index_lookup(size_t size);
szind_t size2index(size_t size);
size_t index2size_compute(szind_t index);
size_t index2size_lookup(szind_t index);
size_t index2size(szind_t index);
size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
-unsigned	narenas_total_get(void);
-arena_t	*choose_arena(arena_t *arena);
+arena_t	*arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
+arena_t	*arena_choose(tsd_t *tsd, arena_t *arena);
arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena);
arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
bool refresh_if_missing);
arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-/*
- * Map of pthread_self() --> arenas[???], used for selecting an arena to use
- * for allocations.
- */
-malloc_tsd_externs(arenas, arena_t *)
-malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)
+JEMALLOC_INLINE pszind_t
+psz2ind(size_t psz)
+{
+
+	if (unlikely(psz > HUGE_MAXCLASS))
+		return (NPSIZES);
{
pszind_t x = lg_floor((psz<<1)-1);
pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
(LG_SIZE_CLASS_GROUP + LG_PAGE);
pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta_inverse_mask = ZI(-1) << lg_delta;
pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
pszind_t ind = grp + mod;
return (ind);
}
}
JEMALLOC_INLINE size_t
pind2sz_compute(pszind_t pind)
{
{
size_t grp = pind >> LG_SIZE_CLASS_GROUP;
size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
size_t grp_size_mask = ~((!!grp)-1);
size_t grp_size = ((ZU(1) << (LG_PAGE +
(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
size_t shift = (grp == 0) ? 1 : grp;
size_t lg_delta = shift + (LG_PAGE-1);
size_t mod_size = (mod+1) << lg_delta;
size_t sz = grp_size + mod_size;
return (sz);
}
}
JEMALLOC_INLINE size_t
pind2sz_lookup(pszind_t pind)
{
size_t ret = (size_t)pind2sz_tab[pind];
assert(ret == pind2sz_compute(pind));
return (ret);
}
JEMALLOC_INLINE size_t
pind2sz(pszind_t pind)
{
assert(pind < NPSIZES);
return (pind2sz_lookup(pind));
}
JEMALLOC_INLINE size_t
psz2u(size_t psz)
{
if (unlikely(psz > HUGE_MAXCLASS))
return (0);
{
size_t x = lg_floor((psz<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (psz + delta_mask) & ~delta_mask;
return (usize);
}
}
JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{
if (unlikely(size > HUGE_MAXCLASS))
return (NSIZES);
#if (NTBINS != 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
}
#endif
{
szind_t x = lg_floor((size<<1)-1);
szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
szind_t grp = shift << LG_SIZE_CLASS_GROUP;
szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta_inverse_mask = ZI(-1) << lg_delta;
szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
szind_t index = NTBINS + grp + mod;
return (index);
}
}
JEMALLOC_ALWAYS_INLINE szind_t
size2index_lookup(size_t size)
{
assert(size <= LOOKUP_MAXCLASS);
{
szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
assert(ret == size2index_compute(size));
return (ret);
}
}
JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size)
{
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS))
return (size2index_lookup(size));
return (size2index_compute(size));
}
JEMALLOC_INLINE size_t
index2size_compute(szind_t index)
{
#if (NTBINS > 0)
if (index < NTBINS)
return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
#endif
{
size_t reduced_index = index - NTBINS;
size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
1);
size_t grp_size_mask = ~((!!grp)-1);
size_t grp_size = ((ZU(1) << (LG_QUANTUM +
(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
size_t shift = (grp == 0) ? 1 : grp;
size_t lg_delta = shift + (LG_QUANTUM-1);
size_t mod_size = (mod+1) << lg_delta;
size_t usize = grp_size + mod_size;
return (usize);
}
}
JEMALLOC_ALWAYS_INLINE size_t
index2size_lookup(szind_t index)
{
size_t ret = (size_t)index2size_tab[index];
assert(ret == index2size_compute(index));
return (ret);
}
JEMALLOC_ALWAYS_INLINE size_t
index2size(szind_t index)
{
assert(index < NSIZES);
return (index2size_lookup(index));
}
JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{
if (unlikely(size > HUGE_MAXCLASS))
return (0);
#if (NTBINS > 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
(ZU(1) << lg_ceil));
}
#endif
{
size_t x = lg_floor((size<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (size + delta_mask) & ~delta_mask;
return (usize);
}
}
JEMALLOC_ALWAYS_INLINE size_t
s2u_lookup(size_t size)
{
size_t ret = index2size_lookup(size2index_lookup(size));
assert(ret == s2u_compute(size));
return (ret);
}
/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

-	if (size <= SMALL_MAXCLASS)
-		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
-	if (size <= arena_maxclass)
-		return (PAGE_CEILING(size));
-	return (CHUNK_CEILING(size));
+	assert(size > 0);
+	if (likely(size <= LOOKUP_MAXCLASS))
+		return (s2u_lookup(size));
+	return (s2u_compute(size));
}
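From the application side, the same size-to-usable-size mapping that s2u() implements is exposed by the public nallocx() entry point, which reports what mallocx() would hand back without allocating anything. A hedged sketch, assuming a standard jemalloc build:

/*
 * Sketch: query the usable size for a 100-byte request.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t usable = nallocx(100, 0);

	/* With default size classes this prints a small-class size >= 100. */
	printf("request 100 -> usable %zu\n", usable);
	return (0);
}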
/* /*
* Compute usable size that would result from allocating an object with the * Compute usable size that would result from allocating an object with the
* specified size and alignment. * specified size and alignment.
*/ */
JEMALLOC_INLINE size_t JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment) sa2u(size_t size, size_t alignment)
{ {
size_t usize; size_t usize;
assert(alignment != 0 && ((alignment - 1) & alignment) == 0); assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
/* Try for a small size class. */
if (size <= SMALL_MAXCLASS && alignment < PAGE) {
/* /*
* Round size up to the nearest multiple of alignment. * Round size up to the nearest multiple of alignment.
* *
* This done, we can take advantage of the fact that for each small * This done, we can take advantage of the fact that for each
* size class, every object is aligned at the smallest power of two * small size class, every object is aligned at the smallest
* that is non-zero in the base two representation of the size. For * power of two that is non-zero in the base two representation
* example: * of the size. For example:
* *
* Size | Base 2 | Minimum alignment * Size | Base 2 | Minimum alignment
* -----+----------+------------------ * -----+----------+------------------
...@@ -633,94 +832,150 @@ sa2u(size_t size, size_t alignment) ...@@ -633,94 +832,150 @@ sa2u(size_t size, size_t alignment)
* 144 | 10100000 | 32 * 144 | 10100000 | 32
* 192 | 11000000 | 64 * 192 | 11000000 | 64
*/ */
usize = ALIGNMENT_CEILING(size, alignment); usize = s2u(ALIGNMENT_CEILING(size, alignment));
/* if (usize < LARGE_MINCLASS)
* (usize < size) protects against the combination of maximal return (usize);
* alignment and size greater than maximal alignment.
*/
if (usize < size) {
/* size_t overflow. */
return (0);
} }
if (usize <= arena_maxclass && alignment <= PAGE) { /* Try for a large size class. */
if (usize <= SMALL_MAXCLASS) if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
return (PAGE_CEILING(usize));
} else {
size_t run_size;
/* /*
* We can't achieve subpage alignment, so round up alignment * We can't achieve subpage alignment, so round up alignment
* permanently; it makes later calculations simpler. * to the minimum that can actually be supported.
*/ */
alignment = PAGE_CEILING(alignment); alignment = PAGE_CEILING(alignment);
usize = PAGE_CEILING(size);
/* Make sure result is a large size class. */
usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);
/* /*
* (usize < size) protects against very large sizes within * Calculate the size of the over-size run that arena_palloc()
* PAGE of SIZE_T_MAX. * would need to allocate in order to guarantee the alignment.
* */
* (usize + alignment < usize) protects against the if (usize + large_pad + alignment - PAGE <= arena_maxrun)
* combination of maximal alignment and usize large enough return (usize);
* to cause overflow. This is similar to the first overflow }
* check above, but it needs to be repeated due to the new
* usize value, which may now be *equal* to maximal /* Huge size class. Beware of overflow. */
* alignment, whereas before we only detected overflow if the
* original size was *greater* than maximal alignment. if (unlikely(alignment > HUGE_MAXCLASS))
return (0);
/*
* We can't achieve subchunk alignment, so round up alignment to the
* minimum that can actually be supported.
*/ */
if (usize < size || usize + alignment < usize) { alignment = CHUNK_CEILING(alignment);
/* Make sure result is a huge size class. */
if (size <= chunksize)
usize = chunksize;
else {
usize = s2u(size);
if (usize < size) {
/* size_t overflow. */ /* size_t overflow. */
return (0); return (0);
} }
}
/* /*
* Calculate the size of the over-size run that arena_palloc() * Calculate the multi-chunk mapping that huge_palloc() would need in
* would need to allocate in order to guarantee the alignment. * order to guarantee the alignment.
* If the run wouldn't fit within a chunk, round up to a huge
* allocation size.
*/ */
run_size = usize + alignment - PAGE; if (usize + alignment - PAGE < usize) {
if (run_size <= arena_maxclass) /* size_t overflow. */
return (PAGE_CEILING(usize)); return (0);
return (CHUNK_CEILING(usize));
} }
return (usize);
} }
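A standalone sketch (not jemalloc code; the helper names are hypothetical) of the arithmetic the comment above describes: a size's natural alignment is its lowest set bit, and rounding a size up to a power-of-two alignment needs only mask arithmetic, in the spirit of ALIGNMENT_CEILING().

#include <assert.h>
#include <stddef.h>

static size_t
alignment_ceiling(size_t s, size_t alignment)
{
	/* Assumes alignment is a nonzero power of two. */
	return ((s + (alignment - 1)) & ~(alignment - 1));
}

static size_t
natural_alignment(size_t size)
{
	/* Lowest set bit of the size. */
	return (size & (~size + 1));
}

int
main(void)
{
	assert(natural_alignment(96) == 32);	/*  96 = 0b01100000 */
	assert(natural_alignment(160) == 32);	/* 160 = 0b10100000 */
	assert(natural_alignment(192) == 64);	/* 192 = 0b11000000 */
	assert(alignment_ceiling(100, 64) == 128);
	return (0);
}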
JEMALLOC_INLINE unsigned /* Choose an arena based on a per-thread value. */
narenas_total_get(void) JEMALLOC_INLINE arena_t *
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
{ {
unsigned narenas; arena_t *ret;
malloc_mutex_lock(&arenas_lock); if (arena != NULL)
narenas = narenas_total; return (arena);
malloc_mutex_unlock(&arenas_lock);
return (narenas); ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
if (unlikely(ret == NULL))
ret = arena_choose_hard(tsd, internal);
return (ret);
} }
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t * JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena) arena_choose(tsd_t *tsd, arena_t *arena)
{ {
arena_t *ret;
if (arena != NULL) return (arena_choose_impl(tsd, arena, false));
return (arena); }
if ((ret = *arenas_tsd_get()) == NULL) { JEMALLOC_INLINE arena_t *
ret = choose_arena_hard(); arena_ichoose(tsd_t *tsd, arena_t *arena)
assert(ret != NULL); {
return (arena_choose_impl(tsd, arena, true));
}
JEMALLOC_INLINE arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
{
arena_tdata_t *tdata;
arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
if (unlikely(arenas_tdata == NULL)) {
/* arenas_tdata hasn't been initialized yet. */
return (arena_tdata_get_hard(tsd, ind));
} }
if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
/*
* ind is invalid, the cached table is stale (too small), or the
* tdata has yet to be initialized.
*/
return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
NULL);
}
tdata = &arenas_tdata[ind];
if (likely(tdata != NULL) || !refresh_if_missing)
return (tdata);
return (arena_tdata_get_hard(tsd, ind));
}
JEMALLOC_INLINE arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
{
arena_t *ret;
assert(ind <= MALLOCX_ARENA_MAX);
ret = arenas[ind];
if (unlikely(ret == NULL)) {
ret = atomic_read_p((void *)&arenas[ind]);
if (init_if_missing && unlikely(ret == NULL))
ret = arena_init(tsdn, ind);
}
return (ret); return (ret);
} }
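jemalloc's arena_init() installs new arenas under a lock; the standalone C11 sketch below (not jemalloc code) reproduces only the lookup shape of arena_get() above: a fast read of a cached slot, then a slow path that creates and publishes the entry when it is still missing. slot_get(), slot_init_hard(), and NSLOTS are hypothetical names.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

#define	NSLOTS	16

static _Atomic(void *) slots[NSLOTS];	/* Zero-initialized slot table. */

static void *
slot_init_hard(unsigned ind)
{
	/* Hypothetical expensive initializer standing in for arena_init(). */
	(void)ind;
	return (malloc(1));
}

static void *
slot_get(unsigned ind, bool init_if_missing)
{
	void *ret;

	assert(ind < NSLOTS);
	ret = atomic_load_explicit(&slots[ind], memory_order_acquire);
	if (ret == NULL && init_if_missing) {
		void *fresh = slot_init_hard(ind);
		void *expected = NULL;

		/* Publish, unless another thread already installed an entry. */
		if (atomic_compare_exchange_strong(&slots[ind], &expected,
		    fresh))
			ret = fresh;
		else {
			free(fresh);
			ret = expected;
		}
	}
	return (ret);
}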
JEMALLOC_INLINE ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind)
{
arena_tdata_t *tdata;
tdata = arena_tdata_get(tsd, ind, true);
if (unlikely(tdata == NULL))
return (NULL);
return (&tdata->decay_ticker);
}
#endif #endif
#include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/* /*
* Include arena.h twice in order to resolve circular dependencies with * Include portions of arena.h interleaved with tcache.h in order to resolve
* tcache.h. * circular dependencies.
*/ */
#define JEMALLOC_ARENA_INLINE_A #define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h" #include "jemalloc/internal/arena.h"
...@@ -733,131 +988,142 @@ choose_arena(arena_t *arena) ...@@ -733,131 +988,142 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/quarantine.h" #include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE #ifndef JEMALLOC_ENABLE_INLINE
void *imallocx(size_t size, bool try_tcache, arena_t *arena); arena_t *iaalloc(const void *ptr);
void *imalloc(size_t size); size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
void *icallocx(size_t size, bool try_tcache, arena_t *arena); void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
void *icalloc(size_t size); tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
arena_t *arena); bool slow_path);
void *ipalloc(size_t usize, size_t alignment, bool zero); void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
size_t isalloc(const void *ptr, bool demote); tcache_t *tcache, bool is_metadata, arena_t *arena);
size_t ivsalloc(const void *ptr, bool demote); void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
size_t u2rz(size_t usize); size_t u2rz(size_t usize);
size_t p2rz(const void *ptr); size_t p2rz(tsdn_t *tsdn, const void *ptr);
void idallocx(void *ptr, bool try_tcache); void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
void idalloc(void *ptr); bool slow_path);
void iqallocx(void *ptr, bool try_tcache); void idalloc(tsd_t *tsd, void *ptr);
void iqalloc(void *ptr); void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment, void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, bool slow_path);
void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache,
bool slow_path);
void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache,
arena_t *arena); arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment, void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
bool zero, bool no_move); size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t) void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero);
bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero);
#endif #endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void * JEMALLOC_ALWAYS_INLINE arena_t *
imallocx(size_t size, bool try_tcache, arena_t *arena) iaalloc(const void *ptr)
{ {
assert(size != 0); assert(ptr != NULL);
if (size <= arena_maxclass) return (arena_aalloc(ptr));
return (arena_malloc(arena, size, false, try_tcache));
else
return (huge_malloc(size, false));
} }
JEMALLOC_INLINE void * /*
imalloc(size_t size) * Typical usage:
* tsdn_t *tsdn = [...]
* void *ptr = [...]
* size_t sz = isalloc(tsdn, ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
isalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{ {
return (imallocx(size, true, NULL)); assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || !demote);
return (arena_salloc(tsdn, ptr, demote));
} }
JEMALLOC_INLINE void * JEMALLOC_ALWAYS_INLINE void *
icallocx(size_t size, bool try_tcache, arena_t *arena) iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
bool is_metadata, arena_t *arena, bool slow_path)
{ {
void *ret;
if (size <= arena_maxclass) assert(size != 0);
return (arena_malloc(arena, size, true, try_tcache)); assert(!is_metadata || tcache == NULL);
else assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
return (huge_malloc(size, true));
ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(ret),
isalloc(tsdn, ret, config_prof));
}
return (ret);
} }
JEMALLOC_INLINE void * JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size) ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
{ {
return (icallocx(size, true, NULL)); return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
false, NULL, slow_path));
} }
JEMALLOC_INLINE void * JEMALLOC_ALWAYS_INLINE void *
ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
arena_t *arena) tcache_t *tcache, bool is_metadata, arena_t *arena)
{ {
void *ret; void *ret;
assert(usize != 0); assert(usize != 0);
assert(usize == sa2u(usize, alignment)); assert(usize == sa2u(usize, alignment));
assert(!is_metadata || tcache == NULL);
assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
if (usize <= arena_maxclass && alignment <= PAGE) ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
ret = arena_malloc(arena, usize, zero, try_tcache);
else {
if (usize <= arena_maxclass) {
ret = arena_palloc(choose_arena(arena), usize,
alignment, zero);
} else if (alignment <= chunksize)
ret = huge_malloc(usize, zero);
else
ret = huge_palloc(usize, alignment, zero);
}
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret,
config_prof));
}
return (ret); return (ret);
} }
JEMALLOC_INLINE void * JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero) ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena)
{ {
return (ipallocx(usize, alignment, zero, true, NULL)); return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
} }
/* JEMALLOC_ALWAYS_INLINE void *
* Typical usage: ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
* void *ptr = [...]
* size_t sz = isalloc(ptr, config_prof);
*/
JEMALLOC_INLINE size_t
isalloc(const void *ptr, bool demote)
{ {
size_t ret;
arena_chunk_t *chunk;
assert(ptr != NULL); return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
/* Demotion only makes sense if config_prof is true. */ tcache_get(tsd, true), false, NULL));
assert(config_prof || demote == false);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
ret = arena_salloc(ptr, demote);
else
ret = huge_salloc(ptr);
return (ret);
} }
JEMALLOC_INLINE size_t JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote) ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{ {
extent_node_t *node;
/* Return 0 if ptr is not within a chunk managed by jemalloc. */ /* Return 0 if ptr is not within a chunk managed by jemalloc. */
if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL) node = chunk_lookup(ptr, false);
if (node == NULL)
return (0); return (0);
/* Only arena chunks should be looked up via interior pointers. */
assert(extent_node_addr_get(node) == ptr ||
extent_node_achunk_get(node));
return (isalloc(ptr, demote)); return (isalloc(tsdn, ptr, demote));
} }
JEMALLOC_INLINE size_t JEMALLOC_INLINE size_t
...@@ -866,7 +1132,7 @@ u2rz(size_t usize) ...@@ -866,7 +1132,7 @@ u2rz(size_t usize)
size_t ret; size_t ret;
if (usize <= SMALL_MAXCLASS) { if (usize <= SMALL_MAXCLASS) {
size_t binind = SMALL_SIZE2BIN(usize); szind_t binind = size2index(usize);
ret = arena_bin_info[binind].redzone_size; ret = arena_bin_info[binind].redzone_size;
} else } else
ret = 0; ret = 0;
...@@ -875,132 +1141,144 @@ u2rz(size_t usize) ...@@ -875,132 +1141,144 @@ u2rz(size_t usize)
} }
JEMALLOC_INLINE size_t JEMALLOC_INLINE size_t
p2rz(const void *ptr) p2rz(tsdn_t *tsdn, const void *ptr)
{ {
size_t usize = isalloc(ptr, false); size_t usize = isalloc(tsdn, ptr, false);
return (u2rz(usize)); return (u2rz(usize));
} }
JEMALLOC_INLINE void JEMALLOC_ALWAYS_INLINE void
idallocx(void *ptr, bool try_tcache) idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
bool slow_path)
{ {
arena_chunk_t *chunk;
assert(ptr != NULL); assert(ptr != NULL);
assert(!is_metadata || tcache == NULL);
assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto);
if (config_stats && is_metadata) {
arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr,
config_prof));
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); arena_dalloc(tsdn, ptr, tcache, slow_path);
if (chunk != ptr)
arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
else
huge_dalloc(ptr, true);
} }
JEMALLOC_INLINE void JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr) idalloc(tsd_t *tsd, void *ptr)
{ {
idallocx(ptr, true); idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true);
} }
JEMALLOC_INLINE void JEMALLOC_ALWAYS_INLINE void
iqallocx(void *ptr, bool try_tcache) iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{ {
if (config_fill && opt_quarantine) if (slow_path && config_fill && unlikely(opt_quarantine))
quarantine(ptr); quarantine(tsd, ptr);
else else
idallocx(ptr, try_tcache); idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path);
} }
JEMALLOC_INLINE void JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr) isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
bool slow_path)
{ {
iqallocx(ptr, true); arena_sdalloc(tsdn, ptr, size, tcache, slow_path);
} }
JEMALLOC_INLINE void * JEMALLOC_ALWAYS_INLINE void
irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path)
bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{ {
void *ret;
size_t oldsize;
assert(ptr != NULL);
assert(size != 0);
oldsize = isalloc(ptr, config_prof); if (slow_path && config_fill && unlikely(opt_quarantine))
quarantine(tsd, ptr);
else
isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path);
}
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) JEMALLOC_ALWAYS_INLINE void *
!= 0) { iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
{
void *p;
size_t usize, copysize; size_t usize, copysize;
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
if (no_move)
return (NULL);
usize = sa2u(size + extra, alignment); usize = sa2u(size + extra, alignment);
if (usize == 0) if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return (NULL); return (NULL);
ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena); p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena);
if (ret == NULL) { if (p == NULL) {
if (extra == 0) if (extra == 0)
return (NULL); return (NULL);
/* Try again, without extra this time. */ /* Try again, without extra this time. */
usize = sa2u(size, alignment); usize = sa2u(size, alignment);
if (usize == 0) if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return (NULL); return (NULL);
ret = ipallocx(usize, alignment, zero, try_tcache_alloc, p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache,
arena); arena);
if (ret == NULL) if (p == NULL)
return (NULL); return (NULL);
} }
/* /*
* Copy at most size bytes (not size+extra), since the caller * Copy at most size bytes (not size+extra), since the caller has no
* has no expectation that the extra bytes will be reliably * expectation that the extra bytes will be reliably preserved.
* preserved.
*/ */
copysize = (size < oldsize) ? size : oldsize; copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize); memcpy(p, ptr, copysize);
iqallocx(ptr, try_tcache_dalloc); isqalloc(tsd, ptr, oldsize, tcache, true);
return (ret); return (p);
} }
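The same allocate/copy/free shape that iralloct_realign() follows, expressed as a standalone sketch using portable libc calls; aligned_realloc_move() is a hypothetical name, and the caller is assumed to track oldsize itself.

#include <stdlib.h>
#include <string.h>

/*
 * alignment must be a power of two and a multiple of sizeof(void *) for
 * posix_memalign(); oldsize is the caller-tracked size of ptr's allocation.
 */
static void *
aligned_realloc_move(void *ptr, size_t oldsize, size_t size, size_t alignment)
{
	void *p;

	if (posix_memalign(&p, alignment, size) != 0)
		return (NULL);
	/* Copy at most the smaller of the old and new sizes. */
	memcpy(p, ptr, (size < oldsize) ? size : oldsize);
	free(ptr);
	return (p);
}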
if (no_move) { JEMALLOC_ALWAYS_INLINE void *
if (size <= arena_maxclass) { iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
return (arena_ralloc_no_move(ptr, oldsize, size, bool zero, tcache_t *tcache, arena_t *arena)
extra, zero)); {
} else {
return (huge_ralloc_no_move(ptr, oldsize, size, assert(ptr != NULL);
extra)); assert(size != 0);
}
} else { if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
if (size + extra <= arena_maxclass) { != 0) {
return (arena_ralloc(arena, ptr, oldsize, size, extra, /*
alignment, zero, try_tcache_alloc, * Existing object alignment is inadequate; allocate new space
try_tcache_dalloc)); * and copy.
} else { */
return (huge_ralloc(ptr, oldsize, size, extra, return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
alignment, zero, try_tcache_dalloc)); zero, tcache, arena));
}
} }
return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
tcache));
} }
JEMALLOC_INLINE void * JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
bool no_move) bool zero)
{ {
return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true, return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
NULL)); tcache_get(tsd, true), NULL));
} }
malloc_tsd_externs(thread_allocated, thread_allocated_t) JEMALLOC_ALWAYS_INLINE bool
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t, ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup) size_t alignment, bool zero)
{
assert(ptr != NULL);
assert(size != 0);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/* Existing object alignment is inadequate. */
return (true);
}
return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero));
}
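The prototypes above suggest the usual caller pattern: attempt an in-place resize with ixalloc() (which returns true on failure), and fall back to a moving iralloc() otherwise. A hedged sketch, written as if it lived in a jemalloc translation unit; resize_or_move() is a hypothetical name.

static void *
resize_or_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t newsize)
{

	if (!ixalloc(tsd_tsdn(tsd), ptr, oldsize, newsize, 0, 0, false))
		return (ptr);	/* Resized in place. */
	return (iralloc(tsd, ptr, oldsize, newsize, 0, false));
}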
#endif #endif
#include "jemalloc/internal/prof.h" #include "jemalloc/internal/prof.h"
......
#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
#else
# include <sys/param.h>
# include <sys/mman.h>
# if !defined(__pnacl__) && !defined(__native_client__)
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
# ifdef JEMALLOC_OS_UNFAIR_LOCK
# include <os/lock.h>
# endif
# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
# include <sched.h>
# endif
# include <errno.h>
# include <sys/time.h>
# include <time.h>
# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# include <mach/mach_time.h>
# endif
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
# ifdef JEMALLOC_HAS_RESTRICT
# define restrict __restrict
# endif
/* Disable warnings about deprecated system functions. */
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
isblank(int c)
{
return (c == '\t' || c == ' ');
}
#endif
#else
# include <unistd.h>
#endif
#include <fcntl.h>
#endif /* JEMALLOC_INTERNAL_H */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
#undef JEMALLOC_PREFIX
#undef JEMALLOC_CPREFIX
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#undef JEMALLOC_PRIVATE_NAMESPACE
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#undef CPU_SPINWAIT
/* Defined if C11 atomics are available. */
#undef JEMALLOC_C11ATOMICS
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
#undef JEMALLOC_ATOMIC9
/*
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
* documented in the atomic(3) manual page.
*/
#undef JEMALLOC_OSATOMIC
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
#undef JEMALLOC_HAVE_BUILTIN_CLZ
/*
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
*/
#undef JEMALLOC_OS_UNFAIR_LOCK
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
#undef JEMALLOC_OSSPIN
/* Defined if syscall(2) is usable. */
#undef JEMALLOC_USE_SYSCALL
/*
* Defined if secure_getenv(3) is available.
*/
#undef JEMALLOC_HAVE_SECURE_GETENV
/*
* Defined if issetugid(2) is available.
*/
#undef JEMALLOC_HAVE_ISSETUGID
/* Defined if pthread_atfork(3) is available. */
#undef JEMALLOC_HAVE_PTHREAD_ATFORK
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
/*
* Defined if mach_absolute_time() is available.
*/
#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
 * FreeBSD, pthread_key_create() allocates, so using it during malloc
 * bootstrapping would cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
#undef JEMALLOC_MALLOC_THREAD_CLEANUP
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
#undef JEMALLOC_THREADED_INIT
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
#undef JEMALLOC_MUTEX_INIT_CB
/* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL
/* JEMALLOC_CC_SILENCE enables code that silences spurious compiler warnings. */
#undef JEMALLOC_CC_SILENCE
/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
#undef JEMALLOC_CODE_COVERAGE
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
#undef JEMALLOC_DEBUG
/* JEMALLOC_STATS enables statistics calculation. */
#undef JEMALLOC_STATS
/* JEMALLOC_PROF enables allocation profiling. */
#undef JEMALLOC_PROF
/* Use libunwind for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBUNWIND
/* Use libgcc for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBGCC
/* Use gcc intrinsics for profile backtracing if defined. */
#undef JEMALLOC_PROF_GCC
/*
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
* This makes it possible to allocate/deallocate objects without any locking
* when the cache is in the steady state.
*/
#undef JEMALLOC_TCACHE
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
* segment (DSS).
*/
#undef JEMALLOC_DSS
/* Support memory filling (junk/zero/quarantine/redzone). */
#undef JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
#undef JEMALLOC_UTRACE
/* Support Valgrind. */
#undef JEMALLOC_VALGRIND
/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC
/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK
/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
#undef LG_TINY_MIN
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#undef LG_QUANTUM
/* One page is 2^LG_PAGE bytes. */
#undef LG_PAGE
/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
* This is the normal order of things for mmap()/munmap(), but on Windows
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
* mappings do *not* coalesce/fragment.
*/
#undef JEMALLOC_MAPS_COALESCE
/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
* later reuse. This is disabled by default on Linux because common sequences
* of mmap()/munmap() calls will cause virtual memory map holes.
*/
#undef JEMALLOC_MUNMAP
/* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS
/*
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
* Don't use this directly; instead use unreachable() from util.h
*/
#undef JEMALLOC_INTERNAL_UNREACHABLE
/*
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
* use ffs_*() from util.h.
*/
#undef JEMALLOC_INTERNAL_FFSLL
#undef JEMALLOC_INTERNAL_FFSL
#undef JEMALLOC_INTERNAL_FFS
/*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them.
*/
#undef JEMALLOC_IVSALLOC
/*
* If defined, explicitly attempt to more uniformly distribute large allocation
* pointer alignments across all cache indices.
*/
#undef JEMALLOC_CACHE_OBLIVIOUS
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
#undef JEMALLOC_ZONE_VERSION
/*
* Methods for determining whether the OS overcommits.
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
 * /proc/sys/vm/overcommit_memory file.
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
*/
#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
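A hedged sketch (not jemalloc's actual pages code) of the Linux method named above: read /proc/sys/vm/overcommit_memory, where 0 means heuristic overcommit, 1 means always overcommit, and 2 means never overcommit. overcommit_enabled_proc() is a hypothetical name.

#include <fcntl.h>
#include <stdbool.h>
#include <unistd.h>

static bool
overcommit_enabled_proc(void)
{
	char c;
	int fd;
	ssize_t nread;

	fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
	if (fd == -1)
		return (true);	/* Unknown; assume the common default. */
	nread = read(fd, &c, 1);
	close(fd);
	if (nread != 1)
		return (true);
	return (c == '0' || c == '1');
}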
/* Defined if madvise(2) is available. */
#undef JEMALLOC_HAVE_MADVISE
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
* will be discarded rather than swapped out.
* madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
* new pages will be demand-zeroed if the
* address region is later touched.
*/
#undef JEMALLOC_PURGE_MADVISE_FREE
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
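A hedged sketch (not jemalloc's pages_purge()) showing the two madvise(2) flavors described above; purge_pages() is a hypothetical name, and the #if guards simply pick whichever advice flag the platform headers provide.

#include <stddef.h>
#include <sys/mman.h>

static int
purge_pages(void *addr, size_t size)
{
#if defined(MADV_FREE)
	/* Mark pages unused; they may be discarded rather than swapped out. */
	return (madvise(addr, size, MADV_FREE));
#elif defined(MADV_DONTNEED)
	/* Discard pages immediately; they are demand-zeroed if touched again. */
	return (madvise(addr, size, MADV_DONTNEED));
#else
	(void)addr;
	(void)size;
	return (0);	/* No purging mechanism available. */
#endif
}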
/*
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
* arguments to madvise(2).
*/
#undef JEMALLOC_THP
/* Define if operating system has alloca.h header. */
#undef JEMALLOC_HAS_ALLOCA_H
/* C99 restrict keyword supported. */
#undef JEMALLOC_HAS_RESTRICT
/* For use by hash code. */
#undef JEMALLOC_BIG_ENDIAN
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#undef LG_SIZEOF_INT
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#undef LG_SIZEOF_LONG_LONG
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
#undef JEMALLOC_GLIBC_MALLOC_HOOK
/* glibc memalign hook. */
#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
/* Adaptive mutex support in pthreads. */
#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
*/
#undef JEMALLOC_EXPORT
/* config.malloc_conf options string. */
#undef JEMALLOC_CONFIG_MALLOC_CONF
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
/*
* JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
* functions that are static inline functions if inlining is enabled, and
* single-definition library-private functions if inlining is disabled.
*
* JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
* which case the denoted functions are always static, regardless of whether
* inlining is enabled.
*/
#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
/* Disable inlining to make debugging/profiling easier. */
# define JEMALLOC_ALWAYS_INLINE
# define JEMALLOC_ALWAYS_INLINE_C static
# define JEMALLOC_INLINE
# define JEMALLOC_INLINE_C static
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ALWAYS_INLINE \
static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
# define JEMALLOC_ALWAYS_INLINE_C \
static inline JEMALLOC_ATTR(always_inline)
# else
# define JEMALLOC_ALWAYS_INLINE static inline
# define JEMALLOC_ALWAYS_INLINE_C static inline
# endif
# define JEMALLOC_INLINE static inline
# define JEMALLOC_INLINE_C static inline
# ifdef _MSC_VER
# define inline _inline
# endif
#endif
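A hedged illustration of the scheme described above: a header exposes foo() as a static inline when inlining is enabled, or as a single library-private definition compiled into exactly one object file otherwise. foo() and JEMALLOC_FOO_C_ are hypothetical names; the pattern mirrors the one used by the headers in this tree (e.g. the mutex inlines below).

#ifndef JEMALLOC_ENABLE_INLINE
int	foo(int x);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_FOO_C_))
JEMALLOC_INLINE int
foo(int x)
{

	return (x + 1);
}
#endif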
#ifdef JEMALLOC_CC_SILENCE
# define UNUSED JEMALLOC_ATTR(unused)
#else
# define UNUSED
#endif
#define ZU(z) ((size_t)z)
#define ZI(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q)
#define QI(q) ((int64_t)q)
#define KZU(z) ZU(z##ULL)
#define KZI(z) ZI(z##LL)
#define KQU(q) QU(q##ULL)
#define KQI(q) QI(q##LL)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#ifndef JEMALLOC_HAS_RESTRICT
# define restrict
#endif
...@@ -42,7 +42,7 @@ mb_write(void) ...@@ -42,7 +42,7 @@ mb_write(void)
: /* Inputs. */ : /* Inputs. */
: "memory" /* Clobbers. */ : "memory" /* Clobbers. */
); );
#else # else
/* /*
* This is hopefully enough to keep the compiler from reordering * This is hopefully enough to keep the compiler from reordering
* instructions around this one. * instructions around this one.
...@@ -52,7 +52,7 @@ mb_write(void) ...@@ -52,7 +52,7 @@ mb_write(void)
: /* Inputs. */ : /* Inputs. */
: "memory" /* Clobbers. */ : "memory" /* Clobbers. */
); );
#endif # endif
} }
#elif (defined(__amd64__) || defined(__x86_64__)) #elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void JEMALLOC_INLINE void
...@@ -104,9 +104,9 @@ mb_write(void) ...@@ -104,9 +104,9 @@ mb_write(void)
{ {
malloc_mutex_t mtx; malloc_mutex_t mtx;
malloc_mutex_init(&mtx); malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
malloc_mutex_lock(&mtx); malloc_mutex_lock(TSDN_NULL, &mtx);
malloc_mutex_unlock(&mtx); malloc_mutex_unlock(TSDN_NULL, &mtx);
} }
#endif #endif
#endif #endif
......
...@@ -5,18 +5,25 @@ typedef struct malloc_mutex_s malloc_mutex_t; ...@@ -5,18 +5,25 @@ typedef struct malloc_mutex_s malloc_mutex_t;
#ifdef _WIN32 #ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER # define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_INITIALIZER \
{OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN)) #elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER {0} # define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB)) #elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} # define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#else #else
# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \ # if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP} # define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# else # else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER} # define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# endif # endif
#endif #endif
...@@ -26,7 +33,13 @@ typedef struct malloc_mutex_s malloc_mutex_t; ...@@ -26,7 +33,13 @@ typedef struct malloc_mutex_s malloc_mutex_t;
struct malloc_mutex_s { struct malloc_mutex_s {
#ifdef _WIN32 #ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
SRWLOCK lock;
# else
CRITICAL_SECTION lock; CRITICAL_SECTION lock;
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN)) #elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock; OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB)) #elif (defined(JEMALLOC_MUTEX_INIT_CB))
...@@ -35,6 +48,7 @@ struct malloc_mutex_s { ...@@ -35,6 +48,7 @@ struct malloc_mutex_s {
#else #else
pthread_mutex_t lock; pthread_mutex_t lock;
#endif #endif
witness_t witness;
}; };
#endif /* JEMALLOC_H_STRUCTS */ #endif /* JEMALLOC_H_STRUCTS */
...@@ -48,44 +62,62 @@ extern bool isthreaded; ...@@ -48,44 +62,62 @@ extern bool isthreaded;
# define isthreaded true # define isthreaded true
#endif #endif
bool malloc_mutex_init(malloc_mutex_t *mutex); bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
void malloc_mutex_prefork(malloc_mutex_t *mutex); witness_rank_t rank);
void malloc_mutex_postfork_parent(malloc_mutex_t *mutex); void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(malloc_mutex_t *mutex); void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool mutex_boot(void); void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_boot(void);
#endif /* JEMALLOC_H_EXTERNS */ #endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/ /******************************************************************************/
#ifdef JEMALLOC_H_INLINES #ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE #ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(malloc_mutex_t *mutex); void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_unlock(malloc_mutex_t *mutex); void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
#endif #endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex) malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{ {
if (isthreaded) { if (isthreaded) {
witness_assert_not_owner(tsdn, &mutex->witness);
#ifdef _WIN32 #ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
AcquireSRWLockExclusive(&mutex->lock);
# else
EnterCriticalSection(&mutex->lock); EnterCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock_lock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN)) #elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mutex->lock); OSSpinLockLock(&mutex->lock);
#else #else
pthread_mutex_lock(&mutex->lock); pthread_mutex_lock(&mutex->lock);
#endif #endif
witness_lock(tsdn, &mutex->witness);
} }
} }
JEMALLOC_INLINE void JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex) malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{ {
if (isthreaded) { if (isthreaded) {
witness_unlock(tsdn, &mutex->witness);
#ifdef _WIN32 #ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
ReleaseSRWLockExclusive(&mutex->lock);
# else
LeaveCriticalSection(&mutex->lock); LeaveCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock_unlock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN)) #elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mutex->lock); OSSpinLockUnlock(&mutex->lock);
#else #else
...@@ -93,6 +125,22 @@ malloc_mutex_unlock(malloc_mutex_t *mutex) ...@@ -93,6 +125,22 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
#endif #endif
} }
} }
JEMALLOC_INLINE void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded)
witness_assert_owner(tsdn, &mutex->witness);
}
JEMALLOC_INLINE void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded)
witness_assert_not_owner(tsdn, &mutex->witness);
}
#endif #endif
#endif /* JEMALLOC_H_INLINES */ #endif /* JEMALLOC_H_INLINES */
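A hedged usage sketch for the tsdn-aware mutex API above, written as if it lived inside a jemalloc source file; demo_mtx, its "demo" witness name, and demo_critical_section() are hypothetical, and the mutex is assumed to have been set up with malloc_mutex_init(&demo_mtx, "demo", WITNESS_RANK_OMIT) during boot.

static malloc_mutex_t	demo_mtx;

static void
demo_critical_section(tsdn_t *tsdn)
{

	malloc_mutex_lock(tsdn, &demo_mtx);
	malloc_mutex_assert_owner(tsdn, &demo_mtx);
	/* ... manipulate state protected by demo_mtx ... */
	malloc_mutex_unlock(tsdn, &demo_mtx);
	malloc_mutex_assert_not_owner(tsdn, &demo_mtx);
}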
......
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct nstime_s nstime_t;
/* Maximum supported number of seconds (~584 years). */
#define NSTIME_SEC_MAX KQU(18446744072)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct nstime_s {
uint64_t ns;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void nstime_init(nstime_t *time, uint64_t ns);
void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
uint64_t nstime_ns(const nstime_t *time);
uint64_t nstime_sec(const nstime_t *time);
uint64_t nstime_nsec(const nstime_t *time);
void nstime_copy(nstime_t *time, const nstime_t *source);
int nstime_compare(const nstime_t *a, const nstime_t *b);
void nstime_add(nstime_t *time, const nstime_t *addend);
void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
void nstime_imultiply(nstime_t *time, uint64_t multiplier);
void nstime_idivide(nstime_t *time, uint64_t divisor);
uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
#ifdef JEMALLOC_JET
typedef bool (nstime_monotonic_t)(void);
extern nstime_monotonic_t *nstime_monotonic;
typedef bool (nstime_update_t)(nstime_t *);
extern nstime_update_t *nstime_update;
#else
bool nstime_monotonic(void);
bool nstime_update(nstime_t *time);
#endif
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
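A hedged usage sketch for the nstime API declared above, measuring an elapsed interval from inside a jemalloc translation unit; elapsed_ns() is a hypothetical helper, and it assumes nstime_update() loads the current time into its argument, as its use elsewhere in the tree suggests.

static uint64_t
elapsed_ns(void)
{
	nstime_t before, after;

	nstime_init(&before, 0);
	nstime_update(&before);		/* Load the current time. */
	/* ... timed work would go here ... */
	nstime_copy(&after, &before);
	nstime_update(&after);
	nstime_subtract(&after, &before);	/* after -= before */
	return (nstime_ns(&after));
}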
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *pages_map(void *addr, size_t size, bool *commit);
void pages_unmap(void *addr, size_t size);
void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
size_t size, bool *commit);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge(void *addr, size_t size);
bool pages_huge(void *addr, size_t size);
bool pages_nohuge(void *addr, size_t size);
void pages_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
/*
* A Pairing Heap implementation.
*
* "The Pairing Heap: A New Form of Self-Adjusting Heap"
* https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
*
 * With an auxiliary two-pass list, as described in a follow-on paper.
*
* "Pairing Heaps: Experiments and Analysis"
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
*
*******************************************************************************
*/
#ifndef PH_H_
#define PH_H_
/* Node structure. */
#define phn(a_type) \
struct { \
a_type *phn_prev; \
a_type *phn_next; \
a_type *phn_lchild; \
}
/* Root structure. */
#define ph(a_type) \
struct { \
a_type *ph_root; \
}
/* Internal utility macros. */
#define phn_lchild_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_lchild)
#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
a_phn->a_field.phn_lchild = a_lchild; \
} while (0)
#define phn_next_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_next)
#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
a_phn->a_field.phn_prev = a_prev; \
} while (0)
#define phn_prev_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_prev)
#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
a_phn->a_field.phn_next = a_next; \
} while (0)
#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
a_type *phn0child; \
\
assert(a_phn0 != NULL); \
assert(a_phn1 != NULL); \
assert(a_cmp(a_phn0, a_phn1) <= 0); \
\
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
phn_next_set(a_type, a_field, a_phn1, phn0child); \
if (phn0child != NULL) \
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)
#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
if (a_phn0 == NULL) \
r_phn = a_phn1; \
else if (a_phn1 == NULL) \
r_phn = a_phn0; \
else if (a_cmp(a_phn0, a_phn1) < 0) { \
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
a_cmp); \
r_phn = a_phn0; \
} else { \
phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
a_cmp); \
r_phn = a_phn1; \
} \
} while (0)
#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *head = NULL; \
a_type *tail = NULL; \
a_type *phn0 = a_phn; \
a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
\
/* \
* Multipass merge, wherein the first two elements of a FIFO \
* are repeatedly merged, and each result is appended to the \
* singly linked FIFO, until the FIFO contains only a single \
* element. We start with a sibling list but no reference to \
* its tail, so we do a single pass over the sibling list to \
* populate the FIFO. \
*/ \
if (phn1 != NULL) { \
a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
if (phnrest != NULL) \
phn_prev_set(a_type, a_field, phnrest, NULL); \
phn_prev_set(a_type, a_field, phn0, NULL); \
phn_next_set(a_type, a_field, phn0, NULL); \
phn_prev_set(a_type, a_field, phn1, NULL); \
phn_next_set(a_type, a_field, phn1, NULL); \
phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
head = tail = phn0; \
phn0 = phnrest; \
while (phn0 != NULL) { \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
phnrest = phn_next_get(a_type, a_field, \
phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, \
phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, \
NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
phn_prev_set(a_type, a_field, phn1, \
NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = phnrest; \
} else { \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = NULL; \
} \
} \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
while (true) { \
head = phn_next_get(a_type, a_field, \
phn1); \
assert(phn_prev_get(a_type, a_field, \
phn0) == NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
assert(phn_prev_get(a_type, a_field, \
phn1) == NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
if (head == NULL) \
break; \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, \
phn0); \
} \
} \
} \
r_phn = phn0; \
} while (0)
#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
if (phn != NULL) { \
phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_prev_set(a_type, a_field, phn, NULL); \
ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
assert(phn_next_get(a_type, a_field, phn) == NULL); \
phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
a_ph->ph_root); \
} \
} while (0)
#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
if (lchild == NULL) \
r_phn = NULL; \
else { \
ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
r_phn); \
} \
} while (0)
/*
* The ph_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to ph_gen().
*/
#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
a_attr void a_prefix##new(a_ph_type *ph); \
a_attr bool a_prefix##empty(a_ph_type *ph); \
a_attr a_type *a_prefix##first(a_ph_type *ph); \
a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
/*
* The ph_gen() macro generates a type-specific pairing heap implementation,
* based on the above cpp macros.
*/
#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_ph_type *ph) \
{ \
\
memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
a_prefix##empty(a_ph_type *ph) \
{ \
\
return (ph->ph_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_ph_type *ph) \
{ \
\
if (ph->ph_root == NULL) \
return (NULL); \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
return (ph->ph_root); \
} \
a_attr void \
a_prefix##insert(a_ph_type *ph, a_type *phn) \
{ \
\
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
/* \
* Treat the root as an aux list during insertion, and lazily \
* merge during a_prefix##remove_first(). For elements that \
* are inserted, then removed via a_prefix##remove() before the \
* aux list is ever processed, this makes insert/remove \
* constant-time, whereas eager merging would make insert \
* O(log n). \
*/ \
if (ph->ph_root == NULL) \
ph->ph_root = phn; \
else { \
phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
a_field, ph->ph_root)); \
if (phn_next_get(a_type, a_field, ph->ph_root) != \
NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, ph->ph_root), \
phn); \
} \
phn_prev_set(a_type, a_field, phn, ph->ph_root); \
phn_next_set(a_type, a_field, ph->ph_root, phn); \
} \
} \
a_attr a_type * \
a_prefix##remove_first(a_ph_type *ph) \
{ \
a_type *ret; \
\
if (ph->ph_root == NULL) \
return (NULL); \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
\
ret = ph->ph_root; \
\
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
\
return (ret); \
} \
a_attr void \
a_prefix##remove(a_ph_type *ph, a_type *phn) \
{ \
a_type *replace, *parent; \
\
/* \
	 * We can delete from the aux list without merging it, but we \
	 * need to merge if we are dealing with the root node. \
*/ \
if (ph->ph_root == phn) { \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
if (ph->ph_root == phn) { \
ph_merge_children(a_type, a_field, ph->ph_root, \
a_cmp, ph->ph_root); \
return; \
} \
} \
\
/* Get parent (if phn is leftmost child) before mutating. */ \
if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
if (phn_lchild_get(a_type, a_field, parent) != phn) \
parent = NULL; \
} \
/* Find a possible replacement node, and link to parent. */ \
ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
/* Set next/prev for sibling linked list. */ \
if (replace != NULL) { \
if (parent != NULL) { \
phn_prev_set(a_type, a_field, replace, parent); \
phn_lchild_set(a_type, a_field, parent, \
replace); \
} else { \
phn_prev_set(a_type, a_field, replace, \
phn_prev_get(a_type, a_field, phn)); \
if (phn_prev_get(a_type, a_field, phn) != \
NULL) { \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
replace); \
} \
} \
phn_next_set(a_type, a_field, replace, \
phn_next_get(a_type, a_field, phn)); \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
replace); \
} \
} else { \
if (parent != NULL) { \
a_type *next = phn_next_get(a_type, a_field, \
phn); \
phn_lchild_set(a_type, a_field, parent, next); \
if (next != NULL) { \
phn_prev_set(a_type, a_field, next, \
parent); \
} \
} else { \
assert(phn_prev_get(a_type, a_field, phn) != \
NULL); \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
phn_next_get(a_type, a_field, phn)); \
} \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
phn_prev_get(a_type, a_field, phn)); \
} \
} \
}
#endif /* PH_H_ */
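A hedged usage sketch for ph_proto()/ph_gen() above (not taken from the jemalloc tests): instantiate a pairing heap of nodes keyed by an integer. node_t, node_heap_t, node_cmp(), and the node_heap_ prefix are hypothetical names.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

typedef struct node_s node_t;
struct node_s {
	int		key;
	phn(node_t)	link;	/* Intrusive pairing-heap linkage. */
};
typedef ph(node_t) node_heap_t;

static int
node_cmp(const node_t *a, const node_t *b)
{

	return ((a->key > b->key) - (a->key < b->key));
}

ph_proto(static, node_heap_, node_heap_t, node_t)
ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)

/*
 * Typical usage:
 *	node_heap_t heap;  node_t n;  n.key = 42;
 *	node_heap_new(&heap);
 *	node_heap_insert(&heap, &n);
 *	assert(node_heap_remove_first(&heap) == &n);
 */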
#define a0calloc JEMALLOC_N(a0calloc)
#define a0free JEMALLOC_N(a0free)
#define a0malloc JEMALLOC_N(a0malloc)
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
#define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_new JEMALLOC_N(arena_new)
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
#define arena_prefork JEMALLOC_N(arena_prefork)
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
#define arena_purge_all JEMALLOC_N(arena_purge_all)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas JEMALLOC_N(arenas)
#define arenas_booted JEMALLOC_N(arenas_booted)
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
#define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_initialized JEMALLOC_N(arenas_initialized)
#define arenas_lock JEMALLOC_N(arenas_lock)
#define arenas_tls JEMALLOC_N(arenas_tls)
#define arenas_tsd JEMALLOC_N(arenas_tsd)
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
#define atomic_add_u JEMALLOC_N(atomic_add_u)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
#define atomic_add_z JEMALLOC_N(atomic_add_z)
#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
#define base_alloc JEMALLOC_N(base_alloc)
#define base_boot JEMALLOC_N(base_boot)
#define base_calloc JEMALLOC_N(base_calloc)
#define base_node_alloc JEMALLOC_N(base_node_alloc)
#define base_node_dealloc JEMALLOC_N(base_node_dealloc)
#define base_postfork_child JEMALLOC_N(base_postfork_child)
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
#define base_prefork JEMALLOC_N(base_prefork)
#define bitmap_full JEMALLOC_N(bitmap_full)
#define bitmap_get JEMALLOC_N(bitmap_get)
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups)
#define bitmap_init JEMALLOC_N(bitmap_init)
#define bitmap_set JEMALLOC_N(bitmap_set)
#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
#define bitmap_size JEMALLOC_N(bitmap_size)
#define bitmap_unset JEMALLOC_N(bitmap_unset)
#define bt_init JEMALLOC_N(bt_init)
#define buferror JEMALLOC_N(buferror)
#define choose_arena JEMALLOC_N(choose_arena)
#define choose_arena_hard JEMALLOC_N(choose_arena_hard)
#define chunk_alloc JEMALLOC_N(chunk_alloc)
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
#define chunk_boot JEMALLOC_N(chunk_boot)
#define chunk_dealloc JEMALLOC_N(chunk_dealloc)
#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_npages JEMALLOC_N(chunk_npages)
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
#define chunk_prefork JEMALLOC_N(chunk_prefork)
#define chunk_unmap JEMALLOC_N(chunk_unmap)
#define chunks_mtx JEMALLOC_N(chunks_mtx)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize)
#define chunksize_mask JEMALLOC_N(chunksize_mask)
#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
#define ckh_count JEMALLOC_N(ckh_count)
#define ckh_delete JEMALLOC_N(ckh_delete)
#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert)
#define ckh_insert JEMALLOC_N(ckh_insert)
#define ckh_isearch JEMALLOC_N(ckh_isearch)
#define ckh_iter JEMALLOC_N(ckh_iter)
#define ckh_new JEMALLOC_N(ckh_new)
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
#define ckh_rebuild JEMALLOC_N(ckh_rebuild)
#define ckh_remove JEMALLOC_N(ckh_remove)
#define ckh_search JEMALLOC_N(ckh_search)
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
#define ckh_try_insert JEMALLOC_N(ckh_try_insert)
#define ctl_boot JEMALLOC_N(ctl_boot)
#define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname)
#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
#define ctl_prefork JEMALLOC_N(ctl_prefork)
#define dss_prec_names JEMALLOC_N(dss_prec_names)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse)
#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start)
#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last)
#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new)
#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next)
#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch)
#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev)
#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch)
#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove)
#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter)
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse)
#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start)
#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last)
#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new)
#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next)
#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch)
#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev)
#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch)
#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove)
#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter)
#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
#define get_errno JEMALLOC_N(get_errno)
#define hash JEMALLOC_N(hash)
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_mtx JEMALLOC_N(huge_mtx)
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
#define huge_palloc JEMALLOC_N(huge_palloc)
#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
#define huge_prefork JEMALLOC_N(huge_prefork)
#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
#define huge_ralloc JEMALLOC_N(huge_ralloc)
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iallocm JEMALLOC_N(iallocm)
#define icalloc JEMALLOC_N(icalloc)
#define icallocx JEMALLOC_N(icallocx)
#define idalloc JEMALLOC_N(idalloc)
#define idallocx JEMALLOC_N(idallocx)
#define imalloc JEMALLOC_N(imalloc)
#define imallocx JEMALLOC_N(imallocx)
#define ipalloc JEMALLOC_N(ipalloc)
#define ipallocx JEMALLOC_N(ipallocx)
#define iqalloc JEMALLOC_N(iqalloc)
#define iqallocx JEMALLOC_N(iqallocx)
#define iralloc JEMALLOC_N(iralloc)
#define irallocx JEMALLOC_N(irallocx)
#define isalloc JEMALLOC_N(isalloc)
#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
#define malloc_printf JEMALLOC_N(malloc_printf)
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup)
#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
#define malloc_write JEMALLOC_N(malloc_write)
#define map_bias JEMALLOC_N(map_bias)
#define mb_write JEMALLOC_N(mb_write)
#define mutex_boot JEMALLOC_N(mutex_boot)
#define narenas_auto JEMALLOC_N(narenas_auto)
#define narenas_total JEMALLOC_N(narenas_total)
#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define opt_abort JEMALLOC_N(opt_abort)
#define opt_junk JEMALLOC_N(opt_junk)
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
#define opt_narenas JEMALLOC_N(opt_narenas)
#define opt_prof JEMALLOC_N(opt_prof)
#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
#define opt_prof_active JEMALLOC_N(opt_prof_active)
#define opt_prof_final JEMALLOC_N(opt_prof_final)
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
#define opt_quarantine JEMALLOC_N(opt_quarantine)
#define opt_redzone JEMALLOC_N(opt_redzone)
#define opt_stats_print JEMALLOC_N(opt_stats_print)
#define opt_tcache JEMALLOC_N(opt_tcache)
#define opt_utrace JEMALLOC_N(opt_utrace)
#define opt_valgrind JEMALLOC_N(opt_valgrind)
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
#define p2rz JEMALLOC_N(p2rz)
#define pages_purge JEMALLOC_N(pages_purge)
#define pow2_ceil JEMALLOC_N(pow2_ceil)
#define prof_backtrace JEMALLOC_N(prof_backtrace)
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
#define prof_free JEMALLOC_N(prof_free)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_idump JEMALLOC_N(prof_idump)
#define prof_interval JEMALLOC_N(prof_interval)
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
#define prof_prefork JEMALLOC_N(prof_prefork)
#define prof_promote JEMALLOC_N(prof_promote)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
#define quarantine JEMALLOC_N(quarantine)
#define quarantine_boot JEMALLOC_N(quarantine_boot)
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
#define register_zone JEMALLOC_N(register_zone)
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new)
#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
#define rtree_prefork JEMALLOC_N(rtree_prefork)
#define rtree_set JEMALLOC_N(rtree_set)
#define s2u JEMALLOC_N(s2u)
#define sa2u JEMALLOC_N(sa2u)
#define set_errno JEMALLOC_N(set_errno)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
#define stats_chunks JEMALLOC_N(stats_chunks)
#define stats_print JEMALLOC_N(stats_print)
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
#define tcache_boot0 JEMALLOC_N(tcache_boot0)
#define tcache_boot1 JEMALLOC_N(tcache_boot1)
#define tcache_booted JEMALLOC_N(tcache_booted)
#define tcache_create JEMALLOC_N(tcache_create)
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
#define tcache_destroy JEMALLOC_N(tcache_destroy)
#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
#define tcache_flush JEMALLOC_N(tcache_flush)
#define tcache_get JEMALLOC_N(tcache_get)
#define tcache_initialized JEMALLOC_N(tcache_initialized)
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
#define tcache_salloc JEMALLOC_N(tcache_salloc)
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
#define tcache_tls JEMALLOC_N(tcache_tls)
#define tcache_tsd JEMALLOC_N(tcache_tsd)
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
#define u2rz JEMALLOC_N(u2rz)
#!/bin/sh
for symbol in `cat $1` ; do
echo "#define ${symbol} JEMALLOC_N(${symbol})"
done
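The script above regenerates the mangling list: each symbol read from $1 is wrapped in JEMALLOC_N() so it resolves to a library-private name. As a minimal sketch (assumptions, not taken from this diff: configure substitutes the private prefix, shown here as je_), the expansion works roughly like this:

/* Sketch only; the je_ prefix is an assumed example of the configured namespace. */
#define JEMALLOC_N(n) je_##n
#define bitmap_init JEMALLOC_N(bitmap_init)	/* bitmap_init now names je_bitmap_init */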
a0dalloc
a0get
a0malloc
arena_aalloc
arena_alloc_junk_small
arena_basic_stats_merge
arena_bin_index
arena_bin_info
arena_bitselm_get_const
arena_bitselm_get_mutable
arena_boot
arena_choose
arena_choose_hard
arena_choose_impl
arena_chunk_alloc_huge
arena_chunk_cache_maybe_insert
arena_chunk_cache_maybe_remove
arena_chunk_dalloc_huge
arena_chunk_ralloc_huge_expand
arena_chunk_ralloc_huge_shrink
arena_chunk_ralloc_huge_similar
arena_cleanup
arena_dalloc
arena_dalloc_bin
arena_dalloc_bin_junked_locked
arena_dalloc_junk_large
arena_dalloc_junk_small
arena_dalloc_large
arena_dalloc_large_junked_locked
arena_dalloc_small
arena_decay_tick
arena_decay_ticks
arena_decay_time_default_get
arena_decay_time_default_set
arena_decay_time_get
arena_decay_time_set
arena_dss_prec_get
arena_dss_prec_set
arena_extent_sn_next
arena_get
arena_ichoose
arena_init
arena_lg_dirty_mult_default_get
arena_lg_dirty_mult_default_set
arena_lg_dirty_mult_get
arena_lg_dirty_mult_set
arena_malloc
arena_malloc_hard
arena_malloc_large
arena_mapbits_allocated_get
arena_mapbits_binind_get
arena_mapbits_decommitted_get
arena_mapbits_dirty_get
arena_mapbits_get
arena_mapbits_internal_set
arena_mapbits_large_binind_set
arena_mapbits_large_get
arena_mapbits_large_set
arena_mapbits_large_size_get
arena_mapbits_size_decode
arena_mapbits_size_encode
arena_mapbits_small_runind_get
arena_mapbits_small_set
arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
arena_mapbitsp_get_const
arena_mapbitsp_get_mutable
arena_mapbitsp_read
arena_mapbitsp_write
arena_maxrun
arena_maybe_purge
arena_metadata_allocated_add
arena_metadata_allocated_get
arena_metadata_allocated_sub
arena_migrate
arena_miscelm_get_const
arena_miscelm_get_mutable
arena_miscelm_to_pageind
arena_miscelm_to_rpages
arena_new
arena_node_alloc
arena_node_dalloc
arena_nthreads_dec
arena_nthreads_get
arena_nthreads_inc
arena_palloc
arena_postfork_child
arena_postfork_parent
arena_prefork0
arena_prefork1
arena_prefork2
arena_prefork3
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
arena_prof_promoted
arena_prof_tctx_get
arena_prof_tctx_reset
arena_prof_tctx_set
arena_ptr_small_binind_get
arena_purge
arena_quarantine_junk_small
arena_ralloc
arena_ralloc_junk_large
arena_ralloc_no_move
arena_rd_to_miscelm
arena_redzone_corruption
arena_reset
arena_run_regind
arena_run_to_miscelm
arena_salloc
arena_sdalloc
arena_stats_merge
arena_tcache_fill_small
arena_tdata_get
arena_tdata_get_hard
arenas
arenas_tdata_bypass_cleanup
arenas_tdata_cleanup
atomic_add_p
atomic_add_u
atomic_add_uint32
atomic_add_uint64
atomic_add_z
atomic_cas_p
atomic_cas_u
atomic_cas_uint32
atomic_cas_uint64
atomic_cas_z
atomic_sub_p
atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
atomic_write_p
atomic_write_u
atomic_write_uint32
atomic_write_uint64
atomic_write_z
base_alloc
base_boot
base_postfork_child
base_postfork_parent
base_prefork
base_stats_get
bitmap_full
bitmap_get
bitmap_info_init
bitmap_init
bitmap_set
bitmap_sfu
bitmap_size
bitmap_unset
bootstrap_calloc
bootstrap_free
bootstrap_malloc
bt_init
buferror
chunk_alloc_base
chunk_alloc_cache
chunk_alloc_dss
chunk_alloc_mmap
chunk_alloc_wrapper
chunk_boot
chunk_dalloc_cache
chunk_dalloc_mmap
chunk_dalloc_wrapper
chunk_deregister
chunk_dss_boot
chunk_dss_mergeable
chunk_dss_prec_get
chunk_dss_prec_set
chunk_hooks_default
chunk_hooks_get
chunk_hooks_set
chunk_in_dss
chunk_lookup
chunk_npages
chunk_purge_wrapper
chunk_register
chunks_rtree
chunksize
chunksize_mask
ckh_count
ckh_delete
ckh_insert
ckh_iter
ckh_new
ckh_pointer_hash
ckh_pointer_keycomp
ckh_remove
ckh_search
ckh_string_hash
ckh_string_keycomp
ctl_boot
ctl_bymib
ctl_byname
ctl_nametomib
ctl_postfork_child
ctl_postfork_parent
ctl_prefork
decay_ticker_get
dss_prec_names
extent_node_achunk_get
extent_node_achunk_set
extent_node_addr_get
extent_node_addr_set
extent_node_arena_get
extent_node_arena_set
extent_node_committed_get
extent_node_committed_set
extent_node_dirty_insert
extent_node_dirty_linkage_init
extent_node_dirty_remove
extent_node_init
extent_node_prof_tctx_get
extent_node_prof_tctx_set
extent_node_size_get
extent_node_size_set
extent_node_sn_get
extent_node_sn_set
extent_node_zeroed_get
extent_node_zeroed_set
extent_tree_ad_destroy
extent_tree_ad_destroy_recurse
extent_tree_ad_empty
extent_tree_ad_first
extent_tree_ad_insert
extent_tree_ad_iter
extent_tree_ad_iter_recurse
extent_tree_ad_iter_start
extent_tree_ad_last
extent_tree_ad_new
extent_tree_ad_next
extent_tree_ad_nsearch
extent_tree_ad_prev
extent_tree_ad_psearch
extent_tree_ad_remove
extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
extent_tree_szsnad_destroy
extent_tree_szsnad_destroy_recurse
extent_tree_szsnad_empty
extent_tree_szsnad_first
extent_tree_szsnad_insert
extent_tree_szsnad_iter
extent_tree_szsnad_iter_recurse
extent_tree_szsnad_iter_start
extent_tree_szsnad_last
extent_tree_szsnad_new
extent_tree_szsnad_next
extent_tree_szsnad_nsearch
extent_tree_szsnad_prev
extent_tree_szsnad_psearch
extent_tree_szsnad_remove
extent_tree_szsnad_reverse_iter
extent_tree_szsnad_reverse_iter_recurse
extent_tree_szsnad_reverse_iter_start
extent_tree_szsnad_search
ffs_llu
ffs_lu
ffs_u
ffs_u32
ffs_u64
ffs_zu
get_errno
hash
hash_fmix_32
hash_fmix_64
hash_get_block_32
hash_get_block_64
hash_rotl_32
hash_rotl_64
hash_x64_128
hash_x86_128
hash_x86_32
huge_aalloc
huge_dalloc
huge_dalloc_junk
huge_malloc
huge_palloc
huge_prof_tctx_get
huge_prof_tctx_reset
huge_prof_tctx_set
huge_ralloc
huge_ralloc_no_move
huge_salloc
iaalloc
ialloc
iallocztm
iarena_cleanup
idalloc
idalloctm
in_valgrind
index2size
index2size_compute
index2size_lookup
index2size_tab
ipalloc
ipalloct
ipallocztm
iqalloc
iralloc
iralloct
iralloct_realign
isalloc
isdalloct
isqalloc
isthreaded
ivsalloc
ixalloc
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
large_maxclass
lg_floor
lg_prof_sample
malloc_cprintf
malloc_mutex_assert_not_owner
malloc_mutex_assert_owner
malloc_mutex_boot
malloc_mutex_init
malloc_mutex_lock
malloc_mutex_postfork_child
malloc_mutex_postfork_parent
malloc_mutex_prefork
malloc_mutex_unlock
malloc_printf
malloc_snprintf
malloc_strtoumax
malloc_tsd_boot0
malloc_tsd_boot1
malloc_tsd_cleanup_register
malloc_tsd_dalloc
malloc_tsd_malloc
malloc_tsd_no_cleanup
malloc_vcprintf
malloc_vsnprintf
malloc_write
map_bias
map_misc_offset
mb_write
narenas_auto
narenas_tdata_cleanup
narenas_total_get
ncpus
nhbins
nhclasses
nlclasses
nstime_add
nstime_compare
nstime_copy
nstime_divide
nstime_idivide
nstime_imultiply
nstime_init
nstime_init2
nstime_monotonic
nstime_ns
nstime_nsec
nstime_sec
nstime_subtract
nstime_update
opt_abort
opt_decay_time
opt_dss
opt_junk
opt_junk_alloc
opt_junk_free
opt_lg_chunk
opt_lg_dirty_mult
opt_lg_prof_interval
opt_lg_prof_sample
opt_lg_tcache_max
opt_narenas
opt_prof
opt_prof_accum
opt_prof_active
opt_prof_final
opt_prof_gdump
opt_prof_leak
opt_prof_prefix
opt_prof_thread_active_init
opt_purge
opt_quarantine
opt_redzone
opt_stats_print
opt_tcache
opt_utrace
opt_xmalloc
opt_zero
p2rz
pages_boot
pages_commit
pages_decommit
pages_huge
pages_map
pages_nohuge
pages_purge
pages_trim
pages_unmap
pind2sz
pind2sz_compute
pind2sz_lookup
pind2sz_tab
pow2_ceil_u32
pow2_ceil_u64
pow2_ceil_zu
prng_lg_range_u32
prng_lg_range_u64
prng_lg_range_zu
prng_range_u32
prng_range_u64
prng_range_zu
prng_state_next_u32
prng_state_next_u64
prng_state_next_zu
prof_active
prof_active_get
prof_active_get_unlocked
prof_active_set
prof_alloc_prep
prof_alloc_rollback
prof_backtrace
prof_boot0
prof_boot1
prof_boot2
prof_bt_count
prof_dump_header
prof_dump_open
prof_free
prof_free_sampled_object
prof_gdump
prof_gdump_get
prof_gdump_get_unlocked
prof_gdump_set
prof_gdump_val
prof_idump
prof_interval
prof_lookup
prof_malloc
prof_malloc_sample_object
prof_mdump
prof_postfork_child
prof_postfork_parent
prof_prefork0
prof_prefork1
prof_realloc
prof_reset
prof_sample_accum_update
prof_sample_threshold_update
prof_tctx_get
prof_tctx_reset
prof_tctx_set
prof_tdata_cleanup
prof_tdata_count
prof_tdata_get
prof_tdata_init
prof_tdata_reinit
prof_thread_active_get
prof_thread_active_init_get
prof_thread_active_init_set
prof_thread_active_set
prof_thread_name_get
prof_thread_name_set
psz2ind
psz2u
purge_mode_names
quarantine
quarantine_alloc_hook
quarantine_alloc_hook_work
quarantine_cleanup
rtree_child_read
rtree_child_read_hard
rtree_child_tryread
rtree_delete
rtree_get
rtree_new
rtree_node_valid
rtree_set
rtree_start_level
rtree_subkey
rtree_subtree_read
rtree_subtree_read_hard
rtree_subtree_tryread
rtree_val_read
rtree_val_write
run_quantize_ceil
run_quantize_floor
s2u
s2u_compute
s2u_lookup
sa2u
set_errno
size2index
size2index_compute
size2index_lookup
size2index_tab
spin_adaptive
spin_init
stats_cactive
stats_cactive_add
stats_cactive_get
stats_cactive_sub
stats_print
tcache_alloc_easy
tcache_alloc_large
tcache_alloc_small
tcache_alloc_small_hard
tcache_arena_reassociate
tcache_bin_flush_large
tcache_bin_flush_small
tcache_bin_info
tcache_boot
tcache_cleanup
tcache_create
tcache_dalloc_large
tcache_dalloc_small
tcache_enabled_cleanup
tcache_enabled_get
tcache_enabled_set
tcache_event
tcache_event_hard
tcache_flush
tcache_get
tcache_get_hard
tcache_maxclass
tcache_salloc
tcache_stats_merge
tcaches
tcaches_create
tcaches_destroy
tcaches_flush
tcaches_get
thread_allocated_cleanup
thread_deallocated_cleanup
ticker_copy
ticker_init
ticker_read
ticker_tick
ticker_ticks
tsd_arena_get
tsd_arena_set
tsd_arenap_get
tsd_arenas_tdata_bypass_get
tsd_arenas_tdata_bypass_set
tsd_arenas_tdata_bypassp_get
tsd_arenas_tdata_get
tsd_arenas_tdata_set
tsd_arenas_tdatap_get
tsd_boot
tsd_boot0
tsd_boot1
tsd_booted
tsd_booted_get
tsd_cleanup
tsd_cleanup_wrapper
tsd_fetch
tsd_fetch_impl
tsd_get
tsd_get_allocates
tsd_iarena_get
tsd_iarena_set
tsd_iarenap_get
tsd_initialized
tsd_init_check_recursion
tsd_init_finish
tsd_init_head
tsd_narenas_tdata_get
tsd_narenas_tdata_set
tsd_narenas_tdatap_get
tsd_wrapper_get
tsd_wrapper_set
tsd_nominal
tsd_prof_tdata_get
tsd_prof_tdata_set
tsd_prof_tdatap_get
tsd_quarantine_get
tsd_quarantine_set
tsd_quarantinep_get
tsd_set
tsd_tcache_enabled_get
tsd_tcache_enabled_set
tsd_tcache_enabledp_get
tsd_tcache_get
tsd_tcache_set
tsd_tcachep_get
tsd_thread_allocated_get
tsd_thread_allocated_set
tsd_thread_allocatedp_get
tsd_thread_deallocated_get
tsd_thread_deallocated_set
tsd_thread_deallocatedp_get
tsd_tls
tsd_tsd
tsd_tsdn
tsd_witness_fork_get
tsd_witness_fork_set
tsd_witness_forkp_get
tsd_witnesses_get
tsd_witnesses_set
tsd_witnessesp_get
tsdn_fetch
tsdn_null
tsdn_tsd
u2rz
valgrind_freelike_block
valgrind_make_mem_defined
valgrind_make_mem_noaccess
valgrind_make_mem_undefined
witness_assert_lockless
witness_assert_not_owner
witness_assert_owner
witness_fork_cleanup
witness_init
witness_lock
witness_lock_error
witness_lockless_error
witness_not_owner_error
witness_owner
witness_owner_error
witness_postfork_child
witness_postfork_parent
witness_prefork
witness_unlock
witnesses_cleanup
zone_register
#!/bin/sh
for symbol in `cat $1` ; do
echo "#undef ${symbol}"
done
...@@ -15,34 +15,16 @@ ...@@ -15,34 +15,16 @@
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
* *
* This choice of m has the disadvantage that the quality of the bits is * This choice of m has the disadvantage that the quality of the bits is
* proportional to bit position. For example. the lowest bit has a cycle of 2, * proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper * the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits. * bits.
*
* Macro parameters:
* uint32_t r : Result.
* unsigned lg_range : (0..32], number of least significant bits to return.
* uint32_t state : Seed value.
* const uint32_t a, c : See above discussion.
*/ */
#define prng32(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \ #define PRNG_A_32 UINT32_C(1103515241)
assert(lg_range <= 32); \ #define PRNG_C_32 UINT32_C(12347)
\
r = (state * (a)) + (c); \ #define PRNG_A_64 UINT64_C(6364136223846793005)
state = r; \ #define PRNG_C_64 UINT64_C(1442695040888963407)
r >>= (32 - lg_range); \
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 64); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (64 - lg_range); \
} while (false)
#endif /* JEMALLOC_H_TYPES */ #endif /* JEMALLOC_H_TYPES */
/******************************************************************************/ /******************************************************************************/
...@@ -56,5 +38,170 @@ ...@@ -56,5 +38,170 @@
/******************************************************************************/ /******************************************************************************/
#ifdef JEMALLOC_H_INLINES #ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint32_t prng_state_next_u32(uint32_t state);
uint64_t prng_state_next_u64(uint64_t state);
size_t prng_state_next_zu(size_t state);
uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range,
bool atomic);
uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range);
size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic);
uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic);
uint64_t prng_range_u64(uint64_t *state, uint64_t range);
size_t prng_range_zu(size_t *state, size_t range, bool atomic);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state)
{
return ((state * PRNG_A_32) + PRNG_C_32);
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state)
{
return ((state * PRNG_A_64) + PRNG_C_64);
}
JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state)
{
#if LG_SIZEOF_PTR == 2
return ((state * PRNG_A_32) + PRNG_C_32);
#elif LG_SIZEOF_PTR == 3
return ((state * PRNG_A_64) + PRNG_C_64);
#else
#error Unsupported pointer size
#endif
}
JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)
{
uint32_t ret, state1;
assert(lg_range > 0);
assert(lg_range <= 32);
if (atomic) {
uint32_t state0;
do {
state0 = atomic_read_uint32(state);
state1 = prng_state_next_u32(state0);
} while (atomic_cas_uint32(state, state0, state1));
} else {
state1 = prng_state_next_u32(*state);
*state = state1;
}
ret = state1 >> (32 - lg_range);
return (ret);
}
/* 64-bit atomic operations cannot be supported on all relevant platforms. */
JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range)
{
uint64_t ret, state1;
assert(lg_range > 0);
assert(lg_range <= 64);
state1 = prng_state_next_u64(*state);
*state = state1;
ret = state1 >> (64 - lg_range);
return (ret);
}
JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
{
size_t ret, state1;
assert(lg_range > 0);
assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
if (atomic) {
size_t state0;
do {
state0 = atomic_read_z(state);
state1 = prng_state_next_zu(state0);
} while (atomic_cas_z(state, state0, state1));
} else {
state1 = prng_state_next_zu(*state);
*state = state1;
}
ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
return (ret);
}
JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
{
uint32_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_u32(state, lg_range, atomic);
} while (ret >= range);
return (ret);
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range)
{
uint64_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_u64(state, lg_range);
} while (ret >= range);
return (ret);
}
JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(size_t *state, size_t range, bool atomic)
{
size_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_zu(state, lg_range, atomic);
} while (ret >= range);
return (ret);
}
#endif
#endif /* JEMALLOC_H_INLINES */ #endif /* JEMALLOC_H_INLINES */
/******************************************************************************/ /******************************************************************************/
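To illustrate how the refactored PRNG inlines above are meant to be called, here is a minimal hypothetical caller (it assumes the prng.h inlines are in scope and the state word has been seeded; pick_index and prng_state are illustrative names, not part of this diff):

#include <stdint.h>

static uint64_t prng_state = UINT64_C(0x9e3779b97f4a7c15);	/* arbitrary nonzero seed */

static uint64_t
pick_index(uint64_t nslots)
{
	/* nslots must be > 1, per the assert in prng_range_u64(). */
	/* Result is uniform in [0, nslots); the low-quality low LCG bits never reach the caller. */
	return (prng_range_u64(&prng_state, nslots));
}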
...@@ -3,12 +3,16 @@ ...@@ -3,12 +3,16 @@
typedef struct prof_bt_s prof_bt_t; typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t; typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_thr_cnt_s prof_thr_cnt_t; typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_ctx_s prof_ctx_t; typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t; typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */ /* Option defaults. */
#define PROF_PREFIX_DEFAULT "jeprof" #ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19 #define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1 #define LG_PROF_INTERVAL_DEFAULT -1
...@@ -19,9 +23,6 @@ typedef struct prof_tdata_s prof_tdata_t; ...@@ -19,9 +23,6 @@ typedef struct prof_tdata_s prof_tdata_t;
*/ */
#define PROF_BT_MAX 128 #define PROF_BT_MAX 128
/* Maximum number of backtraces to store in each per thread LRU cache. */
#define PROF_TCMAX 1024
/* Initial hash table size. */ /* Initial hash table size. */
#define PROF_CKH_MINITEMS 64 #define PROF_CKH_MINITEMS 64
...@@ -32,11 +33,17 @@ typedef struct prof_tdata_s prof_tdata_t; ...@@ -32,11 +33,17 @@ typedef struct prof_tdata_s prof_tdata_t;
#define PROF_PRINTF_BUFSIZE 128 #define PROF_PRINTF_BUFSIZE 128
/* /*
* Number of mutexes shared among all ctx's. No space is allocated for these * Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision. * unless profiling is enabled, so it's okay to over-provision.
*/ */
#define PROF_NCTX_LOCKS 1024 #define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
/* /*
* prof_tdata pointers close to NULL are used to encode state information that * prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown. * is used for cleaning up during thread shutdown.
...@@ -59,143 +66,204 @@ struct prof_bt_s { ...@@ -59,143 +66,204 @@ struct prof_bt_s {
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ /* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct { typedef struct {
prof_bt_t *bt; prof_bt_t *bt;
unsigned nignore;
unsigned max; unsigned max;
} prof_unwind_data_t; } prof_unwind_data_t;
#endif #endif
struct prof_cnt_s { struct prof_cnt_s {
/* /* Profiling counters. */
* Profiling counters. An allocation/deallocation pair can operate on uint64_t curobjs;
* different prof_thr_cnt_t objects that are linked into the same uint64_t curbytes;
* prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
* negative. In principle it is possible for the *bytes counters to
* overflow/underflow, but a general solution would require something
* like 128-bit counters; this implementation doesn't bother to solve
* that problem.
*/
int64_t curobjs;
int64_t curbytes;
uint64_t accumobjs; uint64_t accumobjs;
uint64_t accumbytes; uint64_t accumbytes;
}; };
struct prof_thr_cnt_s { typedef enum {
/* Linkage into prof_ctx_t's cnts_ql. */ prof_tctx_state_initializing,
ql_elm(prof_thr_cnt_t) cnts_link; prof_tctx_state_nominal,
prof_tctx_state_dumping,
prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;
/* Linkage into thread's LRU. */ struct prof_tctx_s {
ql_elm(prof_thr_cnt_t) lru_link; /* Thread data for thread that performed the allocation. */
prof_tdata_t *tdata;
/* /*
* Associated context. If a thread frees an object that it did not * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
* allocate, it is possible that the context is not cached in the * defunct during teardown.
* thread's hash table, in which case it must be able to look up the
* context, insert a new prof_thr_cnt_t into the thread's hash table,
* and link it into the prof_ctx_t's cnts_ql.
*/ */
prof_ctx_t *ctx; uint64_t thr_uid;
uint64_t thr_discrim;
/* Profiling counters, protected by tdata->lock. */
prof_cnt_t cnts;
/* Associated global context. */
prof_gctx_t *gctx;
/* /*
* Threads use memory barriers to update the counters. Since there is * UID that distinguishes multiple tctx's created by the same thread,
* only ever one writer, the only challenge is for the reader to get a * but coexisting in gctx->tctxs. There are two ways that such
* consistent read of the counters. * coexistence can occur:
* * - A dumper thread can cause a tctx to be retained in the purgatory
* The writer uses this series of operations: * state.
* * - Although a single "producer" thread must create all tctx's which
* 1) Increment epoch to an odd number. * share the same thr_uid, multiple "consumers" can each concurrently
* 2) Update counters. * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
* 3) Increment epoch to an even number. * gets called once each time cnts.cur{objs,bytes} drop to 0, but this
* * threshold can be hit again before the first consumer finishes
* The reader must assure 1) that the epoch is even while it reads the * executing prof_tctx_destroy().
* counters, and 2) that the epoch doesn't change between the time it
* starts and finishes reading the counters.
*/ */
unsigned epoch; uint64_t tctx_uid;
/* Profiling counters. */ /* Linkage into gctx's tctxs. */
prof_cnt_t cnts; rb_node(prof_tctx_t) tctx_link;
};
struct prof_ctx_s { /*
/* Associated backtrace. */ * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
prof_bt_t *bt; * sample vs destroy race.
*/
bool prepared;
/* Current dump-related state, protected by gctx->lock. */
prof_tctx_state_t state;
/*
* Copy of cnts snapshotted during early dump phase, protected by
* dump_mtx.
*/
prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
/* Protects nlimbo, cnt_merged, and cnts_ql. */ struct prof_gctx_s {
/* Protects nlimbo, cnt_summed, and tctxs. */
malloc_mutex_t *lock; malloc_mutex_t *lock;
/* /*
* Number of threads that currently cause this ctx to be in a state of * Number of threads that currently cause this gctx to be in a state of
* limbo due to one of: * limbo due to one of:
* - Initializing per thread counters associated with this ctx. * - Initializing this gctx.
* - Preparing to destroy this ctx. * - Initializing per thread counters associated with this gctx.
* - Preparing to destroy this gctx.
* - Dumping a heap profile that includes this gctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the * nlimbo must be 1 (single destroyer) in order to safely destroy the
* ctx. * gctx.
*/ */
unsigned nlimbo; unsigned nlimbo;
/*
* Tree of profile counters, one for each thread that has allocated in
* this context.
*/
prof_tctx_tree_t tctxs;
/* Linkage for tree of contexts to be dumped. */
rb_node(prof_gctx_t) dump_link;
/* Temporary storage for summation during dump. */ /* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed; prof_cnt_t cnt_summed;
/* When threads exit, they merge their stats into cnt_merged. */ /* Associated backtrace. */
prof_cnt_t cnt_merged; prof_bt_t bt;
/* /* Backtrace vector, variable size, referred to by bt. */
* List of profile counters, one for each thread that has allocated in void *vec[1];
* this context.
*/
ql_head(prof_thr_cnt_t) cnts_ql;
}; };
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
struct prof_tdata_s { struct prof_tdata_s {
malloc_mutex_t *lock;
/* Monotonically increasing unique thread identifier. */
uint64_t thr_uid;
/* /*
* Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a * Monotonically increasing discriminator among tdata structures
* cache of backtraces, with associated thread-specific prof_thr_cnt_t * associated with the same thr_uid.
* objects. Other threads may read the prof_thr_cnt_t contents, but no
* others will ever write them.
*
* Upon thread exit, the thread must merge all the prof_thr_cnt_t
* counter data into the associated prof_ctx_t objects, and unlink/free
* the prof_thr_cnt_t objects.
*/ */
ckh_t bt2cnt; uint64_t thr_discrim;
/* LRU for contents of bt2cnt. */ /* Included in heap profile dumps if non-NULL. */
ql_head(prof_thr_cnt_t) lru_ql; char *thread_name;
/* Backtrace vector, used for calls to prof_backtrace(). */ bool attached;
void **vec; bool expired;
rb_node(prof_tdata_t) tdata_link;
/*
* Counter used to initialize prof_tctx_t's tctx_uid. No locking is
* necessary when incrementing this field, because only one thread ever
* does so.
*/
uint64_t tctx_uid_next;
/*
* Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
* backtraces for which it has non-zero allocation/deallocation counters
* associated with thread-specific prof_tctx_t objects. Other threads
* may write to prof_tctx_t contents when freeing associated objects.
*/
ckh_t bt2tctx;
/* Sampling state. */ /* Sampling state. */
uint64_t prng_state; uint64_t prng_state;
uint64_t threshold; uint64_t bytes_until_sample;
uint64_t accum;
/* State used to avoid dumping while operating on prof internals. */ /* State used to avoid dumping while operating on prof internals. */
bool enq; bool enq;
bool enq_idump; bool enq_idump;
bool enq_gdump; bool enq_gdump;
/*
* Set to true during an early dump phase for tdata's which are
* currently being dumped. New threads' tdata's have this initialized
* to false so that they aren't accidentally included in later dump
* phases.
*/
bool dumping;
/*
* True if profiling is active for this tdata's thread
* (thread.prof.active mallctl).
*/
bool active;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Backtrace vector, used for calls to prof_backtrace(). */
void *vec[PROF_BT_MAX];
}; };
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
#endif /* JEMALLOC_H_STRUCTS */ #endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/ /******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS #ifdef JEMALLOC_H_EXTERNS
extern bool opt_prof; extern bool opt_prof;
/*
* Even if opt_prof is true, sampling can be temporarily disabled by setting
* opt_prof_active to false. No locking is used when updating opt_prof_active,
* so there are no guarantees regarding how long it will take for all threads
* to notice state changes.
*/
extern bool opt_prof_active; extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */ extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */ extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */ extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_accum; /* Report cumulative bytes. */ extern bool opt_prof_accum; /* Report cumulative bytes. */
extern char opt_prof_prefix[PATH_MAX + 1]; extern char opt_prof_prefix[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
PATH_MAX +
#endif
1];
/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;
/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;
/* /*
* Profile dump interval, measured in bytes allocated. Each arena triggers a * Profile dump interval, measured in bytes allocated. Each arena triggers a
...@@ -207,373 +275,271 @@ extern char opt_prof_prefix[PATH_MAX + 1]; ...@@ -207,373 +275,271 @@ extern char opt_prof_prefix[PATH_MAX + 1];
extern uint64_t prof_interval; extern uint64_t prof_interval;
/* /*
* If true, promote small sampled objects to large objects, since small run * Initialized as opt_lg_prof_sample, and potentially modified during profiling
* headers do not have embedded profile context pointers. * resets.
*/ */
extern bool prof_promote; extern size_t lg_prof_sample;
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec); void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt, unsigned nignore); void prof_backtrace(prof_bt_t *bt);
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt); prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
void prof_idump(void); #ifdef JEMALLOC_JET
bool prof_mdump(const char *filename); size_t prof_tdata_count(void);
void prof_gdump(void); size_t prof_bt_count(void);
prof_tdata_t *prof_tdata_init(void); const prof_cnt_t *prof_cnt_all(void);
void prof_tdata_cleanup(void *arg); typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void); void prof_boot0(void);
void prof_boot1(void); void prof_boot1(void);
bool prof_boot2(void); bool prof_boot2(tsd_t *tsd);
void prof_prefork(void); void prof_prefork0(tsdn_t *tsdn);
void prof_postfork_parent(void); void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_child(void); void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_H_EXTERNS */ #endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/ /******************************************************************************/
#ifdef JEMALLOC_H_INLINES #ifdef JEMALLOC_H_INLINES
#define PROF_ALLOC_PREP(nignore, size, ret) do { \
prof_tdata_t *prof_tdata; \
prof_bt_t bt; \
\
assert(size == s2u(size)); \
\
prof_tdata = prof_tdata_get(); \
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
if (prof_tdata != NULL) \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
else \
ret = NULL; \
break; \
} \
\
if (opt_prof_active == false) { \
/* Sampling is currently inactive, so avoid sampling. */\
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
} else if (opt_lg_prof_sample == 0) { \
/* Don't bother with sampling logic, since sampling */\
/* interval is 1. */\
bt_init(&bt, prof_tdata->vec); \
prof_backtrace(&bt, nignore); \
ret = prof_lookup(&bt); \
} else { \
if (prof_tdata->threshold == 0) { \
/* Initialize. Seed the prng differently for */\
/* each thread. */\
prof_tdata->prng_state = \
(uint64_t)(uintptr_t)&size; \
prof_sample_threshold_update(prof_tdata); \
} \
\
/* Determine whether to capture a backtrace based on */\
/* whether size is enough for prof_accum to reach */\
/* prof_tdata->threshold. However, delay updating */\
/* these variables until prof_{m,re}alloc(), because */\
/* we don't know for sure that the allocation will */\
/* succeed. */\
/* */\
/* Use subtraction rather than addition to avoid */\
/* potential integer overflow. */\
if (size >= prof_tdata->threshold - \
prof_tdata->accum) { \
bt_init(&bt, prof_tdata->vec); \
prof_backtrace(&bt, nignore); \
ret = prof_lookup(&bt); \
} else \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
} \
} while (0)
#ifndef JEMALLOC_ENABLE_INLINE #ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *) bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(void); prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
void prof_sample_threshold_update(prof_tdata_t *prof_tdata); prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
prof_ctx_t *prof_ctx_get(const void *ptr); void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
void prof_ctx_set(const void *ptr, prof_ctx_t *ctx); prof_tctx_t *tctx);
bool prof_sample_accum_update(size_t size); void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
void prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt); const void *old_ptr, prof_tctx_t *tctx);
void prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt, bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
size_t old_size, prof_ctx_t *old_ctx); prof_tdata_t **tdata_out);
void prof_free(const void *ptr, size_t size); prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
bool update);
void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
size_t old_usize, prof_tctx_t *old_tctx);
void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif #endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */ JEMALLOC_ALWAYS_INLINE bool
malloc_tsd_externs(prof_tdata, prof_tdata_t *) prof_active_get_unlocked(void)
malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL, {
prof_tdata_cleanup)
/*
* Even if opt_prof is true, sampling can be temporarily disabled by
* setting prof_active to false. No locking is used when reading
* prof_active in the fast path, so there are no guarantees regarding
* how long it will take for all threads to notice state changes.
*/
return (prof_active);
}
JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void)
{
JEMALLOC_INLINE prof_tdata_t * /*
prof_tdata_get(void) * No locking is used when reading prof_gdump_val in the fast path, so
* there are no guarantees regarding how long it will take for all
* threads to notice state changes.
*/
return (prof_gdump_val);
}
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create)
{ {
prof_tdata_t *prof_tdata; prof_tdata_t *tdata;
cassert(config_prof); cassert(config_prof);
prof_tdata = *prof_tdata_tsd_get(); tdata = tsd_prof_tdata_get(tsd);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { if (create) {
if (prof_tdata == NULL) if (unlikely(tdata == NULL)) {
prof_tdata = prof_tdata_init(); if (tsd_nominal(tsd)) {
tdata = prof_tdata_init(tsd);
tsd_prof_tdata_set(tsd, tdata);
}
} else if (unlikely(tdata->expired)) {
tdata = prof_tdata_reinit(tsd, tdata);
tsd_prof_tdata_set(tsd, tdata);
}
assert(tdata == NULL || tdata->attached);
} }
return (prof_tdata); return (tdata);
} }
JEMALLOC_INLINE void JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_sample_threshold_update(prof_tdata_t *prof_tdata) prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{ {
uint64_t r;
double u;
cassert(config_prof); cassert(config_prof);
assert(ptr != NULL);
/* return (arena_prof_tctx_get(tsdn, ptr));
* Compute sample threshold as a geometrically distributed random
* variable with mean (2^opt_lg_prof_sample).
*
* prof_tdata->threshold = ceil(log(u) / log(1 - p)), where
* p = 1 / 2^opt_lg_prof_sample
*
* For more information on the math, see:
*
* Non-Uniform Random Variate Generation
* Luc Devroye
* Springer-Verlag, New York, 1986
* pp 500
* (http://cg.scs.carleton.ca/~luc/rnbookindex.html)
*/
prng64(r, 53, prof_tdata->prng_state,
UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
u = (double)r * (1.0/9007199254740992.0L);
prof_tdata->threshold = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
+ (uint64_t)1U;
} }
JEMALLOC_INLINE prof_ctx_t * JEMALLOC_ALWAYS_INLINE void
prof_ctx_get(const void *ptr) prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{ {
prof_ctx_t *ret;
arena_chunk_t *chunk;
cassert(config_prof); cassert(config_prof);
assert(ptr != NULL); assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); arena_prof_tctx_set(tsdn, ptr, usize, tctx);
if (chunk != ptr) {
/* Region. */
ret = arena_prof_ctx_get(ptr);
} else
ret = huge_prof_ctx_get(ptr);
return (ret);
} }
JEMALLOC_INLINE void JEMALLOC_ALWAYS_INLINE void
prof_ctx_set(const void *ptr, prof_ctx_t *ctx) prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr,
prof_tctx_t *old_tctx)
{ {
arena_chunk_t *chunk;
cassert(config_prof); cassert(config_prof);
assert(ptr != NULL); assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
if (chunk != ptr) {
/* Region. */
arena_prof_ctx_set(ptr, ctx);
} else
huge_prof_ctx_set(ptr, ctx);
} }
JEMALLOC_INLINE bool JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(size_t size) prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
prof_tdata_t **tdata_out)
{ {
prof_tdata_t *prof_tdata; prof_tdata_t *tdata;
cassert(config_prof); cassert(config_prof);
/* Sampling logic is unnecessary if the interval is 1. */
assert(opt_lg_prof_sample != 0);
prof_tdata = *prof_tdata_tsd_get(); tdata = prof_tdata_get(tsd, true);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
tdata = NULL;
if (tdata_out != NULL)
*tdata_out = tdata;
if (unlikely(tdata == NULL))
return (true); return (true);
/* Take care to avoid integer overflow. */ if (likely(tdata->bytes_until_sample >= usize)) {
if (size >= prof_tdata->threshold - prof_tdata->accum) { if (update)
prof_tdata->accum -= (prof_tdata->threshold - size); tdata->bytes_until_sample -= usize;
return (true);
} else {
/* Compute new sample threshold. */ /* Compute new sample threshold. */
prof_sample_threshold_update(prof_tdata); if (update)
while (prof_tdata->accum >= prof_tdata->threshold) { prof_sample_threshold_update(tdata);
prof_tdata->accum -= prof_tdata->threshold; return (!tdata->active);
prof_sample_threshold_update(prof_tdata);
} }
return (false); }
} else {
prof_tdata->accum += size; JEMALLOC_ALWAYS_INLINE prof_tctx_t *
return (true); prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
{
prof_tctx_t *ret;
prof_tdata_t *tdata;
prof_bt_t bt;
assert(usize == s2u(usize));
if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
&tdata)))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
bt_init(&bt, tdata->vec);
prof_backtrace(&bt);
ret = prof_lookup(tsd, &bt);
} }
return (ret);
} }
JEMALLOC_INLINE void JEMALLOC_ALWAYS_INLINE void
prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt) prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{ {
cassert(config_prof); cassert(config_prof);
assert(ptr != NULL); assert(ptr != NULL);
assert(size == isalloc(ptr, true)); assert(usize == isalloc(tsdn, ptr, true));
if (opt_lg_prof_sample != 0) { if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
if (prof_sample_accum_update(size)) { prof_malloc_sample_object(tsdn, ptr, usize, tctx);
/* else
* Don't sample. For malloc()-like allocation, it is prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
* always possible to tell in advance how large an
* object's usable size will be, so there should never
* be a difference between the size passed to
* PROF_ALLOC_PREP() and prof_malloc().
*/
assert((uintptr_t)cnt == (uintptr_t)1U);
}
}
if ((uintptr_t)cnt > (uintptr_t)1U) {
prof_ctx_set(ptr, cnt->ctx);
cnt->epoch++;
/*********/
mb_write();
/*********/
cnt->cnts.curobjs++;
cnt->cnts.curbytes += size;
if (opt_prof_accum) {
cnt->cnts.accumobjs++;
cnt->cnts.accumbytes += size;
}
/*********/
mb_write();
/*********/
cnt->epoch++;
/*********/
mb_write();
/*********/
} else
prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
} }
JEMALLOC_INLINE void JEMALLOC_ALWAYS_INLINE void
prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt, prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
size_t old_size, prof_ctx_t *old_ctx) bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
prof_tctx_t *old_tctx)
{ {
prof_thr_cnt_t *told_cnt; bool sampled, old_sampled;
cassert(config_prof); cassert(config_prof);
assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U); assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (ptr != NULL) { if (prof_active && !updated && ptr != NULL) {
assert(size == isalloc(ptr, true)); assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (opt_lg_prof_sample != 0) { if (prof_sample_accum_update(tsd, usize, true, NULL)) {
if (prof_sample_accum_update(size)) {
/* /*
* Don't sample. The size passed to * Don't sample. The usize passed to prof_alloc_prep()
* PROF_ALLOC_PREP() was larger than what * was larger than what actually got allocated, so a
* actually got allocated, so a backtrace was * backtrace was captured for this allocation, even
* captured for this allocation, even though * though its actual usize was insufficient to cross the
* its actual size was insufficient to cross * sample threshold.
* the sample threshold.
*/ */
cnt = (prof_thr_cnt_t *)(uintptr_t)1U; prof_alloc_rollback(tsd, tctx, true);
} tctx = (prof_tctx_t *)(uintptr_t)1U;
} }
} }
if ((uintptr_t)old_ctx > (uintptr_t)1U) { sampled = ((uintptr_t)tctx > (uintptr_t)1U);
told_cnt = prof_lookup(old_ctx->bt); old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
if (told_cnt == NULL) {
/* if (unlikely(sampled))
* It's too late to propagate OOM for this realloc(), prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
* so operate directly on old_cnt->ctx->cnt_merged. else
*/ prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);
malloc_mutex_lock(old_ctx->lock);
old_ctx->cnt_merged.curobjs--; if (unlikely(old_sampled))
old_ctx->cnt_merged.curbytes -= old_size; prof_free_sampled_object(tsd, old_usize, old_tctx);
malloc_mutex_unlock(old_ctx->lock);
told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
}
} else
told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
if ((uintptr_t)told_cnt > (uintptr_t)1U)
told_cnt->epoch++;
if ((uintptr_t)cnt > (uintptr_t)1U) {
prof_ctx_set(ptr, cnt->ctx);
cnt->epoch++;
} else if (ptr != NULL)
prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
/*********/
mb_write();
/*********/
if ((uintptr_t)told_cnt > (uintptr_t)1U) {
told_cnt->cnts.curobjs--;
told_cnt->cnts.curbytes -= old_size;
}
if ((uintptr_t)cnt > (uintptr_t)1U) {
cnt->cnts.curobjs++;
cnt->cnts.curbytes += size;
if (opt_prof_accum) {
cnt->cnts.accumobjs++;
cnt->cnts.accumbytes += size;
}
}
/*********/
mb_write();
/*********/
if ((uintptr_t)told_cnt > (uintptr_t)1U)
told_cnt->epoch++;
if ((uintptr_t)cnt > (uintptr_t)1U)
cnt->epoch++;
/*********/
mb_write(); /* Not strictly necessary. */
} }
JEMALLOC_INLINE void JEMALLOC_ALWAYS_INLINE void
prof_free(const void *ptr, size_t size) prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{ {
prof_ctx_t *ctx = prof_ctx_get(ptr); prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
cassert(config_prof); cassert(config_prof);
assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if ((uintptr_t)ctx > (uintptr_t)1) { if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_thr_cnt_t *tcnt; prof_free_sampled_object(tsd, usize, tctx);
assert(size == isalloc(ptr, true));
tcnt = prof_lookup(ctx->bt);
if (tcnt != NULL) {
tcnt->epoch++;
/*********/
mb_write();
/*********/
tcnt->cnts.curobjs--;
tcnt->cnts.curbytes -= size;
/*********/
mb_write();
/*********/
tcnt->epoch++;
/*********/
mb_write();
/*********/
} else {
/*
* OOM during free() cannot be propagated, so operate
* directly on cnt->ctx->cnt_merged.
*/
malloc_mutex_lock(ctx->lock);
ctx->cnt_merged.curobjs--;
ctx->cnt_merged.curbytes -= size;
malloc_mutex_unlock(ctx->lock);
}
}
} }
#endif #endif
......
#!/bin/sh
for nm in `cat $1` ; do
n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
echo "#define je_${n} JEMALLOC_N(${n})"
done