Commit 5268379e authored by antirez

Jemalloc updated to 4.0.3.

parent 589c41e4
@@ -3,6 +3,7 @@
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
+#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
@@ -14,6 +15,51 @@ typedef unsigned long bitmap_t;
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
/* Number of groups required to store a given number of bits. */
#define BITMAP_BITS2GROUPS(nbits) \
((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
/*
* Number of groups required at a particular level for a given number of bits.
*/
#define BITMAP_GROUPS_L0(nbits) \
BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_L1(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
#define BITMAP_GROUPS_L2(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
#define BITMAP_GROUPS_L3(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS((nbits)))))
/*
* Assuming the number of levels, number of groups required for a given number
* of bits.
*/
#define BITMAP_GROUPS_1_LEVEL(nbits) \
BITMAP_GROUPS_L0(nbits)
#define BITMAP_GROUPS_2_LEVEL(nbits) \
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
#define BITMAP_GROUPS_3_LEVEL(nbits) \
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
#define BITMAP_GROUPS_4_LEVEL(nbits) \
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
/*
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
*/
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
#else
# error "Unsupported bitmap size"
#endif
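A quick sanity check of the group arithmetic above (not part of the diff; it assumes 64-bit bitmap groups, i.e. LG_BITMAP_GROUP_NBITS == 6, and a hypothetical 2048-bit bitmap):

    #include <assert.h>
    #define LG_GRP 6                                 /* 64-bit groups (assumed) */
    #define BITS2GROUPS(n) (((n) + (1U << LG_GRP) - 1) >> LG_GRP)
    int main(void) {
        /* Level 0 needs one bit per region, level 1 one bit per L0 group. */
        assert(BITS2GROUPS(2048) == 32);             /* L0 groups */
        assert(BITS2GROUPS(BITS2GROUPS(2048)) == 1); /* L1 groups */
        /* So BITMAP_GROUPS_2_LEVEL(2048) == 32 + 1 == 33 groups in total. */
        return 0;
    }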
/* Maximum number of levels possible. */
#define BITMAP_MAX_LEVELS \
(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
@@ -93,7 +139,7 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
bitmap_t g;
assert(bit < binfo->nbits);
-assert(bitmap_get(bitmap, binfo, bit) == false);
+assert(!bitmap_get(bitmap, binfo, bit));
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
@@ -126,15 +172,15 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
bitmap_t g;
unsigned i;
-assert(bitmap_full(bitmap, binfo) == false);
+assert(!bitmap_full(bitmap, binfo));
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
-bit = ffsl(g) - 1;
+bit = jemalloc_ffsl(g) - 1;
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
-bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1);
+bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1);
}
bitmap_set(bitmap, binfo, bit);
@@ -158,7 +204,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
-assert(bitmap_get(bitmap, binfo, bit) == false);
+assert(!bitmap_get(bitmap, binfo, bit));
/* Propagate group state transitions up the tree. */
if (propagate) {
unsigned i;
@@ -172,7 +218,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
== 0);
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
-if (propagate == false)
+if (!propagate)
break;
}
}
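For readers new to this structure: bitmap_sfu() above finds the first unused region by reading the top-level summary group, locating its lowest set bit (a set bit means "free" at that level), and descending one level per loop iteration. A standalone two-level sketch of the same idea, using ffsll() from <strings.h> (a GNU/POSIX-style extension; jemalloc_ffsl() is jemalloc's own portable wrapper):

    #include <stdint.h>
    #include <strings.h>                    /* ffsll(); availability assumed */

    /* Return the index of the first set ("free") bit in a two-level bitmap. */
    static unsigned
    first_free(uint64_t summary, const uint64_t *groups)
    {
        unsigned g = (unsigned)ffsll((long long)summary) - 1;   /* group index  */
        unsigned b = (unsigned)ffsll((long long)groups[g]) - 1; /* bit in group */
        return ((g << 6) + b);              /* region = group * 64 + bit */
    }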
...
@@ -5,7 +5,7 @@
* Size and alignment of memory chunks that are allocated by the OS's virtual
* memory system.
*/
-#define LG_CHUNK_DEFAULT 22
+#define LG_CHUNK_DEFAULT 21
/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
@@ -19,6 +19,16 @@
#define CHUNK_CEILING(s) \
(((s) + chunksize_mask) & ~chunksize_mask)
#define CHUNK_HOOKS_INITIALIZER { \
NULL, \
NULL, \
NULL, \
NULL, \
NULL, \
NULL, \
NULL \
}
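CHUNK_HOOKS_INITIALIZER zeroes the seven user-overridable chunk callbacks introduced in jemalloc 4.x (allocation, deallocation, commit, decommit, purge, split, merge); a NULL entry means "use the default implementation". A hedged sketch of a caller-side override follows; the callback signature is taken from my reading of the jemalloc 4 manual and is an assumption as far as this diff goes:

    /* Hypothetical custom chunk allocator wired into an arena (sketch only). */
    static void *
    my_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
        bool *commit, unsigned arena_ind)
    {
        /* ... obtain 'size' bytes aligned to 'alignment' here ... */
        return (NULL);      /* NULL tells jemalloc to fall back or fail */
    }

    chunk_hooks_t hooks = CHUNK_HOOKS_INITIALIZER;  /* all defaults */
    hooks.alloc = my_chunk_alloc;                   /* override just one hook */
    /* Internally chunk_hooks_set(arena, &hooks) installs them; applications
     * would normally go through the "arena.<i>.chunk_hooks" mallctl instead. */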
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
@@ -30,23 +40,36 @@
extern size_t opt_lg_chunk;
extern const char *opt_dss;
-/* Protects stats_chunks; currently not used for any other purpose. */
-extern malloc_mutex_t chunks_mtx;
-/* Chunk statistics. */
-extern chunk_stats_t stats_chunks;
-extern rtree_t *chunks_rtree;
+extern rtree_t chunks_rtree;
extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;
-extern size_t map_bias; /* Number of arena chunk header pages. */
-extern size_t arena_maxclass; /* Max size class for arenas. */
-void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
-    dss_prec_t dss_prec);
-void chunk_unmap(void *chunk, size_t size);
-void chunk_dealloc(void *chunk, size_t size, bool unmap);
+extern const chunk_hooks_t chunk_hooks_default;
+chunk_hooks_t chunk_hooks_get(arena_t *arena);
+chunk_hooks_t chunk_hooks_set(arena_t *arena,
+    const chunk_hooks_t *chunk_hooks);
+bool chunk_register(const void *chunk, const extent_node_t *node);
+void chunk_deregister(const void *chunk, const extent_node_t *node);
+void *chunk_alloc_base(size_t size);
+void *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool dalloc_node);
+void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
+void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool committed);
+void chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool zeroed, bool committed);
+void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool committed);
+bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
+    size_t length);
+bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, size_t offset, size_t length);
bool chunk_boot(void);
void chunk_prefork(void);
void chunk_postfork_parent(void);
@@ -56,6 +79,19 @@ void chunk_postfork_child(void);
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
extent_node_t *chunk_lookup(const void *chunk, bool dependent);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
JEMALLOC_INLINE extent_node_t *
chunk_lookup(const void *ptr, bool dependent)
{
return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent));
}
#endif
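The new chunk_lookup() inline replaces the old mutex-protected chunk bookkeeping with a query of the chunks_rtree radix tree. A hedged usage sketch (illustrative only; 'ptr' is a hypothetical address):

    extent_node_t *node = chunk_lookup(ptr, false);
    if (node == NULL) {
        /* ptr does not belong to a registered chunk. */
    }
    /* Passing dependent=true instead asserts that the mapping must exist,
     * which (as I read rtree.h) lets the lookup skip its failure path. */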
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
...
@@ -23,7 +23,8 @@ extern const char *dss_prec_names[];
dss_prec_t chunk_dss_prec_get(void);
bool chunk_dss_prec_set(dss_prec_t dss_prec);
-void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
+void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool *commit);
bool chunk_in_dss(void *chunk);
bool chunk_dss_boot(void);
void chunk_dss_prefork(void);
...
@@ -9,10 +9,9 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-bool pages_purge(void *addr, size_t length);
-void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
-bool chunk_dealloc_mmap(void *chunk, size_t size);
+void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero,
+    bool *commit);
+bool chunk_dalloc_mmap(void *chunk, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
...
@@ -66,13 +66,13 @@ struct ckh_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
    ckh_keycomp_t *keycomp);
-void ckh_delete(ckh_t *ckh);
+void ckh_delete(tsd_t *tsd, ckh_t *ckh);
size_t ckh_count(ckh_t *ckh);
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
-bool ckh_insert(ckh_t *ckh, const void *key, const void *data);
+bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
-bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
+bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
    void **data);
bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data);
void ckh_string_hash(const void *key, size_t r_hash[2]);
...
@@ -34,6 +34,7 @@ struct ctl_arena_stats_s {
bool initialized;
unsigned nthreads;
const char *dss;
+ssize_t lg_dirty_mult;
size_t pactive;
size_t pdirty;
arena_stats_t astats;
@@ -46,22 +47,15 @@ struct ctl_arena_stats_s {
malloc_bin_stats_t bstats[NBINS];
malloc_large_stats_t *lstats; /* nlclasses elements. */
+malloc_huge_stats_t *hstats; /* nhclasses elements. */
};
struct ctl_stats_s {
size_t allocated;
size_t active;
+size_t metadata;
+size_t resident;
size_t mapped;
-struct {
-size_t current; /* stats_chunks.curchunks */
-uint64_t total; /* stats_chunks.nchunks */
-size_t high; /* stats_chunks.highchunks */
-} chunks;
-struct {
-size_t allocated; /* huge_allocated */
-uint64_t nmalloc; /* huge_nmalloc */
-uint64_t ndalloc; /* huge_ndalloc */
-} huge;
unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
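ctl_stats_s now carries "metadata" and "resident" totals and drops the old per-chunk and per-huge counters; these surface through the mallctl*() stats namespace. A hedged sketch of reading them (mallctl names per the jemalloc 4 manual, not part of this diff; Redis builds jemalloc with a je_ symbol prefix, so the call may be je_mallctl):

    #include <jemalloc/jemalloc.h>
    #include <stdio.h>

    int main(void) {
        size_t metadata, resident, sz = sizeof(size_t);
        uint64_t epoch = 1;
        size_t esz = sizeof(epoch);
        /* Refresh the cached stats snapshot, then read two of the new totals. */
        mallctl("epoch", &epoch, &esz, &epoch, esz);
        if (mallctl("stats.metadata", &metadata, &sz, NULL, 0) == 0 &&
            mallctl("stats.resident", &resident, &sz, NULL, 0) == 0)
            printf("metadata=%zu resident=%zu\n", metadata, resident);
        return 0;
    }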
...
@@ -7,25 +7,53 @@ typedef struct extent_node_s extent_node_t;
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
-/* Tree of extents. */
-struct extent_node_s {
-	/* Linkage for the size/address-ordered tree. */
-	rb_node(extent_node_t) link_szad;
-	/* Linkage for the address-ordered tree. */
-	rb_node(extent_node_t) link_ad;
-	/* Profile counters, used for huge objects. */
-	prof_ctx_t *prof_ctx;
-	/* Pointer to the extent that this tree node is responsible for. */
-	void *addr;
-	/* Total region size. */
-	size_t size;
-	/* True if zero-filled; used by chunk recycling code. */
-	bool zeroed;
-};
+/* Tree of extents. Use accessor functions for en_* fields. */
+struct extent_node_s {
+	/* Arena from which this extent came, if any. */
+	arena_t *en_arena;
+	/* Pointer to the extent that this tree node is responsible for. */
+	void *en_addr;
+	/* Total region size. */
+	size_t en_size;
+	/*
+	 * The zeroed flag is used by chunk recycling code to track whether
+	 * memory is zero-filled.
+	 */
+	bool en_zeroed;
+	/*
+	 * True if physical memory is committed to the extent, whether
+	 * explicitly or implicitly as on a system that overcommits and
+	 * satisfies physical memory needs on demand via soft page faults.
+	 */
+	bool en_committed;
+	/*
+	 * The achunk flag is used to validate that huge allocation lookups
+	 * don't return arena chunks.
+	 */
+	bool en_achunk;
+	/* Profile counters, used for huge objects. */
+	prof_tctx_t *en_prof_tctx;
+	/* Linkage for arena's runs_dirty and chunks_cache rings. */
+	arena_runs_dirty_link_t rd;
+	qr(extent_node_t) cc_link;
+	union {
+		/* Linkage for the size/address-ordered tree. */
+		rb_node(extent_node_t) szad_link;
+		/* Linkage for arena's huge and node_cache lists. */
+		ql_elm(extent_node_t) ql_link;
+	};
+	/* Linkage for the address-ordered tree. */
+	rb_node(extent_node_t) ad_link;
+};
typedef rb_tree(extent_node_t) extent_tree_t;
@@ -41,6 +69,171 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *extent_node_arena_get(const extent_node_t *node);
void *extent_node_addr_get(const extent_node_t *node);
size_t extent_node_size_get(const extent_node_t *node);
bool extent_node_zeroed_get(const extent_node_t *node);
bool extent_node_committed_get(const extent_node_t *node);
bool extent_node_achunk_get(const extent_node_t *node);
prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
void extent_node_arena_set(extent_node_t *node, arena_t *arena);
void extent_node_addr_set(extent_node_t *node, void *addr);
void extent_node_size_set(extent_node_t *node, size_t size);
void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void extent_node_committed_set(extent_node_t *node, bool committed);
void extent_node_achunk_set(extent_node_t *node, bool achunk);
void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
size_t size, bool zeroed, bool committed);
void extent_node_dirty_linkage_init(extent_node_t *node);
void extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
void extent_node_dirty_remove(extent_node_t *node);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
JEMALLOC_INLINE arena_t *
extent_node_arena_get(const extent_node_t *node)
{
return (node->en_arena);
}
JEMALLOC_INLINE void *
extent_node_addr_get(const extent_node_t *node)
{
return (node->en_addr);
}
JEMALLOC_INLINE size_t
extent_node_size_get(const extent_node_t *node)
{
return (node->en_size);
}
JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
{
return (node->en_zeroed);
}
JEMALLOC_INLINE bool
extent_node_committed_get(const extent_node_t *node)
{
assert(!node->en_achunk);
return (node->en_committed);
}
JEMALLOC_INLINE bool
extent_node_achunk_get(const extent_node_t *node)
{
return (node->en_achunk);
}
JEMALLOC_INLINE prof_tctx_t *
extent_node_prof_tctx_get(const extent_node_t *node)
{
return (node->en_prof_tctx);
}
JEMALLOC_INLINE void
extent_node_arena_set(extent_node_t *node, arena_t *arena)
{
node->en_arena = arena;
}
JEMALLOC_INLINE void
extent_node_addr_set(extent_node_t *node, void *addr)
{
node->en_addr = addr;
}
JEMALLOC_INLINE void
extent_node_size_set(extent_node_t *node, size_t size)
{
node->en_size = size;
}
JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{
node->en_zeroed = zeroed;
}
JEMALLOC_INLINE void
extent_node_committed_set(extent_node_t *node, bool committed)
{
node->en_committed = committed;
}
JEMALLOC_INLINE void
extent_node_achunk_set(extent_node_t *node, bool achunk)
{
node->en_achunk = achunk;
}
JEMALLOC_INLINE void
extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
{
node->en_prof_tctx = tctx;
}
JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
bool zeroed, bool committed)
{
extent_node_arena_set(node, arena);
extent_node_addr_set(node, addr);
extent_node_size_set(node, size);
extent_node_zeroed_set(node, zeroed);
extent_node_committed_set(node, committed);
extent_node_achunk_set(node, false);
if (config_prof)
extent_node_prof_tctx_set(node, NULL);
}
JEMALLOC_INLINE void
extent_node_dirty_linkage_init(extent_node_t *node)
{
qr_new(&node->rd, rd_link);
qr_new(node, cc_link);
}
JEMALLOC_INLINE void
extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
{
qr_meld(runs_dirty, &node->rd, rd_link);
qr_meld(chunks_dirty, node, cc_link);
}
JEMALLOC_INLINE void
extent_node_dirty_remove(extent_node_t *node)
{
qr_remove(&node->rd, rd_link);
qr_remove(node, cc_link);
}
#endif
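The accessors above are trivial wrappers, but extent_node_init() shows the intended initialization order. A short usage sketch, assuming an arena_t *arena and a freshly mapped address addr are in scope (hypothetical values):

    /* Describe a zeroed, committed 2 MiB extent owned by 'arena'. */
    extent_node_t node;
    extent_node_init(&node, arena, addr, (size_t)2 << 20, true, true);
    assert(extent_node_zeroed_get(&node));
    assert(!extent_node_achunk_get(&node));   /* not an arena chunk */
    extent_node_dirty_linkage_init(&node);    /* before any dirty-ring insert */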
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
@@ -35,13 +35,14 @@ JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{
-return (x << r) | (x >> (32 - r));
+return ((x << r) | (x >> (32 - r)));
}
JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{
-return (x << r) | (x >> (64 - r));
+return ((x << r) | (x >> (64 - r)));
}
JEMALLOC_INLINE uint32_t
@@ -76,9 +77,9 @@ hash_fmix_64(uint64_t k)
{
k ^= k >> 33;
-k *= QU(0xff51afd7ed558ccdLLU);
+k *= KQU(0xff51afd7ed558ccd);
k ^= k >> 33;
-k *= QU(0xc4ceb9fe1a85ec53LLU);
+k *= KQU(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return (k);
@@ -247,8 +248,8 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t h1 = seed;
uint64_t h2 = seed;
-const uint64_t c1 = QU(0x87c37b91114253d5LLU);
-const uint64_t c2 = QU(0x4cf5ad432745937fLLU);
+const uint64_t c1 = KQU(0x87c37b91114253d5);
+const uint64_t c2 = KQU(0x4cf5ad432745937f);
/* body */
{
...
@@ -9,34 +9,24 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-/* Huge allocation statistics. */
-extern uint64_t huge_nmalloc;
-extern uint64_t huge_ndalloc;
-extern size_t huge_allocated;
-/* Protects chunk-related data structures. */
-extern malloc_mutex_t huge_mtx;
-void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
-void *huge_palloc(size_t size, size_t alignment, bool zero,
-    dss_prec_t dss_prec);
-bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
-    size_t extra);
-void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
+void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+    tcache_t *tcache);
+void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+    bool zero, tcache_t *tcache);
+bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero);
+void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
+    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
-void huge_dalloc(void *ptr, bool unmap);
+void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+arena_t *huge_aalloc(const void *ptr);
size_t huge_salloc(const void *ptr);
-dss_prec_t huge_dss_prec_get(arena_t *arena);
-prof_ctx_t *huge_prof_ctx_get(const void *ptr);
-void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
-bool huge_boot(void);
-void huge_prefork(void);
-void huge_postfork_parent(void);
-void huge_postfork_child(void);
+prof_tctx_t *huge_prof_tctx_get(const void *ptr);
+void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
+void huge_prof_tctx_reset(const void *ptr);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
...
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
-#include <math.h>
-#ifdef _WIN32
-# include <windows.h>
-# define ENOENT ERROR_PATH_NOT_FOUND
-# define EINVAL ERROR_BAD_ARGUMENTS
-# define EAGAIN ERROR_OUTOFMEMORY
-# define EPERM ERROR_WRITE_FAULT
-# define EFAULT ERROR_INVALID_ADDRESS
-# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
-# undef ERANGE
-# define ERANGE ERROR_INVALID_DATA
-#else
-# include <sys/param.h>
-# include <sys/mman.h>
-# include <sys/syscall.h>
-# if !defined(SYS_write) && defined(__NR_write)
-# define SYS_write __NR_write
-# endif
-# include <sys/uio.h>
-# include <pthread.h>
-# include <errno.h>
-#endif
-#include <sys/types.h>
-#include <limits.h>
-#ifndef SIZE_T_MAX
-# define SIZE_T_MAX SIZE_MAX
-#endif
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <stddef.h>
-#ifndef offsetof
-# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
-#endif
-#include <inttypes.h>
-#include <string.h>
-#include <strings.h>
-#include <ctype.h>
-#ifdef _MSC_VER
-# include <io.h>
-typedef intptr_t ssize_t;
-# define PATH_MAX 1024
-# define STDERR_FILENO 2
-# define __func__ __FUNCTION__
-/* Disable warnings about deprecated system functions */
-# pragma warning(disable: 4996)
-#else
-# include <unistd.h>
-#endif
-#include <fcntl.h>
#include "jemalloc_internal_defs.h"
+#include "jemalloc/internal/jemalloc_internal_decls.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif
-#ifdef JEMALLOC_VALGRIND
-#include <valgrind/valgrind.h>
-#include <valgrind/memcheck.h>
-#endif
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
@@ -85,7 +28,7 @@ static const bool config_debug =
false
#endif
;
-static const bool config_dss =
+static const bool have_dss =
#ifdef JEMALLOC_DSS
true
#else
@@ -127,8 +70,8 @@ static const bool config_prof_libunwind =
false
#endif
;
-static const bool config_mremap =
-#ifdef JEMALLOC_MREMAP
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
true
#else
false
@@ -190,6 +133,17 @@ static const bool config_ivsalloc =
false
#endif
;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
true
#else
false
#endif
;
#ifdef JEMALLOC_C11ATOMICS
#include <stdatomic.h>
#endif
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
@@ -229,20 +183,48 @@ static const bool config_ivsalloc =
#include "jemalloc/internal/jemalloc_internal_macros.h"
/* Size class index type. */
typedef unsigned szind_t;
/*
* Flags bits:
*
* a: arena
* t: tcache
* 0: unused
* z: zero
* n: alignment
*
* aaaaaaaa aaaatttt tttttttt 0znnnnnn
*/
#define MALLOCX_ARENA_MASK ((int)~0xfffff)
#define MALLOCX_ARENA_MAX 0xffe
#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU)
#define MALLOCX_TCACHE_MAX 0xffd
#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
-#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)
+/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
(ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags) \
(MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags) \
((bool)(flags & MALLOCX_ZERO))
#define MALLOCX_TCACHE_GET(flags) \
(((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
#define MALLOCX_ARENA_GET(flags) \
(((unsigned)(((unsigned)flags) >> 20)) - 1)
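The flags word packs lg(alignment) in the low 6 bits, a zero flag, a 12-bit tcache field and a 12-bit arena index; the *_GET() macros above simply undo that packing. A standalone arithmetic check of the layout, assuming the public encoders behave as MALLOCX_LG_ALIGN(la) == la, MALLOCX_TCACHE(tc) == (tc + 2) << 8 and MALLOCX_ARENA(a) == (a + 1) << 20 (those live in jemalloc.h, not in this diff):

    #include <assert.h>
    int main(void) {
        int flags = 4 | ((3 + 2) << 8) | ((7 + 1) << 20);   /* hypothetical */
        assert((1 << (flags & 0x3f)) == 16);                /* MALLOCX_ALIGN_GET  */
        assert((((flags & 0x000fff00) >> 8) - 2) == 3);     /* MALLOCX_TCACHE_GET */
        assert((((unsigned)flags >> 20) - 1) == 7);         /* MALLOCX_ARENA_GET  */
        return 0;
    }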
/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)
/*
- * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
-# define LG_QUANTUM 3
+# define LG_QUANTUM 4
# endif
# ifdef __ia64__
# define LG_QUANTUM 4
@@ -250,11 +232,11 @@ static const bool config_ivsalloc =
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
-# ifdef __sparc64__
+# if (defined(__sparc64__) || defined(__sparcv9))
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
-# define LG_QUANTUM 3
+# define LG_QUANTUM 4
# endif
# ifdef __arm__
# define LG_QUANTUM 3
@@ -268,6 +250,9 @@ static const bool config_ivsalloc =
# ifdef __mips__
# define LG_QUANTUM 3
# endif
+# ifdef __or1k__
+# define LG_QUANTUM 3
+# endif
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
@@ -280,8 +265,12 @@ static const bool config_ivsalloc =
# ifdef __tile__
# define LG_QUANTUM 4
# endif
+# ifdef __le32__
+# define LG_QUANTUM 4
+# endif
# ifndef LG_QUANTUM
-# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
+# error "Unknown minimum alignment for architecture; specify via "
+"--with-lg-quantum"
# endif
#endif
@@ -321,12 +310,11 @@ static const bool config_ivsalloc =
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
-/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */
+/* Page size. LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
-#define LG_PAGE STATIC_PAGE_SHIFT
-#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
+#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the smallest pagesize multiple that is >= s. */
@@ -345,7 +333,7 @@ static const bool config_ivsalloc =
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + (alignment - 1)) & (-(alignment)))
-/* Declare a variable length array */
+/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
# include <malloc.h>
@@ -358,86 +346,12 @@ static const bool config_ivsalloc =
# endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
-type *name = alloca(sizeof(type) * count)
+type *name = alloca(sizeof(type) * (count))
#else
-# define VARIABLE_ARRAY(type, name, count) type name[count]
+# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
-#ifdef JEMALLOC_VALGRIND
-/*
- * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
- * so that when Valgrind reports errors, there are no extra stack frames
- * in the backtraces.
- *
- * The size that is reported to valgrind must be consistent through a chain of
- * malloc..realloc..realloc calls. Request size isn't recorded anywhere in
- * jemalloc, so it is critical that all callers of these macros provide usize
- * rather than request size. As a result, buffer overflow detection is
- * technically weakened for the standard API, though it is generally accepted
- * practice to consider any extra bytes reported by malloc_usable_size() as
- * usable space.
- */
-#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
-if (config_valgrind && opt_valgrind && cond) \
-VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
-} while (0)
-#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
-old_rzsize, zero) do { \
-if (config_valgrind && opt_valgrind) { \
-size_t rzsize = p2rz(ptr); \
-\
-if (ptr == old_ptr) { \
-VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
-usize, rzsize); \
-if (zero && old_usize < usize) { \
-VALGRIND_MAKE_MEM_DEFINED( \
-(void *)((uintptr_t)ptr + \
-old_usize), usize - old_usize); \
-} \
-} else { \
-if (old_ptr != NULL) { \
-VALGRIND_FREELIKE_BLOCK(old_ptr, \
-old_rzsize); \
-} \
-if (ptr != NULL) { \
-size_t copy_size = (old_usize < usize) \
-? old_usize : usize; \
-size_t tail_size = usize - copy_size; \
-VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
-rzsize, false); \
-if (copy_size > 0) { \
-VALGRIND_MAKE_MEM_DEFINED(ptr, \
-copy_size); \
-} \
-if (zero && tail_size > 0) { \
-VALGRIND_MAKE_MEM_DEFINED( \
-(void *)((uintptr_t)ptr + \
-copy_size), tail_size); \
-} \
-} \
-} \
-} \
-} while (0)
-#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
-if (config_valgrind && opt_valgrind) \
-VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
-} while (0)
-#else
-#define RUNNING_ON_VALGRIND ((unsigned)0)
-#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
-do {} while (0)
-#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
-do {} while (0)
-#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
-#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
-#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
-#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
-#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
-#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
-old_rzsize, zero) do {} while (0)
-#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
-#endif
+#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h" #include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h" #include "jemalloc/internal/prng.h"
...@@ -452,9 +366,10 @@ static const bool config_ivsalloc = ...@@ -452,9 +366,10 @@ static const bool config_ivsalloc =
#include "jemalloc/internal/arena.h" #include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h" #include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h" #include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h" #include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h" #include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h" #include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h" #include "jemalloc/internal/quarantine.h"
@@ -464,6 +379,7 @@ static const bool config_ivsalloc =
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
+#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
@@ -472,68 +388,83 @@ static const bool config_ivsalloc =
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
+#define JEMALLOC_ARENA_STRUCTS_A
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/extent.h"
+#define JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
+#include "jemalloc/internal/tsd.h"
-typedef struct {
-uint64_t allocated;
-uint64_t deallocated;
-} thread_allocated_t;
-/*
- * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
- * argument.
- */
-#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0})
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
-extern bool opt_junk;
+extern const char *opt_junk;
+extern bool opt_junk_alloc;
+extern bool opt_junk_free;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
-extern bool opt_valgrind;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;
+extern bool in_valgrind;
/* Number of CPUs. */
extern unsigned ncpus;
-/* Protects arenas initialization (arenas, arenas_total). */
-extern malloc_mutex_t arenas_lock;
-/*
- * Arenas that are used to service external requests. Not all elements of the
- * arenas array are necessarily used; arenas are created lazily as needed.
- *
- * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
- * arenas. arenas[narenas_auto..narenas_total) are only used if the application
- * takes some action to create them and allocate from them.
- */
-extern arena_t **arenas;
-extern unsigned narenas_total;
-extern unsigned narenas_auto; /* Read-only after initialization. */
+/*
+ * index2size_tab encodes the same information as could be computed (at
+ * unacceptable cost in some code paths) by index2size_compute().
+ */
+extern size_t const index2size_tab[NSIZES];
+/*
+ * size2index_tab is a compact lookup table that rounds request sizes up to
+ * size classes. In order to reduce cache footprint, the table is compressed,
+ * and all accesses are via size2index().
+ */
+extern uint8_t const size2index_tab[];
+arena_t *a0get(void);
+void *a0malloc(size_t size);
+void a0dalloc(void *ptr);
+void *bootstrap_malloc(size_t size);
+void *bootstrap_calloc(size_t num, size_t size);
+void bootstrap_free(void *ptr);
arena_t *arenas_extend(unsigned ind);
-void arenas_cleanup(void *arg);
-arena_t *choose_arena_hard(void);
+arena_t *arena_init(unsigned ind);
+unsigned narenas_total_get(void);
+arena_t *arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing);
+arena_t *arena_choose_hard(tsd_t *tsd);
+void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
+unsigned arena_nbound(unsigned ind);
+void thread_allocated_cleanup(tsd_t *tsd);
+void thread_deallocated_cleanup(tsd_t *tsd);
+void arena_cleanup(tsd_t *tsd);
+void arenas_cache_cleanup(tsd_t *tsd);
+void narenas_cache_cleanup(tsd_t *tsd);
+void arenas_cache_bypass_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
+#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
@@ -542,24 +473,26 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
+#include "jemalloc/internal/tsd.h"
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES
+#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
@@ -572,26 +505,158 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
-malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
+szind_t size2index_compute(size_t size);
+szind_t size2index_lookup(size_t size);
+szind_t size2index(size_t size);
+size_t index2size_compute(szind_t index);
+size_t index2size_lookup(szind_t index);
+size_t index2size(szind_t index);
+size_t s2u_compute(size_t size);
+size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
-unsigned narenas_total_get(void);
-arena_t *choose_arena(arena_t *arena);
+arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
+arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
+    bool refresh_if_missing);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-/*
- * Map of pthread_self() --> arenas[???], used for selecting an arena to use
- * for allocations.
- */
-malloc_tsd_externs(arenas, arena_t *)
-malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
-    arenas_cleanup)
+JEMALLOC_INLINE szind_t
+size2index_compute(size_t size)
+{
+#if (NTBINS != 0)
+if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
+size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+size_t lg_ceil = lg_floor(pow2_ceil(size));
+return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
+}
+#endif
{
size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
(ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
: lg_floor((size<<1)-1);
size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
size_t grp = shift << LG_SIZE_CLASS_GROUP;
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta_inverse_mask = ZI(-1) << lg_delta;
size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
size_t index = NTBINS + grp + mod;
return (index);
}
}
JEMALLOC_ALWAYS_INLINE szind_t
size2index_lookup(size_t size)
{
assert(size <= LOOKUP_MAXCLASS);
{
size_t ret = ((size_t)(size2index_tab[(size-1) >>
LG_TINY_MIN]));
assert(ret == size2index_compute(size));
return (ret);
}
}
JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size)
{
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS))
return (size2index_lookup(size));
return (size2index_compute(size));
}
JEMALLOC_INLINE size_t
index2size_compute(szind_t index)
{
#if (NTBINS > 0)
if (index < NTBINS)
return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
#endif
{
size_t reduced_index = index - NTBINS;
size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
1);
size_t grp_size_mask = ~((!!grp)-1);
size_t grp_size = ((ZU(1) << (LG_QUANTUM +
(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
size_t shift = (grp == 0) ? 1 : grp;
size_t lg_delta = shift + (LG_QUANTUM-1);
size_t mod_size = (mod+1) << lg_delta;
size_t usize = grp_size + mod_size;
return (usize);
}
}
JEMALLOC_ALWAYS_INLINE size_t
index2size_lookup(szind_t index)
{
size_t ret = (size_t)index2size_tab[index];
assert(ret == index2size_compute(index));
return (ret);
}
JEMALLOC_ALWAYS_INLINE size_t
index2size(szind_t index)
{
assert(index < NSIZES);
return (index2size_lookup(index));
}
JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{
#if (NTBINS > 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
size_t lg_ceil = lg_floor(pow2_ceil(size));
return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
(ZU(1) << lg_ceil));
}
#endif
{
size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
(ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
: lg_floor((size<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (size + delta_mask) & ~delta_mask;
return (usize);
}
}
JEMALLOC_ALWAYS_INLINE size_t
s2u_lookup(size_t size)
{
size_t ret = index2size_lookup(size2index_lookup(size));
assert(ret == s2u_compute(size));
return (ret);
}
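A worked example of the size-class math above, as standalone arithmetic rather than a call into these inlines. It assumes LG_QUANTUM == 4, LG_SIZE_CLASS_GROUP == 2 and one tiny class (NTBINS == 1), which is typical for x86_64 builds:

    #include <assert.h>
    #include <stddef.h>

    int main(void) {
        size_t size = 100;
        /* lg_floor(2*size - 1) == 7, so 100 falls in the group whose classes
         * are spaced 2^(7-2-1) == 16 bytes apart. */
        size_t lg_delta = 4;
        size_t delta_mask = ((size_t)1 << lg_delta) - 1;
        size_t usize = (size + delta_mask) & ~delta_mask;
        assert(usize == 112);            /* s2u(100) == 112 */
        /* size2index: group 1 (65..128), offset 2 within the group, plus the
         * tiny bin, gives index 1 + 4 + 2 == 7; index2size(7) == 112. */
        return 0;
    }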
/*
* Compute usable size that would result from allocating an object with the
@@ -601,11 +666,10 @@ JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{
-if (size <= SMALL_MAXCLASS)
-return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
-if (size <= arena_maxclass)
-return (PAGE_CEILING(size));
-return (CHUNK_CEILING(size));
+assert(size > 0);
+if (likely(size <= LOOKUP_MAXCLASS))
+return (s2u_lookup(size));
+return (s2u_compute(size));
}
/*
@@ -619,108 +683,128 @@ sa2u(size_t size, size_t alignment)
assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
-/*
- * Round size up to the nearest multiple of alignment.
- *
- * This done, we can take advantage of the fact that for each small
- * size class, every object is aligned at the smallest power of two
- * that is non-zero in the base two representation of the size. For
- * example:
- *
- * Size | Base 2 | Minimum alignment
- * -----+----------+------------------
- * 96 | 1100000 | 32
- * 144 | 10100000 | 32
- * 192 | 11000000 | 64
- */
-usize = ALIGNMENT_CEILING(size, alignment);
-/*
- * (usize < size) protects against the combination of maximal
- * alignment and size greater than maximal alignment.
- */
-if (usize < size) {
-/* size_t overflow. */
-return (0);
-}
-if (usize <= arena_maxclass && alignment <= PAGE) {
-if (usize <= SMALL_MAXCLASS)
-return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
-return (PAGE_CEILING(usize));
-} else {
-size_t run_size;
-/*
- * We can't achieve subpage alignment, so round up alignment
- * permanently; it makes later calculations simpler.
- */
-alignment = PAGE_CEILING(alignment);
-usize = PAGE_CEILING(size);
-/*
- * (usize < size) protects against very large sizes within
- * PAGE of SIZE_T_MAX.
- *
- * (usize + alignment < usize) protects against the
- * combination of maximal alignment and usize large enough
- * to cause overflow. This is similar to the first overflow
- * check above, but it needs to be repeated due to the new
- * usize value, which may now be *equal* to maximal
- * alignment, whereas before we only detected overflow if the
- * original size was *greater* than maximal alignment.
- */
-if (usize < size || usize + alignment < usize) {
-/* size_t overflow. */
-return (0);
-}
-/*
- * Calculate the size of the over-size run that arena_palloc()
- * would need to allocate in order to guarantee the alignment.
- * If the run wouldn't fit within a chunk, round up to a huge
- * allocation size.
- */
-run_size = usize + alignment - PAGE;
-if (run_size <= arena_maxclass)
-return (PAGE_CEILING(usize));
-return (CHUNK_CEILING(usize));
-}
-}
+/* Try for a small size class. */
+if (size <= SMALL_MAXCLASS && alignment < PAGE) {
+/*
+ * Round size up to the nearest multiple of alignment.
+ *
+ * This done, we can take advantage of the fact that for each
+ * small size class, every object is aligned at the smallest
+ * power of two that is non-zero in the base two representation
+ * of the size. For example:
+ *
+ * Size | Base 2 | Minimum alignment
+ * -----+----------+------------------
+ * 96 | 1100000 | 32
+ * 144 | 10100000 | 32
+ * 192 | 11000000 | 64
+ */
+usize = s2u(ALIGNMENT_CEILING(size, alignment));
+if (usize < LARGE_MINCLASS)
+return (usize);
+}
+/* Try for a large size class. */
+if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
+/*
+ * We can't achieve subpage alignment, so round up alignment
+ * to the minimum that can actually be supported.
+ */
+alignment = PAGE_CEILING(alignment);
+/* Make sure result is a large size class. */
+usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);
+/*
+ * Calculate the size of the over-size run that arena_palloc()
+ * would need to allocate in order to guarantee the alignment.
+ */
+if (usize + large_pad + alignment - PAGE <= arena_maxrun)
+return (usize);
+}
+/* Huge size class. Beware of size_t overflow. */
+/*
+ * We can't achieve subchunk alignment, so round up alignment to the
+ * minimum that can actually be supported.
+ */
+alignment = CHUNK_CEILING(alignment);
+if (alignment == 0) {
+/* size_t overflow. */
+return (0);
+}
+/* Make sure result is a huge size class. */
+if (size <= chunksize)
+usize = chunksize;
+else {
+usize = s2u(size);
+if (usize < size) {
+/* size_t overflow. */
+return (0);
+}
+}
+/*
+ * Calculate the multi-chunk mapping that huge_palloc() would need in
+ * order to guarantee the alignment.
+ */
+if (usize + alignment - PAGE < usize) {
+/* size_t overflow. */
+return (0);
+}
+return (usize);
+}
-JEMALLOC_INLINE unsigned
-narenas_total_get(void)
-{
-unsigned narenas;
-malloc_mutex_lock(&arenas_lock);
-narenas = narenas_total;
-malloc_mutex_unlock(&arenas_lock);
-return (narenas);
-}
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
-choose_arena(arena_t *arena)
+arena_choose(tsd_t *tsd, arena_t *arena)
{
arena_t *ret;
if (arena != NULL)
return (arena);
-if ((ret = *arenas_tsd_get()) == NULL) {
-ret = choose_arena_hard();
-assert(ret != NULL);
-}
+if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
+ret = arena_choose_hard(tsd);
return (ret);
}
JEMALLOC_INLINE arena_t *
arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
bool refresh_if_missing)
{
arena_t *arena;
arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
/* init_if_missing requires refresh_if_missing. */
assert(!init_if_missing || refresh_if_missing);
if (unlikely(arenas_cache == NULL)) {
/* arenas_cache hasn't been initialized yet. */
return (arena_get_hard(tsd, ind, init_if_missing));
}
if (unlikely(ind >= tsd_narenas_cache_get(tsd))) {
/*
* ind is invalid, cache is old (too small), or arena to be
* initialized.
*/
return (refresh_if_missing ? arena_get_hard(tsd, ind,
init_if_missing) : NULL);
}
arena = arenas_cache[ind];
if (likely(arena != NULL) || !refresh_if_missing)
return (arena);
return (arena_get_hard(tsd, ind, init_if_missing));
}
#endif
#include "jemalloc/internal/bitmap.h"
-#include "jemalloc/internal/rtree.h"
/*
- * Include arena.h twice in order to resolve circular dependencies with
- * tcache.h.
+ * Include portions of arena.h interleaved with tcache.h in order to resolve
+ * circular dependencies.
*/
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
@@ -733,133 +817,155 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
-void *imalloct(size_t size, bool try_tcache, arena_t *arena);
-void *imalloc(size_t size);
-void *icalloct(size_t size, bool try_tcache, arena_t *arena);
-void *icalloc(size_t size);
-void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
-    arena_t *arena);
-void *ipalloc(size_t usize, size_t alignment, bool zero);
+arena_t *iaalloc(const void *ptr);
size_t isalloc(const void *ptr, bool demote);
+void *iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache,
+    bool is_metadata, arena_t *arena);
+void *imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
+void *imalloc(tsd_t *tsd, size_t size);
+void *icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
+void *icalloc(tsd_t *tsd, size_t size);
+void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+    tcache_t *tcache, bool is_metadata, arena_t *arena);
+void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+    tcache_t *tcache, arena_t *arena);
+void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
-void idalloct(void *ptr, bool try_tcache);
-void idalloc(void *ptr);
-void iqalloct(void *ptr, bool try_tcache);
-void iqalloc(void *ptr);
-void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
-    arena_t *arena);
-void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
-    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
-void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
-    bool zero);
-bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
-    bool zero);
-malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
+void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata);
+void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
+void idalloc(tsd_t *tsd, void *ptr);
+void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+    size_t extra, size_t alignment, bool zero, tcache_t *tcache,
+    arena_t *arena);
+void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
+void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+    size_t alignment, bool zero);
+bool ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
+    size_t alignment, bool zero);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(const void *ptr)
{
assert(ptr != NULL);
return (arena_aalloc(ptr));
}
/*
* Typical usage:
* void *ptr = [...]
* size_t sz = isalloc(ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || !demote);
return (arena_salloc(ptr, demote));
}
JEMALLOC_ALWAYS_INLINE void * JEMALLOC_ALWAYS_INLINE void *
imalloct(size_t size, bool try_tcache, arena_t *arena) iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata,
arena_t *arena)
{ {
void *ret;
assert(size != 0); assert(size != 0);
if (size <= arena_maxclass) ret = arena_malloc(tsd, arena, size, zero, tcache);
return (arena_malloc(arena, size, false, try_tcache)); if (config_stats && is_metadata && likely(ret != NULL)) {
else arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
return (huge_malloc(size, false, huge_dss_prec_get(arena))); config_prof));
}
return (ret);
} }
JEMALLOC_ALWAYS_INLINE void * JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size) imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
{ {
return (imalloct(size, true, NULL)); return (iallocztm(tsd, size, false, tcache, false, arena));
} }
JEMALLOC_ALWAYS_INLINE void * JEMALLOC_ALWAYS_INLINE void *
icalloct(size_t size, bool try_tcache, arena_t *arena) imalloc(tsd_t *tsd, size_t size)
{ {
if (size <= arena_maxclass) return (iallocztm(tsd, size, false, tcache_get(tsd, true), false, NULL));
return (arena_malloc(arena, size, true, try_tcache));
else
return (huge_malloc(size, true, huge_dss_prec_get(arena)));
} }
JEMALLOC_ALWAYS_INLINE void * JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size) icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
{ {
return (icalloct(size, true, NULL)); return (iallocztm(tsd, size, true, tcache, false, arena));
} }
JEMALLOC_ALWAYS_INLINE void * JEMALLOC_ALWAYS_INLINE void *
ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache, icalloc(tsd_t *tsd, size_t size)
arena_t *arena) {
return (iallocztm(tsd, size, true, tcache_get(tsd, true), false, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena)
{ {
void *ret; void *ret;
assert(usize != 0); assert(usize != 0);
assert(usize == sa2u(usize, alignment)); assert(usize == sa2u(usize, alignment));
if (usize <= arena_maxclass && alignment <= PAGE) ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
ret = arena_malloc(arena, usize, zero, try_tcache);
else {
if (usize <= arena_maxclass) {
ret = arena_palloc(choose_arena(arena), usize,
alignment, zero);
} else if (alignment <= chunksize)
ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
else
ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena));
}
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
config_prof));
}
return (ret); return (ret);
} }
JEMALLOC_ALWAYS_INLINE void * JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero) ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena)
{ {
return (ipalloct(usize, alignment, zero, true, NULL)); return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
} }
/* JEMALLOC_ALWAYS_INLINE void *
* Typical usage: ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
* void *ptr = [...]
* size_t sz = isalloc(ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{ {
size_t ret;
arena_chunk_t *chunk;
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || demote == false);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd,
if (chunk != ptr) NULL), false, NULL));
ret = arena_salloc(ptr, demote);
else
ret = huge_salloc(ptr);
return (ret);
} }
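The aligned-allocation wrappers above expect usize to have been pre-rounded with sa2u(), as the assertion in ipallocztm() enforces. A minimal caller sketch, not part of the upstream diff (the helper name is hypothetical; it assumes the jemalloc_internal.h declarations above are in scope):
JEMALLOC_ALWAYS_INLINE void *
aligned_alloc_sketch(tsd_t *tsd, size_t size, size_t alignment)
{
	/* sa2u() rounds the request up front; it returns 0 on overflow. */
	size_t usize = sa2u(size, alignment);

	if (usize == 0)
		return (NULL);
	/* zero = false; tcache/arena selection is left to ipalloc(). */
	return (ipalloc(tsd, usize, alignment, false));
}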
JEMALLOC_ALWAYS_INLINE size_t JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote) ivsalloc(const void *ptr, bool demote)
{ {
extent_node_t *node;
/* Return 0 if ptr is not within a chunk managed by jemalloc. */ /* Return 0 if ptr is not within a chunk managed by jemalloc. */
if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0) node = chunk_lookup(ptr, false);
if (node == NULL)
return (0); return (0);
/* Only arena chunks should be looked up via interior pointers. */
assert(extent_node_addr_get(node) == ptr ||
extent_node_achunk_get(node));
return (isalloc(ptr, demote)); return (isalloc(ptr, demote));
} }
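ivsalloc() is the validating counterpart of isalloc(): it returns 0 for pointers that jemalloc does not own instead of assuming ownership. A hedged sketch of the kind of check this enables (wrapper name hypothetical; config_prof is passed as the demote flag, mirroring the "typical usage" comment above):
JEMALLOC_ALWAYS_INLINE size_t
usable_size_or_zero(const void *ptr)
{
	if (ptr == NULL)
		return (0);
	/* 0 means ptr does not lie within a jemalloc-managed chunk. */
	return (ivsalloc(ptr, config_prof));
}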
...@@ -870,7 +976,7 @@ u2rz(size_t usize) ...@@ -870,7 +976,7 @@ u2rz(size_t usize)
size_t ret; size_t ret;
if (usize <= SMALL_MAXCLASS) { if (usize <= SMALL_MAXCLASS) {
size_t binind = SMALL_SIZE2BIN(usize); szind_t binind = size2index(usize);
ret = arena_bin_info[binind].redzone_size; ret = arena_bin_info[binind].redzone_size;
} else } else
ret = 0; ret = 0;
...@@ -887,47 +993,62 @@ p2rz(const void *ptr) ...@@ -887,47 +993,62 @@ p2rz(const void *ptr)
} }
JEMALLOC_ALWAYS_INLINE void JEMALLOC_ALWAYS_INLINE void
idalloct(void *ptr, bool try_tcache) idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata)
{ {
arena_chunk_t *chunk;
assert(ptr != NULL); assert(ptr != NULL);
if (config_stats && is_metadata) {
arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
config_prof));
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); arena_dalloc(tsd, ptr, tcache);
if (chunk != ptr) }
arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
else JEMALLOC_ALWAYS_INLINE void
huge_dalloc(ptr, true); idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
idalloctm(tsd, ptr, tcache, false);
} }
JEMALLOC_ALWAYS_INLINE void JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr) idalloc(tsd_t *tsd, void *ptr)
{ {
idalloct(ptr, true); idalloctm(tsd, ptr, tcache_get(tsd, false), false);
} }
JEMALLOC_ALWAYS_INLINE void JEMALLOC_ALWAYS_INLINE void
iqalloct(void *ptr, bool try_tcache) iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{ {
if (config_fill && opt_quarantine) if (config_fill && unlikely(opt_quarantine))
quarantine(ptr); quarantine(tsd, ptr);
else else
idalloct(ptr, try_tcache); idalloctm(tsd, ptr, tcache, false);
}
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{
arena_sdalloc(tsd, ptr, size, tcache);
} }
JEMALLOC_ALWAYS_INLINE void JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr) isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{ {
iqalloct(ptr, true); if (config_fill && unlikely(opt_quarantine))
quarantine(tsd, ptr);
else
isdalloct(tsd, ptr, size, tcache);
} }
JEMALLOC_ALWAYS_INLINE void * JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra, iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
arena_t *arena)
{ {
void *p; void *p;
size_t usize, copysize; size_t usize, copysize;
...@@ -935,7 +1056,7 @@ iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra, ...@@ -935,7 +1056,7 @@ iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
usize = sa2u(size + extra, alignment); usize = sa2u(size + extra, alignment);
if (usize == 0) if (usize == 0)
return (NULL); return (NULL);
p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
if (p == NULL) { if (p == NULL) {
if (extra == 0) if (extra == 0)
return (NULL); return (NULL);
...@@ -943,7 +1064,7 @@ iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra, ...@@ -943,7 +1064,7 @@ iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
usize = sa2u(size, alignment); usize = sa2u(size, alignment);
if (usize == 0) if (usize == 0)
return (NULL); return (NULL);
p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
if (p == NULL) if (p == NULL)
return (NULL); return (NULL);
} }
...@@ -953,72 +1074,57 @@ iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra, ...@@ -953,72 +1074,57 @@ iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
*/ */
copysize = (size < oldsize) ? size : oldsize; copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize); memcpy(p, ptr, copysize);
iqalloct(ptr, try_tcache_dalloc); isqalloc(tsd, ptr, oldsize, tcache);
return (p); return (p);
} }
JEMALLOC_ALWAYS_INLINE void * JEMALLOC_ALWAYS_INLINE void *
iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena) bool zero, tcache_t *tcache, arena_t *arena)
{ {
size_t oldsize;
assert(ptr != NULL); assert(ptr != NULL);
assert(size != 0); assert(size != 0);
oldsize = isalloc(ptr, config_prof);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) { != 0) {
/* /*
* Existing object alignment is inadequate; allocate new space * Existing object alignment is inadequate; allocate new space
* and copy. * and copy.
*/ */
return (iralloct_realign(ptr, oldsize, size, extra, alignment, return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
zero, try_tcache_alloc, try_tcache_dalloc, arena)); zero, tcache, arena));
} }
if (size + extra <= arena_maxclass) { return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
return (arena_ralloc(arena, ptr, oldsize, size, extra, tcache));
alignment, zero, try_tcache_alloc,
try_tcache_dalloc));
} else {
return (huge_ralloc(ptr, oldsize, size, extra,
alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
}
} }
JEMALLOC_ALWAYS_INLINE void * JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero) iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
bool zero)
{ {
return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL)); return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
tcache_get(tsd, true), NULL));
} }
JEMALLOC_ALWAYS_INLINE bool JEMALLOC_ALWAYS_INLINE bool
ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero) ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
bool zero)
{ {
size_t oldsize;
assert(ptr != NULL); assert(ptr != NULL);
assert(size != 0); assert(size != 0);
oldsize = isalloc(ptr, config_prof);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) { != 0) {
/* Existing object alignment is inadequate. */ /* Existing object alignment is inadequate. */
return (true); return (true);
} }
if (size <= arena_maxclass) return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
else
return (huge_ralloc_no_move(ptr, oldsize, size, extra));
} }
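ixalloc() returns true when the object cannot be resized in place, which pairs naturally with iralloc() in the usual try-in-place-then-move pattern. A sketch under that reading (helper name hypothetical; extra and alignment left at 0):
JEMALLOC_ALWAYS_INLINE void *
resize_sketch(tsd_t *tsd, void *ptr, size_t oldsize, size_t size)
{
	/* Try to grow/shrink the existing allocation without moving it. */
	if (!ixalloc(ptr, oldsize, size, 0, 0, false))
		return (ptr);
	/* Otherwise allocate, copy, and free through iralloc(). */
	return (iralloc(tsd, ptr, oldsize, size, 0, false));
}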
malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif #endif
#include "jemalloc/internal/prof.h" #include "jemalloc/internal/prof.h"
......
#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
#else
# include <sys/param.h>
# include <sys/mman.h>
# if !defined(__pnacl__) && !defined(__native_client__)
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
# include <errno.h>
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
# ifdef JEMALLOC_HAS_RESTRICT
# define restrict __restrict
# endif
/* Disable warnings about deprecated system functions. */
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
isblank(int c)
{
return (c == '\t' || c == ' ');
}
#endif
#else
# include <unistd.h>
#endif
#include <fcntl.h>
#endif /* JEMALLOC_INTERNAL_H */
...@@ -22,6 +22,9 @@ ...@@ -22,6 +22,9 @@
*/ */
#undef CPU_SPINWAIT #undef CPU_SPINWAIT
/* Defined if C11 atomics are available. */
#undef JEMALLOC_C11ATOMICS
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ /* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
#undef JEMALLOC_ATOMIC9 #undef JEMALLOC_ATOMIC9
...@@ -35,7 +38,7 @@ ...@@ -35,7 +38,7 @@
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines) * functions are defined in libgcc instead of being inlines).
*/ */
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
...@@ -43,16 +46,36 @@ ...@@ -43,16 +46,36 @@
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines) * functions are defined in libgcc instead of being inlines).
*/ */
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
#undef JEMALLOC_HAVE_BUILTIN_CLZ
/*
* Defined if madvise(2) is available.
*/
#undef JEMALLOC_HAVE_MADVISE
/* /*
* Defined if OSSpin*() functions are available, as provided by Darwin, and * Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page. * documented in the spinlock(3) manual page.
*/ */
#undef JEMALLOC_OSSPIN #undef JEMALLOC_OSSPIN
/*
* Defined if secure_getenv(3) is available.
*/
#undef JEMALLOC_HAVE_SECURE_GETENV
/*
* Defined if issetugid(2) is available.
*/
#undef JEMALLOC_HAVE_ISSETUGID
/* /*
* Defined if _malloc_thread_cleanup() exists. At least in the case of * Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc * FreeBSD, pthread_key_create() allocates, which if used during malloc
...@@ -76,9 +99,6 @@ ...@@ -76,9 +99,6 @@
*/ */
#undef JEMALLOC_MUTEX_INIT_CB #undef JEMALLOC_MUTEX_INIT_CB
/* Defined if sbrk() is supported. */
#undef JEMALLOC_HAVE_SBRK
/* Non-empty if the tls_model attribute is supported. */ /* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL #undef JEMALLOC_TLS_MODEL
...@@ -137,8 +157,26 @@ ...@@ -137,8 +157,26 @@
/* Support lazy locking (avoid locking unless a second thread is launched). */ /* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK #undef JEMALLOC_LAZY_LOCK
/* One page is 2^STATIC_PAGE_SHIFT bytes. */ /* Minimum size class to support is 2^LG_TINY_MIN bytes. */
#undef STATIC_PAGE_SHIFT #undef LG_TINY_MIN
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#undef LG_QUANTUM
/* One page is 2^LG_PAGE bytes. */
#undef LG_PAGE
/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
* This is the normal order of things for mmap()/munmap(), but on Windows
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
* mappings do *not* coalesce/fragment.
*/
#undef JEMALLOC_MAPS_COALESCE
/* /*
* If defined, use munmap() to unmap freed chunks, rather than storing them for * If defined, use munmap() to unmap freed chunks, rather than storing them for
...@@ -147,22 +185,28 @@ ...@@ -147,22 +185,28 @@
*/ */
#undef JEMALLOC_MUNMAP #undef JEMALLOC_MUNMAP
/*
* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is
* disabled by default because it is Linux-specific and it will cause virtual
* memory map holes, much like munmap(2) does.
*/
#undef JEMALLOC_MREMAP
/* TLS is used to map arenas and magazine caches to threads. */ /* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS #undef JEMALLOC_TLS
/*
* ffs()/ffsl() functions to use for bitmapping. Don't use these directly;
* instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h.
*/
#undef JEMALLOC_INTERNAL_FFSL
#undef JEMALLOC_INTERNAL_FFS
/* /*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them. * within jemalloc-owned chunks before dereferencing them.
*/ */
#undef JEMALLOC_IVSALLOC #undef JEMALLOC_IVSALLOC
/*
* If defined, explicitly attempt to more uniformly distribute large allocation
* pointer alignments across all cache indices.
*/
#undef JEMALLOC_CACHE_OBLIVIOUS
/* /*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/ */
...@@ -182,9 +226,7 @@ ...@@ -182,9 +226,7 @@
#undef JEMALLOC_PURGE_MADVISE_DONTNEED #undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_FREE #undef JEMALLOC_PURGE_MADVISE_FREE
/* /* Define if operating system has alloca.h header. */
* Define if operating system has alloca.h header.
*/
#undef JEMALLOC_HAS_ALLOCA_H #undef JEMALLOC_HAS_ALLOCA_H
/* C99 restrict keyword supported. */ /* C99 restrict keyword supported. */
...@@ -202,4 +244,19 @@ ...@@ -202,4 +244,19 @@
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ /* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T #undef LG_SIZEOF_INTMAX_T
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
#undef JEMALLOC_GLIBC_MALLOC_HOOK
/* glibc memalign hook. */
#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
/* Adaptive mutex support in pthreads. */
#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
*/
#undef JEMALLOC_EXPORT
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ #endif /* JEMALLOC_INTERNAL_DEFS_H_ */
...@@ -39,9 +39,15 @@ ...@@ -39,9 +39,15 @@
#endif #endif
#define ZU(z) ((size_t)z) #define ZU(z) ((size_t)z)
#define ZI(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q) #define QU(q) ((uint64_t)q)
#define QI(q) ((int64_t)q) #define QI(q) ((int64_t)q)
#define KZU(z) ZU(z##ULL)
#define KZI(z) ZI(z##LL)
#define KQU(q) QU(q##ULL)
#define KQI(q) QI(q##LL)
#ifndef __DECONST #ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) # define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif #endif
......
...@@ -10,7 +10,7 @@ typedef struct malloc_mutex_s malloc_mutex_t; ...@@ -10,7 +10,7 @@ typedef struct malloc_mutex_s malloc_mutex_t;
#elif (defined(JEMALLOC_MUTEX_INIT_CB)) #elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} # define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
#else #else
# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \ # if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP} # define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
...@@ -26,7 +26,11 @@ typedef struct malloc_mutex_s malloc_mutex_t; ...@@ -26,7 +26,11 @@ typedef struct malloc_mutex_s malloc_mutex_t;
struct malloc_mutex_s { struct malloc_mutex_s {
#ifdef _WIN32 #ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
SRWLOCK lock;
# else
CRITICAL_SECTION lock; CRITICAL_SECTION lock;
# endif
#elif (defined(JEMALLOC_OSSPIN)) #elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock; OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB)) #elif (defined(JEMALLOC_MUTEX_INIT_CB))
...@@ -70,7 +74,11 @@ malloc_mutex_lock(malloc_mutex_t *mutex) ...@@ -70,7 +74,11 @@ malloc_mutex_lock(malloc_mutex_t *mutex)
if (isthreaded) { if (isthreaded) {
#ifdef _WIN32 #ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
AcquireSRWLockExclusive(&mutex->lock);
# else
EnterCriticalSection(&mutex->lock); EnterCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OSSPIN)) #elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mutex->lock); OSSpinLockLock(&mutex->lock);
#else #else
...@@ -85,7 +93,11 @@ malloc_mutex_unlock(malloc_mutex_t *mutex) ...@@ -85,7 +93,11 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
if (isthreaded) { if (isthreaded) {
#ifdef _WIN32 #ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
ReleaseSRWLockExclusive(&mutex->lock);
# else
LeaveCriticalSection(&mutex->lock); LeaveCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OSSPIN)) #elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mutex->lock); OSSpinLockUnlock(&mutex->lock);
#else #else
......
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *pages_map(void *addr, size_t size);
void pages_unmap(void *addr, size_t size);
void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
size_t size);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge(void *addr, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
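A hedged sketch of how the pages interface declared above might be exercised (demo only; it assumes pages_map() accepts a NULL hint so the OS picks an address and returns NULL on failure, in keeping with the mmap-style semantics described elsewhere in this commit):
static void
pages_demo(size_t size)
{
	void *addr = pages_map(NULL, size);	/* assumption: NULL hint allowed */

	if (addr == NULL)
		return;
	/* ... use the mapping ... */
	pages_purge(addr, size);	/* hint that the contents may be discarded */
	pages_unmap(addr, size);
}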
a0calloc a0dalloc
a0free a0get
a0malloc a0malloc
arena_aalloc
arena_alloc_junk_small arena_alloc_junk_small
arena_bin_index arena_bin_index
arena_bin_info arena_bin_info
arena_bitselm_get
arena_boot arena_boot
arena_choose
arena_choose_hard
arena_chunk_alloc_huge
arena_chunk_cache_maybe_insert
arena_chunk_cache_maybe_remove
arena_chunk_dalloc_huge
arena_chunk_ralloc_huge_expand
arena_chunk_ralloc_huge_shrink
arena_chunk_ralloc_huge_similar
arena_cleanup
arena_dalloc arena_dalloc
arena_dalloc_bin arena_dalloc_bin
arena_dalloc_bin_locked arena_dalloc_bin_junked_locked
arena_dalloc_junk_large arena_dalloc_junk_large
arena_dalloc_junk_small arena_dalloc_junk_small
arena_dalloc_large arena_dalloc_large
arena_dalloc_large_locked arena_dalloc_large_junked_locked
arena_dalloc_small arena_dalloc_small
arena_dss_prec_get arena_dss_prec_get
arena_dss_prec_set arena_dss_prec_set
arena_get
arena_get_hard
arena_init
arena_lg_dirty_mult_default_get
arena_lg_dirty_mult_default_set
arena_lg_dirty_mult_get
arena_lg_dirty_mult_set
arena_malloc arena_malloc
arena_malloc_large arena_malloc_large
arena_malloc_small arena_malloc_small
arena_mapbits_allocated_get arena_mapbits_allocated_get
arena_mapbits_binind_get arena_mapbits_binind_get
arena_mapbits_decommitted_get
arena_mapbits_dirty_get arena_mapbits_dirty_get
arena_mapbits_get arena_mapbits_get
arena_mapbits_internal_set
arena_mapbits_large_binind_set arena_mapbits_large_binind_set
arena_mapbits_large_get arena_mapbits_large_get
arena_mapbits_large_set arena_mapbits_large_set
arena_mapbits_large_size_get arena_mapbits_large_size_get
arena_mapbitsp_get
arena_mapbitsp_read
arena_mapbitsp_write
arena_mapbits_size_decode
arena_mapbits_size_encode
arena_mapbits_small_runind_get arena_mapbits_small_runind_get
arena_mapbits_small_set arena_mapbits_small_set
arena_mapbits_unallocated_set arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get arena_mapbits_unzeroed_get
arena_mapbits_unzeroed_set arena_maxrun
arena_mapbitsp_get arena_maybe_purge
arena_mapbitsp_read arena_metadata_allocated_add
arena_mapbitsp_write arena_metadata_allocated_get
arena_mapp_get arena_metadata_allocated_sub
arena_maxclass arena_migrate
arena_miscelm_get
arena_miscelm_to_pageind
arena_miscelm_to_rpages
arena_nbound
arena_new arena_new
arena_node_alloc
arena_node_dalloc
arena_palloc arena_palloc
arena_postfork_child arena_postfork_child
arena_postfork_parent arena_postfork_parent
...@@ -46,50 +78,47 @@ arena_prefork ...@@ -46,50 +78,47 @@ arena_prefork
arena_prof_accum arena_prof_accum
arena_prof_accum_impl arena_prof_accum_impl
arena_prof_accum_locked arena_prof_accum_locked
arena_prof_ctx_get
arena_prof_ctx_set
arena_prof_promoted arena_prof_promoted
arena_prof_tctx_get
arena_prof_tctx_reset
arena_prof_tctx_set
arena_ptr_small_binind_get arena_ptr_small_binind_get
arena_purge_all arena_purge_all
arena_quarantine_junk_small arena_quarantine_junk_small
arena_ralloc arena_ralloc
arena_ralloc_junk_large arena_ralloc_junk_large
arena_ralloc_no_move arena_ralloc_no_move
arena_rd_to_miscelm
arena_redzone_corruption arena_redzone_corruption
arena_run_regind arena_run_regind
arena_run_to_miscelm
arena_salloc arena_salloc
arenas_cache_bypass_cleanup
arenas_cache_cleanup
arena_sdalloc
arena_stats_merge arena_stats_merge
arena_tcache_fill_small arena_tcache_fill_small
arenas atomic_add_p
arenas_booted
arenas_cleanup
arenas_extend
arenas_initialized
arenas_lock
arenas_tls
arenas_tsd
arenas_tsd_boot
arenas_tsd_cleanup_wrapper
arenas_tsd_get
arenas_tsd_get_wrapper
arenas_tsd_init_head
arenas_tsd_set
atomic_add_u atomic_add_u
atomic_add_uint32 atomic_add_uint32
atomic_add_uint64 atomic_add_uint64
atomic_add_z atomic_add_z
atomic_cas_p
atomic_cas_u
atomic_cas_uint32
atomic_cas_uint64
atomic_cas_z
atomic_sub_p
atomic_sub_u atomic_sub_u
atomic_sub_uint32 atomic_sub_uint32
atomic_sub_uint64 atomic_sub_uint64
atomic_sub_z atomic_sub_z
base_alloc base_alloc
base_boot base_boot
base_calloc
base_node_alloc
base_node_dealloc
base_postfork_child base_postfork_child
base_postfork_parent base_postfork_parent
base_prefork base_prefork
base_stats_get
bitmap_full bitmap_full
bitmap_get bitmap_get
bitmap_info_init bitmap_info_init
...@@ -99,49 +128,54 @@ bitmap_set ...@@ -99,49 +128,54 @@ bitmap_set
bitmap_sfu bitmap_sfu
bitmap_size bitmap_size
bitmap_unset bitmap_unset
bootstrap_calloc
bootstrap_free
bootstrap_malloc
bt_init bt_init
buferror buferror
choose_arena chunk_alloc_base
choose_arena_hard chunk_alloc_cache
chunk_alloc
chunk_alloc_dss chunk_alloc_dss
chunk_alloc_mmap chunk_alloc_mmap
chunk_alloc_wrapper
chunk_boot chunk_boot
chunk_dealloc chunk_dalloc_arena
chunk_dealloc_mmap chunk_dalloc_cache
chunk_dalloc_mmap
chunk_dalloc_wrapper
chunk_deregister
chunk_dss_boot chunk_dss_boot
chunk_dss_postfork_child chunk_dss_postfork_child
chunk_dss_postfork_parent chunk_dss_postfork_parent
chunk_dss_prec_get chunk_dss_prec_get
chunk_dss_prec_set chunk_dss_prec_set
chunk_dss_prefork chunk_dss_prefork
chunk_hooks_default
chunk_hooks_get
chunk_hooks_set
chunk_in_dss chunk_in_dss
chunk_lookup
chunk_npages chunk_npages
chunk_postfork_child chunk_postfork_child
chunk_postfork_parent chunk_postfork_parent
chunk_prefork chunk_prefork
chunk_unmap chunk_purge_arena
chunks_mtx chunk_purge_wrapper
chunks_rtree chunk_register
chunksize chunksize
chunksize_mask chunksize_mask
ckh_bucket_search chunks_rtree
ckh_count ckh_count
ckh_delete ckh_delete
ckh_evict_reloc_insert
ckh_insert ckh_insert
ckh_isearch
ckh_iter ckh_iter
ckh_new ckh_new
ckh_pointer_hash ckh_pointer_hash
ckh_pointer_keycomp ckh_pointer_keycomp
ckh_rebuild
ckh_remove ckh_remove
ckh_search ckh_search
ckh_string_hash ckh_string_hash
ckh_string_keycomp ckh_string_keycomp
ckh_try_bucket_insert
ckh_try_insert
ctl_boot ctl_boot
ctl_bymib ctl_bymib
ctl_byname ctl_byname
...@@ -150,6 +184,23 @@ ctl_postfork_child ...@@ -150,6 +184,23 @@ ctl_postfork_child
ctl_postfork_parent ctl_postfork_parent
ctl_prefork ctl_prefork
dss_prec_names dss_prec_names
extent_node_achunk_get
extent_node_achunk_set
extent_node_addr_get
extent_node_addr_set
extent_node_arena_get
extent_node_arena_set
extent_node_dirty_insert
extent_node_dirty_linkage_init
extent_node_dirty_remove
extent_node_init
extent_node_prof_tctx_get
extent_node_prof_tctx_set
extent_node_size_get
extent_node_size_set
extent_node_zeroed_get
extent_node_zeroed_set
extent_tree_ad_empty
extent_tree_ad_first extent_tree_ad_first
extent_tree_ad_insert extent_tree_ad_insert
extent_tree_ad_iter extent_tree_ad_iter
...@@ -166,6 +217,7 @@ extent_tree_ad_reverse_iter ...@@ -166,6 +217,7 @@ extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start extent_tree_ad_reverse_iter_start
extent_tree_ad_search extent_tree_ad_search
extent_tree_szad_empty
extent_tree_szad_first extent_tree_szad_first
extent_tree_szad_insert extent_tree_szad_insert
extent_tree_szad_iter extent_tree_szad_iter
...@@ -193,45 +245,49 @@ hash_rotl_64 ...@@ -193,45 +245,49 @@ hash_rotl_64
hash_x64_128 hash_x64_128
hash_x86_128 hash_x86_128
hash_x86_32 hash_x86_32
huge_allocated huge_aalloc
huge_boot
huge_dalloc huge_dalloc
huge_dalloc_junk huge_dalloc_junk
huge_dss_prec_get
huge_malloc huge_malloc
huge_mtx
huge_ndalloc
huge_nmalloc
huge_palloc huge_palloc
huge_postfork_child huge_prof_tctx_get
huge_postfork_parent huge_prof_tctx_reset
huge_prefork huge_prof_tctx_set
huge_prof_ctx_get
huge_prof_ctx_set
huge_ralloc huge_ralloc
huge_ralloc_no_move huge_ralloc_no_move
huge_salloc huge_salloc
iallocm iaalloc
iallocztm
icalloc icalloc
icalloct icalloct
idalloc idalloc
idalloct idalloct
idalloctm
imalloc imalloc
imalloct imalloct
index2size
index2size_compute
index2size_lookup
index2size_tab
in_valgrind
ipalloc ipalloc
ipalloct ipalloct
ipallocztm
iqalloc iqalloc
iqalloct
iralloc iralloc
iralloct iralloct
iralloct_realign iralloct_realign
isalloc isalloc
isdalloct
isqalloc
isthreaded isthreaded
ivsalloc ivsalloc
ixalloc ixalloc
jemalloc_postfork_child jemalloc_postfork_child
jemalloc_postfork_parent jemalloc_postfork_parent
jemalloc_prefork jemalloc_prefork
large_maxclass
lg_floor
malloc_cprintf malloc_cprintf
malloc_mutex_init malloc_mutex_init
malloc_mutex_lock malloc_mutex_lock
...@@ -242,7 +298,8 @@ malloc_mutex_unlock ...@@ -242,7 +298,8 @@ malloc_mutex_unlock
malloc_printf malloc_printf
malloc_snprintf malloc_snprintf
malloc_strtoumax malloc_strtoumax
malloc_tsd_boot malloc_tsd_boot0
malloc_tsd_boot1
malloc_tsd_cleanup_register malloc_tsd_cleanup_register
malloc_tsd_dalloc malloc_tsd_dalloc
malloc_tsd_malloc malloc_tsd_malloc
...@@ -251,16 +308,18 @@ malloc_vcprintf ...@@ -251,16 +308,18 @@ malloc_vcprintf
malloc_vsnprintf malloc_vsnprintf
malloc_write malloc_write
map_bias map_bias
map_misc_offset
mb_write mb_write
mutex_boot mutex_boot
narenas_auto narenas_cache_cleanup
narenas_total
narenas_total_get narenas_total_get
ncpus ncpus
nhbins nhbins
opt_abort opt_abort
opt_dss opt_dss
opt_junk opt_junk
opt_junk_alloc
opt_junk_free
opt_lg_chunk opt_lg_chunk
opt_lg_dirty_mult opt_lg_dirty_mult
opt_lg_prof_interval opt_lg_prof_interval
...@@ -274,84 +333,99 @@ opt_prof_final ...@@ -274,84 +333,99 @@ opt_prof_final
opt_prof_gdump opt_prof_gdump
opt_prof_leak opt_prof_leak
opt_prof_prefix opt_prof_prefix
opt_prof_thread_active_init
opt_quarantine opt_quarantine
opt_redzone opt_redzone
opt_stats_print opt_stats_print
opt_tcache opt_tcache
opt_utrace opt_utrace
opt_valgrind
opt_xmalloc opt_xmalloc
opt_zero opt_zero
p2rz p2rz
pages_commit
pages_decommit
pages_map
pages_purge pages_purge
pages_trim
pages_unmap
pow2_ceil pow2_ceil
prof_active_get
prof_active_get_unlocked
prof_active_set
prof_alloc_prep
prof_alloc_rollback
prof_backtrace prof_backtrace
prof_boot0 prof_boot0
prof_boot1 prof_boot1
prof_boot2 prof_boot2
prof_bt_count prof_dump_header
prof_ctx_get
prof_ctx_set
prof_dump_open prof_dump_open
prof_free prof_free
prof_free_sampled_object
prof_gdump prof_gdump
prof_gdump_get
prof_gdump_get_unlocked
prof_gdump_set
prof_gdump_val
prof_idump prof_idump
prof_interval prof_interval
prof_lookup prof_lookup
prof_malloc prof_malloc
prof_malloc_sample_object
prof_mdump prof_mdump
prof_postfork_child prof_postfork_child
prof_postfork_parent prof_postfork_parent
prof_prefork prof_prefork
prof_promote
prof_realloc prof_realloc
prof_reset
prof_sample_accum_update prof_sample_accum_update
prof_sample_threshold_update prof_sample_threshold_update
prof_tdata_booted prof_tctx_get
prof_tctx_reset
prof_tctx_set
prof_tdata_cleanup prof_tdata_cleanup
prof_tdata_get prof_tdata_get
prof_tdata_init prof_tdata_init
prof_tdata_initialized prof_tdata_reinit
prof_tdata_tls prof_thread_active_get
prof_tdata_tsd prof_thread_active_init_get
prof_tdata_tsd_boot prof_thread_active_init_set
prof_tdata_tsd_cleanup_wrapper prof_thread_active_set
prof_tdata_tsd_get prof_thread_name_get
prof_tdata_tsd_get_wrapper prof_thread_name_set
prof_tdata_tsd_init_head
prof_tdata_tsd_set
quarantine quarantine
quarantine_alloc_hook quarantine_alloc_hook
quarantine_boot quarantine_alloc_hook_work
quarantine_booted
quarantine_cleanup quarantine_cleanup
quarantine_init
quarantine_tls
quarantine_tsd
quarantine_tsd_boot
quarantine_tsd_cleanup_wrapper
quarantine_tsd_get
quarantine_tsd_get_wrapper
quarantine_tsd_init_head
quarantine_tsd_set
register_zone register_zone
rtree_child_read
rtree_child_read_hard
rtree_child_tryread
rtree_delete rtree_delete
rtree_get rtree_get
rtree_get_locked
rtree_new rtree_new
rtree_postfork_child rtree_node_valid
rtree_postfork_parent
rtree_prefork
rtree_set rtree_set
rtree_start_level
rtree_subkey
rtree_subtree_read
rtree_subtree_read_hard
rtree_subtree_tryread
rtree_val_read
rtree_val_write
s2u s2u
s2u_compute
s2u_lookup
sa2u sa2u
set_errno set_errno
small_size2bin size2index
size2index_compute
size2index_lookup
size2index_tab
stats_cactive stats_cactive
stats_cactive_add stats_cactive_add
stats_cactive_get stats_cactive_get
stats_cactive_sub stats_cactive_sub
stats_chunks
stats_print stats_print
tcache_alloc_easy tcache_alloc_easy
tcache_alloc_large tcache_alloc_large
...@@ -359,55 +433,67 @@ tcache_alloc_small ...@@ -359,55 +433,67 @@ tcache_alloc_small
tcache_alloc_small_hard tcache_alloc_small_hard
tcache_arena_associate tcache_arena_associate
tcache_arena_dissociate tcache_arena_dissociate
tcache_arena_reassociate
tcache_bin_flush_large tcache_bin_flush_large
tcache_bin_flush_small tcache_bin_flush_small
tcache_bin_info tcache_bin_info
tcache_boot0 tcache_boot
tcache_boot1 tcache_cleanup
tcache_booted
tcache_create tcache_create
tcache_dalloc_large tcache_dalloc_large
tcache_dalloc_small tcache_dalloc_small
tcache_destroy tcache_enabled_cleanup
tcache_enabled_booted
tcache_enabled_get tcache_enabled_get
tcache_enabled_initialized
tcache_enabled_set tcache_enabled_set
tcache_enabled_tls
tcache_enabled_tsd
tcache_enabled_tsd_boot
tcache_enabled_tsd_cleanup_wrapper
tcache_enabled_tsd_get
tcache_enabled_tsd_get_wrapper
tcache_enabled_tsd_init_head
tcache_enabled_tsd_set
tcache_event tcache_event
tcache_event_hard tcache_event_hard
tcache_flush tcache_flush
tcache_get tcache_get
tcache_initialized tcache_get_hard
tcache_maxclass tcache_maxclass
tcaches
tcache_salloc tcache_salloc
tcaches_create
tcaches_destroy
tcaches_flush
tcaches_get
tcache_stats_merge tcache_stats_merge
tcache_thread_cleanup thread_allocated_cleanup
tcache_tls thread_deallocated_cleanup
tcache_tsd tsd_arena_get
tcache_tsd_boot tsd_arena_set
tcache_tsd_cleanup_wrapper tsd_boot
tcache_tsd_get tsd_boot0
tcache_tsd_get_wrapper tsd_boot1
tcache_tsd_init_head tsd_booted
tcache_tsd_set tsd_cleanup
thread_allocated_booted tsd_cleanup_wrapper
thread_allocated_initialized tsd_fetch
thread_allocated_tls tsd_get
thread_allocated_tsd tsd_wrapper_get
thread_allocated_tsd_boot tsd_wrapper_set
thread_allocated_tsd_cleanup_wrapper tsd_initialized
thread_allocated_tsd_get
thread_allocated_tsd_get_wrapper
thread_allocated_tsd_init_head
thread_allocated_tsd_set
tsd_init_check_recursion tsd_init_check_recursion
tsd_init_finish tsd_init_finish
tsd_init_head
tsd_nominal
tsd_quarantine_get
tsd_quarantine_set
tsd_set
tsd_tcache_enabled_get
tsd_tcache_enabled_set
tsd_tcache_get
tsd_tcache_set
tsd_tls
tsd_tsd
tsd_prof_tdata_get
tsd_prof_tdata_set
tsd_thread_allocated_get
tsd_thread_allocated_set
tsd_thread_deallocated_get
tsd_thread_deallocated_set
u2rz u2rz
valgrind_freelike_block
valgrind_make_mem_defined
valgrind_make_mem_noaccess
valgrind_make_mem_undefined
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
* *
* This choice of m has the disadvantage that the quality of the bits is * This choice of m has the disadvantage that the quality of the bits is
* proportional to bit position. For example. the lowest bit has a cycle of 2, * proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper * the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits. * bits.
* *
...@@ -26,22 +26,22 @@ ...@@ -26,22 +26,22 @@
* const uint32_t a, c : See above discussion. * const uint32_t a, c : See above discussion.
*/ */
#define prng32(r, lg_range, state, a, c) do { \ #define prng32(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \ assert((lg_range) > 0); \
assert(lg_range <= 32); \ assert((lg_range) <= 32); \
\ \
r = (state * (a)) + (c); \ r = (state * (a)) + (c); \
state = r; \ state = r; \
r >>= (32 - lg_range); \ r >>= (32 - (lg_range)); \
} while (false) } while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */ /* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \ #define prng64(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \ assert((lg_range) > 0); \
assert(lg_range <= 64); \ assert((lg_range) <= 64); \
\ \
r = (state * (a)) + (c); \ r = (state * (a)) + (c); \
state = r; \ state = r; \
r >>= (64 - lg_range); \ r >>= (64 - (lg_range)); \
} while (false) } while (false)
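A minimal sketch, not part of the diff, of drawing 16 pseudo-random bits with prng64(); it reuses the multiplier/increment constants that appear in the profiler code later in this commit, and keeps only the upper bits of the product, as the comment above recommends:
JEMALLOC_INLINE uint64_t
prng64_demo(uint64_t *state)	/* hypothetical helper; seed *state once */
{
	uint64_t r;
	uint64_t s = *state;

	prng64(r, 16, s, UINT64_C(6364136223846793005),
	    UINT64_C(1442695040888963407));
	*state = s;	/* prng64() stored the advanced LCG state in s */
	return (r);	/* uniformly distributed in [0, 2^16) */
}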
#endif /* JEMALLOC_H_TYPES */ #endif /* JEMALLOC_H_TYPES */
......
...@@ -3,8 +3,8 @@ ...@@ -3,8 +3,8 @@
typedef struct prof_bt_s prof_bt_t; typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t; typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_thr_cnt_s prof_thr_cnt_t; typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_ctx_s prof_ctx_t; typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t; typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */ /* Option defaults. */
...@@ -23,9 +23,6 @@ typedef struct prof_tdata_s prof_tdata_t; ...@@ -23,9 +23,6 @@ typedef struct prof_tdata_s prof_tdata_t;
*/ */
#define PROF_BT_MAX 128 #define PROF_BT_MAX 128
/* Maximum number of backtraces to store in each per thread LRU cache. */
#define PROF_TCMAX 1024
/* Initial hash table size. */ /* Initial hash table size. */
#define PROF_CKH_MINITEMS 64 #define PROF_CKH_MINITEMS 64
...@@ -36,11 +33,17 @@ typedef struct prof_tdata_s prof_tdata_t; ...@@ -36,11 +33,17 @@ typedef struct prof_tdata_s prof_tdata_t;
#define PROF_PRINTF_BUFSIZE 128 #define PROF_PRINTF_BUFSIZE 128
/* /*
* Number of mutexes shared among all ctx's. No space is allocated for these * Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision. * unless profiling is enabled, so it's okay to over-provision.
*/ */
#define PROF_NCTX_LOCKS 1024 #define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
/* /*
* prof_tdata pointers close to NULL are used to encode state information that * prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown. * is used for cleaning up during thread shutdown.
...@@ -63,141 +66,186 @@ struct prof_bt_s { ...@@ -63,141 +66,186 @@ struct prof_bt_s {
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ /* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct { typedef struct {
prof_bt_t *bt; prof_bt_t *bt;
unsigned nignore;
unsigned max; unsigned max;
} prof_unwind_data_t; } prof_unwind_data_t;
#endif #endif
struct prof_cnt_s { struct prof_cnt_s {
/* /* Profiling counters. */
* Profiling counters. An allocation/deallocation pair can operate on uint64_t curobjs;
* different prof_thr_cnt_t objects that are linked into the same uint64_t curbytes;
* prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
* negative. In principle it is possible for the *bytes counters to
* overflow/underflow, but a general solution would require something
* like 128-bit counters; this implementation doesn't bother to solve
* that problem.
*/
int64_t curobjs;
int64_t curbytes;
uint64_t accumobjs; uint64_t accumobjs;
uint64_t accumbytes; uint64_t accumbytes;
}; };
struct prof_thr_cnt_s { typedef enum {
/* Linkage into prof_ctx_t's cnts_ql. */ prof_tctx_state_initializing,
ql_elm(prof_thr_cnt_t) cnts_link; prof_tctx_state_nominal,
prof_tctx_state_dumping,
prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;
/* Linkage into thread's LRU. */ struct prof_tctx_s {
ql_elm(prof_thr_cnt_t) lru_link; /* Thread data for thread that performed the allocation. */
prof_tdata_t *tdata;
/* /*
* Associated context. If a thread frees an object that it did not * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
* allocate, it is possible that the context is not cached in the * defunct during teardown.
* thread's hash table, in which case it must be able to look up the
* context, insert a new prof_thr_cnt_t into the thread's hash table,
* and link it into the prof_ctx_t's cnts_ql.
*/ */
prof_ctx_t *ctx; uint64_t thr_uid;
uint64_t thr_discrim;
/* Profiling counters, protected by tdata->lock. */
prof_cnt_t cnts;
/* Associated global context. */
prof_gctx_t *gctx;
/* /*
* Threads use memory barriers to update the counters. Since there is * UID that distinguishes multiple tctx's created by the same thread,
* only ever one writer, the only challenge is for the reader to get a * but coexisting in gctx->tctxs. There are two ways that such
* consistent read of the counters. * coexistence can occur:
* * - A dumper thread can cause a tctx to be retained in the purgatory
* The writer uses this series of operations: * state.
* * - Although a single "producer" thread must create all tctx's which
* 1) Increment epoch to an odd number. * share the same thr_uid, multiple "consumers" can each concurrently
* 2) Update counters. * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
* 3) Increment epoch to an even number. * gets called once each time cnts.cur{objs,bytes} drop to 0, but this
* * threshold can be hit again before the first consumer finishes
* The reader must assure 1) that the epoch is even while it reads the * executing prof_tctx_destroy().
* counters, and 2) that the epoch doesn't change between the time it
* starts and finishes reading the counters.
*/ */
unsigned epoch; uint64_t tctx_uid;
/* Profiling counters. */ /* Linkage into gctx's tctxs. */
prof_cnt_t cnts; rb_node(prof_tctx_t) tctx_link;
};
struct prof_ctx_s { /*
/* Associated backtrace. */ * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
prof_bt_t *bt; * sample vs destroy race.
*/
bool prepared;
/* Current dump-related state, protected by gctx->lock. */
prof_tctx_state_t state;
/*
* Copy of cnts snapshotted during early dump phase, protected by
* dump_mtx.
*/
prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
/* Protects nlimbo, cnt_merged, and cnts_ql. */ struct prof_gctx_s {
/* Protects nlimbo, cnt_summed, and tctxs. */
malloc_mutex_t *lock; malloc_mutex_t *lock;
/* /*
* Number of threads that currently cause this ctx to be in a state of * Number of threads that currently cause this gctx to be in a state of
* limbo due to one of: * limbo due to one of:
* - Initializing per thread counters associated with this ctx. * - Initializing this gctx.
* - Preparing to destroy this ctx. * - Initializing per thread counters associated with this gctx.
* - Dumping a heap profile that includes this ctx. * - Preparing to destroy this gctx.
* - Dumping a heap profile that includes this gctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the * nlimbo must be 1 (single destroyer) in order to safely destroy the
* ctx. * gctx.
*/ */
unsigned nlimbo; unsigned nlimbo;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* When threads exit, they merge their stats into cnt_merged. */
prof_cnt_t cnt_merged;
/* /*
* List of profile counters, one for each thread that has allocated in * Tree of profile counters, one for each thread that has allocated in
* this context. * this context.
*/ */
ql_head(prof_thr_cnt_t) cnts_ql; prof_tctx_tree_t tctxs;
/* Linkage for tree of contexts to be dumped. */
rb_node(prof_gctx_t) dump_link;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Associated backtrace. */
prof_bt_t bt;
/* Linkage for list of contexts to be dumped. */ /* Backtrace vector, variable size, referred to by bt. */
ql_elm(prof_ctx_t) dump_link; void *vec[1];
}; };
typedef ql_head(prof_ctx_t) prof_ctx_list_t; typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
struct prof_tdata_s { struct prof_tdata_s {
malloc_mutex_t *lock;
/* Monotonically increasing unique thread identifier. */
uint64_t thr_uid;
/* /*
* Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a * Monotonically increasing discriminator among tdata structures
* cache of backtraces, with associated thread-specific prof_thr_cnt_t * associated with the same thr_uid.
* objects. Other threads may read the prof_thr_cnt_t contents, but no
* others will ever write them.
*
* Upon thread exit, the thread must merge all the prof_thr_cnt_t
* counter data into the associated prof_ctx_t objects, and unlink/free
* the prof_thr_cnt_t objects.
*/ */
ckh_t bt2cnt; uint64_t thr_discrim;
/* LRU for contents of bt2cnt. */ /* Included in heap profile dumps if non-NULL. */
ql_head(prof_thr_cnt_t) lru_ql; char *thread_name;
/* Backtrace vector, used for calls to prof_backtrace(). */ bool attached;
void **vec; bool expired;
rb_node(prof_tdata_t) tdata_link;
/*
* Counter used to initialize prof_tctx_t's tctx_uid. No locking is
* necessary when incrementing this field, because only one thread ever
* does so.
*/
uint64_t tctx_uid_next;
/*
* Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
* backtraces for which it has non-zero allocation/deallocation counters
* associated with thread-specific prof_tctx_t objects. Other threads
* may write to prof_tctx_t contents when freeing associated objects.
*/
ckh_t bt2tctx;
/* Sampling state. */ /* Sampling state. */
uint64_t prng_state; uint64_t prng_state;
uint64_t threshold; uint64_t bytes_until_sample;
uint64_t accum;
/* State used to avoid dumping while operating on prof internals. */ /* State used to avoid dumping while operating on prof internals. */
bool enq; bool enq;
bool enq_idump; bool enq_idump;
bool enq_gdump; bool enq_gdump;
/*
* Set to true during an early dump phase for tdata's which are
* currently being dumped. New threads' tdata's have this initialized
* to false so that they aren't accidentally included in later dump
* phases.
*/
bool dumping;
/*
* True if profiling is active for this tdata's thread
* (thread.prof.active mallctl).
*/
bool active;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Backtrace vector, used for calls to prof_backtrace(). */
void *vec[PROF_BT_MAX];
}; };
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
#endif /* JEMALLOC_H_STRUCTS */ #endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/ /******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS #ifdef JEMALLOC_H_EXTERNS
extern bool opt_prof; extern bool opt_prof;
/*
* Even if opt_prof is true, sampling can be temporarily disabled by setting
* opt_prof_active to false. No locking is used when updating opt_prof_active,
* so there are no guarantees regarding how long it will take for all threads
* to notice state changes.
*/
extern bool opt_prof_active; extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */ extern bool opt_prof_gdump; /* High-water memory dumping. */
...@@ -211,6 +259,12 @@ extern char opt_prof_prefix[ ...@@ -211,6 +259,12 @@ extern char opt_prof_prefix[
#endif #endif
1]; 1];
/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;
/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;
/* /*
* Profile dump interval, measured in bytes allocated. Each arena triggers a * Profile dump interval, measured in bytes allocated. Each arena triggers a
* profile dump when it reaches this threshold. The effect is that the * profile dump when it reaches this threshold. The effect is that the
...@@ -221,391 +275,269 @@ extern char opt_prof_prefix[ ...@@ -221,391 +275,269 @@ extern char opt_prof_prefix[
extern uint64_t prof_interval; extern uint64_t prof_interval;
/* /*
* If true, promote small sampled objects to large objects, since small run * Initialized as opt_lg_prof_sample, and potentially modified during profiling
* headers do not have embedded profile context pointers. * resets.
*/ */
extern bool prof_promote; extern size_t lg_prof_sample;
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec); void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt, unsigned nignore); void prof_backtrace(prof_bt_t *bt);
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt); prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET #ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void); size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *); typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open; extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif #endif
void prof_idump(void); void prof_idump(void);
bool prof_mdump(const char *filename); bool prof_mdump(const char *filename);
void prof_gdump(void); void prof_gdump(void);
prof_tdata_t *prof_tdata_init(void); prof_tdata_t *prof_tdata_init(tsd_t *tsd);
void prof_tdata_cleanup(void *arg); prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
const char *prof_thread_name_get(void);
bool prof_active_get(void);
bool prof_active_set(bool active);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(void);
bool prof_thread_active_set(bool active);
bool prof_thread_active_init_get(void);
bool prof_thread_active_init_set(bool active_init);
bool prof_gdump_get(void);
bool prof_gdump_set(bool active);
void prof_boot0(void); void prof_boot0(void);
void prof_boot1(void); void prof_boot1(void);
bool prof_boot2(void); bool prof_boot2(void);
void prof_prefork(void); void prof_prefork(void);
void prof_postfork_parent(void); void prof_postfork_parent(void);
void prof_postfork_child(void); void prof_postfork_child(void);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_H_EXTERNS */ #endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/ /******************************************************************************/
#ifdef JEMALLOC_H_INLINES #ifdef JEMALLOC_H_INLINES
#define PROF_ALLOC_PREP(nignore, size, ret) do { \
prof_tdata_t *prof_tdata; \
prof_bt_t bt; \
\
assert(size == s2u(size)); \
\
prof_tdata = prof_tdata_get(true); \
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
if (prof_tdata != NULL) \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
else \
ret = NULL; \
break; \
} \
\
if (opt_prof_active == false) { \
/* Sampling is currently inactive, so avoid sampling. */\
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
} else if (opt_lg_prof_sample == 0) { \
/* Don't bother with sampling logic, since sampling */\
/* interval is 1. */\
bt_init(&bt, prof_tdata->vec); \
prof_backtrace(&bt, nignore); \
ret = prof_lookup(&bt); \
} else { \
if (prof_tdata->threshold == 0) { \
/* Initialize. Seed the prng differently for */\
/* each thread. */\
prof_tdata->prng_state = \
(uint64_t)(uintptr_t)&size; \
prof_sample_threshold_update(prof_tdata); \
} \
\
/* Determine whether to capture a backtrace based on */\
/* whether size is enough for prof_accum to reach */\
/* prof_tdata->threshold. However, delay updating */\
/* these variables until prof_{m,re}alloc(), because */\
/* we don't know for sure that the allocation will */\
/* succeed. */\
/* */\
/* Use subtraction rather than addition to avoid */\
/* potential integer overflow. */\
if (size >= prof_tdata->threshold - \
prof_tdata->accum) { \
bt_init(&bt, prof_tdata->vec); \
prof_backtrace(&bt, nignore); \
ret = prof_lookup(&bt); \
} else \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
} \
} while (0)
#ifndef JEMALLOC_ENABLE_INLINE #ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *) bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(bool create); prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
void prof_sample_threshold_update(prof_tdata_t *prof_tdata); bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
prof_ctx_t *prof_ctx_get(const void *ptr); prof_tdata_t **tdata_out);
void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx); prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
bool prof_sample_accum_update(size_t size); bool update);
void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt); prof_tctx_t *prof_tctx_get(const void *ptr);
void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt, void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
size_t old_usize, prof_ctx_t *old_ctx); void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
void prof_free(const void *ptr, size_t size); prof_tctx_t *tctx);
void prof_malloc_sample_object(const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
size_t old_usize, prof_tctx_t *old_tctx);
void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif #endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */ JEMALLOC_ALWAYS_INLINE bool
malloc_tsd_externs(prof_tdata, prof_tdata_t *) prof_active_get_unlocked(void)
malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL, {
prof_tdata_cleanup)
/*
* Even if opt_prof is true, sampling can be temporarily disabled by
* setting prof_active to false. No locking is used when reading
* prof_active in the fast path, so there are no guarantees regarding
* how long it will take for all threads to notice state changes.
*/
return (prof_active);
}
JEMALLOC_INLINE prof_tdata_t * JEMALLOC_ALWAYS_INLINE bool
prof_tdata_get(bool create) prof_gdump_get_unlocked(void)
{ {
prof_tdata_t *prof_tdata;
/*
* No locking is used when reading prof_gdump_val in the fast path, so
* there are no guarantees regarding how long it will take for all
* threads to notice state changes.
*/
return (prof_gdump_val);
}
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create)
{
prof_tdata_t *tdata;
cassert(config_prof); cassert(config_prof);
prof_tdata = *prof_tdata_tsd_get(); tdata = tsd_prof_tdata_get(tsd);
if (create && prof_tdata == NULL) if (create) {
prof_tdata = prof_tdata_init(); if (unlikely(tdata == NULL)) {
if (tsd_nominal(tsd)) {
tdata = prof_tdata_init(tsd);
tsd_prof_tdata_set(tsd, tdata);
}
} else if (unlikely(tdata->expired)) {
tdata = prof_tdata_reinit(tsd, tdata);
tsd_prof_tdata_set(tsd, tdata);
}
assert(tdata == NULL || tdata->attached);
}
return (prof_tdata); return (tdata);
} }
JEMALLOC_INLINE void JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_sample_threshold_update(prof_tdata_t *prof_tdata) prof_tctx_get(const void *ptr)
{ {
/*
* The body of this function is compiled out unless heap profiling is
* enabled, so that it is possible to compile jemalloc with floating
* point support completely disabled. Avoiding floating point code is
* important on memory-constrained systems, but it also enables a
* workaround for versions of glibc that don't properly save/restore
* floating point registers during dynamic lazy symbol loading (which
* internally calls into whatever malloc implementation happens to be
* integrated into the application). Note that some compilers (e.g.
* gcc 4.8) may use floating point registers for fast memory moves, so
* jemalloc must be compiled with such optimizations disabled (e.g.
* -mno-sse) in order for the workaround to be complete.
*/
#ifdef JEMALLOC_PROF
uint64_t r;
double u;
cassert(config_prof); cassert(config_prof);
assert(ptr != NULL);
/* return (arena_prof_tctx_get(ptr));
* Compute sample threshold as a geometrically distributed random
* variable with mean (2^opt_lg_prof_sample).
*
* prof_tdata->threshold = ceil(log(u) / log(1 - p)),
* where p = 1 / 2^opt_lg_prof_sample
*
* For more information on the math, see:
*
* Non-Uniform Random Variate Generation
* Luc Devroye
* Springer-Verlag, New York, 1986
* pp 500
* (http://luc.devroye.org/rnbookindex.html)
*/
prng64(r, 53, prof_tdata->prng_state,
UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
u = (double)r * (1.0/9007199254740992.0L);
prof_tdata->threshold = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
+ (uint64_t)1U;
#endif
} }
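The removed prof_sample_threshold_update() body above draws a geometrically distributed threshold with mean 2^opt_lg_prof_sample from a single uniform variate. The same arithmetic can be checked in isolation with the standalone sketch below; the names are illustrative and a plain rand() stands in for jemalloc's prng64() (link with -lm).

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Draw a geometric threshold with mean 2^lg_sample, mirroring the code above. */
static uint64_t
threshold_model(unsigned lg_sample)
{
    /* u uniform in (0, 1); p = 1 / 2^lg_sample. */
    double u = ((double)rand() + 1.0) / ((double)RAND_MAX + 2.0);
    double p = 1.0 / (double)((uint64_t)1U << lg_sample);

    /* threshold = floor(log(u) / log(1 - p)) + 1, i.e. the ceiling of the ratio. */
    return ((uint64_t)(log(u) / log(1.0 - p)) + 1);
}

int
main(void)
{
    uint64_t sum = 0;
    unsigned i, lg_sample = 19;    /* 2^19 = 512 KiB mean, for example */

    for (i = 0; i < 1000000; i++)
        sum += threshold_model(lg_sample);
    printf("observed mean: %.0f (expected ~%u)\n",
        (double)sum / 1000000, 1U << lg_sample);
    return (0);
}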
JEMALLOC_INLINE prof_ctx_t * JEMALLOC_ALWAYS_INLINE void
prof_ctx_get(const void *ptr) prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{ {
prof_ctx_t *ret;
arena_chunk_t *chunk;
cassert(config_prof); cassert(config_prof);
assert(ptr != NULL); assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); arena_prof_tctx_set(ptr, usize, tctx);
if (chunk != ptr) {
/* Region. */
ret = arena_prof_ctx_get(ptr);
} else
ret = huge_prof_ctx_get(ptr);
return (ret);
} }
JEMALLOC_INLINE void JEMALLOC_ALWAYS_INLINE void
prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx) prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
prof_tctx_t *old_tctx)
{ {
arena_chunk_t *chunk;
cassert(config_prof); cassert(config_prof);
assert(ptr != NULL); assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
if (chunk != ptr) {
/* Region. */
arena_prof_ctx_set(ptr, usize, ctx);
} else
huge_prof_ctx_set(ptr, ctx);
} }
JEMALLOC_INLINE bool JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(size_t size) prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
prof_tdata_t **tdata_out)
{ {
prof_tdata_t *prof_tdata; prof_tdata_t *tdata;
cassert(config_prof); cassert(config_prof);
/* Sampling logic is unnecessary if the interval is 1. */
assert(opt_lg_prof_sample != 0);
prof_tdata = prof_tdata_get(false); tdata = prof_tdata_get(tsd, true);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
tdata = NULL;
if (tdata_out != NULL)
*tdata_out = tdata;
if (tdata == NULL)
return (true); return (true);
/* Take care to avoid integer overflow. */ if (tdata->bytes_until_sample >= usize) {
if (size >= prof_tdata->threshold - prof_tdata->accum) { if (update)
prof_tdata->accum -= (prof_tdata->threshold - size); tdata->bytes_until_sample -= usize;
/* Compute new sample threshold. */
prof_sample_threshold_update(prof_tdata);
while (prof_tdata->accum >= prof_tdata->threshold) {
prof_tdata->accum -= prof_tdata->threshold;
prof_sample_threshold_update(prof_tdata);
}
return (false);
} else {
prof_tdata->accum += size;
return (true); return (true);
} else {
/* Compute new sample threshold. */
if (update)
prof_sample_threshold_update(tdata);
return (!tdata->active);
} }
} }
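The removed prof_sample_accum_update() compares size against threshold - accum rather than accum + size against threshold, per its "avoid integer overflow" comment; the replacement sidesteps the issue entirely by counting down bytes_until_sample. A small standalone demonstration of why the subtraction form matters (the values are contrived, not jemalloc's):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t threshold = UINT64_C(1) << 19;    /* next sample after 512 KiB */
    uint64_t accum = threshold - 1;            /* just shy of the threshold */
    uint64_t size = UINT64_MAX - 2;            /* absurdly large request */

    /* Naive form: accum + size wraps around and misses the sample. */
    printf("accum + size >= threshold -> %d\n", accum + size >= threshold);
    /* Overflow-safe form used above (valid while accum < threshold). */
    printf("size >= threshold - accum -> %d\n", size >= threshold - accum);
    return (0);
}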
JEMALLOC_INLINE void JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt) prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
{
prof_tctx_t *ret;
prof_tdata_t *tdata;
prof_bt_t bt;
assert(usize == s2u(usize));
if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
&tdata)))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
bt_init(&bt, tdata->vec);
prof_backtrace(&bt);
ret = prof_lookup(tsd, &bt);
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
{ {
cassert(config_prof); cassert(config_prof);
assert(ptr != NULL); assert(ptr != NULL);
assert(usize == isalloc(ptr, true)); assert(usize == isalloc(ptr, true));
if (opt_lg_prof_sample != 0) { if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
if (prof_sample_accum_update(usize)) { prof_malloc_sample_object(ptr, usize, tctx);
/* else
* Don't sample. For malloc()-like allocation, it is prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
* always possible to tell in advance how large an
* object's usable size will be, so there should never
* be a difference between the usize passed to
* PROF_ALLOC_PREP() and prof_malloc().
*/
assert((uintptr_t)cnt == (uintptr_t)1U);
}
}
if ((uintptr_t)cnt > (uintptr_t)1U) {
prof_ctx_set(ptr, usize, cnt->ctx);
cnt->epoch++;
/*********/
mb_write();
/*********/
cnt->cnts.curobjs++;
cnt->cnts.curbytes += usize;
if (opt_prof_accum) {
cnt->cnts.accumobjs++;
cnt->cnts.accumbytes += usize;
}
/*********/
mb_write();
/*********/
cnt->epoch++;
/*********/
mb_write();
/*********/
} else
prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
} }
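The epoch++ / mb_write() bracketing in the removed prof_malloc() is a seqlock-style protocol: the writer leaves the epoch odd while the counters are being updated, so a concurrent reader can detect an in-progress or torn snapshot and retry. The sketch below illustrates the pattern with generic stand-in code; it is not jemalloc's dump code, and the real implementation issues write barriers where noted.

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for the counter structure updated above. */
typedef struct {
    volatile uint64_t epoch;
    uint64_t curobjs;
    uint64_t curbytes;
} cnt_sketch_t;

/* Writer side: mirrors the epoch++ / update / epoch++ sequence above. */
static void
record_alloc(cnt_sketch_t *c, uint64_t usize)
{
    c->epoch++;        /* odd epoch: update in progress */
    /* (a real implementation issues a write barrier here) */
    c->curobjs++;
    c->curbytes += usize;
    c->epoch++;        /* even epoch: update complete */
}

/* Reader side: retry until a consistent, quiescent snapshot is observed. */
static void
snapshot(const cnt_sketch_t *c, uint64_t *objs, uint64_t *bytes)
{
    uint64_t e0, e1;

    do {
        e0 = c->epoch;
        *objs = c->curobjs;
        *bytes = c->curbytes;
        e1 = c->epoch;
    } while (e0 != e1 || (e0 & 1));    /* torn or in-progress: retry */
}

int
main(void)
{
    cnt_sketch_t c = { 0, 0, 0 };
    uint64_t objs, bytes;

    record_alloc(&c, 4096);
    record_alloc(&c, 512);
    snapshot(&c, &objs, &bytes);
    printf("%llu objects, %llu bytes\n",
        (unsigned long long)objs, (unsigned long long)bytes);
    return (0);
}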
JEMALLOC_INLINE void JEMALLOC_ALWAYS_INLINE void
prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt, prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
size_t old_usize, prof_ctx_t *old_ctx) bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
prof_tctx_t *old_tctx)
{ {
prof_thr_cnt_t *told_cnt; bool sampled, old_sampled;
cassert(config_prof); cassert(config_prof);
assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U); assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (ptr != NULL) { if (prof_active && !updated && ptr != NULL) {
assert(usize == isalloc(ptr, true)); assert(usize == isalloc(ptr, true));
if (opt_lg_prof_sample != 0) { if (prof_sample_accum_update(tsd, usize, true, NULL)) {
if (prof_sample_accum_update(usize)) {
/*
* Don't sample. The usize passed to
* PROF_ALLOC_PREP() was larger than what
* actually got allocated, so a backtrace was
* captured for this allocation, even though
* its actual usize was insufficient to cross
* the sample threshold.
*/
cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
}
}
}
if ((uintptr_t)old_ctx > (uintptr_t)1U) {
told_cnt = prof_lookup(old_ctx->bt);
if (told_cnt == NULL) {
/* /*
* It's too late to propagate OOM for this realloc(), * Don't sample. The usize passed to prof_alloc_prep()
* so operate directly on old_cnt->ctx->cnt_merged. * was larger than what actually got allocated, so a
* backtrace was captured for this allocation, even
* though its actual usize was insufficient to cross the
* sample threshold.
*/ */
malloc_mutex_lock(old_ctx->lock); tctx = (prof_tctx_t *)(uintptr_t)1U;
old_ctx->cnt_merged.curobjs--;
old_ctx->cnt_merged.curbytes -= old_usize;
malloc_mutex_unlock(old_ctx->lock);
told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
} }
} else
told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
if ((uintptr_t)told_cnt > (uintptr_t)1U)
told_cnt->epoch++;
if ((uintptr_t)cnt > (uintptr_t)1U) {
prof_ctx_set(ptr, usize, cnt->ctx);
cnt->epoch++;
} else if (ptr != NULL)
prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
/*********/
mb_write();
/*********/
if ((uintptr_t)told_cnt > (uintptr_t)1U) {
told_cnt->cnts.curobjs--;
told_cnt->cnts.curbytes -= old_usize;
} }
if ((uintptr_t)cnt > (uintptr_t)1U) {
cnt->cnts.curobjs++; sampled = ((uintptr_t)tctx > (uintptr_t)1U);
cnt->cnts.curbytes += usize; old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
if (opt_prof_accum) {
cnt->cnts.accumobjs++; if (unlikely(sampled))
cnt->cnts.accumbytes += usize; prof_malloc_sample_object(ptr, usize, tctx);
} else
} prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
/*********/
mb_write(); if (unlikely(old_sampled))
/*********/ prof_free_sampled_object(tsd, old_usize, old_tctx);
if ((uintptr_t)told_cnt > (uintptr_t)1U)
told_cnt->epoch++;
if ((uintptr_t)cnt > (uintptr_t)1U)
cnt->epoch++;
/*********/
mb_write(); /* Not strictly necessary. */
} }
JEMALLOC_INLINE void JEMALLOC_ALWAYS_INLINE void
prof_free(const void *ptr, size_t size) prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{ {
prof_ctx_t *ctx = prof_ctx_get(ptr); prof_tctx_t *tctx = prof_tctx_get(ptr);
cassert(config_prof); cassert(config_prof);
assert(usize == isalloc(ptr, true));
if ((uintptr_t)ctx > (uintptr_t)1) { if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_thr_cnt_t *tcnt; prof_free_sampled_object(tsd, usize, tctx);
assert(size == isalloc(ptr, true));
tcnt = prof_lookup(ctx->bt);
if (tcnt != NULL) {
tcnt->epoch++;
/*********/
mb_write();
/*********/
tcnt->cnts.curobjs--;
tcnt->cnts.curbytes -= size;
/*********/
mb_write();
/*********/
tcnt->epoch++;
/*********/
mb_write();
/*********/
} else {
/*
* OOM during free() cannot be propagated, so operate
* directly on cnt->ctx->cnt_merged.
*/
malloc_mutex_lock(ctx->lock);
ctx->cnt_merged.curobjs--;
ctx->cnt_merged.curbytes -= size;
malloc_mutex_unlock(ctx->lock);
}
}
} }
#endif #endif
......
/* /* List definitions. */
* List definitions.
*/
#define ql_head(a_type) \ #define ql_head(a_type) \
struct { \ struct { \
a_type *qlh_first; \ a_type *qlh_first; \
......
...@@ -40,8 +40,10 @@ struct { \ ...@@ -40,8 +40,10 @@ struct { \
(a_qr_b)->a_field.qre_prev = t; \ (a_qr_b)->a_field.qre_prev = t; \
} while (0) } while (0)
/* qr_meld() and qr_split() are functionally equivalent, so there's no need to /*
* have two copies of the code. */ * qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code.
*/
#define qr_split(a_qr_a, a_qr_b, a_field) \ #define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field) qr_meld((a_qr_a), (a_qr_b), a_field)
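The comment's claim that qr_meld() and qr_split() are the same operation follows from the underlying pointer swap being an involution on circular doubly linked lists: applied to elements of two different rings it joins them, applied to two elements of the same ring it cuts the ring between them. A standalone illustration using plain structs rather than the qr macros (the swap below reproduces the effect, not the exact macro statement order):

#include <stdio.h>

typedef struct node { struct node *next, *prev; const char *name; } node_t;

/* The meld/split pointer swap on two ring elements. */
static void
meld_or_split(node_t *a, node_t *b)
{
    node_t *t = a->prev;

    a->prev = b->prev;
    b->prev = t;
    a->prev->next = a;
    b->prev->next = b;
}

static void
print_ring(node_t *start)
{
    node_t *n = start;

    do {
        printf("%s ", n->name);
        n = n->next;
    } while (n != start);
    printf("\n");
}

int
main(void)
{
    /* Two singleton rings. */
    node_t a = { &a, &a, "a" }, b = { &b, &b, "b" };

    meld_or_split(&a, &b);    /* different rings: meld -> "a b" */
    print_ring(&a);
    meld_or_split(&a, &b);    /* same ring: split back into "a" and "b" */
    print_ring(&a);
    print_ring(&b);
    return (0);
}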
......