Unverified commit f63e81c2, authored by Chris Lamb, committed by GitHub

Merge branch 'unstable' into config-set-maxmemory-grammar

parents eaeba1b2 39c70e72
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct extent_node_s extent_node_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Tree of extents. */
struct extent_node_s {
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) link_szad;
/* Linkage for the address-ordered tree. */
rb_node(extent_node_t) link_ad;
/* Profile counters, used for huge objects. */
prof_ctx_t *prof_ctx;
/* Pointer to the extent that this tree node is responsible for. */
void *addr;
/* Total region size. */
size_t size;
/* True if zero-filled; used by chunk recycling code. */
bool zeroed;
};
typedef rb_tree(extent_node_t) extent_tree_t;
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
#define JEMALLOC_INTERNAL_EXTENT_DSS_H
typedef enum {
dss_prec_disabled = 0,
dss_prec_primary = 1,
dss_prec_secondary = 2,
dss_prec_limit = 3
} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"
extern const char *dss_prec_names[];
extern const char *opt_dss;
dss_prec_t extent_dss_prec_get(void);
bool extent_dss_prec_set(dss_prec_t dss_prec);
void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit);
bool extent_in_dss(void *addr);
bool extent_dss_mergeable(void *addr_a, void *addr_b);
void extent_dss_boot(void);
#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */
#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
extern size_t opt_lg_extent_max_active_fit;
extern rtree_t extents_rtree;
extern const extent_hooks_t extent_hooks_default;
extern mutex_pool_t extent_mutex_pool;
extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
extent_hooks_t *extent_hooks_get(arena_t *arena);
extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
extent_hooks_t *extent_hooks);
#ifdef JEMALLOC_JET
size_t extent_size_quantize_floor(size_t size);
size_t extent_size_quantize_ceil(size_t size);
#endif
rb_proto(, extent_avail_, extent_tree_t, extent_t)
ph_proto(, extent_heap_, extent_heap_t, extent_t)
bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
bool delay_coalesce);
extent_state_t extents_state_get(const extents_t *extents);
size_t extents_npages_get(extents_t *extents);
extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
bool *zero, bool *commit);
void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
void extents_prefork(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);
bool extent_boot(void);
#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */
#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
#define JEMALLOC_INTERNAL_EXTENT_INLINES_H
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sz.h"
static inline void
extent_lock(tsdn_t *tsdn, extent_t *extent) {
assert(extent != NULL);
mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
}
static inline void
extent_unlock(tsdn_t *tsdn, extent_t *extent) {
assert(extent != NULL);
mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
}
static inline void
extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
assert(extent1 != NULL && extent2 != NULL);
mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
(uintptr_t)extent2);
}
static inline void
extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
assert(extent1 != NULL && extent2 != NULL);
mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
(uintptr_t)extent2);
}
static inline arena_t *
extent_arena_get(const extent_t *extent) {
unsigned arena_ind = (unsigned)((extent->e_bits &
EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
/*
* The following check is omitted because we should never actually read
* a NULL arena pointer.
*/
if (false && arena_ind >= MALLOCX_ARENA_LIMIT) {
return NULL;
}
assert(arena_ind < MALLOCX_ARENA_LIMIT);
return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
}
static inline szind_t
extent_szind_get_maybe_invalid(const extent_t *extent) {
szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
EXTENT_BITS_SZIND_SHIFT);
assert(szind <= NSIZES);
return szind;
}
static inline szind_t
extent_szind_get(const extent_t *extent) {
szind_t szind = extent_szind_get_maybe_invalid(extent);
assert(szind < NSIZES); /* Never call when "invalid". */
return szind;
}
static inline size_t
extent_usize_get(const extent_t *extent) {
return sz_index2size(extent_szind_get(extent));
}
static inline size_t
extent_sn_get(const extent_t *extent) {
return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
EXTENT_BITS_SN_SHIFT);
}
static inline extent_state_t
extent_state_get(const extent_t *extent) {
return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
EXTENT_BITS_STATE_SHIFT);
}
static inline bool
extent_zeroed_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
EXTENT_BITS_ZEROED_SHIFT);
}
static inline bool
extent_committed_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
EXTENT_BITS_COMMITTED_SHIFT);
}
static inline bool
extent_dumpable_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
EXTENT_BITS_DUMPABLE_SHIFT);
}
static inline bool
extent_slab_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
EXTENT_BITS_SLAB_SHIFT);
}
static inline unsigned
extent_nfree_get(const extent_t *extent) {
assert(extent_slab_get(extent));
return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
EXTENT_BITS_NFREE_SHIFT);
}
static inline void *
extent_base_get(const extent_t *extent) {
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent_slab_get(extent));
return PAGE_ADDR2BASE(extent->e_addr);
}
static inline void *
extent_addr_get(const extent_t *extent) {
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent_slab_get(extent));
return extent->e_addr;
}
static inline size_t
extent_size_get(const extent_t *extent) {
return (extent->e_size_esn & EXTENT_SIZE_MASK);
}
static inline size_t
extent_esn_get(const extent_t *extent) {
return (extent->e_size_esn & EXTENT_ESN_MASK);
}
static inline size_t
extent_bsize_get(const extent_t *extent) {
return extent->e_bsize;
}
static inline void *
extent_before_get(const extent_t *extent) {
return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
}
static inline void *
extent_last_get(const extent_t *extent) {
return (void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent) - PAGE);
}
static inline void *
extent_past_get(const extent_t *extent) {
return (void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent));
}
static inline arena_slab_data_t *
extent_slab_data_get(extent_t *extent) {
assert(extent_slab_get(extent));
return &extent->e_slab_data;
}
static inline const arena_slab_data_t *
extent_slab_data_get_const(const extent_t *extent) {
assert(extent_slab_get(extent));
return &extent->e_slab_data;
}
static inline prof_tctx_t *
extent_prof_tctx_get(const extent_t *extent) {
return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
ATOMIC_ACQUIRE);
}
static inline void
extent_arena_set(extent_t *extent, arena_t *arena) {
unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
MALLOCX_ARENA_BITS) - 1);
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
}
static inline void
extent_addr_set(extent_t *extent, void *addr) {
extent->e_addr = addr;
}
static inline void
extent_addr_randomize(UNUSED tsdn_t *tsdn, extent_t *extent, size_t alignment) {
assert(extent_base_get(extent) == extent_addr_get(extent));
if (alignment < PAGE) {
unsigned lg_range = LG_PAGE -
lg_floor(CACHELINE_CEILING(alignment));
size_t r;
if (!tsdn_null(tsdn)) {
tsd_t *tsd = tsdn_tsd(tsdn);
r = (size_t)prng_lg_range_u64(
tsd_offset_statep_get(tsd), lg_range);
} else {
r = prng_lg_range_zu(
&extent_arena_get(extent)->offset_state,
lg_range, true);
}
uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
lg_range);
extent->e_addr = (void *)((uintptr_t)extent->e_addr +
random_offset);
assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
extent->e_addr);
}
}
static inline void
extent_size_set(extent_t *extent, size_t size) {
assert((size & ~EXTENT_SIZE_MASK) == 0);
extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
}
static inline void
extent_esn_set(extent_t *extent, size_t esn) {
extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
EXTENT_ESN_MASK);
}
static inline void
extent_bsize_set(extent_t *extent, size_t bsize) {
extent->e_bsize = bsize;
}
static inline void
extent_szind_set(extent_t *extent, szind_t szind) {
assert(szind <= NSIZES); /* NSIZES means "invalid". */
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
}
static inline void
extent_nfree_set(extent_t *extent, unsigned nfree) {
assert(extent_slab_get(extent));
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
}
static inline void
extent_nfree_inc(extent_t *extent) {
assert(extent_slab_get(extent));
extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
}
static inline void
extent_nfree_dec(extent_t *extent) {
assert(extent_slab_get(extent));
extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
}
static inline void
extent_sn_set(extent_t *extent, size_t sn) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
}
static inline void
extent_state_set(extent_t *extent, extent_state_t state) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
}
static inline void
extent_zeroed_set(extent_t *extent, bool zeroed) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
}
static inline void
extent_committed_set(extent_t *extent, bool committed) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
}
static inline void
extent_dumpable_set(extent_t *extent, bool dumpable) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
}
static inline void
extent_slab_set(extent_t *extent, bool slab) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
}
static inline void
extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
}
static inline void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
bool committed, bool dumpable) {
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
extent_arena_set(extent, arena);
extent_addr_set(extent, addr);
extent_size_set(extent, size);
extent_slab_set(extent, slab);
extent_szind_set(extent, szind);
extent_sn_set(extent, sn);
extent_state_set(extent, state);
extent_zeroed_set(extent, zeroed);
extent_committed_set(extent, committed);
extent_dumpable_set(extent, dumpable);
ql_elm_new(extent, ql_link);
if (config_prof) {
extent_prof_tctx_set(extent, NULL);
}
}
static inline void
extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
extent_arena_set(extent, NULL);
extent_addr_set(extent, addr);
extent_bsize_set(extent, bsize);
extent_slab_set(extent, false);
extent_szind_set(extent, NSIZES);
extent_sn_set(extent, sn);
extent_state_set(extent, extent_state_active);
extent_zeroed_set(extent, true);
extent_committed_set(extent, true);
extent_dumpable_set(extent, true);
}
static inline void
extent_list_init(extent_list_t *list) {
ql_new(list);
}
static inline extent_t *
extent_list_first(const extent_list_t *list) {
return ql_first(list);
}
static inline extent_t *
extent_list_last(const extent_list_t *list) {
return ql_last(list, ql_link);
}
static inline void
extent_list_append(extent_list_t *list, extent_t *extent) {
ql_tail_insert(list, extent, ql_link);
}
static inline void
extent_list_prepend(extent_list_t *list, extent_t *extent) {
ql_head_insert(list, extent, ql_link);
}
static inline void
extent_list_replace(extent_list_t *list, extent_t *to_remove,
extent_t *to_insert) {
ql_after_insert(to_remove, to_insert, ql_link);
ql_remove(list, to_remove, ql_link);
}
static inline void
extent_list_remove(extent_list_t *list, extent_t *extent) {
ql_remove(list, extent, ql_link);
}
static inline int
extent_sn_comp(const extent_t *a, const extent_t *b) {
size_t a_sn = extent_sn_get(a);
size_t b_sn = extent_sn_get(b);
return (a_sn > b_sn) - (a_sn < b_sn);
}
static inline int
extent_esn_comp(const extent_t *a, const extent_t *b) {
size_t a_esn = extent_esn_get(a);
size_t b_esn = extent_esn_get(b);
return (a_esn > b_esn) - (a_esn < b_esn);
}
static inline int
extent_ad_comp(const extent_t *a, const extent_t *b) {
uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
return (a_addr > b_addr) - (a_addr < b_addr);
}
static inline int
extent_ead_comp(const extent_t *a, const extent_t *b) {
uintptr_t a_eaddr = (uintptr_t)a;
uintptr_t b_eaddr = (uintptr_t)b;
return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
}
static inline int
extent_snad_comp(const extent_t *a, const extent_t *b) {
int ret;
ret = extent_sn_comp(a, b);
if (ret != 0) {
return ret;
}
ret = extent_ad_comp(a, b);
return ret;
}
static inline int
extent_esnead_comp(const extent_t *a, const extent_t *b) {
int ret;
ret = extent_esn_comp(a, b);
if (ret != 0) {
return ret;
}
ret = extent_ead_comp(a, b);
return ret;
}
#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
extern bool opt_retain;
void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit);
bool extent_dalloc_mmap(void *addr, size_t size);
#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */
#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/size_classes.h"
typedef enum {
extent_state_active = 0,
extent_state_dirty = 1,
extent_state_muzzy = 2,
extent_state_retained = 3
} extent_state_t;
/* Extent (span of pages). Use accessor functions for e_* fields. */
struct extent_s {
/*
* Bitfield containing several fields:
*
* a: arena_ind
* b: slab
* c: committed
* d: dumpable
* z: zeroed
* t: state
* i: szind
* f: nfree
* n: sn
*
* nnnnnnnn ... nnnnffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
*
* arena_ind: Arena from which this extent came, or all 1 bits if
* unassociated.
*
* slab: The slab flag indicates whether the extent is used for a slab
* of small regions. This helps differentiate small size classes,
* and it indicates whether interior pointers can be looked up via
* iealloc().
*
* committed: The committed flag indicates whether physical memory is
* committed to the extent, whether explicitly or implicitly
* as on a system that overcommits and satisfies physical
* memory needs on demand via soft page faults.
*
* dumpable: The dumpable flag indicates whether or not we've set the
* memory in question to be dumpable. Note that this
* interacts somewhat subtly with user-specified extent hooks,
* since we don't know if *they* are fiddling with
* dumpability (in which case, we don't want to undo whatever
* they're doing). To deal with this scenario, we:
* - Make dumpable false only for memory allocated with the
* default hooks.
* - Only allow memory to go from non-dumpable to dumpable,
* and only once.
* - Never make the OS call to allow dumping when the
* dumpable bit is already set.
* These three constraints mean that we will never
* accidentally dump user memory that the user meant to set
* nondumpable with their extent hooks.
*
*
* zeroed: The zeroed flag is used by extent recycling code to track
* whether memory is zero-filled.
*
* state: The state flag is an extent_state_t.
*
* szind: The szind flag indicates usable size class index for
* allocations residing in this extent, regardless of whether the
* extent is a slab. Extent size and usable size often differ
* even for non-slabs, either due to sz_large_pad or promotion of
* sampled small regions.
*
* nfree: Number of free regions in slab.
*
* sn: Serial number (potentially non-unique).
*
* Serial numbers may wrap around if !opt_retain, but as long as
* comparison functions fall back on address comparison for equal
* serial numbers, stable (if imperfect) ordering is maintained.
*
* Serial numbers may not be unique even in the absence of
* wrap-around, e.g. when splitting an extent and assigning the same
* serial number to both resulting adjacent extents.
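*
* (A worked example of this bit layout follows the struct definition.)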
*/
uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
#define EXTENT_BITS_ARENA_SHIFT 0
#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_WIDTH 1
#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_WIDTH 1
#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_DUMPABLE_WIDTH 1
#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)
#define EXTENT_BITS_ZEROED_WIDTH 1
#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_WIDTH 2
#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_WIDTH LG_CEIL_NSIZES
#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_WIDTH (LG_SLAB_MAXREGS + 1)
#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
/* Pointer to the extent that this structure is responsible for. */
void *e_addr;
union {
/*
* Extent size and serial number associated with the extent
* structure (different than the serial number for the extent at
* e_addr).
*
* ssssssss [...] ssssssss ssssnnnn nnnnnnnn
*/
size_t e_size_esn;
#define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
#define EXTENT_ESN_MASK ((size_t)PAGE-1)
/* Base extent size, which may not be a multiple of PAGE. */
size_t e_bsize;
};
/*
* List linkage, used by a variety of lists:
* - bin_t's slabs_full
* - extents_t's LRU
* - stashed dirty extents
* - arena's large allocations
*/
ql_elm(extent_t) ql_link;
/*
* Linkage for per size class sn/address-ordered heaps, and
* for extent_avail
*/
phn(extent_t) ph_link;
union {
/* Small region slab metadata. */
arena_slab_data_t e_slab_data;
/*
* Profile counters, used for large objects. Points to a
* prof_tctx_t.
*/
atomic_p_t e_prof_tctx;
};
};
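/*
 * Worked example (illustrative, not part of the original header). Assuming
 * the common configuration MALLOCX_ARENA_BITS == 12, the shifts defined
 * above resolve to:
 *
 *   arena:     shift  0, width 12
 *   slab:      shift 12, width  1
 *   committed: shift 13, width  1
 *   dumpable:  shift 14, width  1
 *   zeroed:    shift 15, width  1
 *   state:     shift 16, width  2
 *   szind:     shift 18, width LG_CEIL_NSIZES
 *   nfree:     shift 18 + LG_CEIL_NSIZES, width LG_SLAB_MAXREGS + 1
 *   sn:        all remaining high bits
 *
 * matching the "nnnn ... zdcbaaaa aaaaaaaa" diagram above. For instance,
 * MASK(2, 16) == (((uint64_t)1 << 2) - 1) << 16 == 0x30000, so
 * extent_state_get() reduces to (e_bits & 0x30000) >> 16.
 */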
typedef ql_head(extent_t) extent_list_t;
typedef ph(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;
/* Quantized collection of extents, with built-in LRU queue. */
struct extents_s {
malloc_mutex_t mtx;
/*
* Quantized per size class heaps of extents.
*
* Synchronization: mtx.
*/
extent_heap_t heaps[NPSIZES+1];
/*
* Bitmap for which set bits correspond to non-empty heaps.
*
* Synchronization: mtx.
*/
bitmap_t bitmap[BITMAP_GROUPS(NPSIZES+1)];
/*
* LRU of all extents in heaps.
*
* Synchronization: mtx.
*/
extent_list_t lru;
/*
* Page sum for all extents in heaps.
*
* The synchronization here is a little tricky. Modifications to npages
* must hold mtx, but reads need not (though, a reader who sees npages
* without holding the mutex can't assume anything about the rest of the
* state of the extents_t).
*/
atomic_zu_t npages;
/* All stored extents must be in the same state. */
extent_state_t state;
/*
* If true, delay coalescing until eviction; otherwise coalesce during
* deallocation.
*/
bool delay_coalesce;
};
#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
#define JEMALLOC_INTERNAL_EXTENT_TYPES_H
typedef struct extent_s extent_t;
typedef struct extents_s extents_t;
#define EXTENT_HOOKS_INITIALIZER NULL
#define EXTENT_GROW_MAX_PIND (NPSIZES - 1)
/*
* When reusing (and splitting) an active extent,
* (1U << opt_lg_extent_max_active_fit) is the max ratio between the size of
* the active extent and the new extent.
*/
#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
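/*
 * Sketch of the implied fit check (an assumption for illustration, not the
 * library's code): a candidate active extent of size candidate_size is only
 * reused for a request of size request_size when
 *
 *   (candidate_size >> opt_lg_extent_max_active_fit) <= request_size
 *
 * so with the default of 6 the active extent may be at most 64 times larger
 * than the request.
 */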
#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
#ifndef JEMALLOC_INTERNAL_HASH_H
#define JEMALLOC_INTERNAL_HASH_H
#include "jemalloc/internal/assert.h"
/*
* The following hash function is based on MurmurHash3, placed into the public
* domain by Austin Appleby. See https://github.com/aappleby/smhasher for
* details.
*/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
void hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2]);
void hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2]);
void hash(const void *key, size_t len, const uint32_t seed,
size_t r_hash[2]);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
/******************************************************************************/
/* Internal implementation. */
static inline uint32_t
hash_rotl_32(uint32_t x, int8_t r) {
return ((x << r) | (x >> (32 - r)));
}
static inline uint64_t
hash_rotl_64(uint64_t x, int8_t r) {
return ((x << r) | (x >> (64 - r)));
}
static inline uint32_t
hash_get_block_32(const uint32_t *p, int i) {
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
uint32_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
return ret;
}
return p[i];
}
static inline uint64_t
hash_get_block_64(const uint64_t *p, int i) {
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
uint64_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
return ret;
}
return p[i];
}
static inline uint32_t
hash_fmix_32(uint32_t h) {
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
static inline uint64_t
hash_fmix_64(uint64_t k) {
k ^= k >> 33;
k *= KQU(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= KQU(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return k;
}
static inline uint32_t
hash_x86_32(const void *key, int len, uint32_t seed) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 4;
@@ -132,13 +116,12 @@ hash_x86_32(const void *key, int len, uint32_t seed)
h1 = hash_fmix_32(h1);
return h1;
}
UNUSED static inline void
hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2]) {
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
@@ -237,18 +220,17 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
r_out[1] = (((uint64_t) h4) << 32) | h3;
}
UNUSED static inline void
hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2]) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
uint64_t h1 = seed;
uint64_t h2 = seed;
const uint64_t c1 = KQU(0x87c37b91114253d5);
const uint64_t c2 = KQU(0x4cf5ad432745937f);
/* body */
{
@@ -278,22 +260,22 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t k2 = 0;
switch (len & 15) {
case 15: k2 ^= ((uint64_t)(tail[14])) << 48; /* falls through */
case 14: k2 ^= ((uint64_t)(tail[13])) << 40; /* falls through */
case 13: k2 ^= ((uint64_t)(tail[12])) << 32; /* falls through */
case 12: k2 ^= ((uint64_t)(tail[11])) << 24; /* falls through */
case 11: k2 ^= ((uint64_t)(tail[10])) << 16; /* falls through */
case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; /* falls through */
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
/* falls through */
case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; /* falls through */
case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; /* falls through */
case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; /* falls through */
case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; /* falls through */
case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; /* falls through */
case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; /* falls through */
case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; /* falls through */
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
}
@@ -317,19 +299,20 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
/******************************************************************************/
/* API. */
static inline void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
#else
{
uint64_t hashes[2];
hash_x86_128(key, (int)len, seed, hashes);
r_hash[0] = (size_t)hashes[0];
r_hash[1] = (size_t)hashes[1];
}
#endif
}
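/*
 * Usage sketch (illustrative, not part of the original header); the seed and
 * the way the two output words are combined are arbitrary choices made for
 * this example.
 */
static inline size_t
hash_example(const void *key, size_t len) {
    size_t h[2];

    /* hash() writes two size_t words derived from the 128-bit MurmurHash3 state. */
    hash(key, len, 0x9e3779b9U, h);
    return h[0] ^ h[1];
}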
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_HASH_H */
#ifndef JEMALLOC_INTERNAL_HOOKS_H
#define JEMALLOC_INTERNAL_HOOKS_H
extern JEMALLOC_EXPORT void (*hooks_arena_new_hook)();
extern JEMALLOC_EXPORT void (*hooks_libc_hook)();
#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
#define open JEMALLOC_HOOK(open, hooks_libc_hook)
#define read JEMALLOC_HOOK(read, hooks_libc_hook)
#define write JEMALLOC_HOOK(write, hooks_libc_hook)
#define readlink JEMALLOC_HOOK(readlink, hooks_libc_hook)
#define close JEMALLOC_HOOK(close, hooks_libc_hook)
#define creat JEMALLOC_HOOK(creat, hooks_libc_hook)
#define secure_getenv JEMALLOC_HOOK(secure_getenv, hooks_libc_hook)
/* Note that this is undef'd and re-define'd in src/prof.c. */
#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)
#endif /* JEMALLOC_INTERNAL_HOOKS_H */
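/*
 * Illustrative expansion (not part of the original header): given the macros
 * above, a call such as
 *
 *     fd = open("/tmp/example", O_RDONLY);
 *
 * preprocesses to
 *
 *     fd = ((void)(hooks_libc_hook != NULL && (hooks_libc_hook(), 0)),
 *         open)("/tmp/example", O_RDONLY);
 *
 * so an installed hook runs immediately before the real libc call, and the
 * call itself is otherwise unchanged. A test harness can point
 * hooks_libc_hook at a function of its own to observe these libc entry
 * points.
 */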
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/* Huge allocation statistics. */
extern uint64_t huge_nmalloc;
extern uint64_t huge_ndalloc;
extern size_t huge_allocated;
/* Protects chunk-related data structures. */
extern malloc_mutex_t huge_mtx;
void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
void *huge_palloc(size_t size, size_t alignment, bool zero,
dss_prec_t dss_prec);
bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
dss_prec_t huge_dss_prec_get(arena_t *arena);
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
bool huge_boot(void);
void huge_prefork(void);
void huge_postfork_parent(void);
void huge_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# define ENOENT ERROR_PATH_NOT_FOUND
# define EINVAL ERROR_BAD_ARGUMENTS
# define EAGAIN ERROR_OUTOFMEMORY
# define EPERM ERROR_WRITE_FAULT
# define EFAULT ERROR_INVALID_ADDRESS
# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
# undef ERANGE
# define ERANGE ERROR_INVALID_DATA
#else
# include <sys/param.h>
# include <sys/mman.h>
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# include <sys/uio.h>
# include <pthread.h>
# include <errno.h>
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
# pragma warning(disable: 4996)
#else
# include <unistd.h>
#endif
#include <fcntl.h>
#include "jemalloc_internal_defs.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif
#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
# define JEMALLOC_NO_RENAME
# include "../jemalloc@install_suffix@.h"
# undef JEMALLOC_NO_RENAME
#else
# define JEMALLOC_N(n) @private_namespace@##n
# include "../jemalloc@install_suffix@.h"
#endif
#include "jemalloc/internal/private_namespace.h"
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
true
#else
false
#endif
;
static const bool config_dss =
#ifdef JEMALLOC_DSS
true
#else
false
#endif
;
static const bool config_fill =
#ifdef JEMALLOC_FILL
true
#else
false
#endif
;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
true
#else
false
#endif
;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
#else
false
#endif
;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
true
#else
false
#endif
;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
true
#else
false
#endif
;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
true
#else
false
#endif
;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
true
#else
false
#endif
;
static const bool config_stats =
#ifdef JEMALLOC_STATS
true
#else
false
#endif
;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
true
#else
false
#endif
;
static const bool config_tls =
#ifdef JEMALLOC_TLS
true
#else
false
#endif
;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
true
#else
false
#endif
;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
true
#else
false
#endif
;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
true
#else
false
#endif
;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
true
#else
false
#endif
;
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif
#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif
#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
/*
* jemalloc can conceptually be broken into components (arena, tcache, etc.),
* but there are circular dependencies that cannot be broken without
* substantial performance degradation. In order to reduce the effect on
* visual code flow, read the header files in multiple passes, with one of the
* following cpp variables defined during each pass:
*
* JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
* types.
* JEMALLOC_H_STRUCTS : Data structures.
* JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
* JEMALLOC_H_INLINES : Inline functions.
*/
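/*
 * Assumed shape of a component header under this scheme (illustrative only;
 * the extent and huge headers earlier in this file are real instances):
 *
 *     #ifdef JEMALLOC_H_TYPES
 *     typedef struct foo_s foo_t;               // pass 1: types
 *     #endif
 *     #ifdef JEMALLOC_H_STRUCTS
 *     struct foo_s { int bar; };                // pass 2: structs
 *     #endif
 *     #ifdef JEMALLOC_H_EXTERNS
 *     void foo_boot(void);                      // pass 3: prototypes
 *     #endif
 *     #ifdef JEMALLOC_H_INLINES
 *     JEMALLOC_INLINE int foo_get(void);        // pass 4: inlines
 *     #endif
 */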
/******************************************************************************/
#define JEMALLOC_H_TYPES
#include "jemalloc/internal/jemalloc_internal_macros.h"
#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)
/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)
/*
* Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
# define LG_QUANTUM 4
# endif
# ifdef __ia64__
# define LG_QUANTUM 4
# endif
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
# ifdef __sparc64__
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
# define LG_QUANTUM 4
# endif
# ifdef __arm__
# define LG_QUANTUM 3
# endif
# ifdef __aarch64__
# define LG_QUANTUM 4
# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __mips__
# define LG_QUANTUM 3
# endif
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
# ifdef __SH4__
# define LG_QUANTUM 4
# endif
# ifdef __tile__
# define LG_QUANTUM 4
# endif
# ifndef LG_QUANTUM
# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
# endif
#endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)
/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)
/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)
/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
* In addition, this controls the spacing of cacheline-spaced size classes.
*
* CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
* only handle raw constants.
*/
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)
/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define LG_PAGE STATIC_PAGE_SHIFT
#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
((void *)((uintptr_t)(a) & (-(alignment))))
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
((size_t)((uintptr_t)(a) & (alignment - 1)))
/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + (alignment - 1)) & (-(alignment)))
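/*
 * Worked examples (illustrative, assuming LG_QUANTUM == 4 and 4 KiB pages):
 *
 *     QUANTUM_CEILING(13)                         == 16
 *     PAGE_CEILING(5000)                          == 8192
 *     ALIGNMENT_ADDR2BASE((void *)0x1237, 0x10)   == (void *)0x1230
 *     ALIGNMENT_ADDR2OFFSET((void *)0x1237, 0x10) == 0x7
 *     ALIGNMENT_CEILING(100, 64)                  == 128
 */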
/* Declare a variable length array */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
# include <malloc.h>
# define alloca _alloca
# else
# ifdef JEMALLOC_HAS_ALLOCA_H
# include <alloca.h>
# else
# include <stdlib.h>
# endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
type *name = alloca(sizeof(type) * count)
#else
# define VARIABLE_ARRAY(type, name, count) type name[count]
#endif
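/*
 * Usage sketch (hypothetical caller, not from the original source): a
 * function-scope scratch buffer whose length is only known at run time.
 */
static inline size_t
variable_array_example(const size_t *vals, unsigned nvals) {
    VARIABLE_ARRAY(size_t, scratch, nvals);
    size_t sum = 0;
    unsigned i;

    for (i = 0; i < nvals; i++) {
        scratch[i] = vals[i];   /* Stack-allocated via alloca() or a C99 VLA. */
        sum += scratch[i];
    }
    return sum;
}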
#ifdef JEMALLOC_VALGRIND
/*
* The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
* so that when Valgrind reports errors, there are no extra stack frames
* in the backtraces.
*
* The size that is reported to valgrind must be consistent through a chain of
* malloc..realloc..realloc calls. Request size isn't recorded anywhere in
* jemalloc, so it is critical that all callers of these macros provide usize
* rather than request size. As a result, buffer overflow detection is
* technically weakened for the standard API, though it is generally accepted
* practice to consider any extra bytes reported by malloc_usable_size() as
* usable space.
*/
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
if (config_valgrind && opt_valgrind && cond) \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
old_rzsize, zero) do { \
if (config_valgrind && opt_valgrind) { \
size_t rzsize = p2rz(ptr); \
\
if (ptr == old_ptr) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
usize, rzsize); \
if (zero && old_usize < usize) { \
VALGRIND_MAKE_MEM_DEFINED( \
(void *)((uintptr_t)ptr + \
old_usize), usize - old_usize); \
} \
} else { \
if (old_ptr != NULL) { \
VALGRIND_FREELIKE_BLOCK(old_ptr, \
old_rzsize); \
} \
if (ptr != NULL) { \
size_t copy_size = (old_usize < usize) \
? old_usize : usize; \
size_t tail_size = usize - copy_size; \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
rzsize, false); \
if (copy_size > 0) { \
VALGRIND_MAKE_MEM_DEFINED(ptr, \
copy_size); \
} \
if (zero && tail_size > 0) { \
VALGRIND_MAKE_MEM_DEFINED( \
(void *)((uintptr_t)ptr + \
copy_size), tail_size); \
} \
} \
} \
} \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
if (config_valgrind && opt_valgrind) \
VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
do {} while (0)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
do {} while (0)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
old_rzsize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
typedef struct {
uint64_t allocated;
uint64_t deallocated;
} thread_allocated_t;
/*
* The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
* argument.
*/
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0})
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
extern bool opt_junk;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_valgrind;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;
/* Number of CPUs. */
extern unsigned ncpus;
/* Protects arenas initialization (arenas, arenas_total). */
extern malloc_mutex_t arenas_lock;
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
*
* arenas[0..narenas_auto) are used for automatic multiplexing of threads and
* arenas. arenas[narenas_auto..narenas_total) are only used if the application
* takes some action to create them and allocate from them.
*/
extern arena_t **arenas;
extern unsigned narenas_total;
extern unsigned narenas_auto; /* Read-only after initialization. */
arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg);
arena_t *choose_arena_hard(void);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
unsigned narenas_total_get(void);
arena_t *choose_arena(arena_t *arena);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
* Map of pthread_self() --> arenas[???], used for selecting an arena to use
* for allocations.
*/
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
arenas_cleanup)
/*
* Compute usable size that would result from allocating an object with the
* specified size.
*/
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{
if (size <= SMALL_MAXCLASS)
return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
if (size <= arena_maxclass)
return (PAGE_CEILING(size));
return (CHUNK_CEILING(size));
}
/*
* Compute usable size that would result from allocating an object with the
* specified size and alignment.
*/
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
size_t usize;
assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
/*
* Round size up to the nearest multiple of alignment.
*
* This done, we can take advantage of the fact that for each small
* size class, every object is aligned at the smallest power of two
* that is non-zero in the base two representation of the size. For
* example:
*
* Size | Base 2 | Minimum alignment
* -----+----------+------------------
* 96 | 1100000 | 32
* 144 | 10100000 | 32
* 192 | 11000000 | 64
*/
usize = ALIGNMENT_CEILING(size, alignment);
/*
* (usize < size) protects against the combination of maximal
* alignment and size greater than maximal alignment.
*/
if (usize < size) {
/* size_t overflow. */
return (0);
}
if (usize <= arena_maxclass && alignment <= PAGE) {
if (usize <= SMALL_MAXCLASS)
return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
return (PAGE_CEILING(usize));
} else {
size_t run_size;
/*
* We can't achieve subpage alignment, so round up alignment
* permanently; it makes later calculations simpler.
*/
alignment = PAGE_CEILING(alignment);
usize = PAGE_CEILING(size);
/*
* (usize < size) protects against very large sizes within
* PAGE of SIZE_T_MAX.
*
* (usize + alignment < usize) protects against the
* combination of maximal alignment and usize large enough
* to cause overflow. This is similar to the first overflow
* check above, but it needs to be repeated due to the new
* usize value, which may now be *equal* to maximal
* alignment, whereas before we only detected overflow if the
* original size was *greater* than maximal alignment.
*/
if (usize < size || usize + alignment < usize) {
/* size_t overflow. */
return (0);
}
/*
* Calculate the size of the over-size run that arena_palloc()
* would need to allocate in order to guarantee the alignment.
* If the run wouldn't fit within a chunk, round up to a huge
* allocation size.
*/
run_size = usize + alignment - PAGE;
if (run_size <= arena_maxclass)
return (PAGE_CEILING(usize));
return (CHUNK_CEILING(usize));
}
}
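/*
 * Illustrative values (an assumption, for the common configuration of
 * 16-byte quantum-spaced small classes and 4 KiB pages):
 *
 *     s2u(100)         == 112   (smallest small class holding 100 bytes)
 *     sa2u(100, 64)    == 128   (rounded so the class's natural alignment
 *                                is at least 64)
 *     sa2u(100, PAGE)  == PAGE  (page-aligned requests round to whole pages)
 */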
JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
unsigned narenas;
malloc_mutex_lock(&arenas_lock);
narenas = narenas_total;
malloc_mutex_unlock(&arenas_lock);
return (narenas);
}
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
arena_t *ret;
if (arena != NULL)
return (arena);
if ((ret = *arenas_tsd_get()) == NULL) {
ret = choose_arena_hard();
assert(ret != NULL);
}
return (ret);
}
#endif
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
* Include arena.h twice in order to resolve circular dependencies with
* tcache.h.
*/
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
void *imalloct(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
void *icalloct(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idalloct(void *ptr, bool try_tcache);
void idalloc(void *ptr);
void iqalloct(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena);
void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero);
bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imalloct(size_t size, bool try_tcache, arena_t *arena)
{
assert(size != 0);
if (size <= arena_maxclass)
return (arena_malloc(arena, size, false, try_tcache));
else
return (huge_malloc(size, false, huge_dss_prec_get(arena)));
}
JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{
return (imalloct(size, true, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
icalloct(size_t size, bool try_tcache, arena_t *arena)
{
if (size <= arena_maxclass)
return (arena_malloc(arena, size, true, try_tcache));
else
return (huge_malloc(size, true, huge_dss_prec_get(arena)));
}
JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{
return (icalloct(size, true, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{
void *ret;
assert(usize != 0);
assert(usize == sa2u(usize, alignment));
if (usize <= arena_maxclass && alignment <= PAGE)
ret = arena_malloc(arena, usize, zero, try_tcache);
else {
if (usize <= arena_maxclass) {
ret = arena_palloc(choose_arena(arena), usize,
alignment, zero);
} else if (alignment <= chunksize)
ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
else
ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena));
}
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
return (ipalloct(usize, alignment, zero, true, NULL));
}
/*
* Typical usage:
* void *ptr = [...]
* size_t sz = isalloc(ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || demote == false);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
ret = arena_salloc(ptr, demote);
else
ret = huge_salloc(ptr);
return (ret);
}
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
return (0);
return (isalloc(ptr, demote));
}
JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
size_t ret;
if (usize <= SMALL_MAXCLASS) {
size_t binind = SMALL_SIZE2BIN(usize);
ret = arena_bin_info[binind].redzone_size;
} else
ret = 0;
return (ret);
}
JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
size_t usize = isalloc(ptr, false);
return (u2rz(usize));
}
JEMALLOC_ALWAYS_INLINE void
idalloct(void *ptr, bool try_tcache)
{
arena_chunk_t *chunk;
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
else
huge_dalloc(ptr, true);
}
JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{
idalloct(ptr, true);
}
JEMALLOC_ALWAYS_INLINE void
iqalloct(void *ptr, bool try_tcache)
{
if (config_fill && opt_quarantine)
quarantine(ptr);
else
idalloct(ptr, try_tcache);
}
JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{
iqalloct(ptr, true);
}
JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena)
{
void *p;
size_t usize, copysize;
usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
if (p == NULL) {
if (extra == 0)
return (NULL);
/* Try again, without extra this time. */
usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
if (p == NULL)
return (NULL);
}
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
iqalloct(ptr, try_tcache_dalloc);
return (p);
}
JEMALLOC_ALWAYS_INLINE void *
iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
size_t oldsize;
assert(ptr != NULL);
assert(size != 0);
oldsize = isalloc(ptr, config_prof);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
return (iralloct_realign(ptr, oldsize, size, extra, alignment,
zero, try_tcache_alloc, try_tcache_dalloc, arena));
}
if (size + extra <= arena_maxclass) {
return (arena_ralloc(arena, ptr, oldsize, size, extra,
alignment, zero, try_tcache_alloc,
try_tcache_dalloc));
} else {
return (huge_ralloc(ptr, oldsize, size, extra,
alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
}
}
JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
}
JEMALLOC_ALWAYS_INLINE bool
ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
size_t oldsize;
assert(ptr != NULL);
assert(size != 0);
oldsize = isalloc(ptr, config_prof);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/* Existing object alignment is inadequate. */
return (true);
}
if (size <= arena_maxclass)
return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
else
return (huge_ralloc_no_move(ptr, oldsize, size, extra));
}
malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */
#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
# ifdef _WIN64
# if LG_VADDR <= 32
# error Generate the headers using x64 vcargs
# endif
# else
# if LG_VADDR > 32
# undef LG_VADDR
# define LG_VADDR 32
# endif
# endif
#else
# include <sys/param.h>
# include <sys/mman.h>
# if !defined(__pnacl__) && !defined(__native_client__)
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# if defined(SYS_open) && defined(__aarch64__)
/* Android headers may define SYS_open to __NR_open even though
* __NR_open may not exist on AArch64 (superseded by __NR_openat). */
# undef SYS_open
# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
# include <signal.h>
# ifdef JEMALLOC_OS_UNFAIR_LOCK
# include <os/lock.h>
# endif
# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
# include <sched.h>
# endif
# include <errno.h>
# include <sys/time.h>
# include <time.h>
# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# include <mach/mach_time.h>
# endif
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#ifndef SSIZE_MAX
# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
# ifdef JEMALLOC_HAS_RESTRICT
# define restrict __restrict
# endif
/* Disable warnings about deprecated system functions. */
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
isblank(int c) {
return (c == '\t' || c == ' ');
}
#endif
#else
# include <unistd.h>
#endif
#include <fcntl.h>
#endif /* JEMALLOC_INTERNAL_H */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously, including this one.
*/
#undef JEMALLOC_PREFIX
#undef JEMALLOC_CPREFIX
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
#undef JEMALLOC_OVERRIDE___LIBC_CALLOC
#undef JEMALLOC_OVERRIDE___LIBC_FREE
#undef JEMALLOC_OVERRIDE___LIBC_MALLOC
#undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#undef JEMALLOC_OVERRIDE___LIBC_REALLOC
#undef JEMALLOC_OVERRIDE___LIBC_VALLOC
#undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#undef JEMALLOC_PRIVATE_NAMESPACE
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#undef CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#undef HAVE_CPU_SPINWAIT
/*
* Number of significant bits in virtual addresses. This may be less than the
* total number of bits in a pointer, e.g. on x64, for which the uppermost 16
* bits are the same as bit 47.
*/
#undef LG_VADDR
/* Defined if C11 atomics are available. */
#undef JEMALLOC_C11_ATOMICS
/* Defined if GCC __atomic atomics are available. */
#undef JEMALLOC_GCC_ATOMIC_ATOMICS
/* Defined if GCC __sync atomics are available. */
#undef JEMALLOC_GCC_SYNC_ATOMICS
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
#undef JEMALLOC_HAVE_BUILTIN_CLZ
/*
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
*/
#undef JEMALLOC_OS_UNFAIR_LOCK
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
#undef JEMALLOC_OSSPIN
/* Defined if syscall(2) is usable. */
#undef JEMALLOC_USE_SYSCALL
/*
* Defined if secure_getenv(3) is available.
*/
#undef JEMALLOC_HAVE_SECURE_GETENV
/*
* Defined if issetugid(2) is available.
*/
#undef JEMALLOC_HAVE_ISSETUGID
/* Defined if pthread_atfork(3) is available. */
#undef JEMALLOC_HAVE_PTHREAD_ATFORK
/* Defined if pthread_setname_np(3) is available. */
#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
/*
* Defined if mach_absolute_time() is available.
*/
#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
#undef JEMALLOC_MUTEX_INIT_CB
/* Defined if sbrk() is supported. */
#undef JEMALLOC_HAVE_SBRK
/* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL
/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
#undef JEMALLOC_CC_SILENCE
/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
#undef JEMALLOC_CODE_COVERAGE
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
#undef JEMALLOC_DEBUG
#undef JEMALLOC_PROF_GCC
/*
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
* This makes it possible to allocate/deallocate objects without any locking
* when the cache is in the steady state.
*/
#undef JEMALLOC_TCACHE
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
* segment (DSS).
*/
#undef JEMALLOC_DSS
/* Support memory filling (junk/zero). */
#undef JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
#undef JEMALLOC_UTRACE
/* Support Valgrind. */
#undef JEMALLOC_VALGRIND
/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC
/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#undef LG_QUANTUM
/* One page is 2^LG_PAGE bytes. */
#undef LG_PAGE
/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
* explicit huge page support are separately configured.
*/
#undef LG_HUGEPAGE
/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
* This is the normal order of things for mmap()/munmap(), but on Windows
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
* mappings do *not* coalesce/fragment.
*/
#undef JEMALLOC_MAPS_COALESCE
/*
* If defined, retain memory for later reuse by default rather than using e.g.
* munmap() to unmap freed extents. This is enabled on 64-bit Linux because
* common sequences of mmap()/munmap() calls will cause virtual memory map
* holes.
*/
#undef JEMALLOC_RETAIN
/* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS
/*
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
* Don't use this directly; instead use unreachable() from util.h
*/
#undef JEMALLOC_INTERNAL_UNREACHABLE
/*
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
* use ffs_*() from util.h.
*/
#undef JEMALLOC_INTERNAL_FFSLL
#undef JEMALLOC_INTERNAL_FFSL
#undef JEMALLOC_INTERNAL_FFS
/*
* If defined, explicitly attempt to more uniformly distribute large allocation
* pointer alignments across all cache indices.
*/
#undef JEMALLOC_CACHE_OBLIVIOUS
/*
* If defined, enable logging facilities. We make this a configure option to
* avoid taking extra branches everywhere.
*/
#undef JEMALLOC_LOG
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
#undef JEMALLOC_ZONE_VERSION
/*
* Methods for determining whether the OS overcommits.
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
* /proc/sys/vm/overcommit_memory file.
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
*/
#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/* Defined if madvise(2) is available. */
#undef JEMALLOC_HAVE_MADVISE
/*
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
* arguments to madvise(2).
*/
#undef JEMALLOC_HAVE_MADVISE_HUGE
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
* will be discarded rather than swapped out.
* madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
* defined, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched;
* otherwise this behaves similarly to
* MADV_FREE, though typically with higher
* system overhead.
*/
#undef JEMALLOC_PURGE_MADVISE_FREE
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
#undef JEMALLOC_DEFINE_MADVISE_FREE
/*
* Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
*/
#undef JEMALLOC_MADVISE_DONTDUMP
/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
#undef JEMALLOC_THP
/* Define if operating system has alloca.h header. */
#undef JEMALLOC_HAS_ALLOCA_H
/* C99 restrict keyword supported. */
#undef JEMALLOC_HAS_RESTRICT
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#undef LG_SIZEOF_LONG_LONG
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
#undef JEMALLOC_GLIBC_MALLOC_HOOK
/* glibc memalign hook. */
#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
/* pthread support */
#undef JEMALLOC_HAVE_PTHREAD
/* dlsym() support */
#undef JEMALLOC_HAVE_DLSYM
/* Adaptive mutex support in pthreads. */
#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
/* GNU specific sched_getcpu support */
#undef JEMALLOC_HAVE_SCHED_GETCPU
/* GNU specific sched_setaffinity support */
#undef JEMALLOC_HAVE_SCHED_SETAFFINITY
/*
* If defined, all the features necessary for background threads are present.
*/
#undef JEMALLOC_BACKGROUND_THREAD
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
*/
#undef JEMALLOC_EXPORT
/* config.malloc_conf options string. */
#undef JEMALLOC_CONFIG_MALLOC_CONF
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#undef JEMALLOC_IS_MALLOC
/*
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
*/
#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
#ifndef JEMALLOC_INTERNAL_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTERNS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/tsd_types.h"
/* TSD checks this to set thread local slow state accordingly. */
extern bool malloc_slow;
/* Run-time options. */
extern bool opt_abort;
extern bool opt_abort_conf;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
extern unsigned opt_narenas;
/* Number of CPUs. */
extern unsigned ncpus;
/* Number of arenas used for automatic multiplexing of threads and arenas. */
extern unsigned narenas_auto;
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
*/
extern atomic_p_t arenas[];
void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
void arena_set(unsigned ind, arena_t *arena);
unsigned narenas_total_get(void);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_tdata_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
bool malloc_initialized(void);
#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
#ifndef JEMALLOC_INTERNAL_INCLUDES_H
#define JEMALLOC_INTERNAL_INCLUDES_H
/*
* jemalloc can conceptually be broken into components (arena, tcache, etc.),
* but there are circular dependencies that cannot be broken without
* substantial performance degradation.
*
* Historically, we dealt with this by splitting each header into four sections (types,
* structs, externs, and inlines), and included each header file multiple times
* in this file, picking out the portion we want on each pass using the
* following #defines:
* JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
* types.
* JEMALLOC_H_STRUCTS : Data structures.
* JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
* JEMALLOC_H_INLINES : Inline functions.
*
* We're moving toward a world in which the dependencies are explicit; each file
* will #include the headers it depends on (rather than relying on them being
* implicitly available via this file including every header file in the
* project).
*
* We're now in an intermediate state: we've broken up the header files to avoid
* having to include each one multiple times, but have not yet moved the
* dependency information into the header files (i.e. we still rely on the
* ordering in this file to ensure all a header's dependencies are available in
* its translation unit). Each component is now broken up into multiple header
* files, corresponding to the sections above (e.g. instead of "foo.h", we now
* have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h").
*
* Those files which have been converted to explicitly include their
* inter-component dependencies are now in the initial HERMETIC HEADERS
* section. All headers may still rely on jemalloc_preamble.h (which, by fiat,
* must be included first in every translation unit) for system headers and
* global jemalloc definitions, however.
*/
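/*
* Illustrative sketch (assumed usage, not part of this header) of the
* historical multi-pass pattern described above: the aggregating header
* defined one section macro at a time and re-included each component header,
* e.g.
*
*   #define JEMALLOC_H_TYPES
*   #include "jemalloc/internal/foo.h"   (only the types section expands)
*   #undef JEMALLOC_H_TYPES
*   #define JEMALLOC_H_STRUCTS
*   #include "jemalloc/internal/foo.h"   (only the structs section expands)
*   #undef JEMALLOC_H_STRUCTS
*
* where "foo.h" is a placeholder name.
*/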
/******************************************************************************/
/* TYPES */
/******************************************************************************/
#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/base_types.h"
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/prof_types.h"
/******************************************************************************/
/* STRUCTS */
/******************************************************************************/
#include "jemalloc/internal/arena_structs_a.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/base_structs.h"
#include "jemalloc/internal/prof_structs.h"
#include "jemalloc/internal/arena_structs_b.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/background_thread_structs.h"
/******************************************************************************/
/* EXTERNS */
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/extent_externs.h"
#include "jemalloc/internal/base_externs.h"
#include "jemalloc/internal/arena_externs.h"
#include "jemalloc/internal/large_externs.h"
#include "jemalloc/internal/tcache_externs.h"
#include "jemalloc/internal/prof_externs.h"
#include "jemalloc/internal/background_thread_externs.h"
/******************************************************************************/
/* INLINES */
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
#include "jemalloc/internal/base_inlines.h"
/*
* Include portions of arena code interleaved with tcache code in order to
* resolve circular dependencies.
*/
#include "jemalloc/internal/prof_inlines_a.h"
#include "jemalloc/internal/arena_inlines_a.h"
#include "jemalloc/internal/extent_inlines.h"
#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
#include "jemalloc/internal/tcache_inlines.h"
#include "jemalloc/internal/arena_inlines_b.h"
#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
#include "jemalloc/internal/prof_inlines_b.h"
#include "jemalloc/internal/background_thread_inlines.h"
#endif /* JEMALLOC_INTERNAL_INCLUDES_H */
#ifndef JEMALLOC_INTERNAL_INLINES_A_H
#define JEMALLOC_INTERNAL_INLINES_A_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/ticker.h"
JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
malloc_getcpu(void) {
assert(have_percpu_arena);
#if defined(JEMALLOC_HAVE_SCHED_GETCPU)
return (malloc_cpuid_t)sched_getcpu();
#else
not_reached();
return -1;
#endif
}
/* Return the chosen arena index based on current cpu. */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_choose(void) {
assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));
malloc_cpuid_t cpuid = malloc_getcpu();
assert(cpuid >= 0);
unsigned arena_ind;
if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
2)) {
arena_ind = cpuid;
} else {
assert(opt_percpu_arena == per_phycpu_arena);
/* Hyper threads on the same physical CPU share arena. */
arena_ind = cpuid - ncpus / 2;
}
return arena_ind;
}
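/*
* Worked example (illustrative only, assuming the common enumeration in which
* hyperthread siblings are cpu i and cpu i + ncpus/2): with ncpus == 8 and
* opt_percpu_arena == per_phycpu_arena, cpu 2 maps to arena 2 directly, while
* cpu 6 maps to arena 6 - 8/2 == 2, so both siblings share arena 2.
*/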
/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_ind_limit(percpu_arena_mode_t mode) {
assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
if (mode == per_phycpu_arena && ncpus > 1) {
if (ncpus % 2) {
/* This likely means a misconfig. */
return ncpus / 2 + 1;
}
return ncpus / 2;
} else {
return ncpus;
}
}
static inline arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
arena_tdata_t *tdata;
arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
if (unlikely(arenas_tdata == NULL)) {
/* arenas_tdata hasn't been initialized yet. */
return arena_tdata_get_hard(tsd, ind);
}
if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
/*
* ind is invalid, cache is old (too small), or tdata to be
* initialized.
*/
return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
NULL);
}
tdata = &arenas_tdata[ind];
if (likely(tdata != NULL) || !refresh_if_missing) {
return tdata;
}
return arena_tdata_get_hard(tsd, ind);
}
static inline arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
arena_t *ret;
assert(ind < MALLOCX_ARENA_LIMIT);
ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
if (unlikely(ret == NULL)) {
if (init_if_missing) {
ret = arena_init(tsdn, ind,
(extent_hooks_t *)&extent_hooks_default);
}
}
return ret;
}
static inline ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind) {
arena_tdata_t *tdata;
tdata = arena_tdata_get(tsd, ind, true);
if (unlikely(tdata == NULL)) {
return NULL;
}
return &tdata->decay_ticker;
}
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
assert(binind < NBINS);
return &tcache->bins_small[binind];
}
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
assert(binind >= NBINS && binind < nhbins);
return &tcache->bins_large[binind - NBINS];
}
JEMALLOC_ALWAYS_INLINE bool
tcache_available(tsd_t *tsd) {
/*
* Thread specific auto tcache might be unavailable if: 1) during tcache
* initialization, or 2) disabled through thread.tcache.enabled mallctl
* or config options. This check covers all cases.
*/
if (likely(tsd_tcache_enabled_get(tsd))) {
/* Associated arena == NULL implies tcache init in progress. */
assert(tsd_tcachep_get(tsd)->arena == NULL ||
tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail !=
NULL);
return true;
}
return false;
}
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd) {
if (!tcache_available(tsd)) {
return NULL;
}
return tsd_tcachep_get(tsd);
}
static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
/* arena is the current context. Reentry from a0 is not allowed. */
assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
bool fast = tsd_fast(tsd);
assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
++*tsd_reentrancy_levelp_get(tsd);
if (fast) {
/* Prepare slow path for reentrancy. */
tsd_slow_update(tsd);
assert(tsd->state == tsd_state_nominal_slow);
}
}
static inline void
post_reentrancy(tsd_t *tsd) {
int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
assert(*reentrancy_level > 0);
if (--*reentrancy_level == 0) {
tsd_slow_update(tsd);
}
}
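/*
* Usage sketch (illustrative only): code that may call back into the
* allocator, e.g. a custom extent hook, is bracketed as
*
*   pre_reentrancy(tsd, arena);
*   ... code that might recursively allocate ...
*   post_reentrancy(tsd);
*
* so that any reentrant allocation takes the slow path (and, per
* arena_choose_impl() below, is served from arena 0).
*/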
#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
#ifndef JEMALLOC_INTERNAL_INLINES_B_H
#define JEMALLOC_INTERNAL_INLINES_B_H
#include "jemalloc/internal/rtree.h"
/* Choose an arena based on a per-thread value. */
static inline arena_t *
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
arena_t *ret;
if (arena != NULL) {
return arena;
}
/* During reentrancy, arena 0 is the safest bet. */
if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
return arena_get(tsd_tsdn(tsd), 0, true);
}
ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
if (unlikely(ret == NULL)) {
ret = arena_choose_hard(tsd, internal);
assert(ret);
if (tcache_available(tsd)) {
tcache_t *tcache = tcache_get(tsd);
if (tcache->arena != NULL) {
/* See comments in tcache_data_init().*/
assert(tcache->arena ==
arena_get(tsd_tsdn(tsd), 0, false));
if (tcache->arena != ret) {
tcache_arena_reassociate(tsd_tsdn(tsd),
tcache, ret);
}
} else {
tcache_arena_associate(tsd_tsdn(tsd), tcache,
ret);
}
}
}
/*
* Note that for percpu arena, if the current arena is outside of the
* auto percpu arena range, (i.e. thread is assigned to a manually
* managed arena), then percpu arena is skipped.
*/
if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
!internal && (arena_ind_get(ret) <
percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
tsd_tsdn(tsd))) {
unsigned ind = percpu_arena_choose();
if (arena_ind_get(ret) != ind) {
percpu_arena_update(tsd, ind);
ret = tsd_arena_get(tsd);
}
ret->last_thd = tsd_tsdn(tsd);
}
return ret;
}
static inline arena_t *
arena_choose(tsd_t *tsd, arena_t *arena) {
return arena_choose_impl(tsd, arena, false);
}
static inline arena_t *
arena_ichoose(tsd_t *tsd, arena_t *arena) {
return arena_choose_impl(tsd, arena, true);
}
static inline bool
arena_is_auto(arena_t *arena) {
assert(narenas_auto > 0);
return (arena_ind_get(arena) < narenas_auto);
}
JEMALLOC_ALWAYS_INLINE extent_t *
iealloc(tsdn_t *tsdn, const void *ptr) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
}
#endif /* JEMALLOC_INTERNAL_INLINES_B_H */
#ifndef JEMALLOC_INTERNAL_INLINES_C_H
#define JEMALLOC_INTERNAL_INLINES_C_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/witness.h"
/*
* Translating the names of the 'i' functions:
* Abbreviations used in the first part of the function name (before
* alloc/dalloc) describe what that function accomplishes:
* a: arena (query)
* s: size (query, or sized deallocation)
* e: extent (query)
* p: aligned (allocates)
* vs: size (query, without knowing that the pointer is into the heap)
* r: rallocx implementation
* x: xallocx implementation
* Abbreviations used in the second part of the function name (after
* alloc/dalloc) describe the arguments it takes
* z: whether to return zeroed memory
* t: accepts a tcache_t * parameter
* m: accepts an arena_t * parameter
*/
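/*
* Illustrative decoding of two names under the scheme above (commentary only):
* iallocztm() is the allocation entry point that takes a zero flag ("z"), a
* tcache_t * ("t"), and an arena_t * ("m"), while ivsalloc() is the size query
* that does not assume the pointer points into the heap ("vs").
*/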
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(tsdn_t *tsdn, const void *ptr) {
assert(ptr != NULL);
return arena_aalloc(tsdn, ptr);
}
JEMALLOC_ALWAYS_INLINE size_t
isalloc(tsdn_t *tsdn, const void *ptr) {
assert(ptr != NULL);
return arena_salloc(tsdn, ptr);
}
JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
bool is_internal, arena_t *arena, bool slow_path) {
void *ret;
assert(size != 0);
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena == NULL || arena_is_auto(arena));
if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
}
ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
if (config_stats && is_internal && likely(ret != NULL)) {
arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
}
return ret;
}
JEMALLOC_ALWAYS_INLINE void *
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
NULL, slow_path);
}
JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_internal, arena_t *arena) {
void *ret;
assert(usize != 0);
assert(usize == sz_sa2u(usize, alignment));
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena == NULL || arena_is_auto(arena));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_internal && likely(ret != NULL)) {
arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
}
return ret;
}
JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena) {
return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
}
JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
tcache_get(tsd), false, NULL);
}
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(tsdn_t *tsdn, const void *ptr) {
return arena_vsalloc(tsdn, ptr);
}
JEMALLOC_ALWAYS_INLINE void
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
bool is_internal, bool slow_path) {
assert(ptr != NULL);
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
if (config_stats && is_internal) {
arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
}
if (!is_internal && !tsdn_null(tsdn) &&
tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
assert(tcache == NULL);
}
arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
}
JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr) {
idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
}
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
alloc_ctx_t *alloc_ctx, bool slow_path) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
}
JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache,
arena_t *arena) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
void *p;
size_t usize, copysize;
usize = sz_sa2u(size + extra, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return NULL;
}
p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
if (p == NULL) {
if (extra == 0) {
return NULL;
}
/* Try again, without extra this time. */
usize = sz_sa2u(size, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return NULL;
}
p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
if (p == NULL) {
return NULL;
}
}
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
return p;
}
JEMALLOC_ALWAYS_INLINE void *
iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
bool zero, tcache_t *tcache, arena_t *arena) {
assert(ptr != NULL);
assert(size != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
return iralloct_realign(tsdn, ptr, oldsize, size, 0, alignment,
zero, tcache, arena);
}
return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
tcache);
}
JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
bool zero) {
return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
tcache_get(tsd), NULL);
}
JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero) {
assert(ptr != NULL);
assert(size != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/* Existing object alignment is inadequate. */
return true;
}
return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero);
}
JEMALLOC_ALWAYS_INLINE int
iget_defrag_hint(tsdn_t *tsdn, void* ptr, int *bin_util, int *run_util) {
int defrag = 0;
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind;
bool is_slab;
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &szind, &is_slab);
if (likely(is_slab)) {
/* Small allocation. */
extent_t *slab = iealloc(tsdn, ptr);
arena_t *arena = extent_arena_get(slab);
szind_t binind = extent_szind_get(slab);
bin_t *bin = &arena->bins[binind];
malloc_mutex_lock(tsdn, &bin->lock);
/* don't bother moving allocations from the slab currently used for new allocations */
if (slab != bin->slabcur) {
const bin_info_t *bin_info = &bin_infos[binind];
size_t availregs = bin_info->nregs * bin->stats.curslabs;
*bin_util = ((long long)bin->stats.curregs<<16) / availregs;
*run_util = ((long long)(bin_info->nregs - extent_nfree_get(slab))<<16) / bin_info->nregs;
defrag = 1;
}
malloc_mutex_unlock(tsdn, &bin->lock);
}
return defrag;
}
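/*
* Note (commentary only): bin_util and run_util above are 16.16 fixed-point
* occupancy ratios; bin_util is the fraction of in-use regions across all of
* the bin's current slabs, and run_util is the fraction of in-use regions in
* this particular slab, so callers can compare the two without floating point.
*/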
#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
/*
* JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
* functions that are static inline functions if inlining is enabled, and
* single-definition library-private functions if inlining is disabled.
*
* JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
* which case the denoted functions are always static, regardless of whether
* inlining is enabled.
*/
#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
/* Disable inlining to make debugging/profiling easier. */
# define JEMALLOC_ALWAYS_INLINE
# define JEMALLOC_ALWAYS_INLINE_C static
# define JEMALLOC_INLINE
# define JEMALLOC_INLINE_C static
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ALWAYS_INLINE \
static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
# define JEMALLOC_ALWAYS_INLINE_C \
static inline JEMALLOC_ATTR(always_inline)
# else
# define JEMALLOC_ALWAYS_INLINE static inline
# define JEMALLOC_ALWAYS_INLINE_C static inline
# endif
# define JEMALLOC_INLINE static inline
# define JEMALLOC_INLINE_C static inline
# ifdef _MSC_VER
# define inline _inline
# endif
#endif
#ifndef JEMALLOC_INTERNAL_MACROS_H
#define JEMALLOC_INTERNAL_MACROS_H
#ifdef JEMALLOC_DEBUG
# define JEMALLOC_ALWAYS_INLINE static inline
#else
# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
#endif
#ifdef _MSC_VER
# define inline _inline
#endif
#define UNUSED JEMALLOC_ATTR(unused)
#define ZU(z) ((size_t)z)
#define ZD(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q)
#define QD(q) ((int64_t)q)
#define KZU(z) ZU(z##ULL)
#define KZD(z) ZD(z##LL)
#define KQU(q) QU(q##ULL)
#define KQD(q) QD(q##LL)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
# define restrict
#endif
/* Various function pointers are static and immutable except during testing. */
#ifdef JEMALLOC_JET
# define JET_MUTABLE
#else
# define JET_MUTABLE const
#endif
#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__
#endif /* JEMALLOC_INTERNAL_MACROS_H */
#ifndef JEMALLOC_INTERNAL_TYPES_H
#define JEMALLOC_INTERNAL_TYPES_H
/* Page size index type. */
typedef unsigned pszind_t;
/* Size class index type. */
typedef unsigned szind_t;
/* Processor / core id type. */
typedef int malloc_cpuid_t;
/*
* Flags bits:
*
* a: arena
* t: tcache
* 0: unused
* z: zero
* n: alignment
*
* aaaaaaaa aaaatttt tttttttt 0znnnnnn
*/
#define MALLOCX_ARENA_BITS 12
#define MALLOCX_TCACHE_BITS 12
#define MALLOCX_LG_ALIGN_BITS 6
#define MALLOCX_ARENA_SHIFT 20
#define MALLOCX_TCACHE_SHIFT 8
#define MALLOCX_ARENA_MASK \
(((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
/* NB: Arena index bias decreases the maximum number of arenas by 1. */
#define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1)
#define MALLOCX_TCACHE_MASK \
(((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
(ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags) \
(MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags) \
((bool)(flags & MALLOCX_ZERO))
#define MALLOCX_TCACHE_GET(flags) \
(((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
#define MALLOCX_ARENA_GET(flags) \
(((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
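/*
* Worked example (illustrative only): encoding arena index 3 as
* ((unsigned)(3 + 1)) << MALLOCX_ARENA_SHIFT produces a flags word for which
* MALLOCX_ARENA_GET(flags) == (flags >> 20) - 1 == 3; this +1/-1 bias is the
* reason MALLOCX_ARENA_LIMIT is one less than (1 << MALLOCX_ARENA_BITS).
*/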
/* Smallest size class to support. */
#define TINY_MIN (1U << LG_TINY_MIN)
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
# define LG_QUANTUM 4
# endif
# ifdef __ia64__
# define LG_QUANTUM 4
# endif
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
# define LG_QUANTUM 4
# endif
# ifdef __arm__
# define LG_QUANTUM 3
# endif
# ifdef __aarch64__
# define LG_QUANTUM 4
# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __m68k__
# define LG_QUANTUM 3
# endif
# ifdef __mips__
# define LG_QUANTUM 3
# endif
# ifdef __nios2__
# define LG_QUANTUM 3
# endif
# ifdef __or1k__
# define LG_QUANTUM 3
# endif
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
# if defined(__riscv) || defined(__riscv__)
# define LG_QUANTUM 4
# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
# if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \
defined(__SH4_SINGLE_ONLY__))
# define LG_QUANTUM 4
# endif
# ifdef __tile__
# define LG_QUANTUM 4
# endif
# ifdef __le32__
# define LG_QUANTUM 4
# endif
# ifndef LG_QUANTUM
# error "Unknown minimum alignment for architecture; specify via "
"--with-lg-quantum"
# endif
#endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)
/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
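/*
* Worked example (illustrative only): with LG_QUANTUM == 4, QUANTUM == 16 and
* QUANTUM_CEILING(13) == (13 + 15) & ~15 == 16, i.e. a 13-byte request rounds
* up to one quantum.
*/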
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)
/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)
/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
* In addition, this controls the spacing of cacheline-spaced size classes.
*
* CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
* only handle raw constants.
*/
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)
/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
((size_t)((uintptr_t)(a) & (alignment - 1)))
/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + (alignment - 1)) & ((~(alignment)) + 1))
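/*
* Worked examples (illustrative only): CACHELINE_CEILING(100) ==
* (100 + 63) & ~63 == 128; for alignment == 0x100,
* ALIGNMENT_ADDR2BASE(0x1234, 0x100) == 0x1200 and
* ALIGNMENT_ADDR2OFFSET(0x1234, 0x100) == 0x34; and
* ALIGNMENT_CEILING(100, 64) == 128.
*/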
/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
# include <malloc.h>
# define alloca _alloca
# else
# ifdef JEMALLOC_HAS_ALLOCA_H
# include <alloca.h>
# else
# include <stdlib.h>
# endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
type *name = alloca(sizeof(type) * (count))
#else
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
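/*
* Usage sketch (illustrative only): VARIABLE_ARRAY(uint64_t, vals, nslots)
* declares "uint64_t vals[nslots]" as a C99 variable-length array, or an
* alloca()-backed buffer of the same name on pre-C99 / MSVC toolchains.
*/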
#endif /* JEMALLOC_INTERNAL_TYPES_H */