Unverified Commit a51eb05b authored by Oran Agra, committed by GitHub

Release Redis 7.2 RC2

parents e26a769d 986dbf71
#ifndef JEMALLOC_INTERNAL_BIN_INFO_H
#define JEMALLOC_INTERNAL_BIN_INFO_H
#include "jemalloc/internal/bitmap.h"
/*
* Read-only information associated with each element of arena_t's bins array
* is stored separately, partly to reduce memory usage (only one copy, rather
* than one per arena), but mainly to avoid false cacheline sharing.
*
* Each slab has the following layout:
*
* /--------------------\
* | region 0 |
* |--------------------|
* | region 1 |
* |--------------------|
* | ... |
* | ... |
* | ... |
* |--------------------|
* | region nregs-1 |
* \--------------------/
*/
typedef struct bin_info_s bin_info_t;
struct bin_info_s {
/* Size of regions in a slab for this bin's size class. */
size_t reg_size;
/* Total size of a slab for this bin's size class. */
size_t slab_size;
/* Total number of regions in a slab for this bin's size class. */
uint32_t nregs;
/* Number of sharded bins in each arena for this size class. */
uint32_t n_shards;
/*
* Metadata used to manipulate bitmaps for slabs associated with this
* bin.
*/
bitmap_info_t bitmap_info;
};
extern bin_info_t bin_infos[SC_NBINS];
void bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);
#endif /* JEMALLOC_INTERNAL_BIN_INFO_H */
@@ -47,8 +47,11 @@ struct bin_stats_s {
/* Current size of nonfull slabs heap in this bin. */
size_t nonfull_slabs;
};
typedef struct bin_stats_data_s bin_stats_data_t;
struct bin_stats_data_s {
bin_stats_t stats_data;
mutex_prof_data_t mutex_data;
};
#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */
@@ -3,7 +3,7 @@
#include "jemalloc/internal/sc.h"
#define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH)
#define BIN_SHARDS_MAX (1 << EDATA_BITS_BINSHARD_WIDTH)
#define N_BIN_SHARDS_DEFAULT 1
/* Used in TSD static initializer only. Real init in arena_bind(). */
......
@@ -3,144 +3,383 @@
#include "jemalloc/internal/assert.h"
#define BIT_UTIL_INLINE static inline
/* Sanity check. */
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
|| !defined(JEMALLOC_INTERNAL_FFS)
# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
#endif
/*
* Unlike the builtins and posix ffs functions, our ffs requires a non-zero
* input, and returns the position of the lowest bit set (as opposed to the
* posix versions, which return 1 larger than that position and use a return
* value of zero as a sentinel). This tends to simplify logic in callers, and
* allows for consistency with the builtins we build fls on top of.
*/
static inline unsigned
ffs_llu(unsigned long long x) {
util_assume(x != 0);
return JEMALLOC_INTERNAL_FFSLL(x) - 1;
}
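/*
 * Illustrative example (not part of the header): under this zero-based
 * convention, ffs_u(0x18) == 3, since bit 3 is the lowest bit set in
 * 0b11000, whereas the posix ffs() would return 4 and reserve 0 for the
 * zero-input case that we instead forbid via util_assume(x != 0).
 */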
BIT_UTIL_INLINE unsigned
ffs_llu(unsigned long long bitmap) {
return JEMALLOC_INTERNAL_FFSLL(bitmap);
static inline unsigned
ffs_lu(unsigned long x) {
util_assume(x != 0);
return JEMALLOC_INTERNAL_FFSL(x) - 1;
}
BIT_UTIL_INLINE unsigned
ffs_lu(unsigned long bitmap) {
return JEMALLOC_INTERNAL_FFSL(bitmap);
static inline unsigned
ffs_u(unsigned x) {
util_assume(x != 0);
return JEMALLOC_INTERNAL_FFS(x) - 1;
}
BIT_UTIL_INLINE unsigned
ffs_u(unsigned bitmap) {
return JEMALLOC_INTERNAL_FFS(bitmap);
#define DO_FLS_SLOW(x, suffix) do { \
util_assume(x != 0); \
x |= (x >> 1); \
x |= (x >> 2); \
x |= (x >> 4); \
x |= (x >> 8); \
x |= (x >> 16); \
if (sizeof(x) > 4) { \
/* \
* If sizeof(x) is 4, then the expression "x >> 32" \
* will generate compiler warnings even if the code \
* never executes. This circumvents the warning, and \
* gets compiled out in optimized builds. \
*/ \
int constant_32 = sizeof(x) * 4; \
x |= (x >> constant_32); \
} \
x++; \
if (x == 0) { \
return 8 * sizeof(x) - 1; \
} \
return ffs_##suffix(x) - 1; \
} while(0)
static inline unsigned
fls_llu_slow(unsigned long long x) {
DO_FLS_SLOW(x, llu);
}
#ifdef JEMALLOC_INTERNAL_POPCOUNTL
BIT_UTIL_INLINE unsigned
static inline unsigned
fls_lu_slow(unsigned long x) {
DO_FLS_SLOW(x, lu);
}
static inline unsigned
fls_u_slow(unsigned x) {
DO_FLS_SLOW(x, u);
}
#undef DO_FLS_SLOW
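/*
 * Illustrative note (not part of the header): fls_* return the zero-based
 * index of the highest bit set, e.g. fls_u(0x18) == 4. In DO_FLS_SLOW, the
 * shift-or cascade turns x into 2^(fls(x) + 1) - 1; the increment then
 * yields 2^(fls(x) + 1), or 0 on overflow when every bit was set, so the
 * final ffs - 1 recovers the index of the highest originally-set bit.
 */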
#ifdef JEMALLOC_HAVE_BUILTIN_CLZ
static inline unsigned
fls_llu(unsigned long long x) {
util_assume(x != 0);
/*
* Note that the xor here is more naturally written as subtraction; the
* last bit set is the number of bits in the type minus the number of
* leading zero bits. But GCC implements that as:
* bsr edi, edi
* mov eax, 31
* xor edi, 31
* sub eax, edi
* If we write it as xor instead, then we get
* bsr eax, edi
* as desired.
*/
return (8 * sizeof(x) - 1) ^ __builtin_clzll(x);
}
static inline unsigned
fls_lu(unsigned long x) {
util_assume(x != 0);
return (8 * sizeof(x) - 1) ^ __builtin_clzl(x);
}
static inline unsigned
fls_u(unsigned x) {
util_assume(x != 0);
return (8 * sizeof(x) - 1) ^ __builtin_clz(x);
}
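/*
 * Worked check of the xor trick above (illustrative): for a 64-bit x,
 * __builtin_clzll(x) lies in [0, 63] and 63 is 0b111111, so 63 ^ clz equals
 * 63 - clz; e.g. x == 0x18 gives clz == 59, and 63 ^ 59 == 4 == fls_llu(0x18).
 */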
#elif defined(_MSC_VER)
#if LG_SIZEOF_PTR == 3
#define DO_BSR64(bit, x) _BitScanReverse64(&bit, x)
#else
/*
* This never actually runs; we're just dodging a compiler error for the
* never-taken branch where sizeof(void *) == 8.
*/
#define DO_BSR64(bit, x) bit = 0; unreachable()
#endif
#define DO_FLS(x) do { \
if (x == 0) { \
return 8 * sizeof(x); \
} \
unsigned long bit; \
if (sizeof(x) == 4) { \
_BitScanReverse(&bit, (unsigned)x); \
return (unsigned)bit; \
} \
if (sizeof(x) == 8 && sizeof(void *) == 8) { \
DO_BSR64(bit, x); \
return (unsigned)bit; \
} \
if (sizeof(x) == 8 && sizeof(void *) == 4) { \
/* Dodge a compiler warning, as above. */ \
int constant_32 = sizeof(x) * 4; \
if (_BitScanReverse(&bit, \
(unsigned)(x >> constant_32))) { \
return 32 + (unsigned)bit; \
} else { \
_BitScanReverse(&bit, (unsigned)x); \
return (unsigned)bit; \
} \
} \
unreachable(); \
} while (0)
static inline unsigned
fls_llu(unsigned long long x) {
DO_FLS(x);
}
static inline unsigned
fls_lu(unsigned long x) {
DO_FLS(x);
}
static inline unsigned
fls_u(unsigned x) {
DO_FLS(x);
}
#undef DO_FLS
#undef DO_BSR64
#else
static inline unsigned
fls_llu(unsigned long long x) {
return fls_llu_slow(x);
}
static inline unsigned
fls_lu(unsigned long x) {
return fls_lu_slow(x);
}
static inline unsigned
fls_u(unsigned x) {
return fls_u_slow(x);
}
#endif
#if LG_SIZEOF_LONG_LONG > 3
# error "Haven't implemented popcount for 16-byte ints."
#endif
#define DO_POPCOUNT(x, type) do { \
/* \
* Algorithm from an old AMD optimization reference manual. \
* We're putting a little bit more work than you might expect \
* into the no-intrinsic case, since we only support the \
* GCC intrinsics spelling of popcount (for now). Detecting \
* whether or not the popcount builtin is actually usable in \
* MSVC is nontrivial. \
*/ \
\
type bmul = (type)0x0101010101010101ULL; \
\
/* \
* Replace each 2 bits with the sideways sum of the original \
* values. 0x5 = 0b0101. \
* \
* You might expect this to be: \
* x = (x & 0x55...) + ((x >> 1) & 0x55...). \
* That costs an extra mask relative to this, though. \
*/ \
x = x - ((x >> 1) & (0x55U * bmul)); \
/* Replace each 4 bits with their sideways sum. 0x3 = 0b0011. */\
x = (x & (bmul * 0x33U)) + ((x >> 2) & (bmul * 0x33U)); \
/* \
* Replace each 8 bits with their sideways sum. Note that we \
* can't overflow within each 4-bit sum here, so we can skip \
* the initial mask. \
*/ \
x = (x + (x >> 4)) & (bmul * 0x0FU); \
/* \
* None of the partial sums in this multiplication (viewed in \
* base-256) can overflow into the next digit. So the least \
* significant byte of the product will be the least \
* significant byte of the original value, the second least \
* significant byte will be the sum of the two least \
* significant bytes of the original value, and so on. \
* Importantly, the high byte will be the byte-wise sum of all \
* the bytes of the original value. \
*/ \
x = x * bmul; \
x >>= ((sizeof(x) - 1) * 8); \
return (unsigned)x; \
} while(0)
static inline unsigned
popcount_u_slow(unsigned bitmap) {
DO_POPCOUNT(bitmap, unsigned);
}
static inline unsigned
popcount_lu_slow(unsigned long bitmap) {
DO_POPCOUNT(bitmap, unsigned long);
}
static inline unsigned
popcount_llu_slow(unsigned long long bitmap) {
DO_POPCOUNT(bitmap, unsigned long long);
}
#undef DO_POPCOUNT
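/*
 * Worked example of the SWAR steps above (illustrative), on the single
 * byte x = 0b11010110 (five bits set):
 *   2-bit step: x becomes 0b10010101 (per-pair counts 2,1,1,1);
 *   4-bit step: x becomes 0b00110010 (per-nibble counts 3,2);
 *   8-bit step: x becomes 0b00000101 == 5.
 * For wider types, the final multiply by bmul sums the per-byte counts into
 * the top byte, which the closing shift extracts.
 */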
static inline unsigned
popcount_u(unsigned bitmap) {
#ifdef JEMALLOC_INTERNAL_POPCOUNT
return JEMALLOC_INTERNAL_POPCOUNT(bitmap);
#else
return popcount_u_slow(bitmap);
#endif
}
static inline unsigned
popcount_lu(unsigned long bitmap) {
return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
#ifdef JEMALLOC_INTERNAL_POPCOUNTL
return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
#else
return popcount_lu_slow(bitmap);
#endif
}
static inline unsigned
popcount_llu(unsigned long long bitmap) {
#ifdef JEMALLOC_INTERNAL_POPCOUNTLL
return JEMALLOC_INTERNAL_POPCOUNTLL(bitmap);
#else
return popcount_llu_slow(bitmap);
#endif
}
/*
* Clears the first set bit in the bitmap, and returns
* the position of that bit. bitmap *must not* be 0.
*/
BIT_UTIL_INLINE size_t
static inline size_t
cfs_lu(unsigned long* bitmap) {
size_t bit = ffs_lu(*bitmap) - 1;
util_assume(*bitmap != 0);
size_t bit = ffs_lu(*bitmap);
*bitmap ^= ZU(1) << bit;
return bit;
}
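/*
 * Illustrative example (not part of the header): with *bitmap == 0b10100,
 * cfs_lu clears bit 2 (the lowest set bit), leaving 0b10000, and returns 2.
 */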
BIT_UTIL_INLINE unsigned
ffs_zu(size_t bitmap) {
static inline unsigned
ffs_zu(size_t x) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
return ffs_u(bitmap);
return ffs_u(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
return ffs_lu(bitmap);
return ffs_lu(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
return ffs_llu(bitmap);
return ffs_llu(x);
#else
#error No implementation for size_t ffs()
#endif
}
BIT_UTIL_INLINE unsigned
ffs_u64(uint64_t bitmap) {
static inline unsigned
fls_zu(size_t x) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
return fls_u(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
return fls_lu(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
return fls_llu(x);
#else
#error No implementation for size_t fls()
#endif
}
static inline unsigned
ffs_u64(uint64_t x) {
#if LG_SIZEOF_LONG == 3
return ffs_lu(bitmap);
return ffs_lu(x);
#elif LG_SIZEOF_LONG_LONG == 3
return ffs_llu(bitmap);
return ffs_llu(x);
#else
#error No implementation for 64-bit ffs()
#endif
}
BIT_UTIL_INLINE unsigned
ffs_u32(uint32_t bitmap) {
static inline unsigned
fls_u64(uint64_t x) {
#if LG_SIZEOF_LONG == 3
return fls_lu(x);
#elif LG_SIZEOF_LONG_LONG == 3
return fls_llu(x);
#else
#error No implementation for 64-bit fls()
#endif
}
static inline unsigned
ffs_u32(uint32_t x) {
#if LG_SIZEOF_INT == 2
return ffs_u(bitmap);
return ffs_u(x);
#else
#error No implementation for 32-bit ffs()
#endif
return ffs_u(bitmap);
return ffs_u(x);
}
static inline unsigned
fls_u32(uint32_t x) {
#if LG_SIZEOF_INT == 2
return fls_u(x);
#else
#error No implementation for 32-bit fls()
#endif
return fls_u(x);
}
BIT_UTIL_INLINE uint64_t
static inline uint64_t
pow2_ceil_u64(uint64_t x) {
#if (defined(__amd64__) || defined(__x86_64__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ))
if(unlikely(x <= 1)) {
if (unlikely(x <= 1)) {
return x;
}
size_t msb_on_index;
#if (defined(__amd64__) || defined(__x86_64__))
asm ("bsrq %1, %0"
: "=r"(msb_on_index) // Outputs.
: "r"(x-1) // Inputs.
);
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
msb_on_index = (63 ^ __builtin_clzll(x - 1));
#endif
size_t msb_on_index = fls_u64(x - 1);
/*
* Range-check; it's on the callers to ensure that the result of this
* call won't overflow.
*/
assert(msb_on_index < 63);
return 1ULL << (msb_on_index + 1);
#else
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
x |= x >> 32;
x++;
return x;
#endif
}
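/*
 * Illustrative examples (not part of the header): pow2_ceil_u64(5) == 8 and
 * pow2_ceil_u64(8) == 8; the x - 1 before the fls is what makes exact powers
 * of two map to themselves, while x <= 1 is returned unchanged.
 */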
BIT_UTIL_INLINE uint32_t
static inline uint32_t
pow2_ceil_u32(uint32_t x) {
#if ((defined(__i386__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ)) && (!defined(__s390__)))
if(unlikely(x <= 1)) {
return x;
if (unlikely(x <= 1)) {
return x;
}
size_t msb_on_index;
#if (defined(__i386__))
asm ("bsr %1, %0"
: "=r"(msb_on_index) // Outputs.
: "r"(x-1) // Inputs.
);
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
msb_on_index = (31 ^ __builtin_clz(x - 1));
#endif
size_t msb_on_index = fls_u32(x - 1);
/* As above. */
assert(msb_on_index < 31);
return 1U << (msb_on_index + 1);
#else
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
x++;
return x;
#endif
}
/* Compute the smallest power of 2 that is >= x. */
BIT_UTIL_INLINE size_t
static inline size_t
pow2_ceil_zu(size_t x) {
#if (LG_SIZEOF_PTR == 3)
return pow2_ceil_u64(x);
@@ -149,77 +388,21 @@ pow2_ceil_zu(size_t x) {
#endif
}
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
size_t ret;
assert(x != 0);
asm ("bsr %1, %0"
: "=r"(ret) // Outputs.
: "r"(x) // Inputs.
);
assert(ret < UINT_MAX);
return (unsigned)ret;
}
#elif (defined(_MSC_VER))
BIT_UTIL_INLINE unsigned
static inline unsigned
lg_floor(size_t x) {
unsigned long ret;
assert(x != 0);
util_assume(x != 0);
#if (LG_SIZEOF_PTR == 3)
_BitScanReverse64(&ret, x);
#elif (LG_SIZEOF_PTR == 2)
_BitScanReverse(&ret, x);
return fls_u64(x);
#else
# error "Unsupported type size for lg_floor()"
return fls_u32(x);
#endif
assert(ret < UINT_MAX);
return (unsigned)ret;
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
assert(x != 0);
#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x);
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x);
#else
# error "Unsupported type size for lg_floor()"
#endif
}
#else
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
assert(x != 0);
x |= (x >> 1);
x |= (x >> 2);
x |= (x >> 4);
x |= (x >> 8);
x |= (x >> 16);
#if (LG_SIZEOF_PTR == 3)
x |= (x >> 32);
#endif
if (x == SIZE_T_MAX) {
return (8 << LG_SIZEOF_PTR) - 1;
}
x++;
return ffs_zu(x) - 2;
}
#endif
BIT_UTIL_INLINE unsigned
static inline unsigned
lg_ceil(size_t x) {
return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1);
}
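/*
 * Illustrative examples (not part of the header): lg_floor(10) == 3 and
 * lg_ceil(10) == 4, while for an exact power of two such as 8,
 * lg_floor(8) == lg_ceil(8) == 3; (x & (x - 1)) == 0 is the standard
 * power-of-two test.
 */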
#undef BIT_UTIL_INLINE
/* A compile-time version of lg_floor and lg_ceil. */
#define LG_FLOOR_1(x) 0
#define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1))
......
#ifndef JEMALLOC_INTERNAL_BITMAP_H
#define JEMALLOC_INTERNAL_BITMAP_H
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/sc.h"
@@ -9,9 +8,9 @@ typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#if LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
#if SC_LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
/* Maximum bitmap bit count is determined by maximum regions per slab. */
# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
# define LG_BITMAP_MAXBITS SC_LG_SLAB_MAXREGS
#else
/* Maximum bitmap bit count is determined by number of extent size classes. */
# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES)
@@ -273,7 +272,7 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
}
return bitmap_ffu(bitmap, binfo, sib_base);
}
bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
bit += ((size_t)ffs_lu(group_masked)) <<
(lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
}
assert(bit >= min_bit);
@@ -285,9 +284,9 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
- 1);
size_t bit;
do {
bit = ffs_lu(g);
if (bit != 0) {
return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
if (g != 0) {
bit = ffs_lu(g);
return (i << LG_BITMAP_GROUP_NBITS) + bit;
}
i++;
g = bitmap[i];
@@ -308,20 +307,20 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
#ifdef BITMAP_USE_TREE
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
bit = ffs_lu(g) - 1;
bit = ffs_lu(g);
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
bit = (bit << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
}
#else
i = 0;
g = bitmap[0];
while ((bit = ffs_lu(g)) == 0) {
while (g == 0) {
i++;
g = bitmap[i];
}
bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
bit = (i << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
#endif
bitmap_set(bitmap, binfo, bit);
return bit;
......
#ifndef JEMALLOC_INTERNAL_BUF_WRITER_H
#define JEMALLOC_INTERNAL_BUF_WRITER_H
/*
* Note: when using the buffered writer, cbopaque is passed to write_cb only
* when the buffer is flushed. It would make a difference if cbopaque points
* to something that's changing for each write_cb call, or something that
* affects write_cb in a way dependent on the content of the output string.
* However, the most typical use case in practice is that cbopaque points to
* some "option-like" content for the write_cb, so it doesn't matter.
*/
typedef struct {
write_cb_t *write_cb;
void *cbopaque;
char *buf;
size_t buf_size;
size_t buf_end;
bool internal_buf;
} buf_writer_t;
bool buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer,
write_cb_t *write_cb, void *cbopaque, char *buf, size_t buf_len);
void buf_writer_flush(buf_writer_t *buf_writer);
write_cb_t buf_writer_cb;
void buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer);
typedef ssize_t (read_cb_t)(void *read_cbopaque, void *buf, size_t limit);
void buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
void *read_cbopaque);
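/*
 * Minimal usage sketch (illustrative, not part of this header), assuming a
 * bootstrapped tsdn and an unbuffered write_cb/cbopaque pair; per jemalloc
 * convention, buf_writer_init returns false on success:
 *
 *	buf_writer_t bw;
 *	if (!buf_writer_init(tsdn, &bw, write_cb, cbopaque, NULL, 4096)) {
 *		buf_writer_cb(&bw, "buffered output\n");
 *		buf_writer_terminate(tsdn, &bw);
 *	}
 */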
#endif /* JEMALLOC_INTERNAL_BUF_WRITER_H */
@@ -2,6 +2,7 @@
#define JEMALLOC_INTERNAL_CACHE_BIN_H
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sz.h"
/*
* The cache_bins are the mechanism that the tcache and the arena use to
@@ -13,14 +14,38 @@
* of the tcache at all.
*/
/*
* The size in bytes of each cache bin stack. We also use this to indicate
* *counts* of individual objects.
*/
typedef uint16_t cache_bin_sz_t;
/*
* The count of the number of cached allocations in a bin. We make this signed
* so that negative numbers can encode "invalid" states (e.g. a low water mark
* of -1 for a cache that has been depleted).
* Leave a noticeable mark pattern on the cache bin stack boundaries, in case a
* bug starts leaking those. Make it look like the junk pattern but be distinct
* from it.
*/
typedef int32_t cache_bin_sz_t;
static const uintptr_t cache_bin_preceding_junk =
(uintptr_t)0x7a7a7a7a7a7a7a7aULL;
/* Note: a7 vs. 7a above -- this tells you which pointer leaked. */
static const uintptr_t cache_bin_trailing_junk =
(uintptr_t)0xa7a7a7a7a7a7a7a7ULL;
/*
* That implies the following value, for the maximum number of items in any
* individual bin. The cache bins track their bounds looking just at the low
* bits of a pointer, compared against a cache_bin_sz_t. So that's
* 1 << (sizeof(cache_bin_sz_t) * 8)
* bytes spread across pointer sized objects to get the maximum.
*/
#define CACHE_BIN_NCACHED_MAX (((size_t)1 << sizeof(cache_bin_sz_t) * 8) \
/ sizeof(void *) - 1)
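/*
 * Worked out (illustrative): with cache_bin_sz_t == uint16_t and 8-byte
 * pointers, this is (1 << 16) / 8 - 1 == 8191 items at most per bin, so
 * every position within a stack is distinguishable by its low 16 bits.
 */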
/*
* This lives inside the cache_bin (for locality reasons), and is initialized
* alongside it, but is otherwise not modified by any cache bin operations.
* It's logically public and maintained by its callers.
*/
typedef struct cache_bin_stats_s cache_bin_stats_t;
struct cache_bin_stats_s {
/*
@@ -36,34 +61,75 @@ struct cache_bin_stats_s {
*/
typedef struct cache_bin_info_s cache_bin_info_t;
struct cache_bin_info_s {
/* Upper limit on ncached. */
cache_bin_sz_t ncached_max;
};
/*
* Responsible for caching allocations associated with a single size.
*
* Several pointers are used to track the stack. To save on metadata bytes,
* only the stack_head is a full sized pointer (which is dereferenced on the
* fastpath), while the others store only the low 16 bits -- this is correct
* because a single stack never takes more space than 2^16 bytes, and at the
* same time only equality checks are performed on the low bits.
*
* (low addr) (high addr)
* |------stashed------|------available------|------cached-----|
* ^ ^ ^ ^
* low_bound(derived) low_bits_full stack_head low_bits_empty
*/
typedef struct cache_bin_s cache_bin_t;
struct cache_bin_s {
/* Min # cached since last GC. */
cache_bin_sz_t low_water;
/* # of cached objects. */
cache_bin_sz_t ncached;
/*
* ncached and stats are both modified frequently. Let's keep them
* The stack grows down. Whenever the bin is nonempty, the head points
* to an array entry containing a valid allocation. When it is empty,
* the head points to one element past the owned array.
*/
void **stack_head;
/*
* stack_head and stats are both modified frequently. Let's keep them
* close so that they have a higher chance of being on the same
* cacheline, thus less write-backs.
*/
cache_bin_stats_t tstats;
/*
* Stack of available objects.
* The low bits of the address of the first item in the stack that
* hasn't been used since the last GC, to track the low water mark (min
* # of cached items).
*
* To make use of adjacent cacheline prefetch, the items in the avail
* stack go to higher addresses for newer allocations. avail points
* just above the available space, which means that
* avail[-ncached, ... -1] are available items and the lowest item will
* be allocated first.
* Since the stack grows down, this is a higher address than
* low_bits_full.
*/
void **avail;
uint16_t low_bits_low_water;
/*
* The low bits of the value that stack_head will take on when the array
* is full (of cached & stashed items). But remember that stack_head
* always points to a valid item when the array is nonempty -- this is
* in the array.
*
* Recall that since the stack grows down, this is the lowest available
* address in the array for caching. Only adjusted when stashing items.
*/
uint16_t low_bits_full;
/*
* The low bits of the value that stack_head will take on when the array
* is empty.
*
* The stack grows down -- this is one past the highest address in the
* array. Immutable after initialization.
*/
uint16_t low_bits_empty;
};
/*
* The cache_bins live inside the tcache, but the arena (by design) isn't
* supposed to know much about tcache internals. To let the arena iterate over
* associated bins, we keep (with the tcache) a linked list of
* cache_bin_array_descriptor_ts that tell the arena how to find the bins.
*/
typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
struct cache_bin_array_descriptor_s {
/*
@@ -72,37 +138,214 @@ struct cache_bin_array_descriptor_s {
*/
ql_elm(cache_bin_array_descriptor_t) link;
/* Pointers to the tcache bins. */
cache_bin_t *bins_small;
cache_bin_t *bins_large;
cache_bin_t *bins;
};
static inline void
cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
cache_bin_t *bins_small, cache_bin_t *bins_large) {
cache_bin_t *bins) {
ql_elm_new(descriptor, link);
descriptor->bins_small = bins_small;
descriptor->bins_large = bins_large;
descriptor->bins = bins;
}
JEMALLOC_ALWAYS_INLINE void *
cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
void *ret;
JEMALLOC_ALWAYS_INLINE bool
cache_bin_nonfast_aligned(const void *ptr) {
if (!config_uaf_detection) {
return false;
}
/*
* Currently we use alignment to decide which pointer to junk & stash on
* dealloc (for catching use-after-free). In some common cases a
* page-aligned check is needed already (sdalloc w/ config_prof), so we
* are getting it more or less for free -- no added instructions on
* free_fastpath.
*
* Another way of deciding which pointer to sample is adding another
* thread_event to pick one every N bytes. That also adds no cost on
* the fastpath; however, it will tend to pick large allocations, which
* is not the desired behavior.
*/
return ((uintptr_t)ptr & san_cache_bin_nonfast_mask) == 0;
}
/* Returns ncached_max: Upper limit on ncached. */
static inline cache_bin_sz_t
cache_bin_info_ncached_max(cache_bin_info_t *info) {
return info->ncached_max;
}
/*
* Internal.
*
* Asserts that the pointer associated with earlier is <= the one associated
* with later.
*/
static inline void
cache_bin_assert_earlier(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
if (earlier > later) {
assert(bin->low_bits_full > bin->low_bits_empty);
}
}
bin->ncached--;
/*
* Internal.
*
* Does difference calculations that handle wraparound correctly. Earlier must
* be associated with the position earlier in memory.
*/
static inline uint16_t
cache_bin_diff(cache_bin_t *bin, uint16_t earlier, uint16_t later, bool racy) {
/*
* When it's racy, bin->low_bits_full can be modified concurrently. It
* can cross the uint16_t max value and become less than
* bin->low_bits_empty at the time of the check.
*/
if (!racy) {
cache_bin_assert_earlier(bin, earlier, later);
}
return later - earlier;
}
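/*
 * Illustrative wraparound case for the racy path: if low_bits_full is
 * concurrently bumped from 0xfff8 past the uint16_t maximum to 0x0008,
 * the unsigned subtraction 0x0008 - 0xfff8 still yields 0x0010, a coherent
 * two-pointer (16-byte) difference on 64-bit.
 */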
/*
* Number of items currently cached in the bin, without checking ncached_max.
* We require specifying whether or not the request is racy or not (i.e. whether
* or not concurrent modifications are possible).
*/
static inline cache_bin_sz_t
cache_bin_ncached_get_internal(cache_bin_t *bin, bool racy) {
cache_bin_sz_t diff = cache_bin_diff(bin,
(uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty, racy);
cache_bin_sz_t n = diff / sizeof(void *);
/*
* Check for both bin->ncached == 0 and ncached < low_water
* in a single branch.
* We have undefined behavior here; if this function is called from the
* arena stats updating code, then stack_head could change from the
* first line to the next one. Morally, these loads should be atomic,
* but compilers won't currently generate comparisons with in-memory
* operands against atomics, and these variables get accessed on the
* fast paths. This should still be "safe" in the sense of generating
* the correct assembly for the foreseeable future, though.
*/
if (unlikely(bin->ncached <= bin->low_water)) {
bin->low_water = bin->ncached;
if (bin->ncached == -1) {
bin->ncached = 0;
*success = false;
return NULL;
}
assert(n == 0 || *(bin->stack_head) != NULL || racy);
return n;
}
/*
* Number of items currently cached in the bin, with checking ncached_max. The
* caller must know that no concurrent modification of the cache_bin is
* possible.
*/
static inline cache_bin_sz_t
cache_bin_ncached_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t n = cache_bin_ncached_get_internal(bin,
/* racy */ false);
assert(n <= cache_bin_info_ncached_max(info));
return n;
}
/*
* Internal.
*
* A pointer to the position one past the end of the backing array.
*
* Do not call if racy, because both 'bin->stack_head' and 'bin->low_bits_full'
* are subject to concurrent modifications.
*/
static inline void **
cache_bin_empty_position_get(cache_bin_t *bin) {
cache_bin_sz_t diff = cache_bin_diff(bin,
(uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty,
/* racy */ false);
uintptr_t empty_bits = (uintptr_t)bin->stack_head + diff;
void **ret = (void **)empty_bits;
assert(ret >= bin->stack_head);
return ret;
}
/*
* Internal.
*
* Calculates low bits of the lower bound of the usable cache bin's range (see
* cache_bin_t visual representation above).
*
* No values are concurrently modified, so should be safe to read in a
* multithreaded environment. Currently concurrent access happens only during
* arena statistics collection.
*/
static inline uint16_t
cache_bin_low_bits_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
return (uint16_t)bin->low_bits_empty -
info->ncached_max * sizeof(void *);
}
/*
* Internal.
*
* A pointer to the position with the lowest address of the backing array.
*/
static inline void **
cache_bin_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
void **ret = cache_bin_empty_position_get(bin) - ncached_max;
assert(ret <= bin->stack_head);
return ret;
}
/*
* As the name implies. This is important since it's not correct to try to
* batch fill a nonempty cache bin.
*/
static inline void
cache_bin_assert_empty(cache_bin_t *bin, cache_bin_info_t *info) {
assert(cache_bin_ncached_get_local(bin, info) == 0);
assert(cache_bin_empty_position_get(bin) == bin->stack_head);
}
/*
* Get low water, but without any of the correctness checking we do for the
* caller-usable version, if we are temporarily breaking invariants (like
* ncached >= low_water during flush).
*/
static inline cache_bin_sz_t
cache_bin_low_water_get_internal(cache_bin_t *bin) {
return cache_bin_diff(bin, bin->low_bits_low_water,
bin->low_bits_empty, /* racy */ false) / sizeof(void *);
}
/* Returns the numeric value of low water in [0, ncached]. */
static inline cache_bin_sz_t
cache_bin_low_water_get(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t low_water = cache_bin_low_water_get_internal(bin);
assert(low_water <= cache_bin_info_ncached_max(info));
assert(low_water <= cache_bin_ncached_get_local(bin, info));
cache_bin_assert_earlier(bin, (uint16_t)(uintptr_t)bin->stack_head,
bin->low_bits_low_water);
return low_water;
}
/*
* Indicates that the current cache bin position should be the low water mark
* going forward.
*/
static inline void
cache_bin_low_water_set(cache_bin_t *bin) {
bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
}
static inline void
cache_bin_low_water_adjust(cache_bin_t *bin) {
if (cache_bin_ncached_get_internal(bin, /* racy */ false)
< cache_bin_low_water_get_internal(bin)) {
cache_bin_low_water_set(bin);
}
}
JEMALLOC_ALWAYS_INLINE void *
cache_bin_alloc_impl(cache_bin_t *bin, bool *success, bool adjust_low_water) {
/*
* success (instead of ret) should be checked upon the return of this
* function. We avoid checking (ret == NULL) because there is never a
@@ -110,22 +353,318 @@ cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
* and eagerly checking ret would cause pipeline stall (waiting for the
* cacheline).
*/
*success = true;
ret = *(bin->avail - (bin->ncached + 1));
return ret;
/*
* This may read from the empty position; however the loaded value won't
* be used. It's safe because the stack has one more slot reserved.
*/
void *ret = *bin->stack_head;
uint16_t low_bits = (uint16_t)(uintptr_t)bin->stack_head;
void **new_head = bin->stack_head + 1;
/*
* Note that the low water mark is at most empty; if we pass this check,
* we know we're non-empty.
*/
if (likely(low_bits != bin->low_bits_low_water)) {
bin->stack_head = new_head;
*success = true;
return ret;
}
if (!adjust_low_water) {
*success = false;
return NULL;
}
/*
* In the fast-path case where we call alloc_easy and then alloc, the
* previous checking and computation is optimized away -- we didn't
* actually commit any of our operations.
*/
if (likely(low_bits != bin->low_bits_empty)) {
bin->stack_head = new_head;
bin->low_bits_low_water = (uint16_t)(uintptr_t)new_head;
*success = true;
return ret;
}
*success = false;
return NULL;
}
/*
* Allocate an item out of the bin, failing if we're at the low-water mark.
*/
JEMALLOC_ALWAYS_INLINE void *
cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
/* We don't look at info if we're not adjusting low-water. */
return cache_bin_alloc_impl(bin, success, false);
}
/*
* Allocate an item out of the bin, even if we're currently at the low-water
* mark (and failing only if the bin is empty).
*/
JEMALLOC_ALWAYS_INLINE void *
cache_bin_alloc(cache_bin_t *bin, bool *success) {
return cache_bin_alloc_impl(bin, success, true);
}
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_alloc_batch(cache_bin_t *bin, size_t num, void **out) {
cache_bin_sz_t n = cache_bin_ncached_get_internal(bin,
/* racy */ false);
if (n > num) {
n = (cache_bin_sz_t)num;
}
memcpy(out, bin->stack_head, n * sizeof(void *));
bin->stack_head += n;
cache_bin_low_water_adjust(bin);
return n;
}
JEMALLOC_ALWAYS_INLINE bool
cache_bin_dalloc_easy(cache_bin_t *bin, cache_bin_info_t *bin_info, void *ptr) {
if (unlikely(bin->ncached == bin_info->ncached_max)) {
cache_bin_full(cache_bin_t *bin) {
return ((uint16_t)(uintptr_t)bin->stack_head == bin->low_bits_full);
}
/*
* Free an object into the given bin. Fails only if the bin is full.
*/
JEMALLOC_ALWAYS_INLINE bool
cache_bin_dalloc_easy(cache_bin_t *bin, void *ptr) {
if (unlikely(cache_bin_full(bin))) {
return false;
}
assert(bin->ncached < bin_info->ncached_max);
bin->ncached++;
*(bin->avail - bin->ncached) = ptr;
bin->stack_head--;
*bin->stack_head = ptr;
cache_bin_assert_earlier(bin, bin->low_bits_full,
(uint16_t)(uintptr_t)bin->stack_head);
return true;
}
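/*
 * Minimal caller sketch (illustrative, not part of this header);
 * alloc_from_arena and flush_bin are hypothetical fallback helpers:
 *
 *	bool success;
 *	void *p = cache_bin_alloc(bin, &success);
 *	if (!success) {
 *		p = alloc_from_arena();
 *	}
 *	...
 *	if (!cache_bin_dalloc_easy(bin, p)) {
 *		flush_bin(bin);
 *	}
 */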
/* Returns false if failed to stash (i.e. bin is full). */
JEMALLOC_ALWAYS_INLINE bool
cache_bin_stash(cache_bin_t *bin, void *ptr) {
if (cache_bin_full(bin)) {
return false;
}
/* Stash at the full position, in the [full, head) range. */
uint16_t low_bits_head = (uint16_t)(uintptr_t)bin->stack_head;
/* Wraparound handled as well. */
uint16_t diff = cache_bin_diff(bin, bin->low_bits_full, low_bits_head,
/* racy */ false);
*(void **)((uintptr_t)bin->stack_head - diff) = ptr;
assert(!cache_bin_full(bin));
bin->low_bits_full += sizeof(void *);
cache_bin_assert_earlier(bin, bin->low_bits_full, low_bits_head);
return true;
}
/*
* Get the number of stashed pointers.
*
* When called from a thread not owning the TLS (i.e. racy = true), it's
* important to keep in mind that 'bin->stack_head' and 'bin->low_bits_full' can
* be modified concurrently and almost no assertions about their values can be
* made.
*/
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_nstashed_get_internal(cache_bin_t *bin, cache_bin_info_t *info,
bool racy) {
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin,
info);
cache_bin_sz_t n = cache_bin_diff(bin, low_bits_low_bound,
bin->low_bits_full, racy) / sizeof(void *);
assert(n <= ncached_max);
if (!racy) {
/* Below are for assertions only. */
void **low_bound = cache_bin_low_bound_get(bin, info);
assert((uint16_t)(uintptr_t)low_bound == low_bits_low_bound);
void *stashed = *(low_bound + n - 1);
bool aligned = cache_bin_nonfast_aligned(stashed);
#ifdef JEMALLOC_JET
/* Allow arbitrary pointers to be stashed in tests. */
aligned = true;
#endif
assert(n == 0 || (stashed != NULL && aligned));
}
return n;
}
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_nstashed_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t n = cache_bin_nstashed_get_internal(bin, info,
/* racy */ false);
assert(n <= cache_bin_info_ncached_max(info));
return n;
}
/*
* Obtain a racy view of the number of items currently in the cache bin, in the
* presence of possible concurrent modifications.
*/
static inline void
cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_sz_t *ncached, cache_bin_sz_t *nstashed) {
cache_bin_sz_t n = cache_bin_ncached_get_internal(bin, /* racy */ true);
assert(n <= cache_bin_info_ncached_max(info));
*ncached = n;
n = cache_bin_nstashed_get_internal(bin, info, /* racy */ true);
assert(n <= cache_bin_info_ncached_max(info));
*nstashed = n;
/* Note that we cannot assert ncached + nstashed <= ncached_max (racy). */
}
/*
* Filling and flushing are done in batch, on arrays of void *s. For filling,
* the arrays go forward, and can be accessed with ordinary array arithmetic.
* For flushing, we work from the end backwards, and so need to use special
* accessors that invert the usual ordering.
*
* This is important for maintaining first-fit; the arena code fills with
* earliest objects first, and so those are the ones we should return first for
* cache_bin_alloc calls. When flushing, we should flush the objects that we
* wish to return later; those at the end of the array. This is better for the
* first-fit heuristic as well as for cache locality; the most recently freed
* objects are the ones most likely to still be in cache.
*
* This all sounds very hand-wavey and theoretical, but reverting the ordering
* on one or the other pathway leads to measurable slowdowns.
*/
typedef struct cache_bin_ptr_array_s cache_bin_ptr_array_t;
struct cache_bin_ptr_array_s {
cache_bin_sz_t n;
void **ptr;
};
/*
* Declare a cache_bin_ptr_array_t sufficient for nval items.
*
* In the current implementation, this could be just part of a
* cache_bin_ptr_array_init_... call, since we reuse the cache bin stack memory.
* Indirecting behind a macro, though, means experimenting with linked-list
* representations is easy (since they'll require an alloca in the calling
* frame).
*/
#define CACHE_BIN_PTR_ARRAY_DECLARE(name, nval) \
cache_bin_ptr_array_t name; \
name.n = (nval)
/*
* Start a fill. The bin must be empty, and this must be followed by a
* finish_fill call before doing any alloc/dalloc operations on the bin.
*/
static inline void
cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_ptr_array_t *arr, cache_bin_sz_t nfill) {
cache_bin_assert_empty(bin, info);
arr->ptr = cache_bin_empty_position_get(bin) - nfill;
}
/*
* While nfill in cache_bin_init_ptr_array_for_fill is the number we *intend* to
* fill, nfilled here is the number we actually filled (which may be less, in
* case of OOM).
*/
static inline void
cache_bin_finish_fill(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_ptr_array_t *arr, cache_bin_sz_t nfilled) {
cache_bin_assert_empty(bin, info);
void **empty_position = cache_bin_empty_position_get(bin);
if (nfilled < arr->n) {
memmove(empty_position - nfilled, empty_position - arr->n,
nfilled * sizeof(void *));
}
bin->stack_head = empty_position - nfilled;
}
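/*
 * Minimal fill-protocol sketch (illustrative, not part of this header);
 * arena_fill_batch is a hypothetical source of pointers:
 *
 *	CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill);
 *	cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill);
 *	cache_bin_sz_t nfilled = arena_fill_batch(arr.ptr, nfill);
 *	cache_bin_finish_fill(bin, info, &arr, nfilled);
 */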
/*
* Same deal, but with flush. Unlike fill (which can fail), the user must flush
* everything we give them.
*/
static inline void
cache_bin_init_ptr_array_for_flush(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) {
arr->ptr = cache_bin_empty_position_get(bin) - nflush;
assert(cache_bin_ncached_get_local(bin, info) == 0
|| *arr->ptr != NULL);
}
static inline void
cache_bin_finish_flush(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_ptr_array_t *arr, cache_bin_sz_t nflushed) {
unsigned rem = cache_bin_ncached_get_local(bin, info) - nflushed;
memmove(bin->stack_head + nflushed, bin->stack_head,
rem * sizeof(void *));
bin->stack_head = bin->stack_head + nflushed;
cache_bin_low_water_adjust(bin);
}
static inline void
cache_bin_init_ptr_array_for_stashed(cache_bin_t *bin, szind_t binind,
cache_bin_info_t *info, cache_bin_ptr_array_t *arr,
cache_bin_sz_t nstashed) {
assert(nstashed > 0);
assert(cache_bin_nstashed_get_local(bin, info) == nstashed);
void **low_bound = cache_bin_low_bound_get(bin, info);
arr->ptr = low_bound;
assert(*arr->ptr != NULL);
}
static inline void
cache_bin_finish_flush_stashed(cache_bin_t *bin, cache_bin_info_t *info) {
void **low_bound = cache_bin_low_bound_get(bin, info);
/* Reset the bin local full position. */
bin->low_bits_full = (uint16_t)(uintptr_t)low_bound;
assert(cache_bin_nstashed_get_local(bin, info) == 0);
}
/*
* Initialize a cache_bin_info to represent up to the given number of items in
* the cache_bins it is associated with.
*/
void cache_bin_info_init(cache_bin_info_t *bin_info,
cache_bin_sz_t ncached_max);
/*
* Given an array of initialized cache_bin_info_ts, determine how big an
* allocation is required to initialize a full set of cache_bin_ts.
*/
void cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
size_t *size, size_t *alignment);
/*
* Actually initialize some cache bins. Callers should allocate the backing
* memory indicated by a call to cache_bin_compute_alloc. They should then
* preincrement, call init once for each bin and info, and then call
* cache_bin_postincrement. *alloc_cur will then point immediately past the end
* of the allocation.
*/
void cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos,
void *alloc, size_t *cur_offset);
void cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos,
void *alloc, size_t *cur_offset);
void cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
size_t *cur_offset);
/*
* If a cache bin was zero initialized (either because it lives in static or
* thread-local storage, or was memset to 0), this function indicates whether or
* not cache_bin_init was called on it.
*/
bool cache_bin_still_zero_initialized(cache_bin_t *bin);
#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
#ifndef JEMALLOC_INTERNAL_COUNTER_H
#define JEMALLOC_INTERNAL_COUNTER_H
#include "jemalloc/internal/mutex.h"
typedef struct counter_accum_s {
LOCKEDINT_MTX_DECLARE(mtx)
locked_u64_t accumbytes;
uint64_t interval;
} counter_accum_t;
JEMALLOC_ALWAYS_INLINE bool
counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
uint64_t interval = counter->interval;
assert(interval > 0);
LOCKEDINT_MTX_LOCK(tsdn, counter->mtx);
/*
* If the event moves fast enough (and/or if the event handling is slow
* enough), extreme overflow can cause counter trigger coalescing.
* This is an intentional mechanism that avoids rate-limiting
* allocation.
*/
bool overflow = locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(counter->mtx),
&counter->accumbytes, bytes, interval);
LOCKEDINT_MTX_UNLOCK(tsdn, counter->mtx);
return overflow;
}
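/*
 * Minimal usage sketch (illustrative, not part of this header), assuming
 * counter was set up once via counter_accum_init(&counter, interval);
 * trigger_event is a hypothetical handler:
 *
 *	if (counter_accum(tsdn, &counter, usize)) {
 *		trigger_event();
 *	}
 */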
bool counter_accum_init(counter_accum_t *counter, uint64_t interval);
void counter_prefork(tsdn_t *tsdn, counter_accum_t *counter);
void counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter);
void counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter);
#endif /* JEMALLOC_INTERNAL_COUNTER_H */
@@ -42,9 +42,11 @@ typedef struct ctl_arena_stats_s {
uint64_t nfills_small;
uint64_t nflushes_small;
bin_stats_t bstats[SC_NBINS];
bin_stats_data_t bstats[SC_NBINS];
arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
arena_stats_extents_t estats[SC_NPSIZES];
pac_estats_t estats[SC_NPSIZES];
hpa_shard_stats_t hpastats;
sec_stats_t secstats;
} ctl_arena_stats_t;
typedef struct ctl_stats_s {
@@ -96,13 +98,17 @@ typedef struct ctl_arenas_s {
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
int ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
size_t *miblenp);
int ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);
void ctl_mtx_assert_held(tsdn_t *tsdn);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
@@ -131,4 +137,23 @@ void ctl_postfork_child(tsdn_t *tsdn);
} \
} while (0)
#define xmallctlmibnametomib(mib, miblen, name, miblenp) do { \
if (ctl_mibnametomib(tsd_fetch(), mib, miblen, name, miblenp) \
!= 0) { \
malloc_write( \
"<jemalloc>: Failure in ctl_mibnametomib()\n"); \
abort(); \
} \
} while (0)
#define xmallctlbymibname(mib, miblen, name, miblenp, oldp, oldlenp, \
newp, newlen) do { \
if (ctl_bymibname(tsd_fetch(), mib, miblen, name, miblenp, \
oldp, oldlenp, newp, newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in ctl_bymibname()\n"); \
abort(); \
} \
} while (0)
#endif /* JEMALLOC_INTERNAL_CTL_H */
#ifndef JEMALLOC_INTERNAL_DECAY_H
#define JEMALLOC_INTERNAL_DECAY_H
#include "jemalloc/internal/smoothstep.h"
#define DECAY_UNBOUNDED_TIME_TO_PURGE ((uint64_t)-1)
/*
* The decay_t computes the number of pages we should purge at any given time.
* Page allocators inform a decay object when pages enter a decay-able state
* (i.e. dirty or muzzy), and query it to determine how many pages should be
* purged at any given time.
*
* This is mostly a single-threaded data structure and doesn't care about
* synchronization at all; it's the caller's responsibility to manage their
* synchronization on their own. There are two exceptions:
* 1) It's OK to racily call decay_ms_read (i.e. just the simplest state query).
* 2) The mtx and purging fields live (and are initialized) here, but are
* logically owned by the page allocator. This is just a convenience (since
* those fields would be duplicated for both the dirty and muzzy states
* otherwise).
*/
typedef struct decay_s decay_t;
struct decay_s {
/* Synchronizes all non-atomic fields. */
malloc_mutex_t mtx;
/*
* True if a thread is currently purging the extents associated with
* this decay structure.
*/
bool purging;
/*
* Approximate time in milliseconds from the creation of a set of unused
* dirty pages until an equivalent set of unused dirty pages is purged
* and/or reused.
*/
atomic_zd_t time_ms;
/* time / SMOOTHSTEP_NSTEPS. */
nstime_t interval;
/*
* Time at which the current decay interval logically started. We do
* not actually advance to a new epoch until sometime after it starts
* because of scheduling and computation delays, and it is even possible
* to completely skip epochs. In all cases, during epoch advancement we
* merge all relevant activity into the most recently recorded epoch.
*/
nstime_t epoch;
/* Deadline randomness generator. */
uint64_t jitter_state;
/*
* Deadline for current epoch. This is the sum of interval and per
* epoch jitter which is a uniform random variable in [0..interval).
* Epochs always advance by precise multiples of interval, but we
* randomize the deadline to reduce the likelihood of arenas purging in
* lockstep.
*/
nstime_t deadline;
/*
* The number of pages we cap ourselves at in the current epoch, per
* decay policies. Updated on an epoch change. After an epoch change,
* the caller should take steps to try to purge down to this amount.
*/
size_t npages_limit;
/*
* Number of unpurged pages at beginning of current epoch. During epoch
* advancement we use the delta between arena->decay_*.nunpurged and
* ecache_npages_get(&arena->ecache_*) to determine how many dirty pages,
* if any, were generated.
*/
size_t nunpurged;
/*
* Trailing log of how many unused dirty pages were generated during
* each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
* element is the most recent epoch. Corresponding epoch times are
* relative to epoch.
*
* Updated only on epoch advance, triggered by
* decay_maybe_advance_epoch, below.
*/
size_t backlog[SMOOTHSTEP_NSTEPS];
/* Peak number of pages in associated extents. Used for debug only. */
uint64_t ceil_npages;
};
/*
* The current decay time setting. This is the only public access to a decay_t
* that's allowed without holding mtx.
*/
static inline ssize_t
decay_ms_read(const decay_t *decay) {
return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}
/*
* See the comment on the struct field -- the limit on pages we should allow in
* this decay state this epoch.
*/
static inline size_t
decay_npages_limit_get(const decay_t *decay) {
return decay->npages_limit;
}
/* How many unused dirty pages were generated during the last epoch. */
static inline size_t
decay_epoch_npages_delta(const decay_t *decay) {
return decay->backlog[SMOOTHSTEP_NSTEPS - 1];
}
/*
* Current epoch duration, in nanoseconds. Given that new epochs are started
* somewhat haphazardly, this is not necessarily exactly the time between any
* two calls to decay_maybe_advance_epoch; see the comments on fields in the
* decay_t.
*/
static inline uint64_t
decay_epoch_duration_ns(const decay_t *decay) {
return nstime_ns(&decay->interval);
}
static inline bool
decay_immediately(const decay_t *decay) {
ssize_t decay_ms = decay_ms_read(decay);
return decay_ms == 0;
}
static inline bool
decay_disabled(const decay_t *decay) {
ssize_t decay_ms = decay_ms_read(decay);
return decay_ms < 0;
}
/* Returns true if decay is enabled and done gradually. */
static inline bool
decay_gradually(const decay_t *decay) {
ssize_t decay_ms = decay_ms_read(decay);
return decay_ms > 0;
}
/*
* Returns true if the passed in decay time setting is valid.
* < -1 : invalid
* -1 : never decay
* 0 : decay immediately
* > 0 : some positive decay time, up to a maximum allowed value of
* NSTIME_SEC_MAX * 1000, which corresponds to decaying somewhere in the early
* 27th century. By that time, we expect to have implemented alternate purging
* strategies.
*/
bool decay_ms_valid(ssize_t decay_ms);
/*
* As a precondition, the decay_t must be zeroed out (as if with memset).
*
* Returns true on error.
*/
bool decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
/*
* Given an already-initialized decay_t, reinitialize it with the given decay
* time. The decay_t must have previously been initialized (and should not then
* be zeroed).
*/
void decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
/*
* Compute how many of 'npages_new' pages we would need to purge in 'time'.
*/
uint64_t decay_npages_purge_in(decay_t *decay, nstime_t *time,
size_t npages_new);
/* Returns true if the epoch advanced and there are pages to purge. */
bool decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
size_t current_npages);
/*
* Calculates wait time until a number of pages in the interval
* [0.5 * npages_threshold .. 1.5 * npages_threshold] should be purged.
*
* Returns number of nanoseconds or DECAY_UNBOUNDED_TIME_TO_PURGE in case of
* indefinite wait.
*/
uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current,
uint64_t npages_threshold);
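/*
 * Minimal driver sketch (illustrative, not part of this header), assuming a
 * current-time nstime_t and a hypothetical purge_down_to helper: on a purge
 * tick, advance the epoch and purge toward the advertised limit.
 *
 *	if (decay_maybe_advance_epoch(decay, &now, npages_current)) {
 *		purge_down_to(decay_npages_limit_get(decay));
 *	}
 */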
#endif /* JEMALLOC_INTERNAL_DECAY_H */
#ifndef JEMALLOC_INTERNAL_ECACHE_H
#define JEMALLOC_INTERNAL_ECACHE_H
#include "jemalloc/internal/eset.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/mutex.h"
typedef struct ecache_s ecache_t;
struct ecache_s {
malloc_mutex_t mtx;
eset_t eset;
eset_t guarded_eset;
/* All stored extents must be in the same state. */
extent_state_t state;
/* The index of the ehooks the ecache is associated with. */
unsigned ind;
/*
* If true, delay coalescing until eviction; otherwise coalesce during
* deallocation.
*/
bool delay_coalesce;
};
static inline size_t
ecache_npages_get(ecache_t *ecache) {
return eset_npages_get(&ecache->eset) +
eset_npages_get(&ecache->guarded_eset);
}
/* Get the number of extents in the given page size index. */
static inline size_t
ecache_nextents_get(ecache_t *ecache, pszind_t ind) {
return eset_nextents_get(&ecache->eset, ind) +
eset_nextents_get(&ecache->guarded_eset, ind);
}
/* Get the sum total bytes of the extents in the given page size index. */
static inline size_t
ecache_nbytes_get(ecache_t *ecache, pszind_t ind) {
return eset_nbytes_get(&ecache->eset, ind) +
eset_nbytes_get(&ecache->guarded_eset, ind);
}
static inline unsigned
ecache_ind_get(ecache_t *ecache) {
return ecache->ind;
}
bool ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state,
unsigned ind, bool delay_coalesce);
void ecache_prefork(tsdn_t *tsdn, ecache_t *ecache);
void ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache);
void ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache);
#endif /* JEMALLOC_INTERNAL_ECACHE_H */
#ifndef JEMALLOC_INTERNAL_EDATA_H
#define JEMALLOC_INTERNAL_EDATA_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin_info.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/hpdata.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/slab_data.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/typed_list.h"
/*
* sizeof(edata_t) is 128 bytes on 64-bit architectures. Ensure the alignment
* to free up the low bits in the rtree leaf.
*/
#define EDATA_ALIGNMENT 128
enum extent_state_e {
extent_state_active = 0,
extent_state_dirty = 1,
extent_state_muzzy = 2,
extent_state_retained = 3,
extent_state_transition = 4, /* States below are intermediate. */
extent_state_merging = 5,
extent_state_max = 5 /* Sanity checking only. */
};
typedef enum extent_state_e extent_state_t;
enum extent_head_state_e {
EXTENT_NOT_HEAD,
EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */
};
typedef enum extent_head_state_e extent_head_state_t;
/*
* Which implementation of the page allocator interface, (PAI, defined in
* pai.h) owns the given extent?
*/
enum extent_pai_e {
EXTENT_PAI_PAC = 0,
EXTENT_PAI_HPA = 1
};
typedef enum extent_pai_e extent_pai_t;
struct e_prof_info_s {
/* Time when this was allocated. */
nstime_t e_prof_alloc_time;
/* Allocation request size. */
size_t e_prof_alloc_size;
/* Points to a prof_tctx_t. */
atomic_p_t e_prof_tctx;
/*
* Points to a prof_recent_t for the allocation; NULL
* means the recent allocation record no longer exists.
* Protected by prof_recent_alloc_mtx.
*/
atomic_p_t e_prof_recent_alloc;
};
typedef struct e_prof_info_s e_prof_info_t;
/*
* The information about a particular edata that lives in an emap. Space is
* more precious there (the information, plus the edata pointer, has to live in
* a 64-bit word if we want to enable a packed representation).
*
* There are two things that are special about the information here:
* - It's quicker to access. You have one fewer pointer hop, since finding the
* edata_t associated with an item always requires accessing the rtree leaf in
* which this data is stored.
* - It can be read unsynchronized, and without worrying about lifetime issues.
*/
typedef struct edata_map_info_s edata_map_info_t;
struct edata_map_info_s {
bool slab;
szind_t szind;
};
typedef struct edata_cmp_summary_s edata_cmp_summary_t;
struct edata_cmp_summary_s {
uint64_t sn;
uintptr_t addr;
};
/* Extent (span of pages). Use accessor functions for e_* fields. */
typedef struct edata_s edata_t;
ph_structs(edata_avail, edata_t);
ph_structs(edata_heap, edata_t);
struct edata_s {
/*
* Bitfield containing several fields:
*
* a: arena_ind
* b: slab
* c: committed
* p: pai
* z: zeroed
* g: guarded
* t: state
* i: szind
* f: nfree
* s: bin_shard
*
* 00000000 ... 0000ssss ssffffff ffffiiii iiiitttg zpcbaaaa aaaaaaaa
*
* arena_ind: Arena from which this extent came, or all 1 bits if
* unassociated.
*
* slab: The slab flag indicates whether the extent is used for a slab
* of small regions. This helps differentiate small size classes,
* and it indicates whether interior pointers can be looked up via
* iealloc().
*
* committed: The committed flag indicates whether physical memory is
* committed to the extent, whether explicitly or implicitly
* as on a system that overcommits and satisfies physical
* memory needs on demand via soft page faults.
*
* pai: The pai flag is an extent_pai_t.
*
* zeroed: The zeroed flag is used by extent recycling code to track
* whether memory is zero-filled.
*
* guarded: The guarded flag is used by the sanitizer to track whether
* the extent has page guards around it.
*
* state: The state flag is an extent_state_t.
*
* szind: The szind flag indicates usable size class index for
* allocations residing in this extent, regardless of whether the
* extent is a slab. Extent size and usable size often differ
* even for non-slabs, either due to sz_large_pad or promotion of
* sampled small regions.
*
* nfree: Number of free regions in slab.
*
* bin_shard: the shard of the bin from which this extent came.
*/
uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
#define EDATA_BITS_ARENA_SHIFT 0
#define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_SLAB_WIDTH 1
#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_COMMITTED_WIDTH 1
#define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_PAI_WIDTH 1
#define EDATA_BITS_PAI_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_PAI_MASK MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)
#define EDATA_BITS_ZEROED_WIDTH 1
#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
#define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_GUARDED_WIDTH 1
#define EDATA_BITS_GUARDED_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_GUARDED_MASK MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT)
#define EDATA_BITS_STATE_WIDTH 3
#define EDATA_BITS_STATE_SHIFT (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT)
#define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_BINSHARD_WIDTH 6
#define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_IS_HEAD_WIDTH 1
#define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
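/*
 * Worked expansion (illustrative): MASK(3, 12) is
 * ((((uint64_t)0x1U << 3) - 1) << 12) == 0x7000, a 3-bit-wide field mask at
 * bit 12; each field's shift above starts where the previous field's
 * width + shift ends, so the masks tile e_bits without overlap.
 */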
/* Pointer to the extent that this structure is responsible for. */
void *e_addr;
union {
/*
* Extent size and serial number associated with the extent
* structure (different than the serial number for the extent at
* e_addr).
*
* ssssssss [...] ssssssss ssssnnnn nnnnnnnn
*/
size_t e_size_esn;
#define EDATA_SIZE_MASK ((size_t)~(PAGE-1))
#define EDATA_ESN_MASK ((size_t)PAGE-1)
/* Base extent size, which may not be a multiple of PAGE. */
size_t e_bsize;
};
/*
* If this edata is a user allocation from an HPA, it comes out of some
* pageslab (we don't yet support hugepage allocations that don't fit
* into pageslabs). This tracks it.
*/
hpdata_t *e_ps;
/*
* Serial number. These are not necessarily unique; splitting an extent
* results in two extents with the same serial number.
*/
uint64_t e_sn;
union {
/*
* List linkage used when the edata_t is active; either in
* arena's large allocations or bin_t's slabs_full.
*/
ql_elm(edata_t) ql_link_active;
/*
* Pairing heap linkage. Used whenever the extent is inactive
* (in the page allocators), or when it is active and in
* slabs_nonfull, or when the edata_t is unassociated with an
* extent and sitting in an edata_cache.
*/
union {
edata_heap_link_t heap_link;
edata_avail_link_t avail_link;
};
};
union {
/*
* List linkage used when the extent is inactive:
* - Stashed dirty extents
* - Ecache LRU functionality.
*/
ql_elm(edata_t) ql_link_inactive;
/* Small region slab metadata. */
slab_data_t e_slab_data;
/* Profiling data, used for large objects. */
e_prof_info_t e_prof_info;
};
};
TYPED_LIST(edata_list_active, edata_t, ql_link_active)
TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive)
static inline unsigned
edata_arena_ind_get(const edata_t *edata) {
unsigned arena_ind = (unsigned)((edata->e_bits &
EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
assert(arena_ind < MALLOCX_ARENA_LIMIT);
return arena_ind;
}
static inline szind_t
edata_szind_get_maybe_invalid(const edata_t *edata) {
szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
EDATA_BITS_SZIND_SHIFT);
assert(szind <= SC_NSIZES);
return szind;
}
static inline szind_t
edata_szind_get(const edata_t *edata) {
szind_t szind = edata_szind_get_maybe_invalid(edata);
assert(szind < SC_NSIZES); /* Never call when "invalid". */
return szind;
}
static inline size_t
edata_usize_get(const edata_t *edata) {
return sz_index2size(edata_szind_get(edata));
}
static inline unsigned
edata_binshard_get(const edata_t *edata) {
unsigned binshard = (unsigned)((edata->e_bits &
EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
return binshard;
}
static inline uint64_t
edata_sn_get(const edata_t *edata) {
return edata->e_sn;
}
static inline extent_state_t
edata_state_get(const edata_t *edata) {
return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
EDATA_BITS_STATE_SHIFT);
}
static inline bool
edata_guarded_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >>
EDATA_BITS_GUARDED_SHIFT);
}
static inline bool
edata_zeroed_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
EDATA_BITS_ZEROED_SHIFT);
}
static inline bool
edata_committed_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
EDATA_BITS_COMMITTED_SHIFT);
}
static inline extent_pai_t
edata_pai_get(const edata_t *edata) {
return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >>
EDATA_BITS_PAI_SHIFT);
}
static inline bool
edata_slab_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
EDATA_BITS_SLAB_SHIFT);
}
static inline unsigned
edata_nfree_get(const edata_t *edata) {
assert(edata_slab_get(edata));
return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
EDATA_BITS_NFREE_SHIFT);
}
static inline void *
edata_base_get(const edata_t *edata) {
assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
!edata_slab_get(edata));
return PAGE_ADDR2BASE(edata->e_addr);
}
static inline void *
edata_addr_get(const edata_t *edata) {
assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
!edata_slab_get(edata));
return edata->e_addr;
}
static inline size_t
edata_size_get(const edata_t *edata) {
return (edata->e_size_esn & EDATA_SIZE_MASK);
}
static inline size_t
edata_esn_get(const edata_t *edata) {
return (edata->e_size_esn & EDATA_ESN_MASK);
}
static inline size_t
edata_bsize_get(const edata_t *edata) {
return edata->e_bsize;
}
static inline hpdata_t *
edata_ps_get(const edata_t *edata) {
assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
return edata->e_ps;
}
static inline void *
edata_before_get(const edata_t *edata) {
return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
}
static inline void *
edata_last_get(const edata_t *edata) {
return (void *)((uintptr_t)edata_base_get(edata) +
edata_size_get(edata) - PAGE);
}
static inline void *
edata_past_get(const edata_t *edata) {
return (void *)((uintptr_t)edata_base_get(edata) +
edata_size_get(edata));
}
static inline slab_data_t *
edata_slab_data_get(edata_t *edata) {
assert(edata_slab_get(edata));
return &edata->e_slab_data;
}
static inline const slab_data_t *
edata_slab_data_get_const(const edata_t *edata) {
assert(edata_slab_get(edata));
return &edata->e_slab_data;
}
static inline prof_tctx_t *
edata_prof_tctx_get(const edata_t *edata) {
return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx,
ATOMIC_ACQUIRE);
}
static inline const nstime_t *
edata_prof_alloc_time_get(const edata_t *edata) {
return &edata->e_prof_info.e_prof_alloc_time;
}
static inline size_t
edata_prof_alloc_size_get(const edata_t *edata) {
return edata->e_prof_info.e_prof_alloc_size;
}
static inline prof_recent_t *
edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) {
return (prof_recent_t *)atomic_load_p(
&edata->e_prof_info.e_prof_recent_alloc, ATOMIC_RELAXED);
}
static inline void
edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
}
static inline void
edata_binshard_set(edata_t *edata, unsigned binshard) {
/* The assertion assumes szind is set already. */
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
}
static inline void
edata_addr_set(edata_t *edata, void *addr) {
edata->e_addr = addr;
}
static inline void
edata_size_set(edata_t *edata, size_t size) {
assert((size & ~EDATA_SIZE_MASK) == 0);
edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
}
static inline void
edata_esn_set(edata_t *edata, size_t esn) {
edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
EDATA_ESN_MASK);
}
static inline void
edata_bsize_set(edata_t *edata, size_t bsize) {
edata->e_bsize = bsize;
}
static inline void
edata_ps_set(edata_t *edata, hpdata_t *ps) {
assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
edata->e_ps = ps;
}
static inline void
edata_szind_set(edata_t *edata, szind_t szind) {
assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
}
static inline void
edata_nfree_set(edata_t *edata, unsigned nfree) {
assert(edata_slab_get(edata));
edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}
static inline void
edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
/* The assertion assumes szind is set already. */
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
edata->e_bits = (edata->e_bits &
(~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}
static inline void
edata_nfree_inc(edata_t *edata) {
assert(edata_slab_get(edata));
edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}
static inline void
edata_nfree_dec(edata_t *edata) {
assert(edata_slab_get(edata));
edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}
static inline void
edata_nfree_sub(edata_t *edata, uint64_t n) {
assert(edata_slab_get(edata));
edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
}
static inline void
edata_sn_set(edata_t *edata, uint64_t sn) {
edata->e_sn = sn;
}
static inline void
edata_state_set(edata_t *edata, extent_state_t state) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
((uint64_t)state << EDATA_BITS_STATE_SHIFT);
}
static inline void
edata_guarded_set(edata_t *edata, bool guarded) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) |
((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT);
}
static inline void
edata_zeroed_set(edata_t *edata, bool zeroed) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
}
static inline void
edata_committed_set(edata_t *edata, bool committed) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
}
static inline void
edata_pai_set(edata_t *edata, extent_pai_t pai) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) |
((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
}
static inline void
edata_slab_set(edata_t *edata, bool slab) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
}
static inline void
edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
atomic_store_p(&edata->e_prof_info.e_prof_tctx, tctx, ATOMIC_RELEASE);
}
static inline void
edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
nstime_copy(&edata->e_prof_info.e_prof_alloc_time, t);
}
static inline void
edata_prof_alloc_size_set(edata_t *edata, size_t size) {
edata->e_prof_info.e_prof_alloc_size = size;
}
static inline void
edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata,
prof_recent_t *recent_alloc) {
atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc,
ATOMIC_RELAXED);
}
static inline bool
edata_is_head_get(edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
EDATA_BITS_IS_HEAD_SHIFT);
}
static inline void
edata_is_head_set(edata_t *edata, bool is_head) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
}
static inline bool
edata_state_in_transition(extent_state_t state) {
return state >= extent_state_transition;
}
/*
* Because this function is implemented as a sequence of bitfield modifications,
* even though each individual bit is properly initialized, we technically read
* uninitialized data within it. This is mostly fine, since most callers get
* their edatas from zeroing sources, but callers who make stack edata_ts need
* to manually zero them.
*/
static inline void
edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
bool slab, szind_t szind, uint64_t sn, extent_state_t state, bool zeroed,
bool committed, extent_pai_t pai, extent_head_state_t is_head) {
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
edata_arena_ind_set(edata, arena_ind);
edata_addr_set(edata, addr);
edata_size_set(edata, size);
edata_slab_set(edata, slab);
edata_szind_set(edata, szind);
edata_sn_set(edata, sn);
edata_state_set(edata, state);
edata_guarded_set(edata, false);
edata_zeroed_set(edata, zeroed);
edata_committed_set(edata, committed);
edata_pai_set(edata, pai);
edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
if (config_prof) {
edata_prof_tctx_set(edata, NULL);
}
}
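/*
* Illustrative sketch (hypothetical helper, not part of the header): per the
* caveat above, a stack-allocated edata_t must be zeroed manually before
* edata_init. All argument values below are placeholders; memset comes from
* <string.h>.
*/
static inline void
edata_example_stack_init(void *addr, size_t size) {
edata_t tmp;
/* Mandatory for stack edata_t; see the comment above edata_init. */
memset(&tmp, 0, sizeof(tmp));
edata_init(&tmp, /* arena_ind */ 0, addr, size, /* slab */ false,
SC_NSIZES, /* sn */ 0, extent_state_active, /* zeroed */ false,
/* committed */ true, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
}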
static inline void
edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) {
edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
edata_addr_set(edata, addr);
edata_bsize_set(edata, bsize);
edata_slab_set(edata, false);
edata_szind_set(edata, SC_NSIZES);
edata_sn_set(edata, sn);
edata_state_set(edata, extent_state_active);
edata_guarded_set(edata, false);
edata_zeroed_set(edata, true);
edata_committed_set(edata, true);
/*
* This isn't strictly true, but base-allocated extents never get
* deallocated and can't be looked up in the emap, so there's no sense
* in wasting a state bit to encode this fact.
*/
edata_pai_set(edata, EXTENT_PAI_PAC);
}
static inline int
edata_esn_comp(const edata_t *a, const edata_t *b) {
size_t a_esn = edata_esn_get(a);
size_t b_esn = edata_esn_get(b);
return (a_esn > b_esn) - (a_esn < b_esn);
}
static inline int
edata_ead_comp(const edata_t *a, const edata_t *b) {
uintptr_t a_eaddr = (uintptr_t)a;
uintptr_t b_eaddr = (uintptr_t)b;
return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
}
static inline edata_cmp_summary_t
edata_cmp_summary_get(const edata_t *edata) {
return (edata_cmp_summary_t){edata_sn_get(edata),
(uintptr_t)edata_addr_get(edata)};
}
static inline int
edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) {
int ret;
ret = (a.sn > b.sn) - (a.sn < b.sn);
if (ret != 0) {
return ret;
}
ret = (a.addr > b.addr) - (a.addr < b.addr);
return ret;
}
static inline int
edata_snad_comp(const edata_t *a, const edata_t *b) {
edata_cmp_summary_t a_cmp = edata_cmp_summary_get(a);
edata_cmp_summary_t b_cmp = edata_cmp_summary_get(b);
return edata_cmp_summary_comp(a_cmp, b_cmp);
}
static inline int
edata_esnead_comp(const edata_t *a, const edata_t *b) {
int ret;
ret = edata_esn_comp(a, b);
if (ret != 0) {
return ret;
}
ret = edata_ead_comp(a, b);
return ret;
}
ph_proto(, edata_avail, edata_t)
ph_proto(, edata_heap, edata_t)
#endif /* JEMALLOC_INTERNAL_EDATA_H */
#ifndef JEMALLOC_INTERNAL_EDATA_CACHE_H
#define JEMALLOC_INTERNAL_EDATA_CACHE_H
#include "jemalloc/internal/base.h"
/* For tests only. */
#define EDATA_CACHE_FAST_FILL 4
/*
* A cache of edata_t structures allocated via base_alloc_edata (as opposed to
* the underlying extents they describe). The contents of returned edata_t
* objects are garbage and cannot be relied upon.
*/
typedef struct edata_cache_s edata_cache_t;
struct edata_cache_s {
edata_avail_t avail;
atomic_zu_t count;
malloc_mutex_t mtx;
base_t *base;
};
bool edata_cache_init(edata_cache_t *edata_cache, base_t *base);
edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache);
void edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata);
void edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache);
void edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache);
void edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache);
/*
* An edata_cache_fast is like an edata_cache, but it relies on external
* synchronization and avoids first-fit strategies.
*/
typedef struct edata_cache_fast_s edata_cache_fast_t;
struct edata_cache_fast_s {
edata_list_inactive_t list;
edata_cache_t *fallback;
bool disabled;
};
void edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback);
edata_t *edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs);
void edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs,
edata_t *edata);
void edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs);
#endif /* JEMALLOC_INTERNAL_EDATA_CACHE_H */
#ifndef JEMALLOC_INTERNAL_EHOOKS_H
#define JEMALLOC_INTERNAL_EHOOKS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/extent_mmap.h"
/*
* This module is the internal interface to the extent hooks (both
* user-specified and external). Eventually, this will give us the flexibility
* to use multiple different versions of user-visible extent-hook APIs under a
* single user interface.
*
* Current API expansions (not available to anyone but the default hooks yet):
* - Head state tracking. Hooks can decide whether or not to merge two
* extents based on whether or not one of them is the head (i.e. was
* allocated on its own). The higher-addressed extent loses its "head" status.
*/
extern const extent_hooks_t ehooks_default_extent_hooks;
typedef struct ehooks_s ehooks_t;
struct ehooks_s {
/*
* The user-visible id that goes with the ehooks (i.e. that of the base
* they're a part of, the associated arena's index within the arenas
* array).
*/
unsigned ind;
/* Logically an extent_hooks_t *. */
atomic_p_t ptr;
};
extern const extent_hooks_t ehooks_default_extent_hooks;
/*
* These are not really part of the public API. Each hook has a fast-path for
* the default-hooks case that can avoid various small inefficiencies:
* - Forgetting tsd and then calling tsd_get within the hook.
* - Getting more state than necessary out of the extent_t.
* - Doing arena_ind -> arena -> arena_ind lookups.
* By making the calls to these functions visible to the compiler, it can move
* those extra bits of computation down below the fast-paths where they get ignored.
*/
void *ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
bool ehooks_default_dalloc_impl(void *addr, size_t size);
void ehooks_default_destroy_impl(void *addr, size_t size);
bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length);
bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length);
#ifdef PAGES_CAN_PURGE_LAZY
bool ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length);
#endif
#ifdef PAGES_CAN_PURGE_FORCED
bool ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length);
#endif
bool ehooks_default_split_impl();
/*
* Merge is the only default extent hook we declare -- see the comment in
* ehooks_merge.
*/
bool ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a,
size_t size_a, void *addr_b, size_t size_b, bool committed,
unsigned arena_ind);
bool ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b);
void ehooks_default_zero_impl(void *addr, size_t size);
void ehooks_default_guard_impl(void *guard1, void *guard2);
void ehooks_default_unguard_impl(void *guard1, void *guard2);
/*
* We don't officially support reentrancy from within the extent hooks. But
* various people who sit within throwing distance of the jemalloc team want
* that functionality in certain limited cases. The default reentrancy guards
* assert that we're not reentrant from a0 (since it's the bootstrap arena,
* where reentrant allocations would be redirected), which we would incorrectly
* trigger in cases where a0 has extent hooks (those hooks themselves can't be
* reentrant, then, but there are reasonable uses for such functionality, like
* putting internal metadata on hugepages). Therefore, we use the raw
* reentrancy guards.
*
* Eventually, we need to think more carefully about whether and where we
* support allocating from within extent hooks (and what that means for things
* like profiling, stats collection, etc.), and document what the guarantee is.
*/
static inline void
ehooks_pre_reentrancy(tsdn_t *tsdn) {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
tsd_pre_reentrancy_raw(tsd);
}
static inline void
ehooks_post_reentrancy(tsdn_t *tsdn) {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
tsd_post_reentrancy_raw(tsd);
}
/* Beginning of the public API. */
void ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind);
static inline unsigned
ehooks_ind_get(const ehooks_t *ehooks) {
return ehooks->ind;
}
static inline void
ehooks_set_extent_hooks_ptr(ehooks_t *ehooks, extent_hooks_t *extent_hooks) {
atomic_store_p(&ehooks->ptr, extent_hooks, ATOMIC_RELEASE);
}
static inline extent_hooks_t *
ehooks_get_extent_hooks_ptr(ehooks_t *ehooks) {
return (extent_hooks_t *)atomic_load_p(&ehooks->ptr, ATOMIC_ACQUIRE);
}
static inline bool
ehooks_are_default(ehooks_t *ehooks) {
return ehooks_get_extent_hooks_ptr(ehooks) ==
&ehooks_default_extent_hooks;
}
/*
* In some cases, a caller needs to allocate resources before attempting to call
* a hook. If that hook is doomed to fail, this is wasteful. We therefore
* include some checks for such cases.
*/
static inline bool
ehooks_dalloc_will_fail(ehooks_t *ehooks) {
if (ehooks_are_default(ehooks)) {
return opt_retain;
} else {
return ehooks_get_extent_hooks_ptr(ehooks)->dalloc == NULL;
}
}
static inline bool
ehooks_split_will_fail(ehooks_t *ehooks) {
return ehooks_get_extent_hooks_ptr(ehooks)->split == NULL;
}
static inline bool
ehooks_merge_will_fail(ehooks_t *ehooks) {
return ehooks_get_extent_hooks_ptr(ehooks)->merge == NULL;
}
static inline bool
ehooks_guard_will_fail(ehooks_t *ehooks) {
/*
* Before the guard hooks are officially introduced, limit the use to
* the default hooks only.
*/
return !ehooks_are_default(ehooks);
}
/*
* Some hooks are required to return zeroed memory in certain situations. In
* debug mode, we do some heuristic checks that they did what they were supposed
* to.
*
* This isn't really ehooks-specific (i.e. anyone can check for zeroed memory).
* But incorrect zero information indicates an ehook bug.
*/
static inline void
ehooks_debug_zero_check(void *addr, size_t size) {
assert(((uintptr_t)addr & PAGE_MASK) == 0);
assert((size & PAGE_MASK) == 0);
assert(size > 0);
if (config_debug) {
/* Check the whole first page. */
size_t *p = (size_t *)addr;
for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
assert(p[i] == 0);
}
/*
* And 4 spots within. There's a tradeoff here; the larger
* this number, the more likely it is that we'll catch a bug
* where ehooks return a sparsely non-zero range. But
* increasing the number of checks also increases the number of
* page faults in debug mode. FreeBSD does much of their
* day-to-day development work in debug mode, so we don't want
* even the debug builds to be too slow.
*/
const size_t nchecks = 4;
assert(PAGE >= sizeof(size_t) * nchecks);
for (size_t i = 0; i < nchecks; ++i) {
assert(p[i * (size / sizeof(size_t) / nchecks)] == 0);
}
}
}
static inline void *
ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit) {
bool orig_zero = *zero;
void *ret;
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
ret = ehooks_default_alloc_impl(tsdn, new_addr, size,
alignment, zero, commit, ehooks_ind_get(ehooks));
} else {
ehooks_pre_reentrancy(tsdn);
ret = extent_hooks->alloc(extent_hooks, new_addr, size,
alignment, zero, commit, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
}
assert(new_addr == NULL || ret == NULL || new_addr == ret);
assert(!orig_zero || *zero);
if (*zero && ret != NULL) {
ehooks_debug_zero_check(ret, size);
}
return ret;
}
static inline bool
ehooks_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
bool committed) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
return ehooks_default_dalloc_impl(addr, size);
} else if (extent_hooks->dalloc == NULL) {
return true;
} else {
ehooks_pre_reentrancy(tsdn);
bool err = extent_hooks->dalloc(extent_hooks, addr, size,
committed, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
return err;
}
}
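/*
* Illustrative sketch (hypothetical caller, not part of the header): per the
* comment above ehooks_dalloc_will_fail, a caller that must set up resources
* before deallocating can bail out early when the hook is doomed to fail.
*/
static inline bool
ehooks_example_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
bool committed) {
if (ehooks_dalloc_will_fail(ehooks)) {
return true; /* Skip any expensive preparation. */
}
/* ... prepare whatever bookkeeping the dalloc path needs here ... */
return ehooks_dalloc(tsdn, ehooks, addr, size, committed);
}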
static inline void
ehooks_destroy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
bool committed) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
ehooks_default_destroy_impl(addr, size);
} else if (extent_hooks->destroy == NULL) {
/* Do nothing. */
} else {
ehooks_pre_reentrancy(tsdn);
extent_hooks->destroy(extent_hooks, addr, size, committed,
ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
}
}
static inline bool
ehooks_commit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
size_t offset, size_t length) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
bool err;
if (extent_hooks == &ehooks_default_extent_hooks) {
err = ehooks_default_commit_impl(addr, offset, length);
} else if (extent_hooks->commit == NULL) {
err = true;
} else {
ehooks_pre_reentrancy(tsdn);
err = extent_hooks->commit(extent_hooks, addr, size,
offset, length, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
}
if (!err) {
ehooks_debug_zero_check(addr, size);
}
return err;
}
static inline bool
ehooks_decommit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
size_t offset, size_t length) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
return ehooks_default_decommit_impl(addr, offset, length);
} else if (extent_hooks->decommit == NULL) {
return true;
} else {
ehooks_pre_reentrancy(tsdn);
bool err = extent_hooks->decommit(extent_hooks, addr, size,
offset, length, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
return err;
}
}
static inline bool
ehooks_purge_lazy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
size_t offset, size_t length) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
#ifdef PAGES_CAN_PURGE_LAZY
if (extent_hooks == &ehooks_default_extent_hooks) {
return ehooks_default_purge_lazy_impl(addr, offset, length);
}
#endif
if (extent_hooks->purge_lazy == NULL) {
return true;
} else {
ehooks_pre_reentrancy(tsdn);
bool err = extent_hooks->purge_lazy(extent_hooks, addr, size,
offset, length, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
return err;
}
}
static inline bool
ehooks_purge_forced(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
size_t offset, size_t length) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
/*
* It would be correct to have a ehooks_debug_zero_check call at the end
* of this function; purge_forced is required to zero. But checking
* would touch the page in question, which may have performance
* consequences (imagine the hooks are using hugepages, with a global
* zero page off). Even in debug mode, it's usually a good idea to
* avoid cases that can dramatically increase memory consumption.
*/
#ifdef PAGES_CAN_PURGE_FORCED
if (extent_hooks == &ehooks_default_extent_hooks) {
return ehooks_default_purge_forced_impl(addr, offset, length);
}
#endif
if (extent_hooks->purge_forced == NULL) {
return true;
} else {
ehooks_pre_reentrancy(tsdn);
bool err = extent_hooks->purge_forced(extent_hooks, addr, size,
offset, length, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
return err;
}
}
static inline bool
ehooks_split(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
size_t size_a, size_t size_b, bool committed) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (ehooks_are_default(ehooks)) {
return ehooks_default_split_impl();
} else if (extent_hooks->split == NULL) {
return true;
} else {
ehooks_pre_reentrancy(tsdn);
bool err = extent_hooks->split(extent_hooks, addr, size, size_a,
size_b, committed, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
return err;
}
}
static inline bool
ehooks_merge(tsdn_t *tsdn, ehooks_t *ehooks, void *addr_a, size_t size_a,
void *addr_b, size_t size_b, bool committed) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
} else if (extent_hooks->merge == NULL) {
return true;
} else {
ehooks_pre_reentrancy(tsdn);
bool err = extent_hooks->merge(extent_hooks, addr_a, size_a,
addr_b, size_b, committed, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
return err;
}
}
static inline void
ehooks_zero(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
ehooks_default_zero_impl(addr, size);
} else {
/*
* It would be correct to try using the user-provided purge
* hooks (since they are required to have zeroed the extent if
* they indicate success), but we don't necessarily know their
* cost. We'll be conservative and use memset.
*/
memset(addr, 0, size);
}
}
static inline bool
ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
bool err;
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
ehooks_default_guard_impl(guard1, guard2);
err = false;
} else {
err = true;
}
return err;
}
static inline bool
ehooks_unguard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
bool err;
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
ehooks_default_unguard_impl(guard1, guard2);
err = false;
} else {
err = true;
}
return err;
}
#endif /* JEMALLOC_INTERNAL_EHOOKS_H */
#ifndef JEMALLOC_INTERNAL_EMAP_H
#define JEMALLOC_INTERNAL_EMAP_H
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
/*
* Note: ends without a semicolon, so that
* EMAP_DECLARE_RTREE_CTX;
* in uses will avoid empty-statement warnings.
*/
#define EMAP_DECLARE_RTREE_CTX \
rtree_ctx_t rtree_ctx_fallback; \
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback)
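/*
* Usage sketch: the use site supplies the trailing semicolon, as in
* emap_edata_lookup further below:
*
* EMAP_DECLARE_RTREE_CTX;
* rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
* (uintptr_t)ptr);
*/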
typedef struct emap_s emap_t;
struct emap_s {
rtree_t rtree;
};
/* Used to pass rtree lookup context down the path. */
typedef struct emap_alloc_ctx_t emap_alloc_ctx_t;
struct emap_alloc_ctx_t {
szind_t szind;
bool slab;
};
typedef struct emap_full_alloc_ctx_s emap_full_alloc_ctx_t;
struct emap_full_alloc_ctx_s {
szind_t szind;
bool slab;
edata_t *edata;
};
bool emap_init(emap_t *emap, base_t *base, bool zeroed);
void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
bool slab);
void emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_state_t state);
/*
* The two acquire functions below allow accessing neighbor edatas, if it's safe
* and valid to do so (i.e. from the same arena, of the same state, etc.). This
* is necessary because the ecache locks are state-based, and only protect
* edatas with the same state. Therefore the neighbor edata's state needs to be
* verified first, before chasing the edata pointer. The returned edata will be
* in an acquired state, meaning other threads will be prevented from accessing
* it, even if technically the edata can still be discovered from the rtree.
*
* This means, at any moment when holding pointers to edata, either one of the
* state-based locks is held (and the edatas are all in the protected state), or
* the edatas are in an acquired state (e.g. in active or merging state). The
* acquire operation itself (changing the edata to an acquired state) is done
* under the state locks.
*/
edata_t *emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap,
edata_t *edata, extent_pai_t pai, extent_state_t expected_state,
bool forward);
edata_t *emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
edata_t *edata, extent_pai_t pai, extent_state_t expected_state);
void emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_state_t new_state);
/*
* Associate the given edata with its beginning and end address, setting the
* szind and slab info appropriately.
* Returns true on error (i.e. resource exhaustion).
*/
bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
szind_t szind, bool slab);
/*
* Does the same thing, but with the interior of the range, for slab
* allocations.
*
* You might wonder why we don't just have a single emap_register function that
* does both depending on the value of 'slab'. The answer is twofold:
* - As a practical matter, in places like the extract->split->commit pathway,
* we defer the interior operation until we're sure that the commit won't fail
* (but we have to register the split boundaries there).
* - In general, we're trying to move to a world where the page-specific
* allocator doesn't know as much about how the pages it allocates will be
* used, and passing a 'slab' parameter everywhere makes that more
* complicated.
*
* Unlike the boundary version, this function can't fail; this is because slabs
* can't get big enough to touch a new page that neither of the boundaries
* touched, so no allocation is necessary to fill the interior once the boundary
* has been touched.
*/
void emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
szind_t szind);
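/*
* Illustrative call sequence (hypothetical helper, not part of the header): a
* new slab registers its boundary first (which can fail), and fills in the
* interior only once the slab is certain to be used.
*/
static inline bool
emap_example_register_slab(tsdn_t *tsdn, emap_t *emap, edata_t *slab_edata,
szind_t szind) {
if (emap_register_boundary(tsdn, emap, slab_edata, szind, /* slab */ true)) {
return true; /* Resource exhaustion. */
}
/* Can't fail once the boundaries have been registered. */
emap_register_interior(tsdn, emap, slab_edata, szind);
return false;
}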
void emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
void emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
typedef struct emap_prepare_s emap_prepare_t;
struct emap_prepare_s {
rtree_leaf_elm_t *lead_elm_a;
rtree_leaf_elm_t *lead_elm_b;
rtree_leaf_elm_t *trail_elm_a;
rtree_leaf_elm_t *trail_elm_b;
};
/*
* These functions do the emap metadata management for merging, splitting, and
* reusing extents. In particular, they set the boundary mappings from
* addresses to edatas. If the result is going to be used as a slab, you
* still need to call emap_register_interior on it, though.
*
* Remap simply changes the szind and slab status of an extent's boundary
* mappings. If the extent is not a slab, it doesn't bother with updating the
* end mapping (since lookups only occur in the interior of an extent for
* slabs). Since the szind and slab status only make sense for active extents,
* this should only be called while activating or deactivating an extent.
*
* Split and merge have a "prepare" and a "commit" portion. The prepare portion
* does the operations that can be done without exclusive access to the extent
* in question, while the commit variant requires exclusive access to maintain
* the emap invariants. The only function that can fail is emap_split_prepare,
* and it returns true on failure (at which point the caller shouldn't commit).
*
* In all cases, "lead" refers to the lower-addressed extent, and trail to the
* higher-addressed one. It's the caller's responsibility to set the edata
* state appropriately.
*/
bool emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *edata, size_t size_a, edata_t *trail, size_t size_b);
void emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, size_t size_a, edata_t *trail, size_t size_b);
void emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, edata_t *trail);
void emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, edata_t *trail);
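/*
* Illustrative split flow (hypothetical helper, not part of the header):
* prepare is fallible and needs no exclusive access to the extent; commit is
* infallible but requires exclusive access to both resulting extents.
*/
static inline bool
emap_example_split(tsdn_t *tsdn, emap_t *emap, edata_t *lead, size_t size_a,
edata_t *trail, size_t size_b) {
emap_prepare_t prepare;
if (emap_split_prepare(tsdn, emap, &prepare, lead, size_a, trail, size_b)) {
return true; /* The caller must not commit after a failed prepare. */
}
/* Exclusive access to lead and trail is required from here on. */
emap_split_commit(tsdn, emap, &prepare, lead, size_a, trail, size_b);
return false;
}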
/* Assert that the emap's view of the given edata matches the edata's view. */
void emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
static inline void
emap_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
if (config_debug) {
emap_do_assert_mapped(tsdn, emap, edata);
}
}
/* Assert that the given edata isn't in the map. */
void emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
static inline void
emap_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
if (config_debug) {
emap_do_assert_not_mapped(tsdn, emap, edata);
}
}
JEMALLOC_ALWAYS_INLINE bool
emap_edata_in_transition(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
assert(config_debug);
emap_assert_mapped(tsdn, emap, edata);
EMAP_DECLARE_RTREE_CTX;
rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata));
return edata_state_in_transition(contents.metadata.state);
}
JEMALLOC_ALWAYS_INLINE bool
emap_edata_is_acquired(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
if (!config_debug) {
/* For assertions only. */
return false;
}
/*
* The edata is considered acquired if no other threads will attempt to
* read / write any fields from it. This includes a few cases:
*
* 1) edata not hooked into emap yet -- This implies the edata just got
* allocated or initialized.
*
* 2) in an active or transition state -- In both cases, the edata can
* be discovered from the emap; however, the state tracked in the rtree
* will prevent other threads from accessing the actual edata.
*/
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
/* init_missing */ false);
if (elm == NULL) {
return true;
}
rtree_contents_t contents = rtree_leaf_elm_read(tsdn, &emap->rtree, elm,
/* dependent */ true);
if (contents.edata == NULL ||
contents.metadata.state == extent_state_active ||
edata_state_in_transition(contents.metadata.state)) {
return true;
}
return false;
}
JEMALLOC_ALWAYS_INLINE void
extent_assert_can_coalesce(const edata_t *inner, const edata_t *outer) {
assert(edata_arena_ind_get(inner) == edata_arena_ind_get(outer));
assert(edata_pai_get(inner) == edata_pai_get(outer));
assert(edata_committed_get(inner) == edata_committed_get(outer));
assert(edata_state_get(inner) == extent_state_active);
assert(edata_state_get(outer) == extent_state_merging);
assert(!edata_guarded_get(inner) && !edata_guarded_get(outer));
assert(edata_base_get(inner) == edata_past_get(outer) ||
edata_base_get(outer) == edata_past_get(inner));
}
JEMALLOC_ALWAYS_INLINE void
extent_assert_can_expand(const edata_t *original, const edata_t *expand) {
assert(edata_arena_ind_get(original) == edata_arena_ind_get(expand));
assert(edata_pai_get(original) == edata_pai_get(expand));
assert(edata_state_get(original) == extent_state_active);
assert(edata_state_get(expand) == extent_state_merging);
assert(edata_past_get(original) == edata_base_get(expand));
}
JEMALLOC_ALWAYS_INLINE edata_t *
emap_edata_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) {
EMAP_DECLARE_RTREE_CTX;
return rtree_read(tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr).edata;
}
/* Fills in alloc_ctx with the info in the map. */
JEMALLOC_ALWAYS_INLINE void
emap_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
emap_alloc_ctx_t *alloc_ctx) {
EMAP_DECLARE_RTREE_CTX;
rtree_metadata_t metadata = rtree_metadata_read(tsdn, &emap->rtree,
rtree_ctx, (uintptr_t)ptr);
alloc_ctx->szind = metadata.szind;
alloc_ctx->slab = metadata.slab;
}
/* The pointer must be mapped. */
JEMALLOC_ALWAYS_INLINE void
emap_full_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
emap_full_alloc_ctx_t *full_alloc_ctx) {
EMAP_DECLARE_RTREE_CTX;
rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)ptr);
full_alloc_ctx->edata = contents.edata;
full_alloc_ctx->szind = contents.metadata.szind;
full_alloc_ctx->slab = contents.metadata.slab;
}
/*
* The pointer is allowed to not be mapped.
*
* Returns true when the pointer is not present.
*/
JEMALLOC_ALWAYS_INLINE bool
emap_full_alloc_ctx_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
emap_full_alloc_ctx_t *full_alloc_ctx) {
EMAP_DECLARE_RTREE_CTX;
rtree_contents_t contents;
bool err = rtree_read_independent(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)ptr, &contents);
if (err) {
return true;
}
full_alloc_ctx->edata = contents.edata;
full_alloc_ctx->szind = contents.metadata.szind;
full_alloc_ctx->slab = contents.metadata.slab;
return false;
}
/*
* Only used on the fast path of free. Returns true when the lookup cannot be
* fulfilled by the fast path, e.g. when the metadata key is not cached.
*/
JEMALLOC_ALWAYS_INLINE bool
emap_alloc_ctx_try_lookup_fast(tsd_t *tsd, emap_t *emap, const void *ptr,
emap_alloc_ctx_t *alloc_ctx) {
/* Use the unsafe getter since this may get called during exit. */
rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get_unsafe(tsd);
rtree_metadata_t metadata;
bool err = rtree_metadata_try_read_fast(tsd_tsdn(tsd), &emap->rtree,
rtree_ctx, (uintptr_t)ptr, &metadata);
if (err) {
return true;
}
alloc_ctx->szind = metadata.szind;
alloc_ctx->slab = metadata.slab;
return false;
}
/*
* We want to do batch lookups out of the cache bins, which use
* cache_bin_ptr_array_get to access the i'th element of the bin (since they
* invert the usual ordering in deciding what to flush). This lets the emap avoid
* caring about its caller's ordering.
*/
typedef const void *(*emap_ptr_getter)(void *ctx, size_t ind);
/*
* This allows size-checking assertions, which we can only do while we're in the
* process of edata lookups.
*/
typedef void (*emap_metadata_visitor)(void *ctx, emap_full_alloc_ctx_t *alloc_ctx);
typedef union emap_batch_lookup_result_u emap_batch_lookup_result_t;
union emap_batch_lookup_result_u {
edata_t *edata;
rtree_leaf_elm_t *rtree_leaf;
};
JEMALLOC_ALWAYS_INLINE void
emap_edata_lookup_batch(tsd_t *tsd, emap_t *emap, size_t nptrs,
emap_ptr_getter ptr_getter, void *ptr_getter_ctx,
emap_metadata_visitor metadata_visitor, void *metadata_visitor_ctx,
emap_batch_lookup_result_t *result) {
/* Avoids null-checking tsdn in the loop below. */
util_assume(tsd != NULL);
rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get(tsd);
for (size_t i = 0; i < nptrs; i++) {
const void *ptr = ptr_getter(ptr_getter_ctx, i);
/*
* Reuse the edatas array as a temp buffer, lying a little about
* the types.
*/
result[i].rtree_leaf = rtree_leaf_elm_lookup(tsd_tsdn(tsd),
&emap->rtree, rtree_ctx, (uintptr_t)ptr,
/* dependent */ true, /* init_missing */ false);
}
for (size_t i = 0; i < nptrs; i++) {
rtree_leaf_elm_t *elm = result[i].rtree_leaf;
rtree_contents_t contents = rtree_leaf_elm_read(tsd_tsdn(tsd),
&emap->rtree, elm, /* dependent */ true);
result[i].edata = contents.edata;
emap_full_alloc_ctx_t alloc_ctx;
/*
* Not all these fields are read in practice by the metadata
* visitor. But the compiler can easily optimize away the ones
* that aren't, so no sense in being incomplete.
*/
alloc_ctx.szind = contents.metadata.szind;
alloc_ctx.slab = contents.metadata.slab;
alloc_ctx.edata = contents.edata;
metadata_visitor(metadata_visitor_ctx, &alloc_ctx);
}
}
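/*
* Illustrative adapters (hypothetical, not part of the header): a caller whose
* nptrs pointers live in a plain array can plug into the batch interface with
* emap_edata_lookup_batch(tsd, emap, nptrs, emap_example_ptr_getter, ptrs,
* emap_example_metadata_visitor, NULL, result).
*/
static inline const void *
emap_example_ptr_getter(void *ctx, size_t ind) {
return ((const void **)ctx)[ind];
}
static inline void
emap_example_metadata_visitor(void *ctx, emap_full_alloc_ctx_t *alloc_ctx) {
(void)ctx;
/* E.g. check that every looked-up pointer maps to a live extent. */
assert(alloc_ctx->edata != NULL);
}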
#endif /* JEMALLOC_INTERNAL_EMAP_H */
......@@ -6,6 +6,7 @@
typedef enum emitter_output_e emitter_output_t;
enum emitter_output_e {
emitter_output_json,
emitter_output_json_compact,
emitter_output_table
};
......@@ -21,6 +22,7 @@ typedef enum emitter_type_e emitter_type_t;
enum emitter_type_e {
emitter_type_bool,
emitter_type_int,
emitter_type_int64,
emitter_type_unsigned,
emitter_type_uint32,
emitter_type_uint64,
......@@ -66,7 +68,7 @@ typedef struct emitter_s emitter_t;
struct emitter_s {
emitter_output_t output;
/* The output information. */
void (*write_cb)(void *, const char *);
write_cb_t *write_cb;
void *cbopaque;
int nesting_depth;
/* True if we've already emitted a value at the given depth. */
......@@ -75,6 +77,12 @@ struct emitter_s {
bool emitted_key;
};
static inline bool
emitter_outputs_json(emitter_t *emitter) {
return emitter->output == emitter_output_json ||
emitter->output == emitter_output_json_compact;
}
/* Internal convenience function. Write to the emitter the given string. */
JEMALLOC_FORMAT_PRINTF(2, 3)
static inline void
......@@ -135,13 +143,16 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
switch (value_type) {
case emitter_type_bool:
emitter_printf(emitter,
emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width),
*(const bool *)value ? "true" : "false");
break;
case emitter_type_int:
EMIT_SIMPLE(int, "%d")
break;
case emitter_type_int64:
EMIT_SIMPLE(int64_t, "%" FMTd64)
break;
case emitter_type_unsigned:
EMIT_SIMPLE(unsigned, "%u")
break;
......@@ -159,7 +170,7 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
* anywhere near the fmt size.
*/
assert(str_written < BUF_SIZE);
emitter_printf(emitter,
emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), buf);
break;
case emitter_type_uint32:
......@@ -196,6 +207,7 @@ static inline void
emitter_indent(emitter_t *emitter) {
int amount = emitter->nesting_depth;
const char *indent_str;
assert(emitter->output != emitter_output_json_compact);
if (emitter->output == emitter_output_json) {
indent_str = "\t";
} else {
......@@ -209,12 +221,18 @@ emitter_indent(emitter_t *emitter) {
static inline void
emitter_json_key_prefix(emitter_t *emitter) {
assert(emitter_outputs_json(emitter));
if (emitter->emitted_key) {
emitter->emitted_key = false;
return;
}
emitter_printf(emitter, "%s\n", emitter->item_at_depth ? "," : "");
emitter_indent(emitter);
if (emitter->item_at_depth) {
emitter_printf(emitter, ",");
}
if (emitter->output != emitter_output_json_compact) {
emitter_printf(emitter, "\n");
emitter_indent(emitter);
}
}
/******************************************************************************/
......@@ -222,27 +240,28 @@ emitter_json_key_prefix(emitter_t *emitter) {
static inline void
emitter_init(emitter_t *emitter, emitter_output_t emitter_output,
void (*write_cb)(void *, const char *), void *cbopaque) {
write_cb_t *write_cb, void *cbopaque) {
emitter->output = emitter_output;
emitter->write_cb = write_cb;
emitter->cbopaque = cbopaque;
emitter->item_at_depth = false;
emitter->emitted_key = false;
emitter->nesting_depth = 0;
}
/******************************************************************************/
/* JSON public API. */
/*
* Emits a key (e.g. as appears in an object). The next json entity emitted will
* be the corresponding value.
*/
static inline void
emitter_json_key(emitter_t *emitter, const char *json_key) {
if (emitter->output == emitter_output_json) {
if (emitter_outputs_json(emitter)) {
emitter_json_key_prefix(emitter);
emitter_printf(emitter, "\"%s\": ", json_key);
emitter_printf(emitter, "\"%s\":%s", json_key,
emitter->output == emitter_output_json_compact ? "" : " ");
emitter->emitted_key = true;
}
}
......@@ -250,7 +269,7 @@ emitter_json_key(emitter_t *emitter, const char *json_key) {
static inline void
emitter_json_value(emitter_t *emitter, emitter_type_t value_type,
const void *value) {
if (emitter->output == emitter_output_json) {
if (emitter_outputs_json(emitter)) {
emitter_json_key_prefix(emitter);
emitter_print_value(emitter, emitter_justify_none, -1,
value_type, value);
......@@ -268,7 +287,7 @@ emitter_json_kv(emitter_t *emitter, const char *json_key,
static inline void
emitter_json_array_begin(emitter_t *emitter) {
if (emitter->output == emitter_output_json) {
if (emitter_outputs_json(emitter)) {
emitter_json_key_prefix(emitter);
emitter_printf(emitter, "[");
emitter_nest_inc(emitter);
......@@ -284,18 +303,20 @@ emitter_json_array_kv_begin(emitter_t *emitter, const char *json_key) {
static inline void
emitter_json_array_end(emitter_t *emitter) {
if (emitter->output == emitter_output_json) {
if (emitter_outputs_json(emitter)) {
assert(emitter->nesting_depth > 0);
emitter_nest_dec(emitter);
emitter_printf(emitter, "\n");
emitter_indent(emitter);
if (emitter->output != emitter_output_json_compact) {
emitter_printf(emitter, "\n");
emitter_indent(emitter);
}
emitter_printf(emitter, "]");
}
}
static inline void
emitter_json_object_begin(emitter_t *emitter) {
if (emitter->output == emitter_output_json) {
if (emitter_outputs_json(emitter)) {
emitter_json_key_prefix(emitter);
emitter_printf(emitter, "{");
emitter_nest_inc(emitter);
......@@ -311,11 +332,13 @@ emitter_json_object_kv_begin(emitter_t *emitter, const char *json_key) {
static inline void
emitter_json_object_end(emitter_t *emitter) {
if (emitter->output == emitter_output_json) {
if (emitter_outputs_json(emitter)) {
assert(emitter->nesting_depth > 0);
emitter_nest_dec(emitter);
emitter_printf(emitter, "\n");
emitter_indent(emitter);
if (emitter->output != emitter_output_json_compact) {
emitter_printf(emitter, "\n");
emitter_indent(emitter);
}
emitter_printf(emitter, "}");
}
}
......@@ -420,7 +443,7 @@ emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
emitter_type_t value_type, const void *value,
const char *table_note_key, emitter_type_t table_note_value_type,
const void *table_note_value) {
if (emitter->output == emitter_output_json) {
if (emitter_outputs_json(emitter)) {
emitter_json_key(emitter, json_key);
emitter_json_value(emitter, value_type, value);
} else {
......@@ -440,7 +463,7 @@ emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
static inline void
emitter_dict_begin(emitter_t *emitter, const char *json_key,
const char *table_header) {
if (emitter->output == emitter_output_json) {
if (emitter_outputs_json(emitter)) {
emitter_json_key(emitter, json_key);
emitter_json_object_begin(emitter);
} else {
......@@ -450,7 +473,7 @@ emitter_dict_begin(emitter_t *emitter, const char *json_key,
static inline void
emitter_dict_end(emitter_t *emitter) {
if (emitter->output == emitter_output_json) {
if (emitter_outputs_json(emitter)) {
emitter_json_object_end(emitter);
} else {
emitter_table_dict_end(emitter);
......@@ -459,7 +482,7 @@ emitter_dict_end(emitter_t *emitter) {
static inline void
emitter_begin(emitter_t *emitter) {
if (emitter->output == emitter_output_json) {
if (emitter_outputs_json(emitter)) {
assert(emitter->nesting_depth == 0);
emitter_printf(emitter, "{");
emitter_nest_inc(emitter);
......@@ -476,10 +499,11 @@ emitter_begin(emitter_t *emitter) {
static inline void
emitter_end(emitter_t *emitter) {
if (emitter->output == emitter_output_json) {
if (emitter_outputs_json(emitter)) {
assert(emitter->nesting_depth == 1);
emitter_nest_dec(emitter);
emitter_printf(emitter, "\n}\n");
emitter_printf(emitter, "%s", emitter->output ==
emitter_output_json_compact ? "}" : "\n}\n");
}
}
......
#ifndef JEMALLOC_INTERNAL_ESET_H
#define JEMALLOC_INTERNAL_ESET_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/fb.h"
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/mutex.h"
/*
* An eset ("extent set") is a quantized collection of extents, with built-in
* LRU queue.
*
* This class is not thread-safe; synchronization must be done externally if
* there are mutating operations. One exception is the stats counters, which
* may be read without any locking.
*/
typedef struct eset_bin_s eset_bin_t;
struct eset_bin_s {
edata_heap_t heap;
/*
* We do first-fit across multiple size classes. If we compared against
* the min element in each heap directly, we'd take a cache miss per
* extent we looked at. If we co-locate the edata summaries, we only
* take a miss on the edata we're actually going to return (which is
* inevitable anyway).
*/
edata_cmp_summary_t heap_min;
};
typedef struct eset_bin_stats_s eset_bin_stats_t;
struct eset_bin_stats_s {
atomic_zu_t nextents;
atomic_zu_t nbytes;
};
typedef struct eset_s eset_t;
struct eset_s {
/* Bitmap for which set bits correspond to non-empty heaps. */
fb_group_t bitmap[FB_NGROUPS(SC_NPSIZES + 1)];
/* Quantized per size class heaps of extents. */
eset_bin_t bins[SC_NPSIZES + 1];
eset_bin_stats_t bin_stats[SC_NPSIZES + 1];
/* LRU of all extents in heaps. */
edata_list_inactive_t lru;
/* Page sum for all extents in heaps. */
atomic_zu_t npages;
/*
* A duplication of the data in the containing ecache. We use this only
* for assertions on the states of the passed-in extents.
*/
extent_state_t state;
};
void eset_init(eset_t *eset, extent_state_t state);
size_t eset_npages_get(eset_t *eset);
/* Get the number of extents in the given page size index. */
size_t eset_nextents_get(eset_t *eset, pszind_t ind);
/* Get the sum total bytes of the extents in the given page size index. */
size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
void eset_insert(eset_t *eset, edata_t *edata);
void eset_remove(eset_t *eset, edata_t *edata);
/*
* Select an extent from this eset of the given size and alignment. Returns
* null if no such item could be found.
*/
edata_t *eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only,
unsigned lg_max_fit);
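/*
* Illustrative sketch (hypothetical caller; the mutex is an assumption, not
* part of the eset API): mutating operations need external synchronization,
* while stats reads such as eset_npages_get may go unlocked.
*/
static inline void
eset_example_insert(tsdn_t *tsdn, malloc_mutex_t *mtx, eset_t *eset,
edata_t *edata) {
malloc_mutex_lock(tsdn, mtx); /* External synchronization. */
eset_insert(eset, edata);
malloc_mutex_unlock(tsdn, mtx);
}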
#endif /* JEMALLOC_INTERNAL_ESET_H */
#ifndef JEMALLOC_INTERNAL_EXP_GROW_H
#define JEMALLOC_INTERNAL_EXP_GROW_H
typedef struct exp_grow_s exp_grow_t;
struct exp_grow_s {
/*
* Next extent size class in a growing series to use when satisfying a
* request via the extent hooks (only if opt_retain). This limits the
* number of disjoint virtual memory ranges so that extent merging can
* be effective even if multiple arenas' extent allocation requests are
* highly interleaved.
*
* retain_grow_limit is the max allowed size ind to expand (unless the
* required size is greater). Default is no limit, and controlled
* through mallctl only.
*/
pszind_t next;
pszind_t limit;
};
static inline bool
exp_grow_size_prepare(exp_grow_t *exp_grow, size_t alloc_size_min,
size_t *r_alloc_size, pszind_t *r_skip) {
*r_skip = 0;
*r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
while (*r_alloc_size < alloc_size_min) {
(*r_skip)++;
if (exp_grow->next + *r_skip >=
sz_psz2ind(SC_LARGE_MAXCLASS)) {
/* Outside legal range. */
return true;
}
*r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
}
return false;
}
static inline void
exp_grow_size_commit(exp_grow_t *exp_grow, pszind_t skip) {
if (exp_grow->next + skip + 1 <= exp_grow->limit) {
exp_grow->next += skip + 1;
} else {
exp_grow->next = exp_grow->limit;
}
}
void exp_grow_init(exp_grow_t *exp_grow);
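/*
* Illustrative sketch (hypothetical caller, not part of the header): size the
* request first, attempt the (fallible) allocation, and advance the growing
* series only once the allocation has succeeded.
*/
static inline bool
exp_grow_example(exp_grow_t *exp_grow, size_t min_size) {
size_t alloc_size;
pszind_t skip;
if (exp_grow_size_prepare(exp_grow, min_size, &alloc_size, &skip)) {
return true; /* min_size is outside the legal range. */
}
/* ... allocate alloc_size bytes via the extent hooks here ... */
exp_grow_size_commit(exp_grow, skip);
return false;
}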
#endif /* JEMALLOC_INTERNAL_EXP_GROW_H */
#ifndef JEMALLOC_INTERNAL_EXTENT_H
#define JEMALLOC_INTERNAL_EXTENT_H
#include "jemalloc/internal/ecache.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
/*
* This module contains the page-level allocator. It chooses the addresses that
* allocations requested by other modules will inhabit, and updates the global
* metadata to reflect allocation/deallocation/purging decisions.
*/
/*
* When reusing (and splitting) an active extent, (1U << opt_lg_extent_max_active_fit)
* is the max ratio between the size of the active extent and the new extent.
*/
#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
extern size_t opt_lg_extent_max_active_fit;
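/*
* Worked example (illustrative): with the default value of 6, the max ratio is
* 1U << 6 == 64, so a 4 KiB request may reuse (and split) an active extent only
* if that extent is at most 64 * 4 KiB == 256 KiB.
*/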
edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
bool zero, bool guarded);
edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
bool zero, bool guarded);
void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *edata);
edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, size_t npages_min);
void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata);
void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
edata_t *edata);
void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata);
edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
bool growing_retained);
void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata);
void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata);
bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length);
bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length);
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length);
bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length);
edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac,
ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b,
bool holding_core_locks);
bool extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *a, edata_t *b);
bool extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
bool commit, bool zero, bool growing_retained);
size_t extent_sn_next(pac_t *pac);
bool extent_boot(void);
JEMALLOC_ALWAYS_INLINE bool
extent_neighbor_head_state_mergeable(bool edata_is_head,
bool neighbor_is_head, bool forward) {
/*
* Head states checking: disallow merging if the higher addr extent is a
* head extent. This helps preserve first-fit, and more importantly
* ensures that no merge happens across arenas.
*/
if (forward) {
if (neighbor_is_head) {
return false;
}
} else {
if (edata_is_head) {
return false;
}
}
return true;
}
JEMALLOC_ALWAYS_INLINE bool
extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
extent_pai_t pai, extent_state_t expected_state, bool forward,
bool expanding) {
edata_t *neighbor = contents.edata;
if (neighbor == NULL) {
return false;
}
/* It's not safe to access *neighbor yet; must verify states first. */
bool neighbor_is_head = contents.metadata.is_head;
if (!extent_neighbor_head_state_mergeable(edata_is_head_get(edata),
neighbor_is_head, forward)) {
return false;
}
extent_state_t neighbor_state = contents.metadata.state;
if (pai == EXTENT_PAI_PAC) {
if (neighbor_state != expected_state) {
return false;
}
/* From this point, it's safe to access *neighbor. */
if (!expanding && (edata_committed_get(edata) !=
edata_committed_get(neighbor))) {
/*
* Some platforms (e.g. Windows) require an explicit
* commit step (and writing to uncommitted memory is not
* allowed).
*/
return false;
}
} else {
if (neighbor_state == extent_state_active) {
return false;
}
/* From this point, it's safe to access *neighbor. */
}
assert(edata_pai_get(edata) == pai);
if (edata_pai_get(neighbor) != pai) {
return false;
}
if (opt_retain) {
assert(edata_arena_ind_get(edata) ==
edata_arena_ind_get(neighbor));
} else {
if (edata_arena_ind_get(edata) !=
edata_arena_ind_get(neighbor)) {
return false;
}
}
assert(!edata_guarded_get(edata) && !edata_guarded_get(neighbor));
return true;
}
#endif /* JEMALLOC_INTERNAL_EXTENT_H */
#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
extern size_t opt_lg_extent_max_active_fit;
extern rtree_t extents_rtree;
extern const extent_hooks_t extent_hooks_default;
extern mutex_pool_t extent_mutex_pool;
extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
extent_hooks_t *extent_hooks_get(arena_t *arena);
extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
extent_hooks_t *extent_hooks);
#ifdef JEMALLOC_JET
size_t extent_size_quantize_floor(size_t size);
size_t extent_size_quantize_ceil(size_t size);
#endif
ph_proto(, extent_avail_, extent_tree_t, extent_t)
ph_proto(, extent_heap_, extent_heap_t, extent_t)
bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
bool delay_coalesce);
extent_state_t extents_state_get(const extents_t *extents);
size_t extents_npages_get(extents_t *extents);
/* Get the number of extents in the given page size index. */
size_t extents_nextents_get(extents_t *extents, pszind_t ind);
/* Get the total number of bytes in the extents of the given page size index. */
size_t extents_nbytes_get(extents_t *extents, pszind_t ind);
extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
bool *zero, bool *commit);
void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
void extents_prefork(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);
bool extent_boot(void);
void extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
size_t *nfree, size_t *nregs, size_t *size);
void extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
size_t *nfree, size_t *nregs, size_t *size,
size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr);
#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */