Commit b8beda3c authored by Oran Agra's avatar Oran Agra

Merge commit jemalloc 5.3.0

parents d659c734 6d23d3ac
...@@ -2,6 +2,7 @@
#define JEMALLOC_INTERNAL_CACHE_BIN_H
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sz.h"
/*
* The cache_bins are the mechanism that the tcache and the arena use to
...@@ -13,14 +14,38 @@
* of the tcache at all.
*/
/*
* The size in bytes of each cache bin stack. We also use this to indicate
* *counts* of individual objects.
*/
typedef uint16_t cache_bin_sz_t;
/*
* Leave a noticeable mark pattern on the cache bin stack boundaries, in case a
* bug starts leaking those. Make it look like the junk pattern but be distinct
* from it.
*/
static const uintptr_t cache_bin_preceding_junk =
(uintptr_t)0x7a7a7a7a7a7a7a7aULL;
/* Note: a7 vs. 7a above -- this tells you which pointer leaked. */
static const uintptr_t cache_bin_trailing_junk =
(uintptr_t)0xa7a7a7a7a7a7a7a7ULL;
/*
* That implies the following value, for the maximum number of items in any
* individual bin. The cache bins track their bounds looking just at the low
* bits of a pointer, compared against a cache_bin_sz_t. So that's
* 1 << (sizeof(cache_bin_sz_t) * 8)
* bytes spread across pointer sized objects to get the maximum.
*/
#define CACHE_BIN_NCACHED_MAX (((size_t)1 << sizeof(cache_bin_sz_t) * 8) \
/ sizeof(void *) - 1)
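/*
* Worked example (illustration, assuming an LP64 target with
* sizeof(void *) == 8 and the uint16_t cache_bin_sz_t above): 1 << 16 is 65536
* bytes of stack, which holds 65536 / 8 == 8192 pointer-sized slots; minus the
* one reserved slot, CACHE_BIN_NCACHED_MAX works out to 8191 items per bin.
*/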
/*
* This lives inside the cache_bin (for locality reasons), and is initialized
* alongside it, but is otherwise not modified by any cache bin operations.
* It's logically public and maintained by its callers.
*/
typedef struct cache_bin_stats_s cache_bin_stats_t;
struct cache_bin_stats_s {
/*
...@@ -36,34 +61,75 @@ struct cache_bin_stats_s {
*/
typedef struct cache_bin_info_s cache_bin_info_t;
struct cache_bin_info_s {
/* Upper limit on ncached. */
cache_bin_sz_t ncached_max;
};
/*
* Responsible for caching allocations associated with a single size.
*
* Several pointers are used to track the stack. To save on metadata bytes,
* only the stack_head is a full sized pointer (which is dereferenced on the
* fastpath), while the others store only the low 16 bits -- this is correct
* because a single stack never takes more space than 2^16 bytes, and at the
* same time only equality checks are performed on the low bits.
*
* (low addr) (high addr)
* |------stashed------|------available------|------cached-----|
* ^ ^ ^ ^
* low_bound(derived) low_bits_full stack_head low_bits_empty
*/
typedef struct cache_bin_s cache_bin_t;
struct cache_bin_s {
/*
* The stack grows down. Whenever the bin is nonempty, the head points
* to an array entry containing a valid allocation. When it is empty,
* the head points to one element past the owned array.
*/
void **stack_head;
/*
* cur_ptr and stats are both modified frequently. Let's keep them
* close so that they have a higher chance of being on the same
* cacheline, thus less write-backs.
*/
cache_bin_stats_t tstats;
/*
* The low bits of the address of the first item in the stack that
* hasn't been used since the last GC, to track the low water mark (min
* # of cached items).
*
* Since the stack grows down, this is a higher address than
* low_bits_full.
*/
uint16_t low_bits_low_water;
/*
* The low bits of the value that stack_head will take on when the array
* is full (of cached & stashed items). But remember that stack_head
* always points to a valid item when the array is nonempty -- this is
* in the array.
*
* Recall that since the stack grows down, this is the lowest available
* address in the array for caching. Only adjusted when stashing items.
*/
uint16_t low_bits_full;
/*
* The low bits of the value that stack_head will take on when the array
* is empty.
*
* The stack grows down -- this is one past the highest address in the
* array. Immutable after initialization.
*/
uint16_t low_bits_empty;
};
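/*
* Illustrative example of the layout above (not from the original source),
* assuming 8-byte pointers and a backing array spanning low bits 0x1000 to
* 0x1100 (32 slots): low_bits_empty == 0x1100, low_bits_full == 0x1000 while
* nothing is stashed, and stack_head's low bits == 0x10e0 once 4 items are
* cached, so ncached == (0x1100 - 0x10e0) / 8 == 4. Stashing one pointer
* writes it at low bits 0x1000 and bumps low_bits_full to 0x1008.
*/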
/*
* The cache_bins live inside the tcache, but the arena (by design) isn't
* supposed to know much about tcache internals. To let the arena iterate over
* associated bins, we keep (with the tcache) a linked list of
* cache_bin_array_descriptor_ts that tell the arena how to find the bins.
*/
typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
struct cache_bin_array_descriptor_s {
/*
...@@ -72,37 +138,214 @@ struct cache_bin_array_descriptor_s {
*/
ql_elm(cache_bin_array_descriptor_t) link;
/* Pointers to the tcache bins. */
cache_bin_t *bins;
};
static inline void
cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
cache_bin_t *bins) {
ql_elm_new(descriptor, link);
descriptor->bins = bins;
}
JEMALLOC_ALWAYS_INLINE bool
cache_bin_nonfast_aligned(const void *ptr) {
if (!config_uaf_detection) {
return false;
}
/*
* Currently we use alignment to decide which pointer to junk & stash on
* dealloc (for catching use-after-free). In some common cases a
* page-aligned check is needed already (sdalloc w/ config_prof), so we
* are getting it more or less for free -- no added instructions on
* free_fastpath.
*
* Another way of deciding which pointer to sample is adding another
* thread_event to pick one every N bytes. That also adds no cost on
* the fastpath, however it will tend to pick large allocations which is
* not the desired behavior.
*/
return ((uintptr_t)ptr & san_cache_bin_nonfast_mask) == 0;
}
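/*
* Hypothetical example: if san_cache_bin_nonfast_mask were 0xfff (i.e. sample
* page-aligned pointers on a 4 KiB page system), then a pointer with low bits
* 0xd000 would take the non-fast (junk & stash) path, while one with low bits
* 0xd010 stays on the regular fast path. The actual mask value is configured
* in the san module.
*/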
/* Returns ncached_max: Upper limit on ncached. */
static inline cache_bin_sz_t
cache_bin_info_ncached_max(cache_bin_info_t *info) {
return info->ncached_max;
}
/*
* Internal.
*
* Asserts that the pointer associated with earlier is <= the one associated
* with later.
*/
static inline void
cache_bin_assert_earlier(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
if (earlier > later) {
assert(bin->low_bits_full > bin->low_bits_empty);
}
}
/*
* Internal.
*
* Does difference calculations that handle wraparound correctly. Earlier must
* be associated with the position earlier in memory.
*/
static inline uint16_t
cache_bin_diff(cache_bin_t *bin, uint16_t earlier, uint16_t later, bool racy) {
/*
* When it's racy, bin->low_bits_full can be modified concurrently. It
* can cross the uint16_t max value and become less than
* bin->low_bits_empty at the time of the check.
*/
if (!racy) {
cache_bin_assert_earlier(bin, earlier, later);
}
return later - earlier;
}
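/*
* Worked wraparound example (illustration, assuming 8-byte pointers): if the
* backing array straddles a 64 KiB boundary, the low bound's low bits might be
* 0xffe0 while the empty position's are 0x0060. The uint16_t subtraction
* 0x0060 - 0xffe0 wraps to 0x0080, i.e. 128 bytes or 16 slots, which is the
* correct distance even though "later" is numerically smaller than "earlier".
*/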
/*
* Number of items currently cached in the bin, without checking ncached_max.
* We require specifying whether or not the request is racy (i.e. whether
* or not concurrent modifications are possible).
*/
static inline cache_bin_sz_t
cache_bin_ncached_get_internal(cache_bin_t *bin, bool racy) {
cache_bin_sz_t diff = cache_bin_diff(bin,
(uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty, racy);
cache_bin_sz_t n = diff / sizeof(void *);
/*
* We have undefined behavior here; if this function is called from the
* arena stats updating code, then stack_head could change from the
* first line to the next one. Morally, these loads should be atomic,
* but compilers won't currently generate comparisons with in-memory
* operands against atomics, and these variables get accessed on the
* fast paths. This should still be "safe" in the sense of generating
* the correct assembly for the foreseeable future, though.
*/
assert(n == 0 || *(bin->stack_head) != NULL || racy);
return n;
}

/*
* Number of items currently cached in the bin, with checking ncached_max. The
* caller must know that no concurrent modification of the cache_bin is
* possible.
*/
static inline cache_bin_sz_t
cache_bin_ncached_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t n = cache_bin_ncached_get_internal(bin,
/* racy */ false);
assert(n <= cache_bin_info_ncached_max(info));
return n;
}
/*
* Internal.
*
* A pointer to the position one past the end of the backing array.
*
* Do not call if racy, because both 'bin->stack_head' and 'bin->low_bits_full'
* are subject to concurrent modifications.
*/
static inline void **
cache_bin_empty_position_get(cache_bin_t *bin) {
cache_bin_sz_t diff = cache_bin_diff(bin,
(uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty,
/* racy */ false);
uintptr_t empty_bits = (uintptr_t)bin->stack_head + diff;
void **ret = (void **)empty_bits;
assert(ret >= bin->stack_head);
return ret;
}
/*
* Internal.
*
* Calculates low bits of the lower bound of the usable cache bin's range (see
* cache_bin_t visual representation above).
*
* No values are concurrently modified, so should be safe to read in a
* multithreaded environment. Currently concurrent access happens only during
* arena statistics collection.
*/
static inline uint16_t
cache_bin_low_bits_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
return (uint16_t)bin->low_bits_empty -
info->ncached_max * sizeof(void *);
}
/*
* Internal.
*
* A pointer to the position with the lowest address of the backing array.
*/
static inline void **
cache_bin_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
void **ret = cache_bin_empty_position_get(bin) - ncached_max;
assert(ret <= bin->stack_head);
return ret;
}
/*
* As the name implies. This is important since it's not correct to try to
* batch fill a nonempty cache bin.
*/
static inline void
cache_bin_assert_empty(cache_bin_t *bin, cache_bin_info_t *info) {
assert(cache_bin_ncached_get_local(bin, info) == 0);
assert(cache_bin_empty_position_get(bin) == bin->stack_head);
}
/*
* Get low water, but without any of the correctness checking we do for the
* caller-usable version, if we are temporarily breaking invariants (like
* ncached >= low_water during flush).
*/
static inline cache_bin_sz_t
cache_bin_low_water_get_internal(cache_bin_t *bin) {
return cache_bin_diff(bin, bin->low_bits_low_water,
bin->low_bits_empty, /* racy */ false) / sizeof(void *);
}
/* Returns the numeric value of low water in [0, ncached]. */
static inline cache_bin_sz_t
cache_bin_low_water_get(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t low_water = cache_bin_low_water_get_internal(bin);
assert(low_water <= cache_bin_info_ncached_max(info));
assert(low_water <= cache_bin_ncached_get_local(bin, info));
cache_bin_assert_earlier(bin, (uint16_t)(uintptr_t)bin->stack_head,
bin->low_bits_low_water);
return low_water;
}
/*
* Indicates that the current cache bin position should be the low water mark
* going forward.
*/
static inline void
cache_bin_low_water_set(cache_bin_t *bin) {
bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
}
static inline void
cache_bin_low_water_adjust(cache_bin_t *bin) {
if (cache_bin_ncached_get_internal(bin, /* racy */ false)
< cache_bin_low_water_get_internal(bin)) {
cache_bin_low_water_set(bin);
}
}
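/*
* Sketch of how a caller might use the low-water mark (hypothetical helper,
* not the actual tcache GC code): items that sat unused for a whole interval
* get flushed, then tracking restarts from the current position.
*
* static void
* sketch_gc_bin(cache_bin_t *bin, cache_bin_info_t *info) {
* 	cache_bin_sz_t unused = cache_bin_low_water_get(bin, info);
* 	// ... flush `unused` items back to the arena ...
* 	cache_bin_low_water_set(bin);
* }
*/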
JEMALLOC_ALWAYS_INLINE void *
cache_bin_alloc_impl(cache_bin_t *bin, bool *success, bool adjust_low_water) {
/*
* success (instead of ret) should be checked upon the return of this
* function. We avoid checking (ret == NULL) because there is never a
...@@ -110,22 +353,318 @@ cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
* and eagerly checking ret would cause pipeline stall (waiting for the
* cacheline).
*/

/*
* This may read from the empty position; however the loaded value won't
* be used. It's safe because the stack has one more slot reserved.
*/
void *ret = *bin->stack_head;
uint16_t low_bits = (uint16_t)(uintptr_t)bin->stack_head;
void **new_head = bin->stack_head + 1;
/*
* Note that the low water mark is at most empty; if we pass this check,
* we know we're non-empty.
*/
if (likely(low_bits != bin->low_bits_low_water)) {
bin->stack_head = new_head;
*success = true;
return ret;
}
if (!adjust_low_water) {
*success = false;
return NULL;
}
/*
* In the fast-path case where we call alloc_easy and then alloc, the
* previous checking and computation is optimized away -- we didn't
* actually commit any of our operations.
*/
if (likely(low_bits != bin->low_bits_empty)) {
bin->stack_head = new_head;
bin->low_bits_low_water = (uint16_t)(uintptr_t)new_head;
*success = true;
return ret;
}
*success = false;
return NULL;
}
/*
* Allocate an item out of the bin, failing if we're at the low-water mark.
*/
JEMALLOC_ALWAYS_INLINE void *
cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
/* We don't look at info if we're not adjusting low-water. */
return cache_bin_alloc_impl(bin, success, false);
}
/*
* Allocate an item out of the bin, even if we're currently at the low-water
* mark (and failing only if the bin is empty).
*/
JEMALLOC_ALWAYS_INLINE void *
cache_bin_alloc(cache_bin_t *bin, bool *success) {
return cache_bin_alloc_impl(bin, success, true);
}
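/*
* Illustrative usage sketch (hypothetical name, not part of the original
* header): try the low-water-respecting path first, and only dig below the
* low-water mark when it fails; on total failure the caller would refill the
* bin from the arena.
*/
JEMALLOC_ALWAYS_INLINE void *
cache_bin_sketch_alloc(cache_bin_t *bin, bool *success) {
void *ret = cache_bin_alloc_easy(bin, success);
if (unlikely(!*success)) {
ret = cache_bin_alloc(bin, success);
}
return ret;
}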
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_alloc_batch(cache_bin_t *bin, size_t num, void **out) {
cache_bin_sz_t n = cache_bin_ncached_get_internal(bin,
/* racy */ false);
if (n > num) {
n = (cache_bin_sz_t)num;
}
memcpy(out, bin->stack_head, n * sizeof(void *));
bin->stack_head += n;
cache_bin_low_water_adjust(bin);
return n;
}
JEMALLOC_ALWAYS_INLINE bool
cache_bin_full(cache_bin_t *bin) {
return ((uint16_t)(uintptr_t)bin->stack_head == bin->low_bits_full);
}
/*
* Free an object into the given bin. Fails only if the bin is full.
*/
JEMALLOC_ALWAYS_INLINE bool
cache_bin_dalloc_easy(cache_bin_t *bin, void *ptr) {
if (unlikely(cache_bin_full(bin))) {
return false;
}
bin->stack_head--;
*bin->stack_head = ptr;
cache_bin_assert_earlier(bin, bin->low_bits_full,
(uint16_t)(uintptr_t)bin->stack_head);
return true;
}
/* Returns false if failed to stash (i.e. bin is full). */
JEMALLOC_ALWAYS_INLINE bool
cache_bin_stash(cache_bin_t *bin, void *ptr) {
if (cache_bin_full(bin)) {
return false;
}
/* Stash at the full position, in the [full, head) range. */
uint16_t low_bits_head = (uint16_t)(uintptr_t)bin->stack_head;
/* Wraparound handled as well. */
uint16_t diff = cache_bin_diff(bin, bin->low_bits_full, low_bits_head,
/* racy */ false);
*(void **)((uintptr_t)bin->stack_head - diff) = ptr;
assert(!cache_bin_full(bin));
bin->low_bits_full += sizeof(void *);
cache_bin_assert_earlier(bin, bin->low_bits_full, low_bits_head);
return true;
}
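/*
* Illustrative dealloc-path sketch (hypothetical name, not the actual tcache
* dalloc code): pointers matching the sampling alignment are stashed for
* use-after-free checking, the rest are cached normally; a false return tells
* the caller to flush the bin and retry.
*/
JEMALLOC_ALWAYS_INLINE bool
cache_bin_sketch_dalloc(cache_bin_t *bin, void *ptr) {
if (unlikely(cache_bin_nonfast_aligned(ptr))) {
return cache_bin_stash(bin, ptr);
}
return cache_bin_dalloc_easy(bin, ptr);
}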
/*
* Get the number of stashed pointers.
*
* When called from a thread not owning the TLS (i.e. racy = true), it's
* important to keep in mind that 'bin->stack_head' and 'bin->low_bits_full' can
* be modified concurrently and almost no assertions about their values can be
* made.
*/
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_nstashed_get_internal(cache_bin_t *bin, cache_bin_info_t *info,
bool racy) {
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin,
info);
cache_bin_sz_t n = cache_bin_diff(bin, low_bits_low_bound,
bin->low_bits_full, racy) / sizeof(void *);
assert(n <= ncached_max);
if (!racy) {
/* Below are for assertions only. */
void **low_bound = cache_bin_low_bound_get(bin, info);
assert((uint16_t)(uintptr_t)low_bound == low_bits_low_bound);
void *stashed = *(low_bound + n - 1);
bool aligned = cache_bin_nonfast_aligned(stashed);
#ifdef JEMALLOC_JET
/* Allow arbitrary pointers to be stashed in tests. */
aligned = true;
#endif
assert(n == 0 || (stashed != NULL && aligned));
}
return n;
}
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_nstashed_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t n = cache_bin_nstashed_get_internal(bin, info,
/* racy */ false);
assert(n <= cache_bin_info_ncached_max(info));
return n;
}
/*
* Obtain a racy view of the number of items currently in the cache bin, in the
* presence of possible concurrent modifications.
*/
static inline void
cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_sz_t *ncached, cache_bin_sz_t *nstashed) {
cache_bin_sz_t n = cache_bin_ncached_get_internal(bin, /* racy */ true);
assert(n <= cache_bin_info_ncached_max(info));
*ncached = n;
n = cache_bin_nstashed_get_internal(bin, info, /* racy */ true);
assert(n <= cache_bin_info_ncached_max(info));
*nstashed = n;
/* Note that cannot assert ncached + nstashed <= ncached_max (racy). */
}
/*
* Filling and flushing are done in batch, on arrays of void *s. For filling,
* the arrays go forward, and can be accessed with ordinary array arithmetic.
* For flushing, we work from the end backwards, and so need to use special
* accessors that invert the usual ordering.
*
* This is important for maintaining first-fit; the arena code fills with
* earliest objects first, and so those are the ones we should return first for
* cache_bin_alloc calls. When flushing, we should flush the objects that we
* wish to return later; those at the end of the array. This is better for the
* first-fit heuristic as well as for cache locality; the most recently freed
* objects are the ones most likely to still be in cache.
*
* This all sounds very hand-wavey and theoretical, but reverting the ordering
* on one or the other pathway leads to measurable slowdowns.
*/
typedef struct cache_bin_ptr_array_s cache_bin_ptr_array_t;
struct cache_bin_ptr_array_s {
cache_bin_sz_t n;
void **ptr;
};
/*
* Declare a cache_bin_ptr_array_t sufficient for nval items.
*
* In the current implementation, this could be just part of a
* cache_bin_ptr_array_init_... call, since we reuse the cache bin stack memory.
* Indirecting behind a macro, though, means experimenting with linked-list
* representations is easy (since they'll require an alloca in the calling
* frame).
*/
#define CACHE_BIN_PTR_ARRAY_DECLARE(name, nval) \
cache_bin_ptr_array_t name; \
name.n = (nval)
/*
* Start a fill. The bin must be empty, and this must be followed by a
* finish_fill call before doing any alloc/dalloc operations on the bin.
*/
static inline void
cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_ptr_array_t *arr, cache_bin_sz_t nfill) {
cache_bin_assert_empty(bin, info);
arr->ptr = cache_bin_empty_position_get(bin) - nfill;
}
/*
* While nfill in cache_bin_init_ptr_array_for_fill is the number we *intend* to
* fill, nfilled here is the number we actually filled (which may be less, in
* case of OOM).
*/
static inline void
cache_bin_finish_fill(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_ptr_array_t *arr, cache_bin_sz_t nfilled) {
cache_bin_assert_empty(bin, info);
void **empty_position = cache_bin_empty_position_get(bin);
if (nfilled < arr->n) {
memmove(empty_position - nfilled, empty_position - arr->n,
nfilled * sizeof(void *));
}
bin->stack_head = empty_position - nfilled;
}
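/*
* Illustrative fill sequence (hypothetical helper; the real caller is the
* arena/tcache fill path): declare a ptr array over the bin's own stack
* memory, let a provider write up to nfill pointers into arr.ptr, then commit
* however many were actually produced.
*/
static inline void
cache_bin_sketch_fill(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_sz_t nfill,
cache_bin_sz_t (*provider)(void **out, cache_bin_sz_t n)) {
CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill);
cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill);
cache_bin_sz_t nfilled = provider(arr.ptr, nfill);
cache_bin_finish_fill(bin, info, &arr, nfilled);
}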
/*
* Same deal, but with flush. Unlike fill (which can fail), the user must flush
* everything we give them.
*/
static inline void
cache_bin_init_ptr_array_for_flush(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) {
arr->ptr = cache_bin_empty_position_get(bin) - nflush;
assert(cache_bin_ncached_get_local(bin, info) == 0
|| *arr->ptr != NULL);
}
static inline void
cache_bin_finish_flush(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_ptr_array_t *arr, cache_bin_sz_t nflushed) {
unsigned rem = cache_bin_ncached_get_local(bin, info) - nflushed;
memmove(bin->stack_head + nflushed, bin->stack_head,
rem * sizeof(void *));
bin->stack_head = bin->stack_head + nflushed;
cache_bin_low_water_adjust(bin);
}
static inline void
cache_bin_init_ptr_array_for_stashed(cache_bin_t *bin, szind_t binind,
cache_bin_info_t *info, cache_bin_ptr_array_t *arr,
cache_bin_sz_t nstashed) {
assert(nstashed > 0);
assert(cache_bin_nstashed_get_local(bin, info) == nstashed);
void **low_bound = cache_bin_low_bound_get(bin, info);
arr->ptr = low_bound;
assert(*arr->ptr != NULL);
}
static inline void
cache_bin_finish_flush_stashed(cache_bin_t *bin, cache_bin_info_t *info) {
void **low_bound = cache_bin_low_bound_get(bin, info);
/* Reset the bin local full position. */
bin->low_bits_full = (uint16_t)(uintptr_t)low_bound;
assert(cache_bin_nstashed_get_local(bin, info) == 0);
}
/*
* Initialize a cache_bin_info to represent up to the given number of items in
* the cache_bins it is associated with.
*/
void cache_bin_info_init(cache_bin_info_t *bin_info,
cache_bin_sz_t ncached_max);
/*
* Given an array of initialized cache_bin_info_ts, determine how big an
* allocation is required to initialize a full set of cache_bin_ts.
*/
void cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
size_t *size, size_t *alignment);
/*
* Actually initialize some cache bins. Callers should allocate the backing
* memory indicated by a call to cache_bin_compute_alloc. They should then
* preincrement, call init once for each bin and info, and then call
* cache_bin_postincrement. *alloc_cur will then point immediately past the end
* of the allocation.
*/
void cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos,
void *alloc, size_t *cur_offset);
void cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos,
void *alloc, size_t *cur_offset);
void cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
size_t *cur_offset);
/*
* If a cache bin was zero initialized (either because it lives in static or
* thread-local storage, or was memset to 0), this function indicates whether or
* not cache_bin_init was called on it.
*/
bool cache_bin_still_zero_initialized(cache_bin_t *bin);
#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
#ifndef JEMALLOC_INTERNAL_COUNTER_H
#define JEMALLOC_INTERNAL_COUNTER_H
#include "jemalloc/internal/mutex.h"
typedef struct counter_accum_s {
LOCKEDINT_MTX_DECLARE(mtx)
locked_u64_t accumbytes;
uint64_t interval;
} counter_accum_t;
JEMALLOC_ALWAYS_INLINE bool
counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
uint64_t interval = counter->interval;
assert(interval > 0);
LOCKEDINT_MTX_LOCK(tsdn, counter->mtx);
/*
* If the event moves fast enough (and/or if the event handling is slow
* enough), extreme overflow can cause counter trigger coalescing.
* This is an intentional mechanism that avoids rate-limiting
* allocation.
*/
bool overflow = locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(counter->mtx),
&counter->accumbytes, bytes, interval);
LOCKEDINT_MTX_UNLOCK(tsdn, counter->mtx);
return overflow;
}
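/*
* Illustrative usage sketch (hypothetical names): accumulate allocated bytes
* and run some periodic work each time another interval's worth of bytes has
* been seen, accepting the coalesced triggers described above.
*/
static inline void
counter_sketch_on_alloc(tsdn_t *tsdn, counter_accum_t *counter, size_t usize,
void (*trigger)(tsdn_t *tsdn)) {
if (counter_accum(tsdn, counter, usize)) {
trigger(tsdn);
}
}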
bool counter_accum_init(counter_accum_t *counter, uint64_t interval);
void counter_prefork(tsdn_t *tsdn, counter_accum_t *counter);
void counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter);
void counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter);
#endif /* JEMALLOC_INTERNAL_COUNTER_H */
...@@ -42,9 +42,11 @@ typedef struct ctl_arena_stats_s {
uint64_t nfills_small;
uint64_t nflushes_small;
bin_stats_data_t bstats[SC_NBINS];
arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
pac_estats_t estats[SC_NPSIZES];
hpa_shard_stats_t hpastats;
sec_stats_t secstats;
} ctl_arena_stats_t;
typedef struct ctl_stats_s {
...@@ -96,13 +98,17 @@ typedef struct ctl_arenas_s {
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
int ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
size_t *miblenp);
int ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);
void ctl_mtx_assert_held(tsdn_t *tsdn);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
...@@ -131,4 +137,23 @@ void ctl_postfork_child(tsdn_t *tsdn);
} \
} while (0)
#define xmallctlmibnametomib(mib, miblen, name, miblenp) do { \
if (ctl_mibnametomib(tsd_fetch(), mib, miblen, name, miblenp) \
!= 0) { \
malloc_write( \
"<jemalloc>: Failure in ctl_mibnametomib()\n"); \
abort(); \
} \
} while (0)
#define xmallctlbymibname(mib, miblen, name, miblenp, oldp, oldlenp, \
newp, newlen) do { \
if (ctl_bymibname(tsd_fetch(), mib, miblen, name, miblenp, \
oldp, oldlenp, newp, newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in ctl_bymibname()\n"); \
abort(); \
} \
} while (0)
#endif /* JEMALLOC_INTERNAL_CTL_H */
#ifndef JEMALLOC_INTERNAL_DECAY_H
#define JEMALLOC_INTERNAL_DECAY_H
#include "jemalloc/internal/smoothstep.h"
#define DECAY_UNBOUNDED_TIME_TO_PURGE ((uint64_t)-1)
/*
* The decay_t computes the number of pages we should purge at any given time.
* Page allocators inform a decay object when pages enter a decay-able state
* (i.e. dirty or muzzy), and query it to determine how many pages should be
* purged at any given time.
*
* This is mostly a single-threaded data structure and doesn't care about
* synchronization at all; it's the caller's responsibility to manage their
* synchronization on their own. There are two exceptions:
* 1) It's OK to racily call decay_ms_read (i.e. just the simplest state query).
* 2) The mtx and purging fields live (and are initialized) here, but are
* logically owned by the page allocator. This is just a convenience (since
* those fields would be duplicated for both the dirty and muzzy states
* otherwise).
*/
typedef struct decay_s decay_t;
struct decay_s {
/* Synchronizes all non-atomic fields. */
malloc_mutex_t mtx;
/*
* True if a thread is currently purging the extents associated with
* this decay structure.
*/
bool purging;
/*
* Approximate time in milliseconds from the creation of a set of unused
* dirty pages until an equivalent set of unused dirty pages is purged
* and/or reused.
*/
atomic_zd_t time_ms;
/* time / SMOOTHSTEP_NSTEPS. */
nstime_t interval;
/*
* Time at which the current decay interval logically started. We do
* not actually advance to a new epoch until sometime after it starts
* because of scheduling and computation delays, and it is even possible
* to completely skip epochs. In all cases, during epoch advancement we
* merge all relevant activity into the most recently recorded epoch.
*/
nstime_t epoch;
/* Deadline randomness generator. */
uint64_t jitter_state;
/*
* Deadline for current epoch. This is the sum of interval and per
* epoch jitter which is a uniform random variable in [0..interval).
* Epochs always advance by precise multiples of interval, but we
* randomize the deadline to reduce the likelihood of arenas purging in
* lockstep.
*/
nstime_t deadline;
/*
* The number of pages we cap ourselves at in the current epoch, per
* decay policies. Updated on an epoch change. After an epoch change,
* the caller should take steps to try to purge down to this amount.
*/
size_t npages_limit;
/*
* Number of unpurged pages at beginning of current epoch. During epoch
* advancement we use the delta between arena->decay_*.nunpurged and
* ecache_npages_get(&arena->ecache_*) to determine how many dirty pages,
* if any, were generated.
*/
size_t nunpurged;
/*
* Trailing log of how many unused dirty pages were generated during
* each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
* element is the most recent epoch. Corresponding epoch times are
* relative to epoch.
*
* Updated only on epoch advance, triggered by
* decay_maybe_advance_epoch, below.
*/
size_t backlog[SMOOTHSTEP_NSTEPS];
/* Peak number of pages in associated extents. Used for debug only. */
uint64_t ceil_npages;
};
/*
* The current decay time setting. This is the only public access to a decay_t
* that's allowed without holding mtx.
*/
static inline ssize_t
decay_ms_read(const decay_t *decay) {
return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}
/*
* See the comment on the struct field -- the limit on pages we should allow in
* this decay state this epoch.
*/
static inline size_t
decay_npages_limit_get(const decay_t *decay) {
return decay->npages_limit;
}
/* How many unused dirty pages were generated during the last epoch. */
static inline size_t
decay_epoch_npages_delta(const decay_t *decay) {
return decay->backlog[SMOOTHSTEP_NSTEPS - 1];
}
/*
* Current epoch duration, in nanoseconds. Given that new epochs are started
* somewhat haphazardly, this is not necessarily exactly the time between any
* two calls to decay_maybe_advance_epoch; see the comments on fields in the
* decay_t.
*/
static inline uint64_t
decay_epoch_duration_ns(const decay_t *decay) {
return nstime_ns(&decay->interval);
}
static inline bool
decay_immediately(const decay_t *decay) {
ssize_t decay_ms = decay_ms_read(decay);
return decay_ms == 0;
}
static inline bool
decay_disabled(const decay_t *decay) {
ssize_t decay_ms = decay_ms_read(decay);
return decay_ms < 0;
}
/* Returns true if decay is enabled and done gradually. */
static inline bool
decay_gradually(const decay_t *decay) {
ssize_t decay_ms = decay_ms_read(decay);
return decay_ms > 0;
}
/*
* Returns true if the passed in decay time setting is valid.
* < -1 : invalid
* -1 : never decay
* 0 : decay immediately
* > 0 : some positive decay time, up to a maximum allowed value of
* NSTIME_SEC_MAX * 1000, which corresponds to decaying somewhere in the early
* 27th century. By that time, we expect to have implemented alternate purging
* strategies.
*/
bool decay_ms_valid(ssize_t decay_ms);
/*
* As a precondition, the decay_t must be zeroed out (as if with memset).
*
* Returns true on error.
*/
bool decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
/*
* Given an already-initialized decay_t, reinitialize it with the given decay
* time. The decay_t must have previously been initialized (and should not then
* be zeroed).
*/
void decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
/*
* Compute how many of 'npages_new' pages we would need to purge in 'time'.
*/
uint64_t decay_npages_purge_in(decay_t *decay, nstime_t *time,
size_t npages_new);
/* Returns true if the epoch advanced and there are pages to purge. */
bool decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
size_t current_npages);
/*
* Calculates wait time until a number of pages in the interval
* [0.5 * npages_threshold .. 1.5 * npages_threshold] should be purged.
*
* Returns number of nanoseconds or DECAY_UNBOUNDED_TIME_TO_PURGE in case of
* indefinite wait.
*/
uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current,
uint64_t npages_threshold);
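/*
* Illustrative purge-cycle sketch built on the API above (hypothetical helper;
* the real driver lives in the page allocator, and the non-atomic fields
* require holding decay->mtx): advance the epoch if enough time has passed,
* then purge down toward the epoch's page limit.
*/
static inline void
decay_sketch_tick(decay_t *decay, nstime_t *now, size_t npages_current,
size_t (*purge)(size_t npages_to_purge)) {
if (!decay_gradually(decay)) {
return;
}
if (decay_maybe_advance_epoch(decay, now, npages_current)) {
size_t limit = decay_npages_limit_get(decay);
if (npages_current > limit) {
purge(npages_current - limit);
}
}
}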
#endif /* JEMALLOC_INTERNAL_DECAY_H */
#ifndef JEMALLOC_INTERNAL_ECACHE_H
#define JEMALLOC_INTERNAL_ECACHE_H
#include "jemalloc/internal/eset.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/mutex.h"
typedef struct ecache_s ecache_t;
struct ecache_s {
malloc_mutex_t mtx;
eset_t eset;
eset_t guarded_eset;
/* All stored extents must be in the same state. */
extent_state_t state;
/* The index of the ehooks the ecache is associated with. */
unsigned ind;
/*
* If true, delay coalescing until eviction; otherwise coalesce during
* deallocation.
*/
bool delay_coalesce;
};
static inline size_t
ecache_npages_get(ecache_t *ecache) {
return eset_npages_get(&ecache->eset) +
eset_npages_get(&ecache->guarded_eset);
}
/* Get the number of extents in the given page size index. */
static inline size_t
ecache_nextents_get(ecache_t *ecache, pszind_t ind) {
return eset_nextents_get(&ecache->eset, ind) +
eset_nextents_get(&ecache->guarded_eset, ind);
}
/* Get the sum total bytes of the extents in the given page size index. */
static inline size_t
ecache_nbytes_get(ecache_t *ecache, pszind_t ind) {
return eset_nbytes_get(&ecache->eset, ind) +
eset_nbytes_get(&ecache->guarded_eset, ind);
}
static inline unsigned
ecache_ind_get(ecache_t *ecache) {
return ecache->ind;
}
bool ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state,
unsigned ind, bool delay_coalesce);
void ecache_prefork(tsdn_t *tsdn, ecache_t *ecache);
void ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache);
void ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache);
#endif /* JEMALLOC_INTERNAL_ECACHE_H */
#ifndef JEMALLOC_INTERNAL_EDATA_H
#define JEMALLOC_INTERNAL_EDATA_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin_info.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/hpdata.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/slab_data.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/typed_list.h"
/*
* sizeof(edata_t) is 128 bytes on 64-bit architectures. Ensure the alignment
* to free up the low bits in the rtree leaf.
*/
#define EDATA_ALIGNMENT 128
enum extent_state_e {
extent_state_active = 0,
extent_state_dirty = 1,
extent_state_muzzy = 2,
extent_state_retained = 3,
extent_state_transition = 4, /* States below are intermediate. */
extent_state_merging = 5,
extent_state_max = 5 /* Sanity checking only. */
};
typedef enum extent_state_e extent_state_t;
enum extent_head_state_e {
EXTENT_NOT_HEAD,
EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */
};
typedef enum extent_head_state_e extent_head_state_t;
/*
* Which implementation of the page allocator interface, (PAI, defined in
* pai.h) owns the given extent?
*/
enum extent_pai_e {
EXTENT_PAI_PAC = 0,
EXTENT_PAI_HPA = 1
};
typedef enum extent_pai_e extent_pai_t;
struct e_prof_info_s {
/* Time when this was allocated. */
nstime_t e_prof_alloc_time;
/* Allocation request size. */
size_t e_prof_alloc_size;
/* Points to a prof_tctx_t. */
atomic_p_t e_prof_tctx;
/*
* Points to a prof_recent_t for the allocation; NULL
* means the recent allocation record no longer exists.
* Protected by prof_recent_alloc_mtx.
*/
atomic_p_t e_prof_recent_alloc;
};
typedef struct e_prof_info_s e_prof_info_t;
/*
* The information about a particular edata that lives in an emap. Space is
* more precious there (the information, plus the edata pointer, has to live in
* a 64-bit word if we want to enable a packed representation).
*
* There are two things that are special about the information here:
* - It's quicker to access. You have one fewer pointer hop, since finding the
* edata_t associated with an item always requires accessing the rtree leaf in
* which this data is stored.
* - It can be read unsynchronized, and without worrying about lifetime issues.
*/
typedef struct edata_map_info_s edata_map_info_t;
struct edata_map_info_s {
bool slab;
szind_t szind;
};
typedef struct edata_cmp_summary_s edata_cmp_summary_t;
struct edata_cmp_summary_s {
uint64_t sn;
uintptr_t addr;
};
/* Extent (span of pages). Use accessor functions for e_* fields. */
typedef struct edata_s edata_t;
ph_structs(edata_avail, edata_t);
ph_structs(edata_heap, edata_t);
struct edata_s {
/*
* Bitfield containing several fields:
*
* a: arena_ind
* b: slab
* c: committed
* p: pai
* z: zeroed
* g: guarded
* t: state
* i: szind
* f: nfree
* s: bin_shard
*
* 00000000 ... 0000ssss ssffffff ffffiiii iiiitttg zpcbaaaa aaaaaaaa
*
* arena_ind: Arena from which this extent came, or all 1 bits if
* unassociated.
*
* slab: The slab flag indicates whether the extent is used for a slab
* of small regions. This helps differentiate small size classes,
* and it indicates whether interior pointers can be looked up via
* iealloc().
*
* committed: The committed flag indicates whether physical memory is
* committed to the extent, whether explicitly or implicitly
* as on a system that overcommits and satisfies physical
* memory needs on demand via soft page faults.
*
* pai: The pai flag is an extent_pai_t.
*
* zeroed: The zeroed flag is used by extent recycling code to track
* whether memory is zero-filled.
*
* guarded: The guarded flag is use by the sanitizer to track whether
* the extent has page guards around it.
*
* state: The state flag is an extent_state_t.
*
* szind: The szind flag indicates usable size class index for
* allocations residing in this extent, regardless of whether the
* extent is a slab. Extent size and usable size often differ
* even for non-slabs, either due to sz_large_pad or promotion of
* sampled small regions.
*
* nfree: Number of free regions in slab.
*
* bin_shard: the shard of the bin from which this extent came.
*/
uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
#define EDATA_BITS_ARENA_SHIFT 0
#define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_SLAB_WIDTH 1
#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_COMMITTED_WIDTH 1
#define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_PAI_WIDTH 1
#define EDATA_BITS_PAI_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_PAI_MASK MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)
#define EDATA_BITS_ZEROED_WIDTH 1
#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
#define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_GUARDED_WIDTH 1
#define EDATA_BITS_GUARDED_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_GUARDED_MASK MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT)
#define EDATA_BITS_STATE_WIDTH 3
#define EDATA_BITS_STATE_SHIFT (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT)
#define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_BINSHARD_WIDTH 6
#define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_IS_HEAD_WIDTH 1
#define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
/* Pointer to the extent that this structure is responsible for. */
void *e_addr;
union {
/*
* Extent size and serial number associated with the extent
* structure (different than the serial number for the extent at
* e_addr).
*
* ssssssss [...] ssssssss ssssnnnn nnnnnnnn
*/
size_t e_size_esn;
#define EDATA_SIZE_MASK ((size_t)~(PAGE-1))
#define EDATA_ESN_MASK ((size_t)PAGE-1)
/* Base extent size, which may not be a multiple of PAGE. */
size_t e_bsize;
};
/*
* If this edata is a user allocation from an HPA, it comes out of some
* pageslab (we don't yet support hugepage allocations that don't fit
* into pageslabs). This tracks it.
*/
hpdata_t *e_ps;
/*
* Serial number. These are not necessarily unique; splitting an extent
* results in two extents with the same serial number.
*/
uint64_t e_sn;
union {
/*
* List linkage used when the edata_t is active; either in
* arena's large allocations or bin_t's slabs_full.
*/
ql_elm(edata_t) ql_link_active;
/*
* Pairing heap linkage. Used whenever the extent is inactive
* (in the page allocators), or when it is active and in
* slabs_nonfull, or when the edata_t is unassociated with an
* extent and sitting in an edata_cache.
*/
union {
edata_heap_link_t heap_link;
edata_avail_link_t avail_link;
};
};
union {
/*
* List linkage used when the extent is inactive:
* - Stashed dirty extents
* - Ecache LRU functionality.
*/
ql_elm(edata_t) ql_link_inactive;
/* Small region slab metadata. */
slab_data_t e_slab_data;
/* Profiling data, used for large objects. */
e_prof_info_t e_prof_info;
};
};
TYPED_LIST(edata_list_active, edata_t, ql_link_active)
TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive)
static inline unsigned
edata_arena_ind_get(const edata_t *edata) {
unsigned arena_ind = (unsigned)((edata->e_bits &
EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
assert(arena_ind < MALLOCX_ARENA_LIMIT);
return arena_ind;
}
static inline szind_t
edata_szind_get_maybe_invalid(const edata_t *edata) {
szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
EDATA_BITS_SZIND_SHIFT);
assert(szind <= SC_NSIZES);
return szind;
}
static inline szind_t
edata_szind_get(const edata_t *edata) {
szind_t szind = edata_szind_get_maybe_invalid(edata);
assert(szind < SC_NSIZES); /* Never call when "invalid". */
return szind;
}
static inline size_t
edata_usize_get(const edata_t *edata) {
return sz_index2size(edata_szind_get(edata));
}
static inline unsigned
edata_binshard_get(const edata_t *edata) {
unsigned binshard = (unsigned)((edata->e_bits &
EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
return binshard;
}
static inline uint64_t
edata_sn_get(const edata_t *edata) {
return edata->e_sn;
}
static inline extent_state_t
edata_state_get(const edata_t *edata) {
return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
EDATA_BITS_STATE_SHIFT);
}
static inline bool
edata_guarded_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >>
EDATA_BITS_GUARDED_SHIFT);
}
static inline bool
edata_zeroed_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
EDATA_BITS_ZEROED_SHIFT);
}
static inline bool
edata_committed_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
EDATA_BITS_COMMITTED_SHIFT);
}
static inline extent_pai_t
edata_pai_get(const edata_t *edata) {
return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >>
EDATA_BITS_PAI_SHIFT);
}
static inline bool
edata_slab_get(const edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
EDATA_BITS_SLAB_SHIFT);
}
static inline unsigned
edata_nfree_get(const edata_t *edata) {
assert(edata_slab_get(edata));
return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
EDATA_BITS_NFREE_SHIFT);
}
static inline void *
edata_base_get(const edata_t *edata) {
assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
!edata_slab_get(edata));
return PAGE_ADDR2BASE(edata->e_addr);
}
static inline void *
edata_addr_get(const edata_t *edata) {
assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
!edata_slab_get(edata));
return edata->e_addr;
}
static inline size_t
edata_size_get(const edata_t *edata) {
return (edata->e_size_esn & EDATA_SIZE_MASK);
}
static inline size_t
edata_esn_get(const edata_t *edata) {
return (edata->e_size_esn & EDATA_ESN_MASK);
}
static inline size_t
edata_bsize_get(const edata_t *edata) {
return edata->e_bsize;
}
static inline hpdata_t *
edata_ps_get(const edata_t *edata) {
assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
return edata->e_ps;
}
static inline void *
edata_before_get(const edata_t *edata) {
return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
}
static inline void *
edata_last_get(const edata_t *edata) {
return (void *)((uintptr_t)edata_base_get(edata) +
edata_size_get(edata) - PAGE);
}
static inline void *
edata_past_get(const edata_t *edata) {
return (void *)((uintptr_t)edata_base_get(edata) +
edata_size_get(edata));
}
static inline slab_data_t *
edata_slab_data_get(edata_t *edata) {
assert(edata_slab_get(edata));
return &edata->e_slab_data;
}
static inline const slab_data_t *
edata_slab_data_get_const(const edata_t *edata) {
assert(edata_slab_get(edata));
return &edata->e_slab_data;
}
static inline prof_tctx_t *
edata_prof_tctx_get(const edata_t *edata) {
return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx,
ATOMIC_ACQUIRE);
}
static inline const nstime_t *
edata_prof_alloc_time_get(const edata_t *edata) {
return &edata->e_prof_info.e_prof_alloc_time;
}
static inline size_t
edata_prof_alloc_size_get(const edata_t *edata) {
return edata->e_prof_info.e_prof_alloc_size;
}
static inline prof_recent_t *
edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) {
return (prof_recent_t *)atomic_load_p(
&edata->e_prof_info.e_prof_recent_alloc, ATOMIC_RELAXED);
}
static inline void
edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
}
static inline void
edata_binshard_set(edata_t *edata, unsigned binshard) {
/* The assertion assumes szind is set already. */
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
}
static inline void
edata_addr_set(edata_t *edata, void *addr) {
edata->e_addr = addr;
}
static inline void
edata_size_set(edata_t *edata, size_t size) {
assert((size & ~EDATA_SIZE_MASK) == 0);
edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
}
static inline void
edata_esn_set(edata_t *edata, size_t esn) {
edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
EDATA_ESN_MASK);
}
static inline void
edata_bsize_set(edata_t *edata, size_t bsize) {
edata->e_bsize = bsize;
}
static inline void
edata_ps_set(edata_t *edata, hpdata_t *ps) {
assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
edata->e_ps = ps;
}
static inline void
edata_szind_set(edata_t *edata, szind_t szind) {
assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
}
static inline void
edata_nfree_set(edata_t *edata, unsigned nfree) {
assert(edata_slab_get(edata));
edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}
static inline void
edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
/* The assertion assumes szind is set already. */
assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
edata->e_bits = (edata->e_bits &
(~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}
static inline void
edata_nfree_inc(edata_t *edata) {
assert(edata_slab_get(edata));
edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}
static inline void
edata_nfree_dec(edata_t *edata) {
assert(edata_slab_get(edata));
edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}
static inline void
edata_nfree_sub(edata_t *edata, uint64_t n) {
assert(edata_slab_get(edata));
edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
}
static inline void
edata_sn_set(edata_t *edata, uint64_t sn) {
edata->e_sn = sn;
}
static inline void
edata_state_set(edata_t *edata, extent_state_t state) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
((uint64_t)state << EDATA_BITS_STATE_SHIFT);
}
static inline void
edata_guarded_set(edata_t *edata, bool guarded) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) |
((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT);
}
static inline void
edata_zeroed_set(edata_t *edata, bool zeroed) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
}
static inline void
edata_committed_set(edata_t *edata, bool committed) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
}
static inline void
edata_pai_set(edata_t *edata, extent_pai_t pai) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) |
((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
}
static inline void
edata_slab_set(edata_t *edata, bool slab) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
}
static inline void
edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
atomic_store_p(&edata->e_prof_info.e_prof_tctx, tctx, ATOMIC_RELEASE);
}
static inline void
edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
nstime_copy(&edata->e_prof_info.e_prof_alloc_time, t);
}
static inline void
edata_prof_alloc_size_set(edata_t *edata, size_t size) {
edata->e_prof_info.e_prof_alloc_size = size;
}
static inline void
edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata,
prof_recent_t *recent_alloc) {
atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc,
ATOMIC_RELAXED);
}
static inline bool
edata_is_head_get(edata_t *edata) {
return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
EDATA_BITS_IS_HEAD_SHIFT);
}
static inline void
edata_is_head_set(edata_t *edata, bool is_head) {
edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
}
static inline bool
edata_state_in_transition(extent_state_t state) {
return state >= extent_state_transition;
}
/*
* Because this function is implemented as a sequence of bitfield modifications,
* even though each individual bit is properly initialized, we technically read
* uninitialized data within it. This is mostly fine, since most callers get
* their edatas from zeroing sources, but callers who make stack edata_ts need
* to manually zero them.
*/
static inline void
edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
bool slab, szind_t szind, uint64_t sn, extent_state_t state, bool zeroed,
bool committed, extent_pai_t pai, extent_head_state_t is_head) {
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
edata_arena_ind_set(edata, arena_ind);
edata_addr_set(edata, addr);
edata_size_set(edata, size);
edata_slab_set(edata, slab);
edata_szind_set(edata, szind);
edata_sn_set(edata, sn);
edata_state_set(edata, state);
edata_guarded_set(edata, false);
edata_zeroed_set(edata, zeroed);
edata_committed_set(edata, committed);
edata_pai_set(edata, pai);
edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
if (config_prof) {
edata_prof_tctx_set(edata, NULL);
}
}
static inline void
edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) {
edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
edata_addr_set(edata, addr);
edata_bsize_set(edata, bsize);
edata_slab_set(edata, false);
edata_szind_set(edata, SC_NSIZES);
edata_sn_set(edata, sn);
edata_state_set(edata, extent_state_active);
edata_guarded_set(edata, false);
edata_zeroed_set(edata, true);
edata_committed_set(edata, true);
/*
* This isn't strictly true, but base allocated extents never get
* deallocated and can't be looked up in the emap, but no sense in
* wasting a state bit to encode this fact.
*/
edata_pai_set(edata, EXTENT_PAI_PAC);
}
static inline int
edata_esn_comp(const edata_t *a, const edata_t *b) {
size_t a_esn = edata_esn_get(a);
size_t b_esn = edata_esn_get(b);
return (a_esn > b_esn) - (a_esn < b_esn);
}
static inline int
edata_ead_comp(const edata_t *a, const edata_t *b) {
uintptr_t a_eaddr = (uintptr_t)a;
uintptr_t b_eaddr = (uintptr_t)b;
return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
}
static inline edata_cmp_summary_t
edata_cmp_summary_get(const edata_t *edata) {
return (edata_cmp_summary_t){edata_sn_get(edata),
(uintptr_t)edata_addr_get(edata)};
}
static inline int
edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) {
int ret;
ret = (a.sn > b.sn) - (a.sn < b.sn);
if (ret != 0) {
return ret;
}
ret = (a.addr > b.addr) - (a.addr < b.addr);
return ret;
}
static inline int
edata_snad_comp(const edata_t *a, const edata_t *b) {
edata_cmp_summary_t a_cmp = edata_cmp_summary_get(a);
edata_cmp_summary_t b_cmp = edata_cmp_summary_get(b);
return edata_cmp_summary_comp(a_cmp, b_cmp);
}
static inline int
edata_esnead_comp(const edata_t *a, const edata_t *b) {
int ret;
ret = edata_esn_comp(a, b);
if (ret != 0) {
return ret;
}
ret = edata_ead_comp(a, b);
return ret;
}
ph_proto(, edata_avail, edata_t)
ph_proto(, edata_heap, edata_t)
#endif /* JEMALLOC_INTERNAL_EDATA_H */
#ifndef JEMALLOC_INTERNAL_EDATA_CACHE_H
#define JEMALLOC_INTERNAL_EDATA_CACHE_H
#include "jemalloc/internal/base.h"
/* For tests only. */
#define EDATA_CACHE_FAST_FILL 4
/*
* A cache of edata_t structures allocated via base_alloc_edata (as opposed to
* the underlying extents they describe). The contents of returned edata_t
* objects are garbage and cannot be relied upon.
*/
typedef struct edata_cache_s edata_cache_t;
struct edata_cache_s {
edata_avail_t avail;
atomic_zu_t count;
malloc_mutex_t mtx;
base_t *base;
};
bool edata_cache_init(edata_cache_t *edata_cache, base_t *base);
edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache);
void edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata);
void edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache);
void edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache);
void edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache);
/*
* An edata_cache_fast is like an edata_cache, but it relies on external
* synchronization and avoids first-fit strategies.
*/
typedef struct edata_cache_fast_s edata_cache_fast_t;
struct edata_cache_fast_s {
edata_list_inactive_t list;
edata_cache_t *fallback;
bool disabled;
};
void edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback);
edata_t *edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs);
void edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs,
edata_t *edata);
void edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs);
#endif /* JEMALLOC_INTERNAL_EDATA_CACHE_H */
#ifndef JEMALLOC_INTERNAL_EHOOKS_H
#define JEMALLOC_INTERNAL_EHOOKS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/extent_mmap.h"
/*
* This module is the internal interface to the extent hooks (both
* user-specified and external). Eventually, this will give us the flexibility
* to use multiple different versions of user-visible extent-hook APIs under a
* single user interface.
*
* Current API expansions (not available to anyone but the default hooks yet):
* - Head state tracking. Hooks can decide whether or not to merge two
* extents based on whether or not one of them is the head (i.e. was
* allocated on its own). The later extent loses its "head" status.
*/
extern const extent_hooks_t ehooks_default_extent_hooks;
typedef struct ehooks_s ehooks_t;
struct ehooks_s {
/*
* The user-visible id that goes with the ehooks (i.e. that of the base
* they're a part of, the associated arena's index within the arenas
* array).
*/
unsigned ind;
/* Logically an extent_hooks_t *. */
atomic_p_t ptr;
};
extern const extent_hooks_t ehooks_default_extent_hooks;
/*
* These are not really part of the public API. Each hook has a fast-path for
* the default-hooks case that can avoid various small inefficiencies:
* - Forgetting tsd and then calling tsd_get within the hook.
* - Getting more state than necessary out of the extent_t.
* - Doing arena_ind -> arena -> arena_ind lookups.
 * By making the calls to these functions visible to the compiler, it can move
 * those extra bits of computation down past the fast paths, so the fast paths
 * don't pay for them.
*/
void *ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
bool ehooks_default_dalloc_impl(void *addr, size_t size);
void ehooks_default_destroy_impl(void *addr, size_t size);
bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length);
bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length);
#ifdef PAGES_CAN_PURGE_LAZY
bool ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length);
#endif
#ifdef PAGES_CAN_PURGE_FORCED
bool ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length);
#endif
bool ehooks_default_split_impl();
/*
* Merge is the only default extent hook we declare -- see the comment in
* ehooks_merge.
*/
bool ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a,
size_t size_a, void *addr_b, size_t size_b, bool committed,
unsigned arena_ind);
bool ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b);
void ehooks_default_zero_impl(void *addr, size_t size);
void ehooks_default_guard_impl(void *guard1, void *guard2);
void ehooks_default_unguard_impl(void *guard1, void *guard2);
/*
 * We don't officially support reentrancy from within the extent hooks. But
* various people who sit within throwing distance of the jemalloc team want
* that functionality in certain limited cases. The default reentrancy guards
* assert that we're not reentrant from a0 (since it's the bootstrap arena,
* where reentrant allocations would be redirected), which we would incorrectly
* trigger in cases where a0 has extent hooks (those hooks themselves can't be
* reentrant, then, but there are reasonable uses for such functionality, like
* putting internal metadata on hugepages). Therefore, we use the raw
* reentrancy guards.
*
* Eventually, we need to think more carefully about whether and where we
* support allocating from within extent hooks (and what that means for things
* like profiling, stats collection, etc.), and document what the guarantee is.
*/
static inline void
ehooks_pre_reentrancy(tsdn_t *tsdn) {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
tsd_pre_reentrancy_raw(tsd);
}
static inline void
ehooks_post_reentrancy(tsdn_t *tsdn) {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
tsd_post_reentrancy_raw(tsd);
}
/* Beginning of the public API. */
void ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind);
static inline unsigned
ehooks_ind_get(const ehooks_t *ehooks) {
return ehooks->ind;
}
static inline void
ehooks_set_extent_hooks_ptr(ehooks_t *ehooks, extent_hooks_t *extent_hooks) {
atomic_store_p(&ehooks->ptr, extent_hooks, ATOMIC_RELEASE);
}
static inline extent_hooks_t *
ehooks_get_extent_hooks_ptr(ehooks_t *ehooks) {
return (extent_hooks_t *)atomic_load_p(&ehooks->ptr, ATOMIC_ACQUIRE);
}
static inline bool
ehooks_are_default(ehooks_t *ehooks) {
return ehooks_get_extent_hooks_ptr(ehooks) ==
&ehooks_default_extent_hooks;
}
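/*
 * Illustrative sketch (not part of jemalloc): installing a hook table and
 * checking whether the defaults (and therefore the fast paths above) apply.
 * The _example name and the my_hooks parameter are hypothetical.
 */
static inline bool
ehooks_example_install(ehooks_t *ehooks, extent_hooks_t *my_hooks,
    unsigned arena_ind) {
	ehooks_init(ehooks, my_hooks, arena_ind);
	return ehooks_are_default(ehooks);
}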
/*
* In some cases, a caller needs to allocate resources before attempting to call
* a hook. If that hook is doomed to fail, this is wasteful. We therefore
* include some checks for such cases.
*/
static inline bool
ehooks_dalloc_will_fail(ehooks_t *ehooks) {
if (ehooks_are_default(ehooks)) {
return opt_retain;
} else {
return ehooks_get_extent_hooks_ptr(ehooks)->dalloc == NULL;
}
}
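/*
 * Illustrative sketch (not part of jemalloc): using the will-fail check to
 * skip preparatory work.  The _example name is hypothetical and the actual
 * preparation (metadata allocation, splitting, etc.) is elided.
 */
static inline bool
ehooks_example_should_prepare_dalloc(ehooks_t *ehooks) {
	if (ehooks_dalloc_will_fail(ehooks)) {
		/* Don't allocate resources; the extent will be retained. */
		return false;
	}
	/* ... safe to do the expensive preparation before the dalloc hook ... */
	return true;
}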
static inline bool
ehooks_split_will_fail(ehooks_t *ehooks) {
return ehooks_get_extent_hooks_ptr(ehooks)->split == NULL;
}
static inline bool
ehooks_merge_will_fail(ehooks_t *ehooks) {
return ehooks_get_extent_hooks_ptr(ehooks)->merge == NULL;
}
static inline bool
ehooks_guard_will_fail(ehooks_t *ehooks) {
/*
* Before the guard hooks are officially introduced, limit the use to
* the default hooks only.
*/
return !ehooks_are_default(ehooks);
}
/*
* Some hooks are required to return zeroed memory in certain situations. In
* debug mode, we do some heuristic checks that they did what they were supposed
* to.
*
* This isn't really ehooks-specific (i.e. anyone can check for zeroed memory).
* But incorrect zero information indicates an ehook bug.
*/
static inline void
ehooks_debug_zero_check(void *addr, size_t size) {
assert(((uintptr_t)addr & PAGE_MASK) == 0);
assert((size & PAGE_MASK) == 0);
assert(size > 0);
if (config_debug) {
/* Check the whole first page. */
size_t *p = (size_t *)addr;
for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
assert(p[i] == 0);
}
/*
* And 4 spots within. There's a tradeoff here; the larger
* this number, the more likely it is that we'll catch a bug
* where ehooks return a sparsely non-zero range. But
* increasing the number of checks also increases the number of
* page faults in debug mode. FreeBSD does much of their
* day-to-day development work in debug mode, so we don't want
* even the debug builds to be too slow.
*/
const size_t nchecks = 4;
assert(PAGE >= sizeof(size_t) * nchecks);
for (size_t i = 0; i < nchecks; ++i) {
assert(p[i * (size / sizeof(size_t) / nchecks)] == 0);
}
}
}
static inline void *
ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit) {
bool orig_zero = *zero;
void *ret;
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
ret = ehooks_default_alloc_impl(tsdn, new_addr, size,
alignment, zero, commit, ehooks_ind_get(ehooks));
} else {
ehooks_pre_reentrancy(tsdn);
ret = extent_hooks->alloc(extent_hooks, new_addr, size,
alignment, zero, commit, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
}
assert(new_addr == NULL || ret == NULL || new_addr == ret);
assert(!orig_zero || *zero);
if (*zero && ret != NULL) {
ehooks_debug_zero_check(ret, size);
}
return ret;
}
static inline bool
ehooks_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
bool committed) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
return ehooks_default_dalloc_impl(addr, size);
} else if (extent_hooks->dalloc == NULL) {
return true;
} else {
ehooks_pre_reentrancy(tsdn);
bool err = extent_hooks->dalloc(extent_hooks, addr, size,
committed, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
return err;
}
}
static inline void
ehooks_destroy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
bool committed) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
ehooks_default_destroy_impl(addr, size);
} else if (extent_hooks->destroy == NULL) {
/* Do nothing. */
} else {
ehooks_pre_reentrancy(tsdn);
extent_hooks->destroy(extent_hooks, addr, size, committed,
ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
}
}
static inline bool
ehooks_commit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
size_t offset, size_t length) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
bool err;
if (extent_hooks == &ehooks_default_extent_hooks) {
err = ehooks_default_commit_impl(addr, offset, length);
} else if (extent_hooks->commit == NULL) {
err = true;
} else {
ehooks_pre_reentrancy(tsdn);
err = extent_hooks->commit(extent_hooks, addr, size,
offset, length, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
}
if (!err) {
ehooks_debug_zero_check(addr, size);
}
return err;
}
static inline bool
ehooks_decommit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
size_t offset, size_t length) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
return ehooks_default_decommit_impl(addr, offset, length);
} else if (extent_hooks->decommit == NULL) {
return true;
} else {
ehooks_pre_reentrancy(tsdn);
bool err = extent_hooks->decommit(extent_hooks, addr, size,
offset, length, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
return err;
}
}
static inline bool
ehooks_purge_lazy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
size_t offset, size_t length) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
#ifdef PAGES_CAN_PURGE_LAZY
if (extent_hooks == &ehooks_default_extent_hooks) {
return ehooks_default_purge_lazy_impl(addr, offset, length);
}
#endif
if (extent_hooks->purge_lazy == NULL) {
return true;
} else {
ehooks_pre_reentrancy(tsdn);
bool err = extent_hooks->purge_lazy(extent_hooks, addr, size,
offset, length, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
return err;
}
}
static inline bool
ehooks_purge_forced(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
size_t offset, size_t length) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
/*
	 * It would be correct to have an ehooks_debug_zero_check call at the end
* of this function; purge_forced is required to zero. But checking
* would touch the page in question, which may have performance
* consequences (imagine the hooks are using hugepages, with a global
* zero page off). Even in debug mode, it's usually a good idea to
* avoid cases that can dramatically increase memory consumption.
*/
#ifdef PAGES_CAN_PURGE_FORCED
if (extent_hooks == &ehooks_default_extent_hooks) {
return ehooks_default_purge_forced_impl(addr, offset, length);
}
#endif
if (extent_hooks->purge_forced == NULL) {
return true;
} else {
ehooks_pre_reentrancy(tsdn);
bool err = extent_hooks->purge_forced(extent_hooks, addr, size,
offset, length, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
return err;
}
}
static inline bool
ehooks_split(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
size_t size_a, size_t size_b, bool committed) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (ehooks_are_default(ehooks)) {
return ehooks_default_split_impl();
} else if (extent_hooks->split == NULL) {
return true;
} else {
ehooks_pre_reentrancy(tsdn);
bool err = extent_hooks->split(extent_hooks, addr, size, size_a,
size_b, committed, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
return err;
}
}
static inline bool
ehooks_merge(tsdn_t *tsdn, ehooks_t *ehooks, void *addr_a, size_t size_a,
void *addr_b, size_t size_b, bool committed) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
} else if (extent_hooks->merge == NULL) {
return true;
} else {
ehooks_pre_reentrancy(tsdn);
bool err = extent_hooks->merge(extent_hooks, addr_a, size_a,
addr_b, size_b, committed, ehooks_ind_get(ehooks));
ehooks_post_reentrancy(tsdn);
return err;
}
}
static inline void
ehooks_zero(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size) {
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
ehooks_default_zero_impl(addr, size);
} else {
/*
* It would be correct to try using the user-provided purge
* hooks (since they are required to have zeroed the extent if
* they indicate success), but we don't necessarily know their
* cost. We'll be conservative and use memset.
*/
memset(addr, 0, size);
}
}
static inline bool
ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
bool err;
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
ehooks_default_guard_impl(guard1, guard2);
err = false;
} else {
err = true;
}
return err;
}
static inline bool
ehooks_unguard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
bool err;
extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
if (extent_hooks == &ehooks_default_extent_hooks) {
ehooks_default_unguard_impl(guard1, guard2);
err = false;
} else {
err = true;
}
return err;
}
#endif /* JEMALLOC_INTERNAL_EHOOKS_H */
#ifndef JEMALLOC_INTERNAL_EMAP_H
#define JEMALLOC_INTERNAL_EMAP_H
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
/*
 * Note: Ends without a semicolon, so that
* EMAP_DECLARE_RTREE_CTX;
* in uses will avoid empty-statement warnings.
*/
#define EMAP_DECLARE_RTREE_CTX \
rtree_ctx_t rtree_ctx_fallback; \
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback)
typedef struct emap_s emap_t;
struct emap_s {
rtree_t rtree;
};
/* Used to pass rtree lookup context down the path. */
typedef struct emap_alloc_ctx_t emap_alloc_ctx_t;
struct emap_alloc_ctx_t {
szind_t szind;
bool slab;
};
typedef struct emap_full_alloc_ctx_s emap_full_alloc_ctx_t;
struct emap_full_alloc_ctx_s {
szind_t szind;
bool slab;
edata_t *edata;
};
bool emap_init(emap_t *emap, base_t *base, bool zeroed);
void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
bool slab);
void emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_state_t state);
/*
* The two acquire functions below allow accessing neighbor edatas, if it's safe
* and valid to do so (i.e. from the same arena, of the same state, etc.). This
* is necessary because the ecache locks are state based, and only protect
* edatas with the same state. Therefore the neighbor edata's state needs to be
* verified first, before chasing the edata pointer. The returned edata will be
* in an acquired state, meaning other threads will be prevented from accessing
* it, even if technically the edata can still be discovered from the rtree.
*
* This means, at any moment when holding pointers to edata, either one of the
* state based locks is held (and the edatas are all of the protected state), or
* the edatas are in an acquired state (e.g. in active or merging state). The
* acquire operation itself (changing the edata to an acquired state) is done
* under the state locks.
*/
edata_t *emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap,
edata_t *edata, extent_pai_t pai, extent_state_t expected_state,
bool forward);
edata_t *emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
edata_t *edata, extent_pai_t pai, extent_state_t expected_state);
void emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
extent_state_t new_state);
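/*
 * Illustrative sketch (not part of jemalloc): acquiring a forward neighbor in
 * an assumed dirty-ecache context and releasing it again.  The _example name
 * and the choice of extent_state_dirty are hypothetical; the merge itself is
 * elided.
 */
static inline void
emap_example_probe_forward_neighbor(tsdn_t *tsdn, emap_t *emap,
    edata_t *edata) {
	edata_t *neighbor = emap_try_acquire_edata_neighbor(tsdn, emap, edata,
	    EXTENT_PAI_PAC, extent_state_dirty, /* forward */ true);
	if (neighbor == NULL) {
		return;	/* No neighbor that is safe to touch right now. */
	}
	/* ... both edatas are acquired; attempt coalescing here ... */
	emap_release_edata(tsdn, emap, neighbor, extent_state_dirty);
}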
/*
* Associate the given edata with its beginning and end address, setting the
* szind and slab info appropriately.
* Returns true on error (i.e. resource exhaustion).
*/
bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
szind_t szind, bool slab);
/*
* Does the same thing, but with the interior of the range, for slab
* allocations.
*
* You might wonder why we don't just have a single emap_register function that
* does both depending on the value of 'slab'. The answer is twofold:
* - As a practical matter, in places like the extract->split->commit pathway,
* we defer the interior operation until we're sure that the commit won't fail
* (but we have to register the split boundaries there).
* - In general, we're trying to move to a world where the page-specific
* allocator doesn't know as much about how the pages it allocates will be
* used, and passing a 'slab' parameter everywhere makes that more
* complicated.
*
* Unlike the boundary version, this function can't fail; this is because slabs
* can't get big enough to touch a new page that neither of the boundaries
* touched, so no allocation is necessary to fill the interior once the boundary
* has been touched.
*/
void emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
szind_t szind);
void emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
void emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
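/*
 * Illustrative sketch (not part of jemalloc): the registration order for a
 * new slab described by the comments above -- boundary first (may fail),
 * interior only once the slab is certain to be kept.  The _example name is
 * hypothetical.
 */
static inline bool
emap_example_register_slab(tsdn_t *tsdn, emap_t *emap, edata_t *slab,
    szind_t szind) {
	if (emap_register_boundary(tsdn, emap, slab, szind, /* slab */ true)) {
		return true;	/* Resource exhaustion; caller backs out. */
	}
	emap_register_interior(tsdn, emap, slab, szind);
	return false;
}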
typedef struct emap_prepare_s emap_prepare_t;
struct emap_prepare_s {
rtree_leaf_elm_t *lead_elm_a;
rtree_leaf_elm_t *lead_elm_b;
rtree_leaf_elm_t *trail_elm_a;
rtree_leaf_elm_t *trail_elm_b;
};
/*
 * These functions do the emap metadata management for merging, splitting, and
* reusing extents. In particular, they set the boundary mappings from
* addresses to edatas. If the result is going to be used as a slab, you
* still need to call emap_register_interior on it, though.
*
* Remap simply changes the szind and slab status of an extent's boundary
* mappings. If the extent is not a slab, it doesn't bother with updating the
* end mapping (since lookups only occur in the interior of an extent for
* slabs). Since the szind and slab status only make sense for active extents,
* this should only be called while activating or deactivating an extent.
*
* Split and merge have a "prepare" and a "commit" portion. The prepare portion
* does the operations that can be done without exclusive access to the extent
* in question, while the commit variant requires exclusive access to maintain
* the emap invariants. The only function that can fail is emap_split_prepare,
* and it returns true on failure (at which point the caller shouldn't commit).
*
* In all cases, "lead" refers to the lower-addressed extent, and trail to the
* higher-addressed one. It's the caller's responsibility to set the edata
* state appropriately.
*/
bool emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *edata, size_t size_a, edata_t *trail, size_t size_b);
void emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, size_t size_a, edata_t *trail, size_t size_b);
void emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, edata_t *trail);
void emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
edata_t *lead, edata_t *trail);
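/*
 * Illustrative sketch (not part of jemalloc): the prepare/commit ordering for
 * a split.  The _example name is hypothetical; the caller's adjustments to
 * the two edatas between the two calls are elided.
 */
static inline bool
emap_example_split(tsdn_t *tsdn, emap_t *emap, edata_t *lead, size_t size_a,
    edata_t *trail, size_t size_b) {
	emap_prepare_t prepare;
	if (emap_split_prepare(tsdn, emap, &prepare, lead, size_a, trail,
	    size_b)) {
		return true;	/* Never commit after a failed prepare. */
	}
	/* ... resize/readdress lead and trail under exclusive access ... */
	emap_split_commit(tsdn, emap, &prepare, lead, size_a, trail, size_b);
	return false;
}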
/* Assert that the emap's view of the given edata matches the edata's view. */
void emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
static inline void
emap_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
if (config_debug) {
emap_do_assert_mapped(tsdn, emap, edata);
}
}
/* Assert that the given edata isn't in the map. */
void emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
static inline void
emap_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
if (config_debug) {
emap_do_assert_not_mapped(tsdn, emap, edata);
}
}
JEMALLOC_ALWAYS_INLINE bool
emap_edata_in_transition(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
assert(config_debug);
emap_assert_mapped(tsdn, emap, edata);
EMAP_DECLARE_RTREE_CTX;
rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)edata_base_get(edata));
return edata_state_in_transition(contents.metadata.state);
}
JEMALLOC_ALWAYS_INLINE bool
emap_edata_is_acquired(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
if (!config_debug) {
/* For assertions only. */
return false;
}
/*
* The edata is considered acquired if no other threads will attempt to
* read / write any fields from it. This includes a few cases:
*
* 1) edata not hooked into emap yet -- This implies the edata just got
* allocated or initialized.
*
* 2) in an active or transition state -- In both cases, the edata can
* be discovered from the emap, however the state tracked in the rtree
* will prevent other threads from accessing the actual edata.
*/
EMAP_DECLARE_RTREE_CTX;
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
/* init_missing */ false);
if (elm == NULL) {
return true;
}
rtree_contents_t contents = rtree_leaf_elm_read(tsdn, &emap->rtree, elm,
/* dependent */ true);
if (contents.edata == NULL ||
contents.metadata.state == extent_state_active ||
edata_state_in_transition(contents.metadata.state)) {
return true;
}
return false;
}
JEMALLOC_ALWAYS_INLINE void
extent_assert_can_coalesce(const edata_t *inner, const edata_t *outer) {
assert(edata_arena_ind_get(inner) == edata_arena_ind_get(outer));
assert(edata_pai_get(inner) == edata_pai_get(outer));
assert(edata_committed_get(inner) == edata_committed_get(outer));
assert(edata_state_get(inner) == extent_state_active);
assert(edata_state_get(outer) == extent_state_merging);
assert(!edata_guarded_get(inner) && !edata_guarded_get(outer));
assert(edata_base_get(inner) == edata_past_get(outer) ||
edata_base_get(outer) == edata_past_get(inner));
}
JEMALLOC_ALWAYS_INLINE void
extent_assert_can_expand(const edata_t *original, const edata_t *expand) {
assert(edata_arena_ind_get(original) == edata_arena_ind_get(expand));
assert(edata_pai_get(original) == edata_pai_get(expand));
assert(edata_state_get(original) == extent_state_active);
assert(edata_state_get(expand) == extent_state_merging);
assert(edata_past_get(original) == edata_base_get(expand));
}
JEMALLOC_ALWAYS_INLINE edata_t *
emap_edata_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) {
EMAP_DECLARE_RTREE_CTX;
return rtree_read(tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr).edata;
}
/* Fills in alloc_ctx with the info in the map. */
JEMALLOC_ALWAYS_INLINE void
emap_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
emap_alloc_ctx_t *alloc_ctx) {
EMAP_DECLARE_RTREE_CTX;
rtree_metadata_t metadata = rtree_metadata_read(tsdn, &emap->rtree,
rtree_ctx, (uintptr_t)ptr);
alloc_ctx->szind = metadata.szind;
alloc_ctx->slab = metadata.slab;
}
/* The pointer must be mapped. */
JEMALLOC_ALWAYS_INLINE void
emap_full_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
emap_full_alloc_ctx_t *full_alloc_ctx) {
EMAP_DECLARE_RTREE_CTX;
rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)ptr);
full_alloc_ctx->edata = contents.edata;
full_alloc_ctx->szind = contents.metadata.szind;
full_alloc_ctx->slab = contents.metadata.slab;
}
/*
* The pointer is allowed to not be mapped.
*
* Returns true when the pointer is not present.
*/
JEMALLOC_ALWAYS_INLINE bool
emap_full_alloc_ctx_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
emap_full_alloc_ctx_t *full_alloc_ctx) {
EMAP_DECLARE_RTREE_CTX;
rtree_contents_t contents;
bool err = rtree_read_independent(tsdn, &emap->rtree, rtree_ctx,
(uintptr_t)ptr, &contents);
if (err) {
return true;
}
full_alloc_ctx->edata = contents.edata;
full_alloc_ctx->szind = contents.metadata.szind;
full_alloc_ctx->slab = contents.metadata.slab;
return false;
}
/*
 * Only used on the fast path of free.  Returns true when the lookup cannot be
 * fulfilled by the fast path, e.g. when the metadata key is not cached.
*/
JEMALLOC_ALWAYS_INLINE bool
emap_alloc_ctx_try_lookup_fast(tsd_t *tsd, emap_t *emap, const void *ptr,
emap_alloc_ctx_t *alloc_ctx) {
	/* Use the unsafe getter since this may get called during exit. */
rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get_unsafe(tsd);
rtree_metadata_t metadata;
bool err = rtree_metadata_try_read_fast(tsd_tsdn(tsd), &emap->rtree,
rtree_ctx, (uintptr_t)ptr, &metadata);
if (err) {
return true;
}
alloc_ctx->szind = metadata.szind;
alloc_ctx->slab = metadata.slab;
return false;
}
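/*
 * Illustrative sketch (not part of jemalloc): how a free fast path might use
 * the try-lookup, falling back to a slower, fully general path when the
 * metadata isn't cached.  The _example name is hypothetical.
 */
static inline bool
emap_example_free_fast(tsd_t *tsd, emap_t *emap, void *ptr) {
	emap_alloc_ctx_t alloc_ctx;
	if (emap_alloc_ctx_try_lookup_fast(tsd, emap, ptr, &alloc_ctx)) {
		return false;	/* Not cached; caller takes the slow path. */
	}
	/* ... deallocate using alloc_ctx.szind and alloc_ctx.slab ... */
	return true;
}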
/*
* We want to do batch lookups out of the cache bins, which use
* cache_bin_ptr_array_get to access the i'th element of the bin (since they
* invert usual ordering in deciding what to flush). This lets the emap avoid
* caring about its caller's ordering.
*/
typedef const void *(*emap_ptr_getter)(void *ctx, size_t ind);
/*
* This allows size-checking assertions, which we can only do while we're in the
* process of edata lookups.
*/
typedef void (*emap_metadata_visitor)(void *ctx, emap_full_alloc_ctx_t *alloc_ctx);
typedef union emap_batch_lookup_result_u emap_batch_lookup_result_t;
union emap_batch_lookup_result_u {
edata_t *edata;
rtree_leaf_elm_t *rtree_leaf;
};
JEMALLOC_ALWAYS_INLINE void
emap_edata_lookup_batch(tsd_t *tsd, emap_t *emap, size_t nptrs,
emap_ptr_getter ptr_getter, void *ptr_getter_ctx,
emap_metadata_visitor metadata_visitor, void *metadata_visitor_ctx,
emap_batch_lookup_result_t *result) {
/* Avoids null-checking tsdn in the loop below. */
util_assume(tsd != NULL);
rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get(tsd);
for (size_t i = 0; i < nptrs; i++) {
const void *ptr = ptr_getter(ptr_getter_ctx, i);
/*
* Reuse the edatas array as a temp buffer, lying a little about
* the types.
*/
result[i].rtree_leaf = rtree_leaf_elm_lookup(tsd_tsdn(tsd),
&emap->rtree, rtree_ctx, (uintptr_t)ptr,
/* dependent */ true, /* init_missing */ false);
}
for (size_t i = 0; i < nptrs; i++) {
rtree_leaf_elm_t *elm = result[i].rtree_leaf;
rtree_contents_t contents = rtree_leaf_elm_read(tsd_tsdn(tsd),
&emap->rtree, elm, /* dependent */ true);
result[i].edata = contents.edata;
emap_full_alloc_ctx_t alloc_ctx;
/*
* Not all these fields are read in practice by the metadata
* visitor. But the compiler can easily optimize away the ones
* that aren't, so no sense in being incomplete.
*/
alloc_ctx.szind = contents.metadata.szind;
alloc_ctx.slab = contents.metadata.slab;
alloc_ctx.edata = contents.edata;
metadata_visitor(metadata_visitor_ctx, &alloc_ctx);
}
}
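/*
 * Illustrative sketch (not part of jemalloc): a minimal ptr_getter /
 * metadata_visitor pair for a caller that simply has a flat array of
 * pointers.  All _example names are hypothetical.
 */
static const void *
emap_example_ptr_getter(void *ctx, size_t ind) {
	return ((void **)ctx)[ind];
}
static void
emap_example_metadata_visitor(void *ctx, emap_full_alloc_ctx_t *alloc_ctx) {
	size_t *nslabs = (size_t *)ctx;
	if (alloc_ctx->slab) {
		(*nslabs)++;	/* E.g. count slab-backed allocations. */
	}
}
static inline void
emap_example_lookup_batch(tsd_t *tsd, emap_t *emap, void **ptrs, size_t nptrs,
    emap_batch_lookup_result_t *result, size_t *nslabs) {
	emap_edata_lookup_batch(tsd, emap, nptrs, emap_example_ptr_getter,
	    (void *)ptrs, emap_example_metadata_visitor, (void *)nslabs,
	    result);
}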
#endif /* JEMALLOC_INTERNAL_EMAP_H */
@@ -6,6 +6,7 @@
 typedef enum emitter_output_e emitter_output_t;
 enum emitter_output_e {
 	emitter_output_json,
+	emitter_output_json_compact,
 	emitter_output_table
 };
@@ -21,6 +22,7 @@ typedef enum emitter_type_e emitter_type_t;
 enum emitter_type_e {
 	emitter_type_bool,
 	emitter_type_int,
+	emitter_type_int64,
 	emitter_type_unsigned,
 	emitter_type_uint32,
 	emitter_type_uint64,
@@ -66,7 +68,7 @@ typedef struct emitter_s emitter_t;
 struct emitter_s {
 	emitter_output_t output;
 	/* The output information. */
-	void (*write_cb)(void *, const char *);
+	write_cb_t *write_cb;
 	void *cbopaque;
 	int nesting_depth;
 	/* True if we've already emitted a value at the given depth. */
@@ -75,6 +77,12 @@ struct emitter_s {
 	bool emitted_key;
 };
+static inline bool
+emitter_outputs_json(emitter_t *emitter) {
+	return emitter->output == emitter_output_json ||
+	    emitter->output == emitter_output_json_compact;
+}
 /* Internal convenience function. Write to the emitter the given string. */
 JEMALLOC_FORMAT_PRINTF(2, 3)
 static inline void
@@ -135,13 +143,16 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
 	switch (value_type) {
 	case emitter_type_bool:
 		emitter_printf(emitter,
 		    emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width),
 		    *(const bool *)value ? "true" : "false");
 		break;
 	case emitter_type_int:
 		EMIT_SIMPLE(int, "%d")
 		break;
+	case emitter_type_int64:
+		EMIT_SIMPLE(int64_t, "%" FMTd64)
+		break;
 	case emitter_type_unsigned:
 		EMIT_SIMPLE(unsigned, "%u")
 		break;
@@ -159,7 +170,7 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
 		 * anywhere near the fmt size.
 		 */
 		assert(str_written < BUF_SIZE);
 		emitter_printf(emitter,
 		    emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), buf);
 		break;
 	case emitter_type_uint32:
@@ -196,6 +207,7 @@ static inline void
 emitter_indent(emitter_t *emitter) {
 	int amount = emitter->nesting_depth;
 	const char *indent_str;
+	assert(emitter->output != emitter_output_json_compact);
 	if (emitter->output == emitter_output_json) {
 		indent_str = "\t";
 	} else {
@@ -209,12 +221,18 @@ emitter_indent(emitter_t *emitter) {
 static inline void
 emitter_json_key_prefix(emitter_t *emitter) {
+	assert(emitter_outputs_json(emitter));
 	if (emitter->emitted_key) {
 		emitter->emitted_key = false;
 		return;
 	}
-	emitter_printf(emitter, "%s\n", emitter->item_at_depth ? "," : "");
-	emitter_indent(emitter);
+	if (emitter->item_at_depth) {
+		emitter_printf(emitter, ",");
+	}
+	if (emitter->output != emitter_output_json_compact) {
+		emitter_printf(emitter, "\n");
+		emitter_indent(emitter);
+	}
 }
 /******************************************************************************/
@@ -222,27 +240,28 @@ emitter_json_key_prefix(emitter_t *emitter) {
 static inline void
 emitter_init(emitter_t *emitter, emitter_output_t emitter_output,
-    void (*write_cb)(void *, const char *), void *cbopaque) {
+    write_cb_t *write_cb, void *cbopaque) {
 	emitter->output = emitter_output;
 	emitter->write_cb = write_cb;
 	emitter->cbopaque = cbopaque;
 	emitter->item_at_depth = false;
 	emitter->emitted_key = false;
 	emitter->nesting_depth = 0;
 }
 /******************************************************************************/
 /* JSON public API. */
 /*
  * Emits a key (e.g. as appears in an object). The next json entity emitted will
  * be the corresponding value.
  */
 static inline void
 emitter_json_key(emitter_t *emitter, const char *json_key) {
-	if (emitter->output == emitter_output_json) {
+	if (emitter_outputs_json(emitter)) {
 		emitter_json_key_prefix(emitter);
-		emitter_printf(emitter, "\"%s\": ", json_key);
+		emitter_printf(emitter, "\"%s\":%s", json_key,
+		    emitter->output == emitter_output_json_compact ? "" : " ");
 		emitter->emitted_key = true;
 	}
 }
@@ -250,7 +269,7 @@ emitter_json_key(emitter_t *emitter, const char *json_key) {
 static inline void
 emitter_json_value(emitter_t *emitter, emitter_type_t value_type,
     const void *value) {
-	if (emitter->output == emitter_output_json) {
+	if (emitter_outputs_json(emitter)) {
 		emitter_json_key_prefix(emitter);
 		emitter_print_value(emitter, emitter_justify_none, -1,
 		    value_type, value);
@@ -268,7 +287,7 @@ emitter_json_kv(emitter_t *emitter, const char *json_key,
 static inline void
 emitter_json_array_begin(emitter_t *emitter) {
-	if (emitter->output == emitter_output_json) {
+	if (emitter_outputs_json(emitter)) {
 		emitter_json_key_prefix(emitter);
 		emitter_printf(emitter, "[");
 		emitter_nest_inc(emitter);
@@ -284,18 +303,20 @@ emitter_json_array_kv_begin(emitter_t *emitter, const char *json_key) {
 static inline void
 emitter_json_array_end(emitter_t *emitter) {
-	if (emitter->output == emitter_output_json) {
+	if (emitter_outputs_json(emitter)) {
 		assert(emitter->nesting_depth > 0);
 		emitter_nest_dec(emitter);
-		emitter_printf(emitter, "\n");
-		emitter_indent(emitter);
+		if (emitter->output != emitter_output_json_compact) {
+			emitter_printf(emitter, "\n");
+			emitter_indent(emitter);
+		}
 		emitter_printf(emitter, "]");
 	}
 }
 static inline void
 emitter_json_object_begin(emitter_t *emitter) {
-	if (emitter->output == emitter_output_json) {
+	if (emitter_outputs_json(emitter)) {
 		emitter_json_key_prefix(emitter);
 		emitter_printf(emitter, "{");
 		emitter_nest_inc(emitter);
@@ -311,11 +332,13 @@ emitter_json_object_kv_begin(emitter_t *emitter, const char *json_key) {
 static inline void
 emitter_json_object_end(emitter_t *emitter) {
-	if (emitter->output == emitter_output_json) {
+	if (emitter_outputs_json(emitter)) {
 		assert(emitter->nesting_depth > 0);
 		emitter_nest_dec(emitter);
-		emitter_printf(emitter, "\n");
-		emitter_indent(emitter);
+		if (emitter->output != emitter_output_json_compact) {
+			emitter_printf(emitter, "\n");
+			emitter_indent(emitter);
+		}
 		emitter_printf(emitter, "}");
 	}
 }
@@ -420,7 +443,7 @@ emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
     emitter_type_t value_type, const void *value,
     const char *table_note_key, emitter_type_t table_note_value_type,
     const void *table_note_value) {
-	if (emitter->output == emitter_output_json) {
+	if (emitter_outputs_json(emitter)) {
 		emitter_json_key(emitter, json_key);
 		emitter_json_value(emitter, value_type, value);
 	} else {
@@ -440,7 +463,7 @@ emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
 static inline void
 emitter_dict_begin(emitter_t *emitter, const char *json_key,
     const char *table_header) {
-	if (emitter->output == emitter_output_json) {
+	if (emitter_outputs_json(emitter)) {
 		emitter_json_key(emitter, json_key);
 		emitter_json_object_begin(emitter);
 	} else {
@@ -450,7 +473,7 @@ emitter_dict_begin(emitter_t *emitter, const char *json_key,
 static inline void
 emitter_dict_end(emitter_t *emitter) {
-	if (emitter->output == emitter_output_json) {
+	if (emitter_outputs_json(emitter)) {
 		emitter_json_object_end(emitter);
 	} else {
 		emitter_table_dict_end(emitter);
@@ -459,7 +482,7 @@ emitter_dict_end(emitter_t *emitter) {
 static inline void
 emitter_begin(emitter_t *emitter) {
-	if (emitter->output == emitter_output_json) {
+	if (emitter_outputs_json(emitter)) {
 		assert(emitter->nesting_depth == 0);
 		emitter_printf(emitter, "{");
 		emitter_nest_inc(emitter);
@@ -476,10 +499,11 @@ emitter_begin(emitter_t *emitter) {
 static inline void
 emitter_end(emitter_t *emitter) {
-	if (emitter->output == emitter_output_json) {
+	if (emitter_outputs_json(emitter)) {
 		assert(emitter->nesting_depth == 1);
 		emitter_nest_dec(emitter);
-		emitter_printf(emitter, "\n}\n");
+		emitter_printf(emitter, "%s", emitter->output ==
+		    emitter_output_json_compact ? "}" : "\n}\n");
 	}
 }
#ifndef JEMALLOC_INTERNAL_ESET_H
#define JEMALLOC_INTERNAL_ESET_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/fb.h"
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/mutex.h"
/*
* An eset ("extent set") is a quantized collection of extents, with built-in
* LRU queue.
*
* This class is not thread-safe; synchronization must be done externally if
* there are mutating operations. One exception is the stats counters, which
* may be read without any locking.
*/
typedef struct eset_bin_s eset_bin_t;
struct eset_bin_s {
edata_heap_t heap;
/*
* We do first-fit across multiple size classes. If we compared against
* the min element in each heap directly, we'd take a cache miss per
* extent we looked at. If we co-locate the edata summaries, we only
* take a miss on the edata we're actually going to return (which is
* inevitable anyways).
*/
edata_cmp_summary_t heap_min;
};
typedef struct eset_bin_stats_s eset_bin_stats_t;
struct eset_bin_stats_s {
atomic_zu_t nextents;
atomic_zu_t nbytes;
};
typedef struct eset_s eset_t;
struct eset_s {
/* Bitmap for which set bits correspond to non-empty heaps. */
fb_group_t bitmap[FB_NGROUPS(SC_NPSIZES + 1)];
/* Quantized per size class heaps of extents. */
eset_bin_t bins[SC_NPSIZES + 1];
eset_bin_stats_t bin_stats[SC_NPSIZES + 1];
/* LRU of all extents in heaps. */
edata_list_inactive_t lru;
/* Page sum for all extents in heaps. */
atomic_zu_t npages;
/*
* A duplication of the data in the containing ecache. We use this only
* for assertions on the states of the passed-in extents.
*/
extent_state_t state;
};
void eset_init(eset_t *eset, extent_state_t state);
size_t eset_npages_get(eset_t *eset);
/* Get the number of extents in the given page size index. */
size_t eset_nextents_get(eset_t *eset, pszind_t ind);
/* Get the sum total bytes of the extents in the given page size index. */
size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
void eset_insert(eset_t *eset, edata_t *edata);
void eset_remove(eset_t *eset, edata_t *edata);
/*
* Select an extent from this eset of the given size and alignment. Returns
* null if no such item could be found.
*/
edata_t *eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only,
unsigned lg_max_fit);
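/*
 * Illustrative sketch (not part of jemalloc): the fit-then-remove pattern a
 * caller would use to take an extent out of the set.  The _example name is
 * hypothetical, and the caller must provide external synchronization.
 */
static inline edata_t *
eset_example_take(eset_t *eset, size_t esize, size_t alignment,
    unsigned lg_max_fit) {
	edata_t *edata = eset_fit(eset, esize, alignment,
	    /* exact_only */ false, lg_max_fit);
	if (edata != NULL) {
		eset_remove(eset, edata);	/* Selection does not remove. */
	}
	return edata;
}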
#endif /* JEMALLOC_INTERNAL_ESET_H */
#ifndef JEMALLOC_INTERNAL_EXP_GROW_H
#define JEMALLOC_INTERNAL_EXP_GROW_H
typedef struct exp_grow_s exp_grow_t;
struct exp_grow_s {
/*
* Next extent size class in a growing series to use when satisfying a
* request via the extent hooks (only if opt_retain). This limits the
* number of disjoint virtual memory ranges so that extent merging can
* be effective even if multiple arenas' extent allocation requests are
* highly interleaved.
*
	 * retain_grow_limit is the max allowed size index to expand to (unless the
* required size is greater). Default is no limit, and controlled
* through mallctl only.
*/
pszind_t next;
pszind_t limit;
};
static inline bool
exp_grow_size_prepare(exp_grow_t *exp_grow, size_t alloc_size_min,
size_t *r_alloc_size, pszind_t *r_skip) {
*r_skip = 0;
*r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
while (*r_alloc_size < alloc_size_min) {
(*r_skip)++;
if (exp_grow->next + *r_skip >=
sz_psz2ind(SC_LARGE_MAXCLASS)) {
/* Outside legal range. */
return true;
}
*r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
}
return false;
}
static inline void
exp_grow_size_commit(exp_grow_t *exp_grow, pszind_t skip) {
if (exp_grow->next + skip + 1 <= exp_grow->limit) {
exp_grow->next += skip + 1;
} else {
exp_grow->next = exp_grow->limit;
}
}
void exp_grow_init(exp_grow_t *exp_grow);
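/*
 * Illustrative sketch (not part of jemalloc): the prepare/commit pattern a
 * grower follows.  The _example name is hypothetical and the actual extent
 * allocation attempt is elided.
 */
static inline bool
exp_grow_example(exp_grow_t *exp_grow, size_t alloc_size_min) {
	size_t alloc_size;
	pszind_t skip;
	if (exp_grow_size_prepare(exp_grow, alloc_size_min, &alloc_size,
	    &skip)) {
		return true;	/* Request falls outside the legal range. */
	}
	/* ... try to allocate alloc_size bytes via the extent hooks ... */
	/* Advance the growth series only if the allocation succeeded. */
	exp_grow_size_commit(exp_grow, skip);
	return false;
}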
#endif /* JEMALLOC_INTERNAL_EXP_GROW_H */
#ifndef JEMALLOC_INTERNAL_EXTENT_H
#define JEMALLOC_INTERNAL_EXTENT_H
#include "jemalloc/internal/ecache.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
/*
* This module contains the page-level allocator. It chooses the addresses that
* allocations requested by other modules will inhabit, and updates the global
* metadata to reflect allocation/deallocation/purging decisions.
*/
/*
 * When reusing (and splitting) an active extent, (1U <<
 * opt_lg_extent_max_active_fit) is the maximum allowed ratio between the size
 * of the active extent and the size of the new extent.
*/
#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
extern size_t opt_lg_extent_max_active_fit;
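/*
 * Worked example (not from the source): with the default value of 6, a
 * request that needs a 16 KiB extent may be satisfied by splitting an active
 * extent of up to 16 KiB * (1U << 6) = 1 MiB; larger active extents are not
 * reused for it.
 */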
edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
bool zero, bool guarded);
edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
bool zero, bool guarded);
void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *edata);
edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, size_t npages_min);
void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata);
void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
edata_t *edata);
void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata);
edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
bool growing_retained);
void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata);
void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *edata);
bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length);
bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length);
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length);
bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
size_t offset, size_t length);
edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac,
ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b,
bool holding_core_locks);
bool extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata_t *a, edata_t *b);
bool extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
bool commit, bool zero, bool growing_retained);
size_t extent_sn_next(pac_t *pac);
bool extent_boot(void);
JEMALLOC_ALWAYS_INLINE bool
extent_neighbor_head_state_mergeable(bool edata_is_head,
bool neighbor_is_head, bool forward) {
/*
	 * Head state checking: disallow merging if the higher-addressed extent
	 * is a head extent.  This helps preserve first-fit and, more
	 * importantly, makes sure there is no merging across arenas.
*/
if (forward) {
if (neighbor_is_head) {
return false;
}
} else {
if (edata_is_head) {
return false;
}
}
return true;
}
JEMALLOC_ALWAYS_INLINE bool
extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
extent_pai_t pai, extent_state_t expected_state, bool forward,
bool expanding) {
edata_t *neighbor = contents.edata;
if (neighbor == NULL) {
return false;
}
/* It's not safe to access *neighbor yet; must verify states first. */
bool neighbor_is_head = contents.metadata.is_head;
if (!extent_neighbor_head_state_mergeable(edata_is_head_get(edata),
neighbor_is_head, forward)) {
return false;
}
extent_state_t neighbor_state = contents.metadata.state;
if (pai == EXTENT_PAI_PAC) {
if (neighbor_state != expected_state) {
return false;
}
/* From this point, it's safe to access *neighbor. */
if (!expanding && (edata_committed_get(edata) !=
edata_committed_get(neighbor))) {
/*
* Some platforms (e.g. Windows) require an explicit
* commit step (and writing to uncommitted memory is not
* allowed).
*/
return false;
}
} else {
if (neighbor_state == extent_state_active) {
return false;
}
/* From this point, it's safe to access *neighbor. */
}
assert(edata_pai_get(edata) == pai);
if (edata_pai_get(neighbor) != pai) {
return false;
}
if (opt_retain) {
assert(edata_arena_ind_get(edata) ==
edata_arena_ind_get(neighbor));
} else {
if (edata_arena_ind_get(edata) !=
edata_arena_ind_get(neighbor)) {
return false;
}
}
assert(!edata_guarded_get(edata) && !edata_guarded_get(neighbor));
return true;
}
#endif /* JEMALLOC_INTERNAL_EXTENT_H */
#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
extern size_t opt_lg_extent_max_active_fit;
extern rtree_t extents_rtree;
extern const extent_hooks_t extent_hooks_default;
extern mutex_pool_t extent_mutex_pool;
extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
extent_hooks_t *extent_hooks_get(arena_t *arena);
extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
extent_hooks_t *extent_hooks);
#ifdef JEMALLOC_JET
size_t extent_size_quantize_floor(size_t size);
size_t extent_size_quantize_ceil(size_t size);
#endif
ph_proto(, extent_avail_, extent_tree_t, extent_t)
ph_proto(, extent_heap_, extent_heap_t, extent_t)
bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
bool delay_coalesce);
extent_state_t extents_state_get(const extents_t *extents);
size_t extents_npages_get(extents_t *extents);
/* Get the number of extents in the given page size index. */
size_t extents_nextents_get(extents_t *extents, pszind_t ind);
/* Get the sum total bytes of the extents in the given page size index. */
size_t extents_nbytes_get(extents_t *extents, pszind_t ind);
extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
bool *zero, bool *commit);
void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
void extents_prefork(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length);
extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);
bool extent_boot(void);
void extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
size_t *nfree, size_t *nregs, size_t *size);
void extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
size_t *nfree, size_t *nregs, size_t *size,
size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr);
#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */
#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
#define JEMALLOC_INTERNAL_EXTENT_INLINES_H
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
static inline void
extent_lock(tsdn_t *tsdn, extent_t *extent) {
assert(extent != NULL);
mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
}
static inline void
extent_unlock(tsdn_t *tsdn, extent_t *extent) {
assert(extent != NULL);
mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
}
static inline void
extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
assert(extent1 != NULL && extent2 != NULL);
mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
(uintptr_t)extent2);
}
static inline void
extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
assert(extent1 != NULL && extent2 != NULL);
mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
(uintptr_t)extent2);
}
static inline unsigned
extent_arena_ind_get(const extent_t *extent) {
unsigned arena_ind = (unsigned)((extent->e_bits &
EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
assert(arena_ind < MALLOCX_ARENA_LIMIT);
return arena_ind;
}
static inline arena_t *
extent_arena_get(const extent_t *extent) {
unsigned arena_ind = extent_arena_ind_get(extent);
return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
}
static inline szind_t
extent_szind_get_maybe_invalid(const extent_t *extent) {
szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
EXTENT_BITS_SZIND_SHIFT);
assert(szind <= SC_NSIZES);
return szind;
}
static inline szind_t
extent_szind_get(const extent_t *extent) {
szind_t szind = extent_szind_get_maybe_invalid(extent);
assert(szind < SC_NSIZES); /* Never call when "invalid". */
return szind;
}
static inline size_t
extent_usize_get(const extent_t *extent) {
return sz_index2size(extent_szind_get(extent));
}
static inline unsigned
extent_binshard_get(const extent_t *extent) {
unsigned binshard = (unsigned)((extent->e_bits &
EXTENT_BITS_BINSHARD_MASK) >> EXTENT_BITS_BINSHARD_SHIFT);
assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
return binshard;
}
static inline size_t
extent_sn_get(const extent_t *extent) {
return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
EXTENT_BITS_SN_SHIFT);
}
static inline extent_state_t
extent_state_get(const extent_t *extent) {
return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
EXTENT_BITS_STATE_SHIFT);
}
static inline bool
extent_zeroed_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
EXTENT_BITS_ZEROED_SHIFT);
}
static inline bool
extent_committed_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
EXTENT_BITS_COMMITTED_SHIFT);
}
static inline bool
extent_dumpable_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
EXTENT_BITS_DUMPABLE_SHIFT);
}
static inline bool
extent_slab_get(const extent_t *extent) {
return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
EXTENT_BITS_SLAB_SHIFT);
}
static inline unsigned
extent_nfree_get(const extent_t *extent) {
assert(extent_slab_get(extent));
return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
EXTENT_BITS_NFREE_SHIFT);
}
static inline void *
extent_base_get(const extent_t *extent) {
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent_slab_get(extent));
return PAGE_ADDR2BASE(extent->e_addr);
}
static inline void *
extent_addr_get(const extent_t *extent) {
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent_slab_get(extent));
return extent->e_addr;
}
static inline size_t
extent_size_get(const extent_t *extent) {
return (extent->e_size_esn & EXTENT_SIZE_MASK);
}
static inline size_t
extent_esn_get(const extent_t *extent) {
return (extent->e_size_esn & EXTENT_ESN_MASK);
}
static inline size_t
extent_bsize_get(const extent_t *extent) {
return extent->e_bsize;
}
static inline void *
extent_before_get(const extent_t *extent) {
return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
}
static inline void *
extent_last_get(const extent_t *extent) {
return (void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent) - PAGE);
}
static inline void *
extent_past_get(const extent_t *extent) {
return (void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent));
}
static inline arena_slab_data_t *
extent_slab_data_get(extent_t *extent) {
assert(extent_slab_get(extent));
return &extent->e_slab_data;
}
static inline const arena_slab_data_t *
extent_slab_data_get_const(const extent_t *extent) {
assert(extent_slab_get(extent));
return &extent->e_slab_data;
}
static inline prof_tctx_t *
extent_prof_tctx_get(const extent_t *extent) {
return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
ATOMIC_ACQUIRE);
}
static inline nstime_t
extent_prof_alloc_time_get(const extent_t *extent) {
return extent->e_alloc_time;
}
static inline void
extent_arena_set(extent_t *extent, arena_t *arena) {
unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
MALLOCX_ARENA_BITS) - 1);
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
}
static inline void
extent_binshard_set(extent_t *extent, unsigned binshard) {
/* The assertion assumes szind is set already. */
assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_BINSHARD_MASK) |
((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT);
}
static inline void
extent_addr_set(extent_t *extent, void *addr) {
extent->e_addr = addr;
}
static inline void
extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
assert(extent_base_get(extent) == extent_addr_get(extent));
if (alignment < PAGE) {
unsigned lg_range = LG_PAGE -
lg_floor(CACHELINE_CEILING(alignment));
size_t r;
if (!tsdn_null(tsdn)) {
tsd_t *tsd = tsdn_tsd(tsdn);
r = (size_t)prng_lg_range_u64(
tsd_offset_statep_get(tsd), lg_range);
} else {
r = prng_lg_range_zu(
&extent_arena_get(extent)->offset_state,
lg_range, true);
}
uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
lg_range);
extent->e_addr = (void *)((uintptr_t)extent->e_addr +
random_offset);
assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
extent->e_addr);
}
}
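/*
 * Editorial note (assuming LG_PAGE == 12 and 64-byte cachelines): for
 * alignment == 64 the code above computes lg_range == 12 - 6 == 6, so r is
 * drawn from [0, 63] and random_offset == r << 6 is a multiple of 64 within
 * the page, which is why the final alignment assertion holds.
 */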
static inline void
extent_size_set(extent_t *extent, size_t size) {
assert((size & ~EXTENT_SIZE_MASK) == 0);
extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
}
static inline void
extent_esn_set(extent_t *extent, size_t esn) {
extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
EXTENT_ESN_MASK);
}
static inline void
extent_bsize_set(extent_t *extent, size_t bsize) {
extent->e_bsize = bsize;
}
static inline void
extent_szind_set(extent_t *extent, szind_t szind) {
assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
}
static inline void
extent_nfree_set(extent_t *extent, unsigned nfree) {
assert(extent_slab_get(extent));
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
}
static inline void
extent_nfree_binshard_set(extent_t *extent, unsigned nfree, unsigned binshard) {
/* The assertion assumes szind is set already. */
assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
extent->e_bits = (extent->e_bits &
(~EXTENT_BITS_NFREE_MASK & ~EXTENT_BITS_BINSHARD_MASK)) |
((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT) |
((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
}
static inline void
extent_nfree_inc(extent_t *extent) {
assert(extent_slab_get(extent));
extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
}
static inline void
extent_nfree_dec(extent_t *extent) {
assert(extent_slab_get(extent));
extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
}
static inline void
extent_nfree_sub(extent_t *extent, uint64_t n) {
assert(extent_slab_get(extent));
extent->e_bits -= (n << EXTENT_BITS_NFREE_SHIFT);
}
static inline void
extent_sn_set(extent_t *extent, size_t sn) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
}
static inline void
extent_state_set(extent_t *extent, extent_state_t state) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
}
static inline void
extent_zeroed_set(extent_t *extent, bool zeroed) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
}
static inline void
extent_committed_set(extent_t *extent, bool committed) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
}
static inline void
extent_dumpable_set(extent_t *extent, bool dumpable) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
}
static inline void
extent_slab_set(extent_t *extent, bool slab) {
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
}
static inline void
extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
}
static inline void
extent_prof_alloc_time_set(extent_t *extent, nstime_t t) {
nstime_copy(&extent->e_alloc_time, &t);
}
static inline bool
extent_is_head_get(extent_t *extent) {
if (maps_coalesce) {
not_reached();
}
return (bool)((extent->e_bits & EXTENT_BITS_IS_HEAD_MASK) >>
EXTENT_BITS_IS_HEAD_SHIFT);
}
static inline void
extent_is_head_set(extent_t *extent, bool is_head) {
if (maps_coalesce) {
not_reached();
}
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_IS_HEAD_MASK) |
((uint64_t)is_head << EXTENT_BITS_IS_HEAD_SHIFT);
}
static inline void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
bool committed, bool dumpable, extent_head_state_t is_head) {
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
extent_arena_set(extent, arena);
extent_addr_set(extent, addr);
extent_size_set(extent, size);
extent_slab_set(extent, slab);
extent_szind_set(extent, szind);
extent_sn_set(extent, sn);
extent_state_set(extent, state);
extent_zeroed_set(extent, zeroed);
extent_committed_set(extent, committed);
extent_dumpable_set(extent, dumpable);
ql_elm_new(extent, ql_link);
if (!maps_coalesce) {
extent_is_head_set(extent, (is_head == EXTENT_IS_HEAD) ? true :
false);
}
if (config_prof) {
extent_prof_tctx_set(extent, NULL);
}
}
static inline void
extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
extent_arena_set(extent, NULL);
extent_addr_set(extent, addr);
extent_bsize_set(extent, bsize);
extent_slab_set(extent, false);
extent_szind_set(extent, SC_NSIZES);
extent_sn_set(extent, sn);
extent_state_set(extent, extent_state_active);
extent_zeroed_set(extent, true);
extent_committed_set(extent, true);
extent_dumpable_set(extent, true);
}
static inline void
extent_list_init(extent_list_t *list) {
ql_new(list);
}
static inline extent_t *
extent_list_first(const extent_list_t *list) {
return ql_first(list);
}
static inline extent_t *
extent_list_last(const extent_list_t *list) {
return ql_last(list, ql_link);
}
static inline void
extent_list_append(extent_list_t *list, extent_t *extent) {
ql_tail_insert(list, extent, ql_link);
}
static inline void
extent_list_prepend(extent_list_t *list, extent_t *extent) {
ql_head_insert(list, extent, ql_link);
}
static inline void
extent_list_replace(extent_list_t *list, extent_t *to_remove,
extent_t *to_insert) {
ql_after_insert(to_remove, to_insert, ql_link);
ql_remove(list, to_remove, ql_link);
}
static inline void
extent_list_remove(extent_list_t *list, extent_t *extent) {
ql_remove(list, extent, ql_link);
}
static inline int
extent_sn_comp(const extent_t *a, const extent_t *b) {
size_t a_sn = extent_sn_get(a);
size_t b_sn = extent_sn_get(b);
return (a_sn > b_sn) - (a_sn < b_sn);
}
static inline int
extent_esn_comp(const extent_t *a, const extent_t *b) {
size_t a_esn = extent_esn_get(a);
size_t b_esn = extent_esn_get(b);
return (a_esn > b_esn) - (a_esn < b_esn);
}
static inline int
extent_ad_comp(const extent_t *a, const extent_t *b) {
uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
return (a_addr > b_addr) - (a_addr < b_addr);
}
static inline int
extent_ead_comp(const extent_t *a, const extent_t *b) {
uintptr_t a_eaddr = (uintptr_t)a;
uintptr_t b_eaddr = (uintptr_t)b;
return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
}
static inline int
extent_snad_comp(const extent_t *a, const extent_t *b) {
int ret;
ret = extent_sn_comp(a, b);
if (ret != 0) {
return ret;
}
ret = extent_ad_comp(a, b);
return ret;
}
static inline int
extent_esnead_comp(const extent_t *a, const extent_t *b) {
int ret;
ret = extent_esn_comp(a, b);
if (ret != 0) {
return ret;
}
ret = extent_ead_comp(a, b);
return ret;
}
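/*
 * Editorial example: under extent_snad_comp, extents with serial numbers 5 and
 * 7 order by serial number alone, while two extents that share serial number 5
 * fall back to extent_ad_comp and order by address. That keeps the ordering
 * total even when serial numbers collide or wrap.
 */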
#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/sc.h"
typedef enum {
extent_state_active = 0,
extent_state_dirty = 1,
extent_state_muzzy = 2,
extent_state_retained = 3
} extent_state_t;
/* Extent (span of pages). Use accessor functions for e_* fields. */
struct extent_s {
/*
* Bitfield containing several fields:
*
* a: arena_ind
* b: slab
* c: committed
* d: dumpable
* z: zeroed
* t: state
* i: szind
* f: nfree
* s: bin_shard
* n: sn
*
* nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
*
* arena_ind: Arena from which this extent came, or all 1 bits if
* unassociated.
*
* slab: The slab flag indicates whether the extent is used for a slab
* of small regions. This helps differentiate small size classes,
* and it indicates whether interior pointers can be looked up via
* iealloc().
*
* committed: The committed flag indicates whether physical memory is
* committed to the extent, whether explicitly or implicitly
* as on a system that overcommits and satisfies physical
* memory needs on demand via soft page faults.
*
* dumpable: The dumpable flag indicates whether or not we've set the
* memory in question to be dumpable. Note that this
* interacts somewhat subtly with user-specified extent hooks,
* since we don't know if *they* are fiddling with
* dumpability (in which case, we don't want to undo whatever
* they're doing). To deal with this scenario, we:
* - Make dumpable false only for memory allocated with the
* default hooks.
* - Only allow memory to go from non-dumpable to dumpable,
* and only once.
* - Never make the OS call to allow dumping when the
* dumpable bit is already set.
* These three constraints mean that we will never
* accidentally dump user memory that the user meant to set
* nondumpable with their extent hooks.
	 *
* zeroed: The zeroed flag is used by extent recycling code to track
* whether memory is zero-filled.
*
* state: The state flag is an extent_state_t.
*
* szind: The szind flag indicates usable size class index for
* allocations residing in this extent, regardless of whether the
* extent is a slab. Extent size and usable size often differ
* even for non-slabs, either due to sz_large_pad or promotion of
* sampled small regions.
*
* nfree: Number of free regions in slab.
*
* bin_shard: the shard of the bin from which this extent came.
*
* sn: Serial number (potentially non-unique).
*
* Serial numbers may wrap around if !opt_retain, but as long as
* comparison functions fall back on address comparison for equal
* serial numbers, stable (if imperfect) ordering is maintained.
*
* Serial numbers may not be unique even in the absence of
* wrap-around, e.g. when splitting an extent and assigning the same
* serial number to both resulting adjacent extents.
*/
uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
#define EXTENT_BITS_ARENA_SHIFT 0
#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_WIDTH 1
#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_WIDTH 1
#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_DUMPABLE_WIDTH 1
#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)
#define EXTENT_BITS_ZEROED_WIDTH 1
#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_WIDTH 2
#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_WIDTH (LG_SLAB_MAXREGS + 1)
#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_BINSHARD_WIDTH 6
#define EXTENT_BITS_BINSHARD_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_BINSHARD_MASK MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT)
#define EXTENT_BITS_IS_HEAD_WIDTH 1
#define EXTENT_BITS_IS_HEAD_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT)
#define EXTENT_BITS_IS_HEAD_MASK MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT)
#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT)
#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
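	/*
	 * Editorial example of the encoding: MASK(w, s) expands to
	 * ((1 << w) - 1) << s, e.g. MASK(2, 5) == 0x60. A field is then read
	 * as (e_bits & MASK) >> SHIFT and written as
	 * e_bits = (e_bits & ~MASK) | ((uint64_t)val << SHIFT), exactly as in
	 * the extent_*_get()/extent_*_set() inlines above.
	 */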
/* Pointer to the extent that this structure is responsible for. */
void *e_addr;
union {
/*
* Extent size and serial number associated with the extent
* structure (different than the serial number for the extent at
* e_addr).
*
* ssssssss [...] ssssssss ssssnnnn nnnnnnnn
*/
size_t e_size_esn;
#define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
#define EXTENT_ESN_MASK ((size_t)PAGE-1)
/* Base extent size, which may not be a multiple of PAGE. */
size_t e_bsize;
};
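	/*
	 * Editorial example (assuming LG_PAGE == 12, so PAGE == 0x1000):
	 * e_size_esn == 0x5007 encodes size 0x5000 (the EXTENT_SIZE_MASK bits,
	 * i.e. five pages) and esn 7 (the EXTENT_ESN_MASK bits).
	 */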
/*
* List linkage, used by a variety of lists:
* - bin_t's slabs_full
* - extents_t's LRU
* - stashed dirty extents
* - arena's large allocations
*/
ql_elm(extent_t) ql_link;
/*
* Linkage for per size class sn/address-ordered heaps, and
* for extent_avail
*/
phn(extent_t) ph_link;
union {
/* Small region slab metadata. */
arena_slab_data_t e_slab_data;
/* Profiling data, used for large objects. */
struct {
/* Time when this was allocated. */
nstime_t e_alloc_time;
/* Points to a prof_tctx_t. */
atomic_p_t e_prof_tctx;
};
};
};
typedef ql_head(extent_t) extent_list_t;
typedef ph(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;
/* Quantized collection of extents, with built-in LRU queue. */
struct extents_s {
malloc_mutex_t mtx;
/*
* Quantized per size class heaps of extents.
*
* Synchronization: mtx.
*/
extent_heap_t heaps[SC_NPSIZES + 1];
atomic_zu_t nextents[SC_NPSIZES + 1];
atomic_zu_t nbytes[SC_NPSIZES + 1];
/*
* Bitmap for which set bits correspond to non-empty heaps.
*
* Synchronization: mtx.
*/
bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];
/*
* LRU of all extents in heaps.
*
* Synchronization: mtx.
*/
extent_list_t lru;
/*
* Page sum for all extents in heaps.
*
* The synchronization here is a little tricky. Modifications to npages
	 * must hold mtx, but reads need not (though a reader who sees npages
* without holding the mutex can't assume anything about the rest of the
* state of the extents_t).
*/
atomic_zu_t npages;
/* All stored extents must be in the same state. */
extent_state_t state;
/*
* If true, delay coalescing until eviction; otherwise coalesce during
* deallocation.
*/
bool delay_coalesce;
};
/*
* The following two structs are for experimental purposes. See
* experimental_utilization_query_ctl and
* experimental_utilization_batch_query_ctl in src/ctl.c.
*/
struct extent_util_stats_s {
size_t nfree;
size_t nregs;
size_t size;
};
struct extent_util_stats_verbose_s {
void *slabcur_addr;
size_t nfree;
size_t nregs;
size_t size;
size_t bin_nfree;
size_t bin_nregs;
};
#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
#define JEMALLOC_INTERNAL_EXTENT_TYPES_H
typedef struct extent_s extent_t;
typedef struct extents_s extents_t;
typedef struct extent_util_stats_s extent_util_stats_t;
typedef struct extent_util_stats_verbose_s extent_util_stats_verbose_t;
#define EXTENT_HOOKS_INITIALIZER NULL
/*
 * When reusing (and splitting) an active extent,
 * (1U << opt_lg_extent_max_active_fit) is the maximum allowed ratio between
 * the size of the active extent and the new extent.
*/
#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
typedef enum {
EXTENT_NOT_HEAD,
EXTENT_IS_HEAD /* Only relevant for Windows && opt.retain. */
} extent_head_state_t;
#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
#ifndef JEMALLOC_INTERNAL_FB_H
#define JEMALLOC_INTERNAL_FB_H
/*
* The flat bitmap module. This has a larger API relative to the bitmap module
* (supporting things like backwards searches, and searching for both set and
* unset bits), at the cost of slower operations for very large bitmaps.
*
* Initialized flat bitmaps start at all-zeros (all bits unset).
*/
typedef unsigned long fb_group_t;
#define FB_GROUP_BITS (ZU(1) << (LG_SIZEOF_LONG + 3))
#define FB_NGROUPS(nbits) ((nbits) / FB_GROUP_BITS \
+ ((nbits) % FB_GROUP_BITS == 0 ? 0 : 1))
static inline void
fb_init(fb_group_t *fb, size_t nbits) {
size_t ngroups = FB_NGROUPS(nbits);
memset(fb, 0, ngroups * sizeof(fb_group_t));
}
static inline bool
fb_empty(fb_group_t *fb, size_t nbits) {
size_t ngroups = FB_NGROUPS(nbits);
for (size_t i = 0; i < ngroups; i++) {
if (fb[i] != 0) {
return false;
}
}
return true;
}
static inline bool
fb_full(fb_group_t *fb, size_t nbits) {
size_t ngroups = FB_NGROUPS(nbits);
size_t trailing_bits = nbits % FB_GROUP_BITS;
size_t limit = (trailing_bits == 0 ? ngroups : ngroups - 1);
for (size_t i = 0; i < limit; i++) {
if (fb[i] != ~(fb_group_t)0) {
return false;
}
}
if (trailing_bits == 0) {
return true;
}
return fb[ngroups - 1] == ((fb_group_t)1 << trailing_bits) - 1;
}
static inline bool
fb_get(fb_group_t *fb, size_t nbits, size_t bit) {
assert(bit < nbits);
size_t group_ind = bit / FB_GROUP_BITS;
size_t bit_ind = bit % FB_GROUP_BITS;
return (bool)(fb[group_ind] & ((fb_group_t)1 << bit_ind));
}
static inline void
fb_set(fb_group_t *fb, size_t nbits, size_t bit) {
assert(bit < nbits);
size_t group_ind = bit / FB_GROUP_BITS;
size_t bit_ind = bit % FB_GROUP_BITS;
fb[group_ind] |= ((fb_group_t)1 << bit_ind);
}
static inline void
fb_unset(fb_group_t *fb, size_t nbits, size_t bit) {
assert(bit < nbits);
size_t group_ind = bit / FB_GROUP_BITS;
size_t bit_ind = bit % FB_GROUP_BITS;
fb[group_ind] &= ~((fb_group_t)1 << bit_ind);
}
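/*
 * Illustrative usage sketch (editor's addition, not part of jemalloc): a
 * 100-bit flat bitmap on the stack, exercising the single-bit operations
 * above.
 */
static inline void
fb_single_bit_example(void) {
	fb_group_t fb[FB_NGROUPS(100)];
	fb_init(fb, 100);			/* All 100 bits start unset. */
	fb_set(fb, 100, 3);
	bool was_set = fb_get(fb, 100, 3);	/* true */
	fb_unset(fb, 100, 3);
	bool empty_again = fb_empty(fb, 100);	/* true */
	(void)was_set;
	(void)empty_again;
}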
/*
 * Some implementation details. This visitation function lets us apply a group
 * visitor to each group overlapping the range [start, start + cnt)
 * (potentially modifying it). The mask indicates which bits of the group are
 * logically part of the visitation.
 */
typedef void (*fb_group_visitor_t)(void *ctx, fb_group_t *fb, fb_group_t mask);
JEMALLOC_ALWAYS_INLINE void
fb_visit_impl(fb_group_t *fb, size_t nbits, fb_group_visitor_t visit, void *ctx,
size_t start, size_t cnt) {
assert(cnt > 0);
assert(start + cnt <= nbits);
size_t group_ind = start / FB_GROUP_BITS;
size_t start_bit_ind = start % FB_GROUP_BITS;
/*
* The first group is special; it's the only one we don't start writing
* to from bit 0.
*/
size_t first_group_cnt = (start_bit_ind + cnt > FB_GROUP_BITS
? FB_GROUP_BITS - start_bit_ind : cnt);
/*
* We can basically split affected words into:
* - The first group, where we touch only the high bits
* - The last group, where we touch only the low bits
* - The middle, where we set all the bits to the same thing.
* We treat each case individually. The last two could be merged, but
* this can lead to bad codegen for those middle words.
*/
/* First group */
fb_group_t mask = ((~(fb_group_t)0)
>> (FB_GROUP_BITS - first_group_cnt))
<< start_bit_ind;
visit(ctx, &fb[group_ind], mask);
cnt -= first_group_cnt;
group_ind++;
/* Middle groups */
while (cnt > FB_GROUP_BITS) {
visit(ctx, &fb[group_ind], ~(fb_group_t)0);
cnt -= FB_GROUP_BITS;
group_ind++;
}
/* Last group */
if (cnt != 0) {
mask = (~(fb_group_t)0) >> (FB_GROUP_BITS - cnt);
visit(ctx, &fb[group_ind], mask);
}
}
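/*
 * Editorial worked example (assuming FB_GROUP_BITS == 64): for start == 60 and
 * cnt == 70, the first group is visited with a mask covering bits 60..63
 * (first_group_cnt == 4), one middle group is visited with a full mask (cnt
 * drops from 66 to 2), and the last group is visited with a mask covering its
 * low 2 bits.
 */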
JEMALLOC_ALWAYS_INLINE void
fb_assign_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
bool val = *(bool *)ctx;
if (val) {
*fb |= mask;
} else {
*fb &= ~mask;
}
}
/* Sets the cnt bits starting at position start. Must not have a 0 count. */
static inline void
fb_set_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
bool val = true;
fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt);
}
/* Unsets the cnt bits starting at position start. Must not have a 0 count. */
static inline void
fb_unset_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
bool val = false;
fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt);
}
JEMALLOC_ALWAYS_INLINE void
fb_scount_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
size_t *scount = (size_t *)ctx;
*scount += popcount_lu(*fb & mask);
}
/* Finds the number of set bits in the range of length cnt starting at start. */
JEMALLOC_ALWAYS_INLINE size_t
fb_scount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
size_t scount = 0;
fb_visit_impl(fb, nbits, &fb_scount_visitor, &scount, start, cnt);
return scount;
}
/* Finds the number of unset bits in the range of length cnt starting at start. */
JEMALLOC_ALWAYS_INLINE size_t
fb_ucount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
size_t scount = fb_scount(fb, nbits, start, cnt);
return cnt - scount;
}
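/*
 * Illustrative usage sketch (editor's addition, not part of jemalloc): set a
 * range of bits and count it back with fb_scount/fb_ucount.
 */
static inline void
fb_range_count_example(void) {
	fb_group_t fb[FB_NGROUPS(128)];
	fb_init(fb, 128);
	fb_set_range(fb, 128, 10, 20);			/* Bits [10, 30) set. */
	size_t nset = fb_scount(fb, 128, 0, 128);	/* 20 */
	size_t nunset = fb_ucount(fb, 128, 0, 128);	/* 108 */
	(void)nset;
	(void)nunset;
}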
/*
 * An implementation detail; find the first bit at position >= start with the
 * value val (or, when searching backwards, the last such bit at position
 * <= start).
 *
 * Returns the number of bits in the bitmap (forward) or -1 (backwards) if no
 * such bit exists.
 */
JEMALLOC_ALWAYS_INLINE ssize_t
fb_find_impl(fb_group_t *fb, size_t nbits, size_t start, bool val,
bool forward) {
assert(start < nbits);
size_t ngroups = FB_NGROUPS(nbits);
ssize_t group_ind = start / FB_GROUP_BITS;
size_t bit_ind = start % FB_GROUP_BITS;
fb_group_t maybe_invert = (val ? 0 : (fb_group_t)-1);
fb_group_t group = fb[group_ind];
group ^= maybe_invert;
if (forward) {
/* Only keep ones in bits bit_ind and above. */
group &= ~((1LU << bit_ind) - 1);
} else {
/*
* Only keep ones in bits bit_ind and below. You might more
* naturally express this as (1 << (bit_ind + 1)) - 1, but
* that shifts by an invalid amount if bit_ind is one less than
* FB_GROUP_BITS.
*/
group &= ((2LU << bit_ind) - 1);
}
ssize_t group_ind_bound = forward ? (ssize_t)ngroups : -1;
while (group == 0) {
group_ind += forward ? 1 : -1;
if (group_ind == group_ind_bound) {
return forward ? (ssize_t)nbits : (ssize_t)-1;
}
group = fb[group_ind];
group ^= maybe_invert;
}
assert(group != 0);
size_t bit = forward ? ffs_lu(group) : fls_lu(group);
size_t pos = group_ind * FB_GROUP_BITS + bit;
/*
* The high bits of a partially filled last group are zeros, so if we're
* looking for zeros we don't want to report an invalid result.
*/
if (forward && !val && pos > nbits) {
return nbits;
}
return pos;
}
/*
 * Find the first unset bit in the bitmap with an index >= min_bit. Returns the
 * number of bits in the bitmap if no such bit exists.
 */
static inline size_t
fb_ffu(fb_group_t *fb, size_t nbits, size_t min_bit) {
return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ false,
/* forward */ true);
}
/* The same, but looks for a set bit. */
static inline size_t
fb_ffs(fb_group_t *fb, size_t nbits, size_t min_bit) {
return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ true,
/* forward */ true);
}
/*
 * Find the last unset bit in the bitmap with an index <= max_bit. Returns -1
 * if no such bit exists.
 */
static inline ssize_t
fb_flu(fb_group_t *fb, size_t nbits, size_t max_bit) {
return fb_find_impl(fb, nbits, max_bit, /* val */ false,
/* forward */ false);
}
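/* The same, but looks for a set bit. */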
static inline ssize_t
fb_fls(fb_group_t *fb, size_t nbits, size_t max_bit) {
return fb_find_impl(fb, nbits, max_bit, /* val */ true,
/* forward */ false);
}
/* Returns whether or not we found a range. */
JEMALLOC_ALWAYS_INLINE bool
fb_iter_range_impl(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
size_t *r_len, bool val, bool forward) {
assert(start < nbits);
ssize_t next_range_begin = fb_find_impl(fb, nbits, start, val, forward);
if ((forward && next_range_begin == (ssize_t)nbits)
|| (!forward && next_range_begin == (ssize_t)-1)) {
return false;
}
/* Half open range; the set bits are [begin, end). */
ssize_t next_range_end = fb_find_impl(fb, nbits, next_range_begin, !val,
forward);
if (forward) {
*r_begin = next_range_begin;
*r_len = next_range_end - next_range_begin;
} else {
*r_begin = next_range_end + 1;
*r_len = next_range_begin - next_range_end;
}
return true;
}
/*
* Used to iterate through ranges of set bits.
*
* Tries to find the next contiguous sequence of set bits with a first index >=
* start. If one exists, puts the earliest bit of the range in *r_begin, its
 * length in *r_len, and returns true. Otherwise, returns false (without
 * touching *r_begin or *r_len).
*/
static inline bool
fb_srange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
size_t *r_len) {
return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
/* val */ true, /* forward */ true);
}
/*
* The same as fb_srange_iter, but searches backwards from start rather than
* forwards. (The position returned is still the earliest bit in the range).
*/
static inline bool
fb_srange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
size_t *r_len) {
return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
/* val */ true, /* forward */ false);
}
/* Similar to fb_srange_iter, but searches for unset bits. */
static inline bool
fb_urange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
size_t *r_len) {
return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
/* val */ false, /* forward */ true);
}
/* Similar to fb_srange_riter, but searches for unset bits. */
static inline bool
fb_urange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
size_t *r_len) {
return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
/* val */ false, /* forward */ false);
}
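/*
 * Illustrative usage sketch (editor's addition, not part of jemalloc): count
 * the maximal runs of set bits by walking them with fb_srange_iter, in the
 * same style as fb_range_longest_impl below.
 */
static inline size_t
fb_srange_count_example(fb_group_t *fb, size_t nbits) {
	size_t nranges = 0;
	size_t begin = 0;
	size_t len = 0;
	while (begin < nbits
	    && fb_srange_iter(fb, nbits, begin, &begin, &len)) {
		/* [begin, begin + len) is a maximal run of set bits. */
		nranges++;
		begin += len;
	}
	return nranges;
}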
JEMALLOC_ALWAYS_INLINE size_t
fb_range_longest_impl(fb_group_t *fb, size_t nbits, bool val) {
size_t begin = 0;
size_t longest_len = 0;
size_t len = 0;
while (begin < nbits && fb_iter_range_impl(fb, nbits, begin, &begin,
&len, val, /* forward */ true)) {
if (len > longest_len) {
longest_len = len;
}
begin += len;
}
return longest_len;
}
static inline size_t
fb_srange_longest(fb_group_t *fb, size_t nbits) {
return fb_range_longest_impl(fb, nbits, /* val */ true);
}
static inline size_t
fb_urange_longest(fb_group_t *fb, size_t nbits) {
return fb_range_longest_impl(fb, nbits, /* val */ false);
}
/*
* Initializes each bit of dst with the bitwise-AND of the corresponding bits of
* src1 and src2. All bitmaps must be the same size.
*/
static inline void
fb_bit_and(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) {
size_t ngroups = FB_NGROUPS(nbits);
for (size_t i = 0; i < ngroups; i++) {
dst[i] = src1[i] & src2[i];
}
}
/* Like fb_bit_and, but with bitwise-OR. */
static inline void
fb_bit_or(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) {
size_t ngroups = FB_NGROUPS(nbits);
for (size_t i = 0; i < ngroups; i++) {
dst[i] = src1[i] | src2[i];
}
}
/* Initializes dst bit i to the negation of source bit i. */
static inline void
fb_bit_not(fb_group_t *dst, fb_group_t *src, size_t nbits) {
size_t ngroups = FB_NGROUPS(nbits);
for (size_t i = 0; i < ngroups; i++) {
dst[i] = ~src[i];
}
}
#endif /* JEMALLOC_INTERNAL_FB_H */
#ifndef JEMALLOC_INTERNAL_FXP_H
#define JEMALLOC_INTERNAL_FXP_H
/*
* A simple fixed-point math implementation, supporting only unsigned values
* (with overflow being an error).
*
* It's not in general safe to use floating point in core code, because various
* libc implementations we get linked against can assume that malloc won't touch
* floating point state and call it with an unusual calling convention.
*/
/*
* High 16 bits are the integer part, low 16 are the fractional part. Or
* equivalently, repr == 2**16 * val, where we use "val" to refer to the
* (imaginary) fractional representation of the true value.
*
* We pick a uint32_t here since it's convenient in some places to
* double the representation size (i.e. multiplication and division use
* 64-bit integer types), and a uint64_t is the largest type we're
* certain is available.
*/
typedef uint32_t fxp_t;
#define FXP_INIT_INT(x) ((x) << 16)
#define FXP_INIT_PERCENT(pct) (((pct) << 16) / 100)
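/*
 * Editorial examples of the representation: FXP_INIT_INT(3) == 0x30000 (the
 * value 3.0), and FXP_INIT_PERCENT(25) == (25 << 16) / 100 == 0x4000 (the
 * value 0.25).
 */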
/*
* Amount of precision used in parsing and printing numbers. The integer bound
* is simply because the integer part of the number gets 16 bits, and so is
* bounded by 65536.
*
* We use a lot of precision for the fractional part, even though most of it
* gets rounded off; this lets us get exact values for the important special
* case where the denominator is a small power of 2 (for instance,
* 1/512 == 0.001953125 is exactly representable even with only 16 bits of
* fractional precision). We need to left-shift by 16 before dividing by
 * 10**precision, so we pick precision to be floor(log10(2**48)) = 14.
*/
#define FXP_INTEGER_PART_DIGITS 5
#define FXP_FRACTIONAL_PART_DIGITS 14
/*
* In addition to the integer and fractional parts of the number, we need to
* include a null character and (possibly) a decimal point.
*/
#define FXP_BUF_SIZE (FXP_INTEGER_PART_DIGITS + FXP_FRACTIONAL_PART_DIGITS + 2)
static inline fxp_t
fxp_add(fxp_t a, fxp_t b) {
return a + b;
}
static inline fxp_t
fxp_sub(fxp_t a, fxp_t b) {
assert(a >= b);
return a - b;
}
static inline fxp_t
fxp_mul(fxp_t a, fxp_t b) {
uint64_t unshifted = (uint64_t)a * (uint64_t)b;
/*
* Unshifted is (a.val * 2**16) * (b.val * 2**16)
* == (a.val * b.val) * 2**32, but we want
* (a.val * b.val) * 2 ** 16.
*/
return (uint32_t)(unshifted >> 16);
}
static inline fxp_t
fxp_div(fxp_t a, fxp_t b) {
assert(b != 0);
uint64_t unshifted = ((uint64_t)a << 32) / (uint64_t)b;
/*
* Unshifted is (a.val * 2**16) * (2**32) / (b.val * 2**16)
* == (a.val / b.val) * (2 ** 32), which again corresponds to a right
* shift of 16.
*/
return (uint32_t)(unshifted >> 16);
}
static inline uint32_t
fxp_round_down(fxp_t a) {
return a >> 16;
}
static inline uint32_t
fxp_round_nearest(fxp_t a) {
uint32_t fractional_part = (a & ((1U << 16) - 1));
uint32_t increment = (uint32_t)(fractional_part >= (1U << 15));
return (a >> 16) + increment;
}
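/*
 * Illustrative usage sketch (editor's addition, not part of jemalloc):
 * 1.5 * 2.5 == 3.75 in fixed point, which rounds down to 3 and to nearest
 * to 4.
 */
static inline void
fxp_usage_example(void) {
	fxp_t a = fxp_div(FXP_INIT_INT(3), FXP_INIT_INT(2));	/* 1.5 (0x18000) */
	fxp_t b = fxp_div(FXP_INIT_INT(5), FXP_INIT_INT(2));	/* 2.5 (0x28000) */
	fxp_t prod = fxp_mul(a, b);				/* 3.75 (0x3c000) */
	uint32_t down = fxp_round_down(prod);			/* 3 */
	uint32_t nearest = fxp_round_nearest(prod);		/* 4 */
	(void)down;
	(void)nearest;
}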
/*
* Approximately computes x * frac, without the size limitations that would be
* imposed by converting u to an fxp_t.
*/
static inline size_t
fxp_mul_frac(size_t x_orig, fxp_t frac) {
assert(frac <= (1U << 16));
/*
* Work around an over-enthusiastic warning about type limits below (on
* 32-bit platforms, a size_t is always less than 1ULL << 48).
*/
uint64_t x = (uint64_t)x_orig;
/*
* If we can guarantee no overflow, multiply first before shifting, to
* preserve some precision. Otherwise, shift first and then multiply.
* In the latter case, we only lose the low 16 bits of a 48-bit number,
* so we're still accurate to within 1/2**32.
*/
if (x < (1ULL << 48)) {
return (size_t)((x * frac) >> 16);
} else {
return (size_t)((x >> 16) * (uint64_t)frac);
}
}
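/*
 * Editorial example: fxp_mul_frac(1000, FXP_INIT_PERCENT(25)) == 250, since
 * FXP_INIT_PERCENT(25) == 0x4000 and (1000 * 0x4000) >> 16 == 250.
 */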
/*
 * Returns true on error. Otherwise, returns false and updates *end to point to
 * the first character not parsed (because it wasn't a digit).
*/
bool fxp_parse(fxp_t *a, const char *ptr, char **end);
void fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]);
#endif /* JEMALLOC_INTERNAL_FXP_H */
@@ -104,8 +104,8 @@ hash_x86_32(const void *key, int len, uint32_t seed) {
	uint32_t k1 = 0;

	switch (len & 3) {
	case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH;
	case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH;
	case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
		k1 *= c2; h1 ^= k1;
	}
@@ -177,29 +177,29 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
	uint32_t k4 = 0;

	switch (len & 15) {
	case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH;
	case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH;
	case 13: k4 ^= tail[12] << 0;
		k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
		JEMALLOC_FALLTHROUGH;
	case 12: k3 ^= (uint32_t) tail[11] << 24; JEMALLOC_FALLTHROUGH;
	case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH;
	case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH;
	case  9: k3 ^= tail[ 8] << 0;
		k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
		JEMALLOC_FALLTHROUGH;
	case  8: k2 ^= (uint32_t) tail[ 7] << 24; JEMALLOC_FALLTHROUGH;
	case  7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH;
	case  6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH;
	case  5: k2 ^= tail[ 4] << 0;
		k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
		JEMALLOC_FALLTHROUGH;
	case  4: k1 ^= (uint32_t) tail[ 3] << 24; JEMALLOC_FALLTHROUGH;
	case  3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH;
	case  2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH;
	case  1: k1 ^= tail[ 0] << 0;
		k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
		break;
	}
}
@@ -261,24 +261,25 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
	uint64_t k2 = 0;

	switch (len & 15) {
	case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH;
	case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH;
	case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH;
	case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH;
	case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH;
	case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH;
	case  9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
		k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
		JEMALLOC_FALLTHROUGH;
	case  8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH;
	case  7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH;
	case  6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH;
	case  5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH;
	case  4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH;
	case  3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH;
	case  2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH;
	case  1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
		k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
		break;
	}
}