Commit b8beda3c authored by Oran Agra

Merge commit jemalloc 5.3.0

parents d659c734 6d23d3ac
......@@ -7,36 +7,19 @@
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/tsd_types.h"
/* Various uses of this struct need it to be a named type. */
typedef ql_elm(tsd_t) tsd_link_t;
struct tcache_s {
/*
* To minimize our cache-footprint, we put the frequently accessed data
* together at the start of this struct.
*/
/* Cleared after arena_prof_accum(). */
uint64_t prof_accumbytes;
/* Drives incremental GC. */
ticker_t gc_ticker;
/*
* The pointer stacks associated with bins follow as a contiguous array.
* During tcache initialization, the avail pointer in each element of
* tbins is initialized to point to the proper offset within this array.
/*
* The tcache state is split into the slow and hot path data. Each has a
* pointer to the other, and the data always comes in pairs. The layout of each
* of them varies in practice; tcache_slow lives in the TSD for the automatic
* tcache, and as part of a dynamic allocation for manual allocations. Keeping
* a pointer to tcache_slow lets us treat these cases uniformly, rather than
* splitting up the tcache [de]allocation code into those paths called with the
* TSD tcache and those called with a manual tcache.
*/
cache_bin_t bins_small[SC_NBINS];
/*
* This data is less hot; we can be a little less careful with our
* footprint here.
*/
struct tcache_slow_s {
/* Lets us track all the tcaches in an arena. */
ql_elm(tcache_t) link;
/* Logically scoped to tsd, but put here for cache layout reasons. */
ql_elm(tsd_t) tsd_link;
bool in_hook;
ql_elm(tcache_slow_t) link;
/*
* The descriptor lets the arena find our cache bins without seeing the
......@@ -51,12 +34,27 @@ struct tcache_s {
szind_t next_gc_bin;
/* For small bins, fill (ncached_max >> lg_fill_div). */
uint8_t lg_fill_div[SC_NBINS];
/* For small bins, whether has been refilled since last GC. */
bool bin_refilled[SC_NBINS];
/*
* For small bins, the number of items we can pretend to flush before
* actually flushing.
*/
uint8_t bin_flush_delay_items[SC_NBINS];
/*
* We put the cache bins for large size classes at the end of the
* struct, since some of them might not get used. This might end up
* letting us avoid touching an extra page if we don't have to.
* The start of the allocation containing the dynamic allocation for
* either the cache bins alone, or the cache bin memory as well as this
* tcache_slow_t and its associated tcache_t.
*/
cache_bin_t bins_large[SC_NSIZES-SC_NBINS];
void *dyn_alloc;
/* The associated bins. */
tcache_t *tcache;
};
struct tcache_s {
tcache_slow_t *tcache_slow;
cache_bin_t bins[TCACHE_NBINS_MAX];
};
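/*
 * Illustrative sketch (hypothetical helper, not from the jemalloc source): the
 * two halves above always come as a pair and point at each other, so code
 * holding either one can recover the other.
 */
static inline tcache_slow_t *
tcache_slow_from_tcache_sketch(tcache_t *tcache) {
	tcache_slow_t *tcache_slow = tcache->tcache_slow;
	assert(tcache_slow->tcache == tcache);	/* The pair is mutually linked. */
	return tcache_slow;
}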
/* Linkage for list of available (previously used) explicit tcache IDs. */
......
......@@ -3,6 +3,7 @@
#include "jemalloc/internal/sc.h"
typedef struct tcache_slow_s tcache_slow_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;
......@@ -16,39 +17,9 @@ typedef struct tcaches_s tcaches_t;
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
/*
* Absolute minimum number of cache slots for each small bin.
*/
#define TCACHE_NSLOTS_SMALL_MIN 20
/*
* Absolute maximum number of cache slots for each small bin in the thread
* cache. This is an additional constraint beyond that imposed as: twice the
* number of regions per slab for this size class.
*
* This constant must be an even number.
*/
#define TCACHE_NSLOTS_SMALL_MAX 200
/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
/*
* TCACHE_GC_SWEEP is the approximate number of allocation events between
* full GC sweeps. Integer rounding may cause the actual number to be
* slightly higher, since GC is performed incrementally.
*/
#define TCACHE_GC_SWEEP 8192
/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
((TCACHE_GC_SWEEP / SC_NBINS) + ((TCACHE_GC_SWEEP / SC_NBINS == 0) ? 0 : 1))
/* Used in TSD static initializer only. Real init in tcache_data_init(). */
/* Used in TSD static initializer only. Real init in tsd_tcache_data_init(). */
#define TCACHE_ZERO_INITIALIZER {0}
#define TCACHE_SLOW_ZERO_INITIALIZER {0}
/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
#define TCACHE_ENABLED_ZERO_INITIALIZER false
......@@ -56,4 +27,9 @@ typedef struct tcaches_s tcaches_t;
/* Used for explicit tcache only. Means flushed but not destroyed. */
#define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1)
#define TCACHE_LG_MAXCLASS_LIMIT 23 /* tcache_maxclass = 8M */
#define TCACHE_MAXCLASS_LIMIT ((size_t)1 << TCACHE_LG_MAXCLASS_LIMIT)
#define TCACHE_NBINS_MAX (SC_NBINS + SC_NGROUP * \
(TCACHE_LG_MAXCLASS_LIMIT - SC_LG_LARGE_MINCLASS) + 1)
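/*
 * Worked example (illustrative; assumes a typical 4K-page 64-bit build where
 * SC_NBINS == 36, SC_NGROUP == 4 and SC_LG_LARGE_MINCLASS == 14): the large
 * range spans lg sizes 14..23, i.e. 23 - 14 = 9 doublings of 4 size classes
 * each, plus one more bin for the 8M class itself, so
 * TCACHE_NBINS_MAX == 36 + 4 * 9 + 1 == 73.
 */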
#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
......@@ -4,16 +4,21 @@
extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)();
extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)();
#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
#if defined(JEMALLOC_JET) || defined(JEMALLOC_UNIT_TEST)
# define JEMALLOC_TEST_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
#define open JEMALLOC_HOOK(open, test_hooks_libc_hook)
#define read JEMALLOC_HOOK(read, test_hooks_libc_hook)
#define write JEMALLOC_HOOK(write, test_hooks_libc_hook)
#define readlink JEMALLOC_HOOK(readlink, test_hooks_libc_hook)
#define close JEMALLOC_HOOK(close, test_hooks_libc_hook)
#define creat JEMALLOC_HOOK(creat, test_hooks_libc_hook)
#define secure_getenv JEMALLOC_HOOK(secure_getenv, test_hooks_libc_hook)
# define open JEMALLOC_TEST_HOOK(open, test_hooks_libc_hook)
# define read JEMALLOC_TEST_HOOK(read, test_hooks_libc_hook)
# define write JEMALLOC_TEST_HOOK(write, test_hooks_libc_hook)
# define readlink JEMALLOC_TEST_HOOK(readlink, test_hooks_libc_hook)
# define close JEMALLOC_TEST_HOOK(close, test_hooks_libc_hook)
# define creat JEMALLOC_TEST_HOOK(creat, test_hooks_libc_hook)
# define secure_getenv JEMALLOC_TEST_HOOK(secure_getenv, test_hooks_libc_hook)
/* Note that this is undef'd and re-define'd in src/prof.c. */
#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
# define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
#else
# define JEMALLOC_TEST_HOOK(fn, hook) fn
#endif
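/*
 * Illustrative expansion (hypothetical call site): with the test-hook macro
 * above, a call such as
 *     close(fd);
 * in a JEMALLOC_JET / unit-test build expands to
 *     ((void)(test_hooks_libc_hook != NULL && (test_hooks_libc_hook(), 0)), close)(fd);
 * i.e. the installed hook (if any) fires first via the comma operator, and the
 * expression then evaluates to the real libc function, which is called as
 * usual.  In regular builds JEMALLOC_TEST_HOOK(fn, hook) is just fn, so the
 * wrappers disappear entirely.
 */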
#endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */
#ifndef JEMALLOC_INTERNAL_THREAD_EVENT_H
#define JEMALLOC_INTERNAL_THREAD_EVENT_H
#include "jemalloc/internal/tsd.h"
/* "te" is short for "thread_event" */
/*
* TE_MIN_START_WAIT should not exceed the minimal allocation usize.
*/
#define TE_MIN_START_WAIT ((uint64_t)1U)
#define TE_MAX_START_WAIT UINT64_MAX
/*
* Maximum threshold on thread_(de)allocated_next_event_fast, so that there is
* no need to check overflow in malloc fast path. (The allocation size in malloc
* fast path never exceeds SC_LOOKUP_MAXCLASS.)
*/
#define TE_NEXT_EVENT_FAST_MAX (UINT64_MAX - SC_LOOKUP_MAXCLASS + 1U)
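/*
 * Sketch of the no-overflow argument (illustrative; the actual fast-path
 * caller lives elsewhere): with allocated < threshold <= TE_NEXT_EVENT_FAST_MAX
 * and usize <= SC_LOOKUP_MAXCLASS,
 *     allocated + usize <= (TE_NEXT_EVENT_FAST_MAX - 1) + SC_LOOKUP_MAXCLASS
 *                       == UINT64_MAX,
 * so a check of the form `allocated + usize >= threshold` can be used on the
 * malloc fast path without any explicit wraparound handling.
 */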
/*
* The max interval helps make sure that malloc stays on the fast path in the
* common case, i.e. thread_allocated < thread_allocated_next_event_fast. When
* thread_allocated is within an event's distance to TE_NEXT_EVENT_FAST_MAX
* above, thread_allocated_next_event_fast is wrapped around and we fall back to
* the medium-fast path. The max interval makes sure that we're not staying on
* the fallback case for too long, even if there's no active event or if all
* active events have long wait times.
*/
#define TE_MAX_INTERVAL ((uint64_t)(4U << 20))
/*
* Invalid elapsed time, for situations where elapsed time is not needed. See
* comments in thread_event.c for more info.
*/
#define TE_INVALID_ELAPSED UINT64_MAX
typedef struct te_ctx_s {
bool is_alloc;
uint64_t *current;
uint64_t *last_event;
uint64_t *next_event;
uint64_t *next_event_fast;
} te_ctx_t;
void te_assert_invariants_debug(tsd_t *tsd);
void te_event_trigger(tsd_t *tsd, te_ctx_t *ctx);
void te_recompute_fast_threshold(tsd_t *tsd);
void tsd_te_init(tsd_t *tsd);
/*
* List of all events, in the following format:
* E(event, (condition), is_alloc_event)
*/
#define ITERATE_OVER_ALL_EVENTS \
E(tcache_gc, (opt_tcache_gc_incr_bytes > 0), true) \
E(prof_sample, (config_prof && opt_prof), true) \
E(stats_interval, (opt_stats_interval >= 0), true) \
E(tcache_gc_dalloc, (opt_tcache_gc_incr_bytes > 0), false) \
E(peak_alloc, config_stats, true) \
E(peak_dalloc, config_stats, false)
#define E(event, condition_unused, is_alloc_event_unused) \
C(event##_event_wait)
/* List of all thread event counters. */
#define ITERATE_OVER_ALL_COUNTERS \
C(thread_allocated) \
C(thread_allocated_last_event) \
ITERATE_OVER_ALL_EVENTS \
C(prof_sample_last_event) \
C(stats_interval_last_event)
/* Getters directly wrap TSD getters. */
#define C(counter) \
JEMALLOC_ALWAYS_INLINE uint64_t \
counter##_get(tsd_t *tsd) { \
return tsd_##counter##_get(tsd); \
}
ITERATE_OVER_ALL_COUNTERS
#undef C
/*
* Setters call the TSD pointer getters rather than the TSD setters, so that
* the counters can be modified even when TSD state is reincarnated or
* minimal_initialized: if an event is triggered in such cases, we will
* temporarily delay the event and let it be immediately triggered at the next
* allocation call.
*/
#define C(counter) \
JEMALLOC_ALWAYS_INLINE void \
counter##_set(tsd_t *tsd, uint64_t v) { \
*tsd_##counter##p_get(tsd) = v; \
}
ITERATE_OVER_ALL_COUNTERS
#undef C
/*
* For generating _event_wait getter / setter functions for each individual
* event.
*/
#undef E
/*
* The malloc and free fastpath getters -- use the unsafe getters since tsd may
* be non-nominal, in which case the fast_threshold will be set to 0. This
* allows checking for events and tsd non-nominal in a single branch.
*
* Note that these can only be used on the fastpath.
*/
JEMALLOC_ALWAYS_INLINE void
te_malloc_fastpath_ctx(tsd_t *tsd, uint64_t *allocated, uint64_t *threshold) {
*allocated = *tsd_thread_allocatedp_get_unsafe(tsd);
*threshold = *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd);
assert(*threshold <= TE_NEXT_EVENT_FAST_MAX);
}
JEMALLOC_ALWAYS_INLINE void
te_free_fastpath_ctx(tsd_t *tsd, uint64_t *deallocated, uint64_t *threshold) {
/* Unsafe getters since this may happen before tsd_init. */
*deallocated = *tsd_thread_deallocatedp_get_unsafe(tsd);
*threshold = *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd);
assert(*threshold <= TE_NEXT_EVENT_FAST_MAX);
}
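/*
 * Hypothetical fast-path caller (illustrative only; the name is made up):
 * because the fast threshold is forced to 0 whenever tsd is non-nominal, a
 * single branch covers both "an event is due" and "tsd is not usable on the
 * fast path".
 */
JEMALLOC_ALWAYS_INLINE bool
malloc_fastpath_ok_sketch(tsd_t *tsd, size_t usize) {
	uint64_t allocated, threshold;
	te_malloc_fastpath_ctx(tsd, &allocated, &threshold);
	return allocated + usize < threshold;
}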
JEMALLOC_ALWAYS_INLINE bool
te_ctx_is_alloc(te_ctx_t *ctx) {
return ctx->is_alloc;
}
JEMALLOC_ALWAYS_INLINE uint64_t
te_ctx_current_bytes_get(te_ctx_t *ctx) {
return *ctx->current;
}
JEMALLOC_ALWAYS_INLINE void
te_ctx_current_bytes_set(te_ctx_t *ctx, uint64_t v) {
*ctx->current = v;
}
JEMALLOC_ALWAYS_INLINE uint64_t
te_ctx_last_event_get(te_ctx_t *ctx) {
return *ctx->last_event;
}
JEMALLOC_ALWAYS_INLINE void
te_ctx_last_event_set(te_ctx_t *ctx, uint64_t v) {
*ctx->last_event = v;
}
/* Below 3 for next_event_fast. */
JEMALLOC_ALWAYS_INLINE uint64_t
te_ctx_next_event_fast_get(te_ctx_t *ctx) {
uint64_t v = *ctx->next_event_fast;
assert(v <= TE_NEXT_EVENT_FAST_MAX);
return v;
}
JEMALLOC_ALWAYS_INLINE void
te_ctx_next_event_fast_set(te_ctx_t *ctx, uint64_t v) {
assert(v <= TE_NEXT_EVENT_FAST_MAX);
*ctx->next_event_fast = v;
}
JEMALLOC_ALWAYS_INLINE void
te_next_event_fast_set_non_nominal(tsd_t *tsd) {
/*
* Set the fast thresholds to zero when tsd is non-nominal. Use the
* unsafe getter as this may get called during tsd init and clean up.
*/
*tsd_thread_allocated_next_event_fastp_get_unsafe(tsd) = 0;
*tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) = 0;
}
/* For next_event. Setter also updates the fast threshold. */
JEMALLOC_ALWAYS_INLINE uint64_t
te_ctx_next_event_get(te_ctx_t *ctx) {
return *ctx->next_event;
}
JEMALLOC_ALWAYS_INLINE void
te_ctx_next_event_set(tsd_t *tsd, te_ctx_t *ctx, uint64_t v) {
*ctx->next_event = v;
te_recompute_fast_threshold(tsd);
}
/*
* The function checks in debug mode whether the thread event counters are in
* a consistent state, which forms the invariants before and after each round
* of thread event handling that we can rely on and need to promise.
* The invariants are only temporarily violated in the middle of
* te_event_advance() if an event is triggered (the te_event_trigger() call at
* the end will restore the invariants).
*/
JEMALLOC_ALWAYS_INLINE void
te_assert_invariants(tsd_t *tsd) {
if (config_debug) {
te_assert_invariants_debug(tsd);
}
}
JEMALLOC_ALWAYS_INLINE void
te_ctx_get(tsd_t *tsd, te_ctx_t *ctx, bool is_alloc) {
ctx->is_alloc = is_alloc;
if (is_alloc) {
ctx->current = tsd_thread_allocatedp_get(tsd);
ctx->last_event = tsd_thread_allocated_last_eventp_get(tsd);
ctx->next_event = tsd_thread_allocated_next_eventp_get(tsd);
ctx->next_event_fast =
tsd_thread_allocated_next_event_fastp_get(tsd);
} else {
ctx->current = tsd_thread_deallocatedp_get(tsd);
ctx->last_event = tsd_thread_deallocated_last_eventp_get(tsd);
ctx->next_event = tsd_thread_deallocated_next_eventp_get(tsd);
ctx->next_event_fast =
tsd_thread_deallocated_next_event_fastp_get(tsd);
}
}
/*
* The lookahead functionality facilitates events to be able to lookahead, i.e.
* without touching the event counters, to determine whether an event would be
* triggered. The event counters are not advanced until the end of the
* allocation / deallocation calls, so the lookahead can be useful if some
* preparation work for some event must be done early in the allocation /
* deallocation calls.
*
* Currently only the profiling sampling event needs the lookahead
* functionality, so we don't yet define general purpose lookahead functions.
*
* Surplus is a terminology referring to the amount of bytes beyond what's
* needed for triggering an event, which can be a useful quantity to have in
* general when lookahead is being called.
*/
JEMALLOC_ALWAYS_INLINE bool
te_prof_sample_event_lookahead_surplus(tsd_t *tsd, size_t usize,
size_t *surplus) {
if (surplus != NULL) {
/*
* This is a dead store: the surplus will be overwritten before
* any read. The initialization suppresses compiler warnings.
* Meanwhile, using SIZE_MAX to initialize is good for
* debugging purpose, because a valid surplus value is strictly
* less than usize, which is at most SIZE_MAX.
*/
*surplus = SIZE_MAX;
}
if (unlikely(!tsd_nominal(tsd) || tsd_reentrancy_level_get(tsd) > 0)) {
return false;
}
/* The subtraction is intentionally susceptible to underflow. */
uint64_t accumbytes = tsd_thread_allocated_get(tsd) + usize -
tsd_thread_allocated_last_event_get(tsd);
uint64_t sample_wait = tsd_prof_sample_event_wait_get(tsd);
if (accumbytes < sample_wait) {
return false;
}
assert(accumbytes - sample_wait < (uint64_t)usize);
if (surplus != NULL) {
*surplus = (size_t)(accumbytes - sample_wait);
}
return true;
}
JEMALLOC_ALWAYS_INLINE bool
te_prof_sample_event_lookahead(tsd_t *tsd, size_t usize) {
return te_prof_sample_event_lookahead_surplus(tsd, usize, NULL);
}
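/*
 * Hypothetical lookahead use (illustrative; the name is made up): code that
 * must do per-sample setup early in the allocation path can peek without
 * advancing the counters.
 */
JEMALLOC_ALWAYS_INLINE void
prof_prepare_if_sampled_sketch(tsd_t *tsd, size_t usize) {
	size_t surplus;
	if (te_prof_sample_event_lookahead_surplus(tsd, usize, &surplus)) {
		/*
		 * This allocation will trigger a profiling sample once the
		 * counters are advanced; surplus is the number of bytes past
		 * the sampling threshold.
		 */
		(void)surplus;
	}
}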
JEMALLOC_ALWAYS_INLINE void
te_event_advance(tsd_t *tsd, size_t usize, bool is_alloc) {
te_assert_invariants(tsd);
te_ctx_t ctx;
te_ctx_get(tsd, &ctx, is_alloc);
uint64_t bytes_before = te_ctx_current_bytes_get(&ctx);
te_ctx_current_bytes_set(&ctx, bytes_before + usize);
/* The subtraction is intentionally susceptible to underflow. */
if (likely(usize < te_ctx_next_event_get(&ctx) - bytes_before)) {
te_assert_invariants(tsd);
} else {
te_event_trigger(tsd, &ctx);
}
}
JEMALLOC_ALWAYS_INLINE void
thread_dalloc_event(tsd_t *tsd, size_t usize) {
te_event_advance(tsd, usize, false);
}
JEMALLOC_ALWAYS_INLINE void
thread_alloc_event(tsd_t *tsd, size_t usize) {
te_event_advance(tsd, usize, true);
}
#endif /* JEMALLOC_INTERNAL_THREAD_EVENT_H */
#ifndef JEMALLOC_INTERNAL_TICKER_H
#define JEMALLOC_INTERNAL_TICKER_H
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/util.h"
/**
......@@ -10,11 +11,11 @@
* have occurred with a call to ticker_ticks), which will return true (and reset
* the counter) if the countdown hit zero.
*/
typedef struct {
typedef struct ticker_s ticker_t;
struct ticker_s {
int32_t tick;
int32_t nticks;
} ticker_t;
};
static inline void
ticker_init(ticker_t *ticker, int32_t nticks) {
......@@ -88,4 +89,87 @@ ticker_trytick(ticker_t *ticker) {
return false;
}
/*
* The ticker_geom_t is much like the ticker_t, except that instead of ticker
* having a constant countdown, it has an approximate one; each tick has
* approximately a 1/nticks chance of triggering the count.
*
* The motivation is in triggering arena decay. With a naive strategy, each
* thread would maintain a ticker per arena, and check if decay is necessary
* each time that the arena's ticker fires. This has two costs:
* - Since under reasonable assumptions both threads and arenas can scale
* linearly with the number of CPUs, maintaining per-arena data in each thread
* scales quadratically with the number of CPUs.
* - These tickers are often a cache miss down tcache flush pathways.
*
* By giving each tick a 1/nticks chance of firing, we still maintain the same
* average number of ticks-until-firing per arena, with only a single ticker's
* worth of metadata.
*/
/* See ticker.c for an explanation of these constants. */
#define TICKER_GEOM_NBITS 6
#define TICKER_GEOM_MUL 61
extern const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS];
/* Not actually any different from ticker_t; just for type safety. */
typedef struct ticker_geom_s ticker_geom_t;
struct ticker_geom_s {
int32_t tick;
int32_t nticks;
};
/*
* Just pick the average delay for the first counter. We're more concerned with
* the behavior over long periods of time rather than the exact timing of the
* initial ticks.
*/
#define TICKER_GEOM_INIT(nticks) {nticks, nticks}
static inline void
ticker_geom_init(ticker_geom_t *ticker, int32_t nticks) {
/*
* Make sure there's no overflow possible. This shouldn't really be a
* problem for reasonable nticks choices, which are all static and
* relatively small.
*/
assert((uint64_t)nticks * (uint64_t)255 / (uint64_t)TICKER_GEOM_MUL
<= (uint64_t)INT32_MAX);
ticker->tick = nticks;
ticker->nticks = nticks;
}
static inline int32_t
ticker_geom_read(const ticker_geom_t *ticker) {
return ticker->tick;
}
/* Same deal as above. */
#if defined(__GNUC__) && !defined(__clang__) \
&& (defined(__x86_64__) || defined(__i386__))
JEMALLOC_NOINLINE
#endif
static bool
ticker_geom_fixup(ticker_geom_t *ticker, uint64_t *prng_state) {
uint64_t idx = prng_lg_range_u64(prng_state, TICKER_GEOM_NBITS);
ticker->tick = (uint32_t)(
(uint64_t)ticker->nticks * (uint64_t)ticker_geom_table[idx]
/ (uint64_t)TICKER_GEOM_MUL);
return true;
}
static inline bool
ticker_geom_ticks(ticker_geom_t *ticker, uint64_t *prng_state, int32_t nticks) {
ticker->tick -= nticks;
if (unlikely(ticker->tick < 0)) {
return ticker_geom_fixup(ticker, prng_state);
}
return false;
}
static inline bool
ticker_geom_tick(ticker_geom_t *ticker, uint64_t *prng_state) {
return ticker_geom_ticks(ticker, prng_state, 1);
}
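/*
 * Illustrative use (hypothetical function and constants): a thread keeps one
 * geometric ticker per purpose rather than one plain ticker per
 * (thread, arena) pair; on average one call in nticks still returns true.
 */
static inline void
ticker_geom_usage_sketch(uint64_t *prng_state) {
	ticker_geom_t ticker;
	ticker_geom_init(&ticker, 1000);
	for (int i = 0; i < 10000; i++) {
		if (ticker_geom_tick(&ticker, prng_state)) {
			/* Hit roughly 10 times out of the 10000 iterations. */
		}
	}
}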
#endif /* JEMALLOC_INTERNAL_TICKER_H */
#ifndef JEMALLOC_INTERNAL_TSD_H
#define JEMALLOC_INTERNAL_TSD_H
#include "jemalloc/internal/activity_callback.h"
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/bin_types.h"
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/peak.h"
#include "jemalloc/internal/prof_types.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/rtree_tsd.h"
......@@ -15,39 +17,30 @@
/*
* Thread-Specific-Data layout
* --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
* s: state
* e: tcache_enabled
* m: thread_allocated (config_stats)
* f: thread_deallocated (config_stats)
* p: prof_tdata (config_prof)
* c: rtree_ctx (rtree cache accessed on deallocation)
* t: tcache
* --- data not accessed on tcache fast path: arena-related fields ---
* d: arenas_tdata_bypass
* r: reentrancy_level
* x: narenas_tdata
* i: iarena
* a: arena
* o: arenas_tdata
* Loading TSD data is on the critical path of basically all malloc operations.
* In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
* Use a compact layout to reduce cache footprint.
* +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+
* |---------------------------- 1st cacheline ----------------------------|
* | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] |
* |---------------------------- 2nd cacheline ----------------------------|
* | [c * 64 ........ ........ ........ ........ ........ ........ .......] |
 * |---------------------------- 3rd cacheline ----------------------------|
* | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
* +-------------------------------------------------------------------------+
* Note: the entire tcache is embedded into TSD and spans multiple cachelines.
*
* The last 3 members (i, a and o) before tcache isn't really needed on tcache
* fast path. However we have a number of unused tcache bins and witnesses
* (never touched unless config_debug) at the end of tcache, so we place them
* there to avoid breaking the cachelines and possibly paging in an extra page.
* At least some thread-local data gets touched on the fast-path of almost all
* malloc operations. But much of it is only necessary down slow-paths, or
* testing. We want to colocate the fast-path data so that it can live on the
* same cacheline if possible. So we define three tiers of hotness:
* TSD_DATA_FAST: Touched on the alloc/dalloc fast paths.
* TSD_DATA_SLOW: Touched down slow paths. "Slow" here is sort of general;
* there are "semi-slow" paths like "not a sized deallocation, but can still
* live in the tcache". We'll want to keep these closer to the fast-path
* data.
* TSD_DATA_SLOWER: Only touched in test or debug modes, or not touched at all.
*
* An additional concern is that the larger tcache bins won't be used (we have a
* bin per size class, but by default only cache relatively small objects). So
* the earlier bins are in the TSD_DATA_FAST tier, but the later ones are in the
* TSD_DATA_SLOWER tier.
*
* As a result of all this, we put the slow data first, then the fast data, then
* the slower data, while keeping the tcache as the last element of the fast
* data (so that the fast -> slower transition happens midway through the
* tcache). While we don't yet play alignment tricks to guarantee it, this
* increases our odds of getting some cache/page locality on fast paths.
*/
#ifdef JEMALLOC_JET
typedef void (*test_callback_t)(int *);
# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
......@@ -60,50 +53,112 @@ typedef void (*test_callback_t)(int *);
# define MALLOC_TEST_TSD_INITIALIZER
#endif
/* O(name, type, nullable type */
#define MALLOC_TSD \
typedef ql_elm(tsd_t) tsd_link_t;
/* O(name, type, nullable type) */
#define TSD_DATA_SLOW \
O(tcache_enabled, bool, bool) \
O(arenas_tdata_bypass, bool, bool) \
O(reentrancy_level, int8_t, int8_t) \
O(narenas_tdata, uint32_t, uint32_t) \
O(offset_state, uint64_t, uint64_t) \
O(thread_allocated, uint64_t, uint64_t) \
O(thread_deallocated, uint64_t, uint64_t) \
O(bytes_until_sample, int64_t, int64_t) \
O(thread_allocated_last_event, uint64_t, uint64_t) \
O(thread_allocated_next_event, uint64_t, uint64_t) \
O(thread_deallocated_last_event, uint64_t, uint64_t) \
O(thread_deallocated_next_event, uint64_t, uint64_t) \
O(tcache_gc_event_wait, uint64_t, uint64_t) \
O(tcache_gc_dalloc_event_wait, uint64_t, uint64_t) \
O(prof_sample_event_wait, uint64_t, uint64_t) \
O(prof_sample_last_event, uint64_t, uint64_t) \
O(stats_interval_event_wait, uint64_t, uint64_t) \
O(stats_interval_last_event, uint64_t, uint64_t) \
O(peak_alloc_event_wait, uint64_t, uint64_t) \
O(peak_dalloc_event_wait, uint64_t, uint64_t) \
O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
O(prng_state, uint64_t, uint64_t) \
O(san_extents_until_guard_small, uint64_t, uint64_t) \
O(san_extents_until_guard_large, uint64_t, uint64_t) \
O(iarena, arena_t *, arena_t *) \
O(arena, arena_t *, arena_t *) \
O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\
O(arena_decay_ticker, ticker_geom_t, ticker_geom_t) \
O(sec_shard, uint8_t, uint8_t) \
O(binshards, tsd_binshards_t, tsd_binshards_t)\
O(tcache, tcache_t, tcache_t) \
O(tsd_link, tsd_link_t, tsd_link_t) \
O(in_hook, bool, bool) \
O(peak, peak_t, peak_t) \
O(activity_callback_thunk, activity_callback_thunk_t, \
activity_callback_thunk_t) \
O(tcache_slow, tcache_slow_t, tcache_slow_t) \
O(rtree_ctx, rtree_ctx_t, rtree_ctx_t)
#define TSD_DATA_SLOW_INITIALIZER \
/* tcache_enabled */ TCACHE_ENABLED_ZERO_INITIALIZER, \
/* reentrancy_level */ 0, \
/* thread_allocated_last_event */ 0, \
/* thread_allocated_next_event */ 0, \
/* thread_deallocated_last_event */ 0, \
/* thread_deallocated_next_event */ 0, \
/* tcache_gc_event_wait */ 0, \
/* tcache_gc_dalloc_event_wait */ 0, \
/* prof_sample_event_wait */ 0, \
/* prof_sample_last_event */ 0, \
/* stats_interval_event_wait */ 0, \
/* stats_interval_last_event */ 0, \
/* peak_alloc_event_wait */ 0, \
/* peak_dalloc_event_wait */ 0, \
/* prof_tdata */ NULL, \
/* prng_state */ 0, \
/* san_extents_until_guard_small */ 0, \
/* san_extents_until_guard_large */ 0, \
/* iarena */ NULL, \
/* arena */ NULL, \
/* arena_decay_ticker */ \
TICKER_GEOM_INIT(ARENA_DECAY_NTICKS_PER_UPDATE), \
/* sec_shard */ (uint8_t)-1, \
/* binshards */ TSD_BINSHARDS_ZERO_INITIALIZER, \
/* tsd_link */ {NULL}, \
/* in_hook */ false, \
/* peak */ PEAK_INITIALIZER, \
/* activity_callback_thunk */ \
ACTIVITY_CALLBACK_THUNK_INITIALIZER, \
/* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER, \
/* rtree_ctx */ RTREE_CTX_INITIALIZER,
/* O(name, type, nullable type) */
#define TSD_DATA_FAST \
O(thread_allocated, uint64_t, uint64_t) \
O(thread_allocated_next_event_fast, uint64_t, uint64_t) \
O(thread_deallocated, uint64_t, uint64_t) \
O(thread_deallocated_next_event_fast, uint64_t, uint64_t) \
O(tcache, tcache_t, tcache_t)
#define TSD_DATA_FAST_INITIALIZER \
/* thread_allocated */ 0, \
/* thread_allocated_next_event_fast */ 0, \
/* thread_deallocated */ 0, \
/* thread_deallocated_next_event_fast */ 0, \
/* tcache */ TCACHE_ZERO_INITIALIZER,
/* O(name, type, nullable type) */
#define TSD_DATA_SLOWER \
O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
MALLOC_TEST_TSD
#define TSD_DATA_SLOWER_INITIALIZER \
/* witness */ WITNESS_TSD_INITIALIZER \
/* test data */ MALLOC_TEST_TSD_INITIALIZER
#define TSD_INITIALIZER { \
ATOMIC_INIT(tsd_state_uninitialized), \
TCACHE_ENABLED_ZERO_INITIALIZER, \
false, \
0, \
0, \
0, \
0, \
0, \
0, \
NULL, \
RTREE_CTX_ZERO_INITIALIZER, \
NULL, \
NULL, \
NULL, \
TSD_BINSHARDS_ZERO_INITIALIZER, \
TCACHE_ZERO_INITIALIZER, \
WITNESS_TSD_INITIALIZER \
MALLOC_TEST_TSD_INITIALIZER \
TSD_DATA_SLOW_INITIALIZER \
/* state */ ATOMIC_INIT(tsd_state_uninitialized), \
TSD_DATA_FAST_INITIALIZER \
TSD_DATA_SLOWER_INITIALIZER \
}
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
void _malloc_tsd_cleanup_register(bool (*f)(void));
#endif
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_cleanup_register(bool (*f)(void));
tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
void tsd_cleanup(void *arg);
......@@ -189,14 +244,17 @@ struct tsd_s {
* setters below.
*/
#define O(n, t, nt) \
t TSD_MANGLE(n);
TSD_DATA_SLOW
/*
* We manually limit the state to just a single byte. Unless the 8-bit
* atomics are unavailable (which is rare).
*/
tsd_state_t state;
#define O(n, t, nt) \
t TSD_MANGLE(n);
MALLOC_TSD
TSD_DATA_FAST
TSD_DATA_SLOWER
#undef O
};
......@@ -262,7 +320,9 @@ JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get_unsafe(tsd_t *tsd) { \
return &tsd->TSD_MANGLE(n); \
}
MALLOC_TSD
TSD_DATA_SLOW
TSD_DATA_FAST
TSD_DATA_SLOWER
#undef O
/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
......@@ -281,7 +341,9 @@ tsd_##n##p_get(tsd_t *tsd) { \
state == tsd_state_minimal_initialized); \
return tsd_##n##p_get_unsafe(tsd); \
}
MALLOC_TSD
TSD_DATA_SLOW
TSD_DATA_FAST
TSD_DATA_SLOWER
#undef O
/*
......@@ -297,7 +359,9 @@ tsdn_##n##p_get(tsdn_t *tsdn) { \
tsd_t *tsd = tsdn_tsd(tsdn); \
return (nt *)tsd_##n##p_get(tsd); \
}
MALLOC_TSD
TSD_DATA_SLOW
TSD_DATA_FAST
TSD_DATA_SLOWER
#undef O
/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
......@@ -306,7 +370,9 @@ JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) { \
return *tsd_##n##p_get(tsd); \
}
MALLOC_TSD
TSD_DATA_SLOW
TSD_DATA_FAST
TSD_DATA_SLOWER
#undef O
/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
......@@ -317,7 +383,9 @@ tsd_##n##_set(tsd_t *tsd, t val) { \
tsd_state_get(tsd) != tsd_state_minimal_initialized); \
*tsd_##n##p_get(tsd) = val; \
}
MALLOC_TSD
TSD_DATA_SLOW
TSD_DATA_FAST
TSD_DATA_SLOWER
#undef O
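/*
 * Illustrative expansion (not spelled out verbatim here): for the entry
 * O(thread_allocated, uint64_t, uint64_t) in TSD_DATA_FAST, the blocks above
 * produce, among others,
 *     uint64_t *tsd_thread_allocatedp_get_unsafe(tsd_t *tsd);
 *     uint64_t *tsd_thread_allocatedp_get(tsd_t *tsd);
 *     uint64_t  tsd_thread_allocated_get(tsd_t *tsd);
 *     void      tsd_thread_allocated_set(tsd_t *tsd, uint64_t val);
 * which are the accessors the thread_event code above relies on.
 */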
JEMALLOC_ALWAYS_INLINE void
......@@ -382,7 +450,10 @@ tsd_fetch(void) {
static inline bool
tsd_nominal(tsd_t *tsd) {
return (tsd_state_get(tsd) <= tsd_state_nominal_max);
bool nominal = tsd_state_get(tsd) <= tsd_state_nominal_max;
assert(nominal || tsd_reentrancy_level_get(tsd) > 0);
return nominal;
}
JEMALLOC_ALWAYS_INLINE tsdn_t *
......@@ -412,4 +483,36 @@ tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
return tsd_rtree_ctx(tsdn_tsd(tsdn));
}
static inline bool
tsd_state_nocleanup(tsd_t *tsd) {
return tsd_state_get(tsd) == tsd_state_reincarnated ||
tsd_state_get(tsd) == tsd_state_minimal_initialized;
}
/*
* These "raw" tsd reentrancy functions don't have any debug checking to make
* sure that we're not touching arena 0. Better is to call pre_reentrancy and
* post_reentrancy if this is possible.
*/
static inline void
tsd_pre_reentrancy_raw(tsd_t *tsd) {
bool fast = tsd_fast(tsd);
assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
++*tsd_reentrancy_levelp_get(tsd);
if (fast) {
/* Prepare slow path for reentrancy. */
tsd_slow_update(tsd);
assert(tsd_state_get(tsd) == tsd_state_nominal_slow);
}
}
static inline void
tsd_post_reentrancy_raw(tsd_t *tsd) {
int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
assert(*reentrancy_level > 0);
if (--*reentrancy_level == 0) {
tsd_slow_update(tsd);
}
}
#endif /* JEMALLOC_INTERNAL_TSD_H */
......@@ -52,6 +52,9 @@ tsd_cleanup_wrapper(void *arg) {
JEMALLOC_ALWAYS_INLINE void
tsd_wrapper_set(tsd_wrapper_t *wrapper) {
if (unlikely(!tsd_booted)) {
return;
}
if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
malloc_write("<jemalloc>: Error setting TSD\n");
abort();
......@@ -60,7 +63,13 @@ tsd_wrapper_set(tsd_wrapper_t *wrapper) {
JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
tsd_wrapper_get(bool init) {
tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);
tsd_wrapper_t *wrapper;
if (unlikely(!tsd_booted)) {
return &tsd_boot_wrapper;
}
wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);
if (init && unlikely(wrapper == NULL)) {
tsd_init_block_t block;
......@@ -91,11 +100,21 @@ tsd_wrapper_get(bool init) {
JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
tsd_wrapper_t *wrapper;
tsd_init_block_t block;
wrapper = (tsd_wrapper_t *)
tsd_init_check_recursion(&tsd_init_head, &block);
if (wrapper) {
return false;
}
block.data = &tsd_boot_wrapper;
if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) {
return true;
}
tsd_wrapper_set(&tsd_boot_wrapper);
tsd_booted = true;
tsd_wrapper_set(&tsd_boot_wrapper);
tsd_init_finish(&tsd_init_head, &block);
return false;
}
......
......@@ -21,7 +21,7 @@ tsd_cleanup_wrapper(void) {
JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
_malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
tsd_booted = true;
return false;
}
......
#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H
#define JEMALLOC_INTERNAL_TSD_TYPES_H
#define MALLOC_TSD_CLEANUPS_MAX 2
#define MALLOC_TSD_CLEANUPS_MAX 4
typedef struct tsd_s tsd_t;
typedef struct tsdn_s tsdn_t;
......
......@@ -72,7 +72,7 @@ tsd_boot0(void) {
if (tsd_tsd == TLS_OUT_OF_INDEXES) {
return true;
}
malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
_malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
tsd_wrapper_set(&tsd_boot_wrapper);
tsd_booted = true;
return false;
......
#ifndef JEMALLOC_INTERNAL_TYPED_LIST_H
#define JEMALLOC_INTERNAL_TYPED_LIST_H
/*
* This wraps the ql module to implement a list class in a way that's a little
* bit easier to use; it handles ql_elm_new calls and provides type safety.
*/
#define TYPED_LIST(list_type, el_type, linkage) \
typedef struct { \
ql_head(el_type) head; \
} list_type##_t; \
static inline void \
list_type##_init(list_type##_t *list) { \
ql_new(&list->head); \
} \
static inline el_type * \
list_type##_first(const list_type##_t *list) { \
return ql_first(&list->head); \
} \
static inline el_type * \
list_type##_last(const list_type##_t *list) { \
return ql_last(&list->head, linkage); \
} \
static inline void \
list_type##_append(list_type##_t *list, el_type *item) { \
ql_elm_new(item, linkage); \
ql_tail_insert(&list->head, item, linkage); \
} \
static inline void \
list_type##_prepend(list_type##_t *list, el_type *item) { \
ql_elm_new(item, linkage); \
ql_head_insert(&list->head, item, linkage); \
} \
static inline void \
list_type##_replace(list_type##_t *list, el_type *to_remove, \
el_type *to_insert) { \
ql_elm_new(to_insert, linkage); \
ql_after_insert(to_remove, to_insert, linkage); \
ql_remove(&list->head, to_remove, linkage); \
} \
static inline void \
list_type##_remove(list_type##_t *list, el_type *item) { \
ql_remove(&list->head, item, linkage); \
} \
static inline bool \
list_type##_empty(list_type##_t *list) { \
return ql_empty(&list->head); \
} \
static inline void \
list_type##_concat(list_type##_t *list_a, list_type##_t *list_b) { \
ql_concat(&list_a->head, &list_b->head, linkage); \
}
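/*
 * Illustrative instantiation (hypothetical type and field names): this one
 * line generates my_list_t together with my_list_init/_first/_last/_append/
 * _prepend/_replace/_remove/_empty/_concat, all thin type-safe wrappers over
 * the underlying ql primitives.
 */
typedef struct my_node_s my_node_t;
struct my_node_s {
	int payload;
	ql_elm(my_node_t) link;
};
TYPED_LIST(my_list, my_node_t, link)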
#endif /* JEMALLOC_INTERNAL_TYPED_LIST_H */
......@@ -62,6 +62,62 @@ get_errno(void) {
#endif
}
JEMALLOC_ALWAYS_INLINE void
util_assume(bool b) {
if (!b) {
unreachable();
}
}
/* ptr should be valid. */
JEMALLOC_ALWAYS_INLINE void
util_prefetch_read(void *ptr) {
/*
* This should arguably be a config check; but any version of GCC so old
* that it doesn't support __builtin_prefetch is also too old to build
* jemalloc.
*/
#ifdef __GNUC__
if (config_debug) {
/* Enforce the "valid ptr" requirement. */
*(volatile char *)ptr;
}
__builtin_prefetch(ptr, /* read or write */ 0, /* locality hint */ 3);
#else
*(volatile char *)ptr;
#endif
}
JEMALLOC_ALWAYS_INLINE void
util_prefetch_write(void *ptr) {
#ifdef __GNUC__
if (config_debug) {
*(volatile char *)ptr;
}
/*
* The only difference from the read variant is that this has a 1 as the
* second argument (the write hint).
*/
__builtin_prefetch(ptr, 1, 3);
#else
*(volatile char *)ptr;
#endif
}
JEMALLOC_ALWAYS_INLINE void
util_prefetch_read_range(void *ptr, size_t sz) {
for (size_t i = 0; i < sz; i += CACHELINE) {
util_prefetch_read((void *)((uintptr_t)ptr + i));
}
}
JEMALLOC_ALWAYS_INLINE void
util_prefetch_write_range(void *ptr, size_t sz) {
for (size_t i = 0; i < sz; i += CACHELINE) {
util_prefetch_write((void *)((uintptr_t)ptr + i));
}
}
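/*
 * Illustrative use (hypothetical): warm an object's cache lines before a
 * write-heavy initialization pass.  memset() stands in for whatever
 * initialization the caller actually performs.
 */
static inline void
prefetch_then_init_sketch(void *obj, size_t obj_sz) {
	util_prefetch_write_range(obj, obj_sz);
	memset(obj, 0, obj_sz);
}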
#undef UTIL_INLINE
#endif /* JEMALLOC_INTERNAL_UTIL_H */
......@@ -7,60 +7,76 @@
/* LOCK RANKS */
/******************************************************************************/
/*
* Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness
* machinery.
enum witness_rank_e {
/*
* Order matters within this enum listing -- higher valued locks can
* only be acquired after lower-valued ones. We use the
* auto-incrementing-ness of enum values to enforce this.
*/
#define WITNESS_RANK_OMIT 0U
#define WITNESS_RANK_MIN 1U
#define WITNESS_RANK_INIT 1U
#define WITNESS_RANK_CTL 1U
#define WITNESS_RANK_TCACHES 2U
#define WITNESS_RANK_ARENAS 3U
#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U
#define WITNESS_RANK_PROF_DUMP 5U
#define WITNESS_RANK_PROF_BT2GCTX 6U
#define WITNESS_RANK_PROF_TDATAS 7U
#define WITNESS_RANK_PROF_TDATA 8U
#define WITNESS_RANK_PROF_LOG 9U
#define WITNESS_RANK_PROF_GCTX 10U
#define WITNESS_RANK_BACKGROUND_THREAD 11U
/*
* Used as an argument to witness_assert_depth_to_rank() in order to validate
* depth excluding non-core locks with lower ranks. Since the rank argument to
* witness_assert_depth_to_rank() is inclusive rather than exclusive, this
* definition can have the same value as the minimally ranked core lock.
/*
* Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the
* witness machinery.
*/
WITNESS_RANK_OMIT,
WITNESS_RANK_MIN,
WITNESS_RANK_INIT = WITNESS_RANK_MIN,
WITNESS_RANK_CTL,
WITNESS_RANK_TCACHES,
WITNESS_RANK_ARENAS,
WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
WITNESS_RANK_PROF_DUMP,
WITNESS_RANK_PROF_BT2GCTX,
WITNESS_RANK_PROF_TDATAS,
WITNESS_RANK_PROF_TDATA,
WITNESS_RANK_PROF_LOG,
WITNESS_RANK_PROF_GCTX,
WITNESS_RANK_PROF_RECENT_DUMP,
WITNESS_RANK_BACKGROUND_THREAD,
/*
* Used as an argument to witness_assert_depth_to_rank() in order to
* validate depth excluding non-core locks with lower ranks. Since the
* rank argument to witness_assert_depth_to_rank() is inclusive rather
* than exclusive, this definition can have the same value as the
* minimally ranked core lock.
*/
#define WITNESS_RANK_CORE 12U
#define WITNESS_RANK_DECAY 12U
#define WITNESS_RANK_TCACHE_QL 13U
#define WITNESS_RANK_EXTENT_GROW 14U
#define WITNESS_RANK_EXTENTS 15U
#define WITNESS_RANK_EXTENT_AVAIL 16U
#define WITNESS_RANK_EXTENT_POOL 17U
#define WITNESS_RANK_RTREE 18U
#define WITNESS_RANK_BASE 19U
#define WITNESS_RANK_ARENA_LARGE 20U
#define WITNESS_RANK_HOOK 21U
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_BIN WITNESS_RANK_LEAF
#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF
#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
WITNESS_RANK_CORE,
WITNESS_RANK_DECAY = WITNESS_RANK_CORE,
WITNESS_RANK_TCACHE_QL,
WITNESS_RANK_SEC_SHARD,
WITNESS_RANK_EXTENT_GROW,
WITNESS_RANK_HPA_SHARD_GROW = WITNESS_RANK_EXTENT_GROW,
WITNESS_RANK_SAN_BUMP_ALLOC = WITNESS_RANK_EXTENT_GROW,
WITNESS_RANK_EXTENTS,
WITNESS_RANK_HPA_SHARD = WITNESS_RANK_EXTENTS,
WITNESS_RANK_HPA_CENTRAL_GROW,
WITNESS_RANK_HPA_CENTRAL,
WITNESS_RANK_EDATA_CACHE,
WITNESS_RANK_RTREE,
WITNESS_RANK_BASE,
WITNESS_RANK_ARENA_LARGE,
WITNESS_RANK_HOOK,
WITNESS_RANK_LEAF=0x1000,
WITNESS_RANK_BIN = WITNESS_RANK_LEAF,
WITNESS_RANK_ARENA_STATS = WITNESS_RANK_LEAF,
WITNESS_RANK_COUNTER_ACCUM = WITNESS_RANK_LEAF,
WITNESS_RANK_DSS = WITNESS_RANK_LEAF,
WITNESS_RANK_PROF_ACTIVE = WITNESS_RANK_LEAF,
WITNESS_RANK_PROF_DUMP_FILENAME = WITNESS_RANK_LEAF,
WITNESS_RANK_PROF_GDUMP = WITNESS_RANK_LEAF,
WITNESS_RANK_PROF_NEXT_THR_UID = WITNESS_RANK_LEAF,
WITNESS_RANK_PROF_RECENT_ALLOC = WITNESS_RANK_LEAF,
WITNESS_RANK_PROF_STATS = WITNESS_RANK_LEAF,
WITNESS_RANK_PROF_THREAD_ACTIVE_INIT = WITNESS_RANK_LEAF,
};
typedef enum witness_rank_e witness_rank_t;
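/*
 * Illustrative ordering example (not from the source): a thread that already
 * holds a lock of rank WITNESS_RANK_EXTENTS may go on to acquire one of rank
 * WITNESS_RANK_RTREE (a higher value), but acquiring a WITNESS_RANK_CTL lock
 * at that point would violate the increasing-rank rule and, under
 * config_debug, be reported by the witness machinery.
 */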
/******************************************************************************/
/* PER-WITNESS DATA */
......@@ -72,7 +88,6 @@
#endif
typedef struct witness_s witness_t;
typedef unsigned witness_rank_t;
typedef ql_head(witness_t) witness_list_t;
typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
void *);
......@@ -82,8 +97,8 @@ struct witness_s {
const char *name;
/*
* Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
* must be acquired in order of increasing rank.
* Witness rank, where 0 is lowest and WITNESS_RANK_LEAF is highest.
* Witnesses must be acquired in order of increasing rank.
*/
witness_rank_t rank;
......@@ -228,26 +243,13 @@ witness_assert_not_owner(witness_tsdn_t *witness_tsdn,
}
}
static inline void
witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
witness_rank_t rank_inclusive, unsigned depth) {
witness_tsd_t *witness_tsd;
unsigned d;
witness_list_t *witnesses;
witness_t *w;
/* Returns depth. Not intended for direct use. */
static inline unsigned
witness_depth_to_rank(witness_list_t *witnesses, witness_rank_t rank_inclusive)
{
unsigned d = 0;
witness_t *w = ql_last(witnesses, link);
if (!config_debug) {
return;
}
if (witness_tsdn_null(witness_tsdn)) {
return;
}
witness_tsd = witness_tsdn_tsd(witness_tsdn);
d = 0;
witnesses = &witness_tsd->witnesses;
w = ql_last(witnesses, link);
if (w != NULL) {
ql_reverse_foreach(w, witnesses, link) {
if (w->rank < rank_inclusive) {
......@@ -256,6 +258,20 @@ witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
d++;
}
}
return d;
}
static inline void
witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
witness_rank_t rank_inclusive, unsigned depth) {
if (!config_debug || witness_tsdn_null(witness_tsdn)) {
return;
}
witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses;
unsigned d = witness_depth_to_rank(witnesses, rank_inclusive);
if (d != depth) {
witness_depth_error(witnesses, rank_inclusive, depth);
}
......@@ -271,6 +287,21 @@ witness_assert_lockless(witness_tsdn_t *witness_tsdn) {
witness_assert_depth(witness_tsdn, 0);
}
static inline void
witness_assert_positive_depth_to_rank(witness_tsdn_t *witness_tsdn,
witness_rank_t rank_inclusive) {
if (!config_debug || witness_tsdn_null(witness_tsdn)) {
return;
}
witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses;
unsigned d = witness_depth_to_rank(witnesses, rank_inclusive);
if (d == 0) {
witness_depth_error(witnesses, rank_inclusive, 1);
}
}
static inline void
witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
witness_tsd_t *witness_tsd;
......
......@@ -13,6 +13,12 @@
/* Defined if format(printf, ...) attribute is supported. */
#undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
/* Defined if fallthrough attribute is supported. */
#undef JEMALLOC_HAVE_ATTR_FALLTHROUGH
/* Defined if cold attribute is supported. */
#undef JEMALLOC_HAVE_ATTR_COLD
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
......
......@@ -71,6 +71,7 @@
# endif
# define JEMALLOC_FORMAT_ARG(i)
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_FALLTHROUGH
# define JEMALLOC_NOINLINE __declspec(noinline)
# ifdef __cplusplus
# define JEMALLOC_NOTHROW __declspec(nothrow)
......@@ -84,6 +85,7 @@
# else
# define JEMALLOC_ALLOCATOR
# endif
# define JEMALLOC_COLD
#elif defined(JEMALLOC_HAVE_ATTR)
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
......@@ -109,11 +111,21 @@
# else
# define JEMALLOC_FORMAT_PRINTF(s, i)
# endif
# ifdef JEMALLOC_HAVE_ATTR_FALLTHROUGH
# define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough)
# else
# define JEMALLOC_FALLTHROUGH
# endif
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
# ifdef JEMALLOC_HAVE_ATTR_COLD
# define JEMALLOC_COLD JEMALLOC_ATTR(__cold__)
# else
# define JEMALLOC_COLD
# endif
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s)
......@@ -121,11 +133,19 @@
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# define JEMALLOC_EXPORT
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_FALLTHROUGH
# define JEMALLOC_NOINLINE
# define JEMALLOC_NOTHROW
# define JEMALLOC_SECTION(s)
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
# define JEMALLOC_COLD
#endif
#if (defined(__APPLE__) || defined(__FreeBSD__)) && !defined(JEMALLOC_NO_RENAME)
# define JEMALLOC_SYS_NOTHROW
#else
# define JEMALLOC_SYS_NOTHROW JEMALLOC_NOTHROW
#endif
/* This version of Jemalloc, modified for Redis, has the je_get_defrag_hint()
......
......@@ -8,21 +8,22 @@ extern JEMALLOC_EXPORT void (*@je_@malloc_message)(void *cbopaque,
const char *s);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *@je_@malloc(size_t size)
void JEMALLOC_SYS_NOTHROW *@je_@malloc(size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *@je_@calloc(size_t num, size_t size)
void JEMALLOC_SYS_NOTHROW *@je_@calloc(size_t num, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@posix_memalign(void **memptr,
size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int JEMALLOC_SYS_NOTHROW @je_@posix_memalign(
void **memptr, size_t alignment, size_t size) JEMALLOC_CXX_THROW
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *@je_@aligned_alloc(size_t alignment,
void JEMALLOC_SYS_NOTHROW *@je_@aligned_alloc(size_t alignment,
size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *@je_@realloc(void *ptr, size_t size)
void JEMALLOC_SYS_NOTHROW *@je_@realloc(void *ptr, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@free(void *ptr)
JEMALLOC_EXPORT void JEMALLOC_SYS_NOTHROW @je_@free(void *ptr)
JEMALLOC_CXX_THROW;
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
......@@ -52,15 +53,19 @@ JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@malloc_stats_print(
const char *opts);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
#ifdef JEMALLOC_HAVE_MALLOC_SIZE
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_size(
const void *ptr);
#endif
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *@je_@memalign(size_t alignment, size_t size)
void JEMALLOC_SYS_NOTHROW *@je_@memalign(size_t alignment, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *@je_@valloc(size_t size) JEMALLOC_CXX_THROW
void JEMALLOC_SYS_NOTHROW *@je_@valloc(size_t size) JEMALLOC_CXX_THROW
JEMALLOC_ATTR(malloc);
#endif
# ===========================================================================
# http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
# https://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
# ===========================================================================
#
# SYNOPSIS
......@@ -33,21 +33,23 @@
# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <sokolov@google.com>
# Copyright (c) 2015 Paul Norman <penorman@mac.com>
# Copyright (c) 2015 Moritz Klammler <moritz@klammler.eu>
# Copyright (c) 2016, 2018 Krzesimir Nowak <qdlacz@gmail.com>
# Copyright (c) 2019 Enji Cooper <yaneurabeya@gmail.com>
#
# Copying and distribution of this file, with or without modification, are
# permitted in any medium without royalty provided the copyright notice
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 4
#serial 11
dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro
dnl (serial version number 13).
AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
m4_if([$1], [11], [],
[$1], [14], [],
[$1], [17], [m4_fatal([support for C++17 not yet implemented in AX_CXX_COMPILE_STDCXX])],
m4_if([$1], [11], [ax_cxx_compile_alternatives="11 0x"],
[$1], [14], [ax_cxx_compile_alternatives="14 1y"],
[$1], [17], [ax_cxx_compile_alternatives="17 1z"],
[m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl
m4_if([$2], [], [],
[$2], [ext], [],
......@@ -59,18 +61,11 @@ AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
[m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])])
AC_LANG_PUSH([C++])dnl
ac_success=no
AC_CACHE_CHECK(whether $CXX supports C++$1 features by default,
ax_cv_cxx_compile_cxx$1,
[AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
[ax_cv_cxx_compile_cxx$1=yes],
[ax_cv_cxx_compile_cxx$1=no])])
if test x$ax_cv_cxx_compile_cxx$1 = xyes; then
ac_success=yes
fi
m4_if([$2], [noext], [], [dnl
if test x$ac_success = xno; then
for switch in -std=gnu++$1 -std=gnu++0x; do
for alternative in ${ax_cxx_compile_alternatives}; do
switch="-std=gnu++${alternative}"
cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
$cachevar,
......@@ -96,7 +91,8 @@ AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
dnl HP's aCC needs +std=c++11 according to:
dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf
dnl Cray's crayCC needs "-h std=c++11"
for switch in -std=c++$1 -std=c++0x +std=c++$1 "-h std=c++$1"; do
for alternative in ${ax_cxx_compile_alternatives}; do
for switch in -std=c++${alternative} +std=c++${alternative} "-h std=c++${alternative}"; do
cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
$cachevar,
......@@ -115,6 +111,10 @@ AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
break
fi
done
if test x$ac_success = xyes; then
break
fi
done
fi])
AC_LANG_POP([C++])
if test x$ax_cxx_compile_cxx$1_required = xtrue; then
......@@ -148,6 +148,11 @@ m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14],
_AX_CXX_COMPILE_STDCXX_testbody_new_in_14
)
m4_define([_AX_CXX_COMPILE_STDCXX_testbody_17],
_AX_CXX_COMPILE_STDCXX_testbody_new_in_11
_AX_CXX_COMPILE_STDCXX_testbody_new_in_14
_AX_CXX_COMPILE_STDCXX_testbody_new_in_17
)
dnl Tests for new features in C++11
......@@ -185,11 +190,13 @@ namespace cxx11
struct Base
{
virtual ~Base() {}
virtual void f() {}
};
struct Derived : public Base
{
virtual ~Derived() override {}
virtual void f() override {}
};
......@@ -518,7 +525,7 @@ namespace cxx14
}
namespace test_digit_seperators
namespace test_digit_separators
{
constexpr auto ten_million = 100'000'000;
......@@ -560,3 +567,385 @@ namespace cxx14
#endif // __cplusplus >= 201402L
]])
dnl Tests for new features in C++17
m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_17], [[
// If the compiler admits that it is not ready for C++17, why torture it?
// Hopefully, this will speed up the test.
#ifndef __cplusplus
#error "This is not a C++ compiler"
#elif __cplusplus < 201703L
#error "This is not a C++17 compiler"
#else
#include <initializer_list>
#include <utility>
#include <type_traits>
namespace cxx17
{
namespace test_constexpr_lambdas
{
constexpr int foo = [](){return 42;}();
}
namespace test::nested_namespace::definitions
{
}
namespace test_fold_expression
{
template<typename... Args>
int multiply(Args... args)
{
return (args * ... * 1);
}
template<typename... Args>
bool all(Args... args)
{
return (args && ...);
}
}
namespace test_extended_static_assert
{
static_assert (true);
}
namespace test_auto_brace_init_list
{
auto foo = {5};
auto bar {5};
static_assert(std::is_same<std::initializer_list<int>, decltype(foo)>::value);
static_assert(std::is_same<int, decltype(bar)>::value);
}
namespace test_typename_in_template_template_parameter
{
template<template<typename> typename X> struct D;
}
namespace test_fallthrough_nodiscard_maybe_unused_attributes
{
int f1()
{
return 42;
}
[[nodiscard]] int f2()
{
[[maybe_unused]] auto unused = f1();
switch (f1())
{
case 17:
f1();
[[fallthrough]];
case 42:
f1();
}
return f1();
}
}
namespace test_extended_aggregate_initialization
{
struct base1
{
int b1, b2 = 42;
};
struct base2
{
base2() {
b3 = 42;
}
int b3;
};
struct derived : base1, base2
{
int d;
};
derived d1 {{1, 2}, {}, 4}; // full initialization
derived d2 {{}, {}, 4}; // value-initialized bases
}
namespace test_general_range_based_for_loop
{
struct iter
{
int i;
int& operator* ()
{
return i;
}
const int& operator* () const
{
return i;
}
iter& operator++()
{
++i;
return *this;
}
};
struct sentinel
{
int i;
};
bool operator== (const iter& i, const sentinel& s)
{
return i.i == s.i;
}
bool operator!= (const iter& i, const sentinel& s)
{
return !(i == s);
}
struct range
{
iter begin() const
{
return {0};
}
sentinel end() const
{
return {5};
}
};
void f()
{
range r {};
for (auto i : r)
{
[[maybe_unused]] auto v = i;
}
}
}
namespace test_lambda_capture_asterisk_this_by_value
{
struct t
{
int i;
int foo()
{
return [*this]()
{
return i;
}();
}
};
}
namespace test_enum_class_construction
{
enum class byte : unsigned char
{};
byte foo {42};
}
namespace test_constexpr_if
{
template <bool cond>
int f ()
{
if constexpr(cond)
{
return 13;
}
else
{
return 42;
}
}
}
namespace test_selection_statement_with_initializer
{
int f()
{
return 13;
}
int f2()
{
if (auto i = f(); i > 0)
{
return 3;
}
switch (auto i = f(); i + 4)
{
case 17:
return 2;
default:
return 1;
}
}
}
namespace test_template_argument_deduction_for_class_templates
{
template <typename T1, typename T2>
struct pair
{
pair (T1 p1, T2 p2)
: m1 {p1},
m2 {p2}
{}
T1 m1;
T2 m2;
};
void f()
{
[[maybe_unused]] auto p = pair{13, 42u};
}
}
namespace test_non_type_auto_template_parameters
{
template <auto n>
struct B
{};
B<5> b1;
B<'a'> b2;
}
namespace test_structured_bindings
{
int arr[2] = { 1, 2 };
std::pair<int, int> pr = { 1, 2 };
auto f1() -> int(&)[2]
{
return arr;
}
auto f2() -> std::pair<int, int>&
{
return pr;
}
struct S
{
int x1 : 2;
volatile double y1;
};
S f3()
{
return {};
}
auto [ x1, y1 ] = f1();
auto& [ xr1, yr1 ] = f1();
auto [ x2, y2 ] = f2();
auto& [ xr2, yr2 ] = f2();
const auto [ x3, y3 ] = f3();
}
namespace test_exception_spec_type_system
{
struct Good {};
struct Bad {};
void g1() noexcept;
void g2();
template<typename T>
Bad
f(T*, T*);
template<typename T1, typename T2>
Good
f(T1*, T2*);
static_assert (std::is_same_v<Good, decltype(f(g1, g2))>);
}
namespace test_inline_variables
{
template<class T> void f(T)
{}
template<class T> inline T g(T)
{
return T{};
}
template<> inline void f<>(int)
{}
template<> int g<>(int)
{
return 5;
}
}
} // namespace cxx17
#endif // __cplusplus < 201703L
]])
......@@ -39,34 +39,64 @@
<ClCompile Include="..\..\..\..\src\background_thread.c" />
<ClCompile Include="..\..\..\..\src\base.c" />
<ClCompile Include="..\..\..\..\src\bin.c" />
<ClCompile Include="..\..\..\..\src\bin_info.c" />
<ClCompile Include="..\..\..\..\src\bitmap.c" />
<ClCompile Include="..\..\..\..\src\buf_writer.c" />
<ClCompile Include="..\..\..\..\src\cache_bin.c" />
<ClCompile Include="..\..\..\..\src\ckh.c" />
<ClCompile Include="..\..\..\..\src\counter.c" />
<ClCompile Include="..\..\..\..\src\ctl.c" />
<ClCompile Include="..\..\..\..\src\decay.c" />
<ClCompile Include="..\..\..\..\src\div.c" />
<ClCompile Include="..\..\..\..\src\ecache.c" />
<ClCompile Include="..\..\..\..\src\edata.c" />
<ClCompile Include="..\..\..\..\src\edata_cache.c" />
<ClCompile Include="..\..\..\..\src\ehooks.c" />
<ClCompile Include="..\..\..\..\src\emap.c" />
<ClCompile Include="..\..\..\..\src\eset.c" />
<ClCompile Include="..\..\..\..\src\exp_grow.c" />
<ClCompile Include="..\..\..\..\src\extent.c" />
<ClCompile Include="..\..\..\..\src\extent_dss.c" />
<ClCompile Include="..\..\..\..\src\extent_mmap.c" />
<ClCompile Include="..\..\..\..\src\hash.c" />
<ClCompile Include="..\..\..\..\src\fxp.c" />
<ClCompile Include="..\..\..\..\src\hook.c" />
<ClCompile Include="..\..\..\..\src\hpa.c" />
<ClCompile Include="..\..\..\..\src\hpa_hooks.c" />
<ClCompile Include="..\..\..\..\src\hpdata.c" />
<ClCompile Include="..\..\..\..\src\inspect.c" />
<ClCompile Include="..\..\..\..\src\jemalloc.c" />
<ClCompile Include="..\..\..\..\src\large.c" />
<ClCompile Include="..\..\..\..\src\log.c" />
<ClCompile Include="..\..\..\..\src\malloc_io.c" />
<ClCompile Include="..\..\..\..\src\mutex.c" />
<ClCompile Include="..\..\..\..\src\mutex_pool.c" />
<ClCompile Include="..\..\..\..\src\nstime.c" />
<ClCompile Include="..\..\..\..\src\pa.c" />
<ClCompile Include="..\..\..\..\src\pa_extra.c" />
<ClCompile Include="..\..\..\..\src\pai.c" />
<ClCompile Include="..\..\..\..\src\pac.c" />
<ClCompile Include="..\..\..\..\src\pages.c" />
<ClCompile Include="..\..\..\..\src\prng.c" />
<ClCompile Include="..\..\..\..\src\peak_event.c" />
<ClCompile Include="..\..\..\..\src\prof.c" />
<ClCompile Include="..\..\..\..\src\prof_data.c" />
<ClCompile Include="..\..\..\..\src\prof_log.c" />
<ClCompile Include="..\..\..\..\src\prof_recent.c" />
<ClCompile Include="..\..\..\..\src\prof_stats.c" />
<ClCompile Include="..\..\..\..\src\prof_sys.c" />
<ClCompile Include="..\..\..\..\src\psset.c" />
<ClCompile Include="..\..\..\..\src\rtree.c" />
<ClCompile Include="..\..\..\..\src\safety_check.c" />
<ClCompile Include="..\..\..\..\src\san.c" />
<ClCompile Include="..\..\..\..\src\san_bump.c" />
<ClCompile Include="..\..\..\..\src\sc.c" />
<ClCompile Include="..\..\..\..\src\sec.c" />
<ClCompile Include="..\..\..\..\src\stats.c" />
<ClCompile Include="..\..\..\..\src\sz.c" />
<ClCompile Include="..\..\..\..\src\tcache.c" />
<ClCompile Include="..\..\..\..\src\test_hooks.c" />
<ClCompile Include="..\..\..\..\src\thread_event.c" />
<ClCompile Include="..\..\..\..\src\ticker.c" />
<ClCompile Include="..\..\..\..\src\tsd.c" />
<ClCompile Include="..\..\..\..\src\witness.c" />
<ClCompile Include="..\..\..\..\src\safety_check.c" />
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
......
......@@ -16,15 +16,39 @@
<ClCompile Include="..\..\..\..\src\base.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\bin.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\bitmap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\buf_writer.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\cache_bin.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ckh.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\counter.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ctl.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\decay.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\div.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\emap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\exp_grow.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\extent.c">
<Filter>Source Files</Filter>
</ClCompile>
......@@ -34,45 +58,93 @@
<ClCompile Include="..\..\..\..\src\extent_mmap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hash.c">
<ClCompile Include="..\..\..\..\src\fxp.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hook.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hpa.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hpa_hooks.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hpdata.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\inspect.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\jemalloc.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\large.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\log.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\malloc_io.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\mutex.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\mutex_pool.c">
<ClCompile Include="..\..\..\..\src\nstime.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\nstime.c">
<ClCompile Include="..\..\..\..\src\pa.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\pa_extra.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\pai.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\pac.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\pages.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prng.c">
<ClCompile Include="..\..\..\..\src\peak_event.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prof.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prof_data.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prof_log.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prof_recent.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prof_stats.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prof_sys.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\psset.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\rtree.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\safety_check.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\sc.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\sec.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\stats.c">
<Filter>Source Files</Filter>
</ClCompile>
......@@ -82,6 +154,12 @@
<ClCompile Include="..\..\..\..\src\tcache.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\test_hooks.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\thread_event.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ticker.c">
<Filter>Source Files</Filter>
</ClCompile>
......@@ -91,16 +169,28 @@
<ClCompile Include="..\..\..\..\src\witness.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\log.c">
<ClCompile Include="..\..\..\..\src\bin_info.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\bin.c">
<ClCompile Include="..\..\..\..\src\ecache.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\div.c">
<ClCompile Include="..\..\..\..\src\edata.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\safety_check.c">
<ClCompile Include="..\..\..\..\src\edata_cache.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ehooks.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\eset.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\san.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\san_bump.c">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
......
......@@ -39,35 +39,64 @@
<ClCompile Include="..\..\..\..\src\background_thread.c" />
<ClCompile Include="..\..\..\..\src\base.c" />
<ClCompile Include="..\..\..\..\src\bin.c" />
<ClCompile Include="..\..\..\..\src\bin_info.c" />
<ClCompile Include="..\..\..\..\src\bitmap.c" />
<ClCompile Include="..\..\..\..\src\buf_writer.c" />
<ClCompile Include="..\..\..\..\src\cache_bin.c" />
<ClCompile Include="..\..\..\..\src\ckh.c" />
<ClCompile Include="..\..\..\..\src\counter.c" />
<ClCompile Include="..\..\..\..\src\ctl.c" />
<ClCompile Include="..\..\..\..\src\decay.c" />
<ClCompile Include="..\..\..\..\src\div.c" />
<ClCompile Include="..\..\..\..\src\ecache.c" />
<ClCompile Include="..\..\..\..\src\edata.c" />
<ClCompile Include="..\..\..\..\src\edata_cache.c" />
<ClCompile Include="..\..\..\..\src\ehooks.c" />
<ClCompile Include="..\..\..\..\src\emap.c" />
<ClCompile Include="..\..\..\..\src\eset.c" />
<ClCompile Include="..\..\..\..\src\exp_grow.c" />
<ClCompile Include="..\..\..\..\src\extent.c" />
<ClCompile Include="..\..\..\..\src\extent_dss.c" />
<ClCompile Include="..\..\..\..\src\extent_mmap.c" />
<ClCompile Include="..\..\..\..\src\hash.c" />
<ClCompile Include="..\..\..\..\src\fxp.c" />
<ClCompile Include="..\..\..\..\src\hook.c" />
<ClCompile Include="..\..\..\..\src\hpa.c" />
<ClCompile Include="..\..\..\..\src\hpa_hooks.c" />
<ClCompile Include="..\..\..\..\src\hpdata.c" />
<ClCompile Include="..\..\..\..\src\inspect.c" />
<ClCompile Include="..\..\..\..\src\jemalloc.c" />
<ClCompile Include="..\..\..\..\src\large.c" />
<ClCompile Include="..\..\..\..\src\log.c" />
<ClCompile Include="..\..\..\..\src\malloc_io.c" />
<ClCompile Include="..\..\..\..\src\mutex.c" />
<ClCompile Include="..\..\..\..\src\mutex_pool.c" />
<ClCompile Include="..\..\..\..\src\nstime.c" />
<ClCompile Include="..\..\..\..\src\pa.c" />
<ClCompile Include="..\..\..\..\src\pa_extra.c" />
<ClCompile Include="..\..\..\..\src\pai.c" />
<ClCompile Include="..\..\..\..\src\pac.c" />
<ClCompile Include="..\..\..\..\src\pages.c" />
<ClCompile Include="..\..\..\..\src\prng.c" />
<ClCompile Include="..\..\..\..\src\peak_event.c" />
<ClCompile Include="..\..\..\..\src\prof.c" />
<ClCompile Include="..\..\..\..\src\prof_data.c" />
<ClCompile Include="..\..\..\..\src\prof_log.c" />
<ClCompile Include="..\..\..\..\src\prof_recent.c" />
<ClCompile Include="..\..\..\..\src\prof_stats.c" />
<ClCompile Include="..\..\..\..\src\prof_sys.c" />
<ClCompile Include="..\..\..\..\src\psset.c" />
<ClCompile Include="..\..\..\..\src\rtree.c" />
<ClCompile Include="..\..\..\..\src\safety_check.c" />
<ClCompile Include="..\..\..\..\src\san.c" />
<ClCompile Include="..\..\..\..\src\san_bump.c" />
<ClCompile Include="..\..\..\..\src\sc.c" />
<ClCompile Include="..\..\..\..\src\sec.c" />
<ClCompile Include="..\..\..\..\src\stats.c" />
<ClCompile Include="..\..\..\..\src\sz.c" />
<ClCompile Include="..\..\..\..\src\tcache.c" />
<ClCompile Include="..\..\..\..\src\test_hooks.c" />
<ClCompile Include="..\..\..\..\src\thread_event.c" />
<ClCompile Include="..\..\..\..\src\ticker.c" />
<ClCompile Include="..\..\..\..\src\tsd.c" />
<ClCompile Include="..\..\..\..\src\witness.c" />
<ClCompile Include="..\..\..\..\src\safety_check.c" />
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
......