Commit b8beda3c authored by Oran Agra

Merge commit jemalloc 5.3.0

parents d659c734 6d23d3ac
@@ -2,77 +2,41 @@
#define JEMALLOC_INTERNAL_ARENA_STATS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/lockedint.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/pa.h"
#include "jemalloc/internal/sc.h"

JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS

-/*
- * In those architectures that support 64-bit atomics, we use atomic updates for
- * our 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
- * externally.
- */
-#ifdef JEMALLOC_ATOMIC_U64
-typedef atomic_u64_t arena_stats_u64_t;
-#else
-/* Must hold the arena stats mutex while reading atomically. */
-typedef uint64_t arena_stats_u64_t;
-#endif
typedef struct arena_stats_large_s arena_stats_large_t;
struct arena_stats_large_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
-	arena_stats_u64_t	nmalloc;
-	arena_stats_u64_t	ndalloc;
	locked_u64_t	nmalloc;
	locked_u64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
-	arena_stats_u64_t	nrequests; /* Partially derived. */
	locked_u64_t	nrequests; /* Partially derived. */
	/*
	 * Number of tcache fills / flushes for large (similarly, periodically
	 * merged).  Note that there is no large tcache batch-fill currently
	 * (i.e. only fill 1 at a time); however flush may be batched.
	 */
-	arena_stats_u64_t	nfills; /* Partially derived. */
-	arena_stats_u64_t	nflushes; /* Partially derived. */
	locked_u64_t	nfills; /* Partially derived. */
	locked_u64_t	nflushes; /* Partially derived. */

	/* Current number of allocations of this size class. */
	size_t		curlextents; /* Derived. */
};
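/*
 * A simplified sketch of the locked_u64_t scheme used above (illustration
 * only -- the real lockedint.h also threads tsdn/witness data through): with
 * 64-bit atomics the counter is a relaxed atomic; without them it is a plain
 * uint64_t that must only be updated while holding the owning LOCKEDINT_MTX.
 */
#ifdef JEMALLOC_ATOMIC_U64
typedef struct { atomic_u64_t val; } locked_u64_sketch_t;

static inline void
locked_inc_u64_sketch(locked_u64_sketch_t *p, uint64_t x) {
	atomic_fetch_add_u64(&p->val, x, ATOMIC_RELAXED);
}
#else
typedef struct { uint64_t val; } locked_u64_sketch_t;

static inline void
locked_inc_u64_sketch(locked_u64_sketch_t *p, uint64_t x) {
	/* Caller must hold the stats mutex. */
	p->val += x;
}
#endif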
-typedef struct arena_stats_decay_s arena_stats_decay_t;
-struct arena_stats_decay_s {
-	/* Total number of purge sweeps. */
-	arena_stats_u64_t	npurge;
-	/* Total number of madvise calls made. */
-	arena_stats_u64_t	nmadvise;
-	/* Total number of pages purged. */
-	arena_stats_u64_t	purged;
-};
-
-typedef struct arena_stats_extents_s arena_stats_extents_t;
-struct arena_stats_extents_s {
-	/*
-	 * Stats for a given index in the range [0, SC_NPSIZES] in an extents_t.
-	 * We track both bytes and # of extents: two extents in the same bucket
-	 * may have different sizes if adjacent size classes differ by more than
-	 * a page, so bytes cannot always be derived from # of extents.
-	 */
-	atomic_zu_t	ndirty;
-	atomic_zu_t	dirty_bytes;
-	atomic_zu_t	nmuzzy;
-	atomic_zu_t	muzzy_bytes;
-	atomic_zu_t	nretained;
-	atomic_zu_t	retained_bytes;
-};
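/*
 * Illustration (hypothetical sizes): two dirty extents can share a pszind
 * bucket whose size classes are more than a page apart, so the byte total
 * is not derivable from the extent count alone.
 */
static inline void
extents_stats_example(void) {
	size_t sizes[2] = {160 * 1024, 176 * 1024};	/* same bucket */
	size_t ndirty = 2;
	size_t dirty_bytes = sizes[0] + sizes[1];	/* 344064 != 2 * sizes[0] */
	(void)ndirty; (void)dirty_bytes;
}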
/*
 * Arena stats.  Note that fields marked "derived" are not directly maintained
 * within the arena code; rather their values are derived during stats merge
@@ -80,43 +44,36 @@ struct arena_stats_extents_s {
 */
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
-#ifndef JEMALLOC_ATOMIC_U64
-	malloc_mutex_t		mtx;
-#endif
	LOCKEDINT_MTX_DECLARE(mtx)

-	/* Number of bytes currently mapped, excluding retained memory. */
-	atomic_zu_t		mapped; /* Partially derived. */
-
-	/*
-	 * Number of unused virtual memory bytes currently retained.  Retained
-	 * bytes are technically mapped (though always decommitted or purged),
-	 * but they are excluded from the mapped statistic (above).
-	 */
-	atomic_zu_t		retained; /* Derived. */
-
-	/* Number of extent_t structs allocated by base, but not being used. */
-	atomic_zu_t		extent_avail;
-
-	arena_stats_decay_t	decay_dirty;
-	arena_stats_decay_t	decay_muzzy;
-
-	atomic_zu_t		base; /* Derived. */
-	atomic_zu_t		resident; /* Derived. */
-	atomic_zu_t		metadata_thp;
	/*
	 * resident includes the base stats -- that's why it lives here and not
	 * in pa_shard_stats_t.
	 */
	size_t			base; /* Derived. */
	size_t			resident; /* Derived. */
	size_t			metadata_thp; /* Derived. */
	size_t			mapped; /* Derived. */

	atomic_zu_t		internal;

-	atomic_zu_t		allocated_large; /* Derived. */
-	arena_stats_u64_t	nmalloc_large; /* Derived. */
-	arena_stats_u64_t	ndalloc_large; /* Derived. */
-	arena_stats_u64_t	nfills_large; /* Derived. */
-	arena_stats_u64_t	nflushes_large; /* Derived. */
-	arena_stats_u64_t	nrequests_large; /* Derived. */
	size_t			allocated_large; /* Derived. */
	uint64_t		nmalloc_large; /* Derived. */
	uint64_t		ndalloc_large; /* Derived. */
	uint64_t		nfills_large; /* Derived. */
	uint64_t		nflushes_large; /* Derived. */
	uint64_t		nrequests_large; /* Derived. */

-	/* VM space had to be leaked (undocumented).  Normally 0. */
-	atomic_zu_t		abandoned_vm;
	/*
	 * The stats logically owned by the pa_shard in the same arena.  This
	 * lives here only because it's convenient for the purposes of the ctl
	 * module -- it only knows about the single arena_stats.
	 */
	pa_shard_stats_t	pa_shard_stats;

	/* Number of bytes cached in tcache associated with this arena. */
-	atomic_zu_t		tcache_bytes; /* Derived. */
	size_t			tcache_bytes; /* Derived. */
	size_t			tcache_stashed_bytes; /* Derived. */

	mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
@@ -134,138 +91,24 @@ arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
			assert(((char *)arena_stats)[i] == 0);
		}
	}
-#ifndef JEMALLOC_ATOMIC_U64
-	if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
	if (LOCKEDINT_MTX_INIT(arena_stats->mtx, "arena_stats",
	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
		return true;
	}
-#endif
	/* Memory is zeroed, so there is no need to clear stats. */
	return false;
}
static inline void
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_lock(tsdn, &arena_stats->mtx);
#endif
}
static inline void
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_unlock(tsdn, &arena_stats->mtx);
#endif
}
static inline uint64_t
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_u64(p, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
return *p;
#endif
}
static inline void
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
*p += x;
#endif
}
static inline void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
assert(r - x <= r);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
*p -= x;
assert(*p + x >= *p);
#endif
}
/*
* Non-atomically sets *dst += src. *dst needs external synchronization.
 * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
* the types here are atomic).
*/
static inline void
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
#else
*dst += src;
#endif
}
static inline size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_zu(p, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
return atomic_load_zu(p, ATOMIC_RELAXED);
#endif
}
static inline void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
#endif
}
static inline void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
assert(r - x <= r);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
#endif
}
/* Like the _u64 variant, needs an externally synchronized *dst. */
static inline void
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}
static inline void
arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
    szind_t szind, uint64_t nrequests) {
-	arena_stats_lock(tsdn, arena_stats);
	LOCKEDINT_MTX_LOCK(tsdn, arena_stats->mtx);
	arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
-	arena_stats_add_u64(tsdn, arena_stats, &lstats->nrequests, nrequests);
-	arena_stats_add_u64(tsdn, arena_stats, &lstats->nflushes, 1);
-	arena_stats_unlock(tsdn, arena_stats);
	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
	    &lstats->nrequests, nrequests);
	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
	    &lstats->nflushes, 1);
	LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
}

-static inline void
-arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
-	arena_stats_lock(tsdn, arena_stats);
-	arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
-	arena_stats_unlock(tsdn, arena_stats);
-}

#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
-#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
-#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_H

#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/counter.h"
#include "jemalloc/internal/ecache.h"
#include "jemalloc/internal/edata_cache.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/pa.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/ticker.h"
struct arena_decay_s {
/* Synchronizes all non-atomic fields. */
malloc_mutex_t mtx;
/*
* True if a thread is currently purging the extents associated with
* this decay structure.
*/
bool purging;
/*
* Approximate time in milliseconds from the creation of a set of unused
* dirty pages until an equivalent set of unused dirty pages is purged
* and/or reused.
*/
atomic_zd_t time_ms;
/* time / SMOOTHSTEP_NSTEPS. */
nstime_t interval;
/*
* Time at which the current decay interval logically started. We do
* not actually advance to a new epoch until sometime after it starts
* because of scheduling and computation delays, and it is even possible
* to completely skip epochs. In all cases, during epoch advancement we
* merge all relevant activity into the most recently recorded epoch.
*/
nstime_t epoch;
/* Deadline randomness generator. */
uint64_t jitter_state;
/*
* Deadline for current epoch. This is the sum of interval and per
* epoch jitter which is a uniform random variable in [0..interval).
* Epochs always advance by precise multiples of interval, but we
* randomize the deadline to reduce the likelihood of arenas purging in
* lockstep.
*/
nstime_t deadline;
/*
* Number of unpurged pages at beginning of current epoch. During epoch
* advancement we use the delta between arena->decay_*.nunpurged and
* extents_npages_get(&arena->extents_*) to determine how many dirty
* pages, if any, were generated.
*/
size_t nunpurged;
/*
* Trailing log of how many unused dirty pages were generated during
* each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
* element is the most recent epoch. Corresponding epoch times are
* relative to epoch.
*/
size_t backlog[SMOOTHSTEP_NSTEPS];
/*
* Pointer to associated stats. These stats are embedded directly in
* the arena's stats due to how stats structures are shared between the
* arena and ctl code.
*
* Synchronization: Same as associated arena's stats field. */
arena_stats_decay_t *stats;
/* Peak number of pages in associated extents. Used for debug only. */
uint64_t ceil_npages;
};
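/*
 * Sketch of the deadline-with-jitter computation described above
 * (illustration only; assumes jemalloc's nstime/prng helpers, and the real
 * logic lives in the decay code).  Epochs advance by exact multiples of
 * interval, but each deadline gets uniform jitter in [0..interval) so that
 * arenas don't purge in lockstep.
 */
static inline void
decay_deadline_init_sketch(arena_decay_t *decay) {
	nstime_copy(&decay->deadline, &decay->epoch);
	nstime_add(&decay->deadline, &decay->interval);
	if (atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED) > 0) {
		nstime_t jitter;
		nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
		    nstime_ns(&decay->interval)));
		nstime_add(&decay->deadline, &jitter);
	}
}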
struct arena_s {
	/*
	 * Number of threads currently assigned to this arena.  Each thread has
@@ -110,28 +53,10 @@ struct arena_s {
	 *
	 * Synchronization: tcache_ql_mtx.
	 */
-	ql_head(tcache_t)			tcache_ql;
	ql_head(tcache_slow_t)			tcache_ql;
	ql_head(cache_bin_array_descriptor_t)	cache_bin_array_descriptor_ql;
	malloc_mutex_t				tcache_ql_mtx;

-	/* Synchronization: internal. */
-	prof_accum_t		prof_accum;
-
-	/*
-	 * PRNG state for cache index randomization of large allocation base
-	 * pointers.
-	 *
-	 * Synchronization: atomic.
-	 */
-	atomic_zu_t		offset_state;
-
-	/*
-	 * Extent serial number generator state.
-	 *
-	 * Synchronization: atomic.
-	 */
-	atomic_zu_t		extent_sn_next;
	/*
	 * Represents a dss_prec_t, but atomically.
	 *
@@ -139,74 +64,23 @@
	 */
	atomic_u_t		dss_prec;
-	/*
-	 * Number of pages in active extents.
-	 *
-	 * Synchronization: atomic.
-	 */
-	atomic_zu_t		nactive;

	/*
	 * Extant large allocations.
	 *
	 * Synchronization: large_mtx.
	 */
-	extent_list_t		large;
	edata_list_active_t	large;
	/* Synchronizes all large allocation/update/deallocation. */
	malloc_mutex_t		large_mtx;

-	/*
-	 * Collections of extents that were previously allocated.  These are
-	 * used when allocating extents, in an attempt to re-use address space.
-	 *
-	 * Synchronization: internal.
-	 */
-	extents_t		extents_dirty;
-	extents_t		extents_muzzy;
-	extents_t		extents_retained;
	/* The page-level allocator shard this arena uses. */
	pa_shard_t		pa_shard;
-	/*
-	 * Decay-based purging state, responsible for scheduling extent state
-	 * transitions.
-	 *
-	 * Synchronization: internal.
-	 */
-	arena_decay_t		decay_dirty; /* dirty --> muzzy */
-	arena_decay_t		decay_muzzy; /* muzzy --> retained */
-
-	/*
-	 * Next extent size class in a growing series to use when satisfying a
-	 * request via the extent hooks (only if opt_retain).  This limits the
-	 * number of disjoint virtual memory ranges so that extent merging can
-	 * be effective even if multiple arenas' extent allocation requests are
-	 * highly interleaved.
-	 *
-	 * retain_grow_limit is the max allowed size ind to expand (unless the
-	 * required size is greater).  Default is no limit, and controlled
-	 * through mallctl only.
-	 *
-	 * Synchronization: extent_grow_mtx
-	 */
-	pszind_t		extent_grow_next;
-	pszind_t		retain_grow_limit;
-	malloc_mutex_t		extent_grow_mtx;
-
-	/*
-	 * Available extent structures that were allocated via
-	 * base_alloc_extent().
-	 *
-	 * Synchronization: extent_avail_mtx.
-	 */
-	extent_tree_t		extent_avail;
-	atomic_zu_t		extent_avail_cnt;
-	malloc_mutex_t		extent_avail_mtx;
-
-	/*
-	 * bins is used to store heaps of free regions.
-	 *
-	 * Synchronization: internal.
-	 */
-	bins_t			bins[SC_NBINS];
	/*
	 * A cached copy of base->ind.  This can get accessed on hot paths;
	 * looking it up in base requires an extra pointer hop / cache miss.
	 */
	unsigned		ind;
	/*
	 * Base allocator, from which arena metadata are allocated.
@@ -216,17 +90,12 @@ struct arena_s {
	 */
	base_t			*base;
	/* Used to determine uptime.  Read-only after initialization. */
	nstime_t		create_time;

	/*
	 * The arena is allocated alongside its bins; really this is a
	 * dynamically sized array determined by the binshard settings.
	 */
	bin_t			bins[0];
};

-/* Used in conjunction with tsd for fast arena-related context lookup. */
-struct arena_tdata_s {
-	ticker_t		decay_ticker;
-};
-
-/* Used to pass rtree lookup context down the path. */
-struct alloc_ctx_s {
-	szind_t szind;
-	bool slab;
-};

-#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#include "jemalloc/internal/bitmap.h"
struct arena_slab_data_s {
/* Per region allocated/deallocated bitmap. */
bitmap_t bitmap[BITMAP_GROUPS_MAX];
};
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */
@@ -3,21 +3,14 @@

#include "jemalloc/internal/sc.h"

-/* Maximum number of regions in one slab. */
-#define LG_SLAB_MAXREGS		(LG_PAGE - SC_LG_TINY_MIN)
-#define SLAB_MAXREGS		(1U << LG_SLAB_MAXREGS)

/* Default decay times in milliseconds. */
#define DIRTY_DECAY_MS_DEFAULT	ZD(10 * 1000)
#define MUZZY_DECAY_MS_DEFAULT	(0)
/* Number of event ticks between time checks. */
-#define DECAY_NTICKS_PER_UPDATE		1000
#define ARENA_DECAY_NTICKS_PER_UPDATE	1000

-typedef struct arena_slab_data_s arena_slab_data_t;
typedef struct arena_decay_s arena_decay_t;
typedef struct arena_s arena_t;
-typedef struct arena_tdata_s arena_tdata_t;
-typedef struct alloc_ctx_s alloc_ctx_t;

typedef enum {
	percpu_arena_mode_names_base = 0, /* Used for options processing. */
@@ -48,4 +41,18 @@ typedef enum {
 */
#define OVERSIZE_THRESHOLD_DEFAULT	(8 << 20)
struct arena_config_s {
/* extent hooks to be used for the arena */
extent_hooks_t *extent_hooks;
/*
* Use extent hooks for metadata (base) allocations when true.
*/
bool metadata_use_hooks;
};
typedef struct arena_config_s arena_config_t;
extern const arena_config_t arena_config_default;
#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */

@@ -51,6 +51,27 @@
#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
/*
* Another convenience -- simple atomic helper functions.
*/
#define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type, \
lg_size) \
JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
ATOMIC_INLINE void \
atomic_load_add_store_##short_type(atomic_##short_type##_t *a, \
type inc) { \
type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
type newval = oldval + inc; \
atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
} \
ATOMIC_INLINE void \
atomic_load_sub_store_##short_type(atomic_##short_type##_t *a, \
type inc) { \
type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
type newval = oldval - inc; \
atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
}
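/*
 * Usage sketch for the generated helpers (illustration only):
 * atomic_load_add_store_* does a plain load/modify/store rather than an
 * atomic RMW, so it is only safe for counters that already have external
 * synchronization (e.g. a stats mutex).
 */
static atomic_zu_t example_counter;

static inline void
example_counter_add(size_t x) {
	/* Assumes the owning mutex is held; cheaper than atomic_fetch_add_zu. */
	atomic_load_add_store_zu(&example_counter, x);
}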
/*
 * Not all platforms have 64-bit atomics.  If we do, this #define exposes that
 * fact.
@@ -67,18 +88,18 @@ JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
 */
JEMALLOC_GENERATE_ATOMICS(bool, b, 0)

-JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
-JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
-JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
-JEMALLOC_GENERATE_INT_ATOMICS(uint8_t, u8, 0)
-JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint8_t, u8, 0)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint32_t, u32, 2)

#ifdef JEMALLOC_ATOMIC_U64
-JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint64_t, u64, 3)
#endif

#undef ATOMIC_INLINE
@@ -12,8 +12,9 @@ extern background_thread_info_t *background_thread_info;

bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
bool background_threads_enable(tsd_t *tsd);
bool background_threads_disable(tsd_t *tsd);
-void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
-    arena_decay_t *decay, size_t npages_new);
bool background_thread_is_started(background_thread_info_t* info);
void background_thread_wakeup_early(background_thread_info_t *info,
    nstime_t *remaining_sleep);
void background_thread_prefork0(tsdn_t *tsdn);
void background_thread_prefork1(tsdn_t *tsdn);
void background_thread_postfork_parent(tsdn_t *tsdn);
@@ -27,6 +28,6 @@ extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);
#endif
bool background_thread_boot0(void);
-bool background_thread_boot1(tsdn_t *tsdn);
bool background_thread_boot1(tsdn_t *tsdn, base_t *base);

#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */
@@ -45,18 +45,4 @@ background_thread_indefinite_sleep(background_thread_info_t *info) {
	return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
}
JEMALLOC_ALWAYS_INLINE void
arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
bool is_background_thread) {
if (!background_thread_enabled() || is_background_thread) {
return;
}
background_thread_info_t *info =
arena_background_thread_info_get(arena);
if (background_thread_indefinite_sleep(info)) {
background_thread_interval_check(tsdn, arena,
&arena->decay_dirty, 0);
}
}
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */
@@ -11,6 +11,17 @@

#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT
#define DEFAULT_NUM_BACKGROUND_THREAD 4
/*
* These exist only as a transitional state. Eventually, deferral should be
* part of the PAI, and each implementation can indicate wait times with more
* specificity.
*/
#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_UNINITIALIZED (-2)
#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED 5000
#define BACKGROUND_THREAD_DEFERRED_MIN UINT64_C(0)
#define BACKGROUND_THREAD_DEFERRED_MAX UINT64_MAX
typedef enum {
	background_thread_stopped,
	background_thread_started,
@@ -48,6 +59,7 @@ struct background_thread_stats_s {
	size_t num_threads;
	uint64_t num_runs;
	nstime_t run_interval;
	mutex_prof_data_t max_counter_per_bg_thd;
};

typedef struct background_thread_stats_s background_thread_stats_t;
#ifndef JEMALLOC_INTERNAL_BASE_H
#define JEMALLOC_INTERNAL_BASE_H
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/mutex.h"
enum metadata_thp_mode_e {
metadata_thp_disabled = 0,
/*
* Lazily enable hugepage for metadata. To avoid high RSS caused by THP
* + low usage arena (i.e. THP becomes a significant percentage), the
* "auto" option only starts using THP after a base allocator used up
* the first THP region. Starting from the second hugepage (in a single
* arena), "auto" behaves the same as "always", i.e. madvise hugepage
* right away.
*/
metadata_thp_auto = 1,
metadata_thp_always = 2,
metadata_thp_mode_limit = 3
};
typedef enum metadata_thp_mode_e metadata_thp_mode_t;
#define METADATA_THP_DEFAULT metadata_thp_disabled
extern metadata_thp_mode_t opt_metadata_thp;
extern const char *metadata_thp_mode_names[];
/* Embedded at the beginning of every block of base-managed virtual memory. */
typedef struct base_block_s base_block_t;
struct base_block_s {
/* Total size of block's virtual memory mapping. */
size_t size;
/* Next block in list of base's blocks. */
base_block_t *next;
/* Tracks unused trailing space. */
edata_t edata;
};
typedef struct base_s base_t;
struct base_s {
/*
* User-configurable extent hook functions.
*/
ehooks_t ehooks;
/*
* User-configurable extent hook functions for metadata allocations.
*/
ehooks_t ehooks_base;
/* Protects base_alloc() and base_stats_get() operations. */
malloc_mutex_t mtx;
/* Using THP when true (metadata_thp auto mode). */
bool auto_thp_switched;
/*
* Most recent size class in the series of increasingly large base
* extents. Logarithmic spacing between subsequent allocations ensures
* that the total number of distinct mappings remains small.
*/
pszind_t pind_last;
/* Serial number generation state. */
size_t extent_sn_next;
/* Chain of all blocks associated with base. */
base_block_t *blocks;
/* Heap of extents that track unused trailing space within blocks. */
edata_heap_t avail[SC_NSIZES];
/* Stats, only maintained if config_stats. */
size_t allocated;
size_t resident;
size_t mapped;
/* Number of THP regions touched. */
size_t n_thp;
};
static inline unsigned
base_ind_get(const base_t *base) {
return ehooks_ind_get(&base->ehooks);
}
static inline bool
metadata_thp_enabled(void) {
return (opt_metadata_thp != metadata_thp_disabled);
}
base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind,
const extent_hooks_t *extent_hooks, bool metadata_use_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
ehooks_t *base_ehooks_get(base_t *base);
ehooks_t *base_ehooks_get_for_metadata(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);
#endif /* JEMALLOC_INTERNAL_BASE_H */
#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
#define JEMALLOC_INTERNAL_BASE_EXTERNS_H
extern metadata_thp_mode_t opt_metadata_thp;
extern const char *metadata_thp_mode_names[];
base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
extent_hooks_t *base_extent_hooks_get(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);
#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */
#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
#define JEMALLOC_INTERNAL_BASE_INLINES_H
static inline unsigned
base_ind_get(const base_t *base) {
return base->ind;
}
static inline bool
metadata_thp_enabled(void) {
return (opt_metadata_thp != metadata_thp_disabled);
}
#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
#define JEMALLOC_INTERNAL_BASE_STRUCTS_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sc.h"
/* Embedded at the beginning of every block of base-managed virtual memory. */
struct base_block_s {
/* Total size of block's virtual memory mapping. */
size_t size;
/* Next block in list of base's blocks. */
base_block_t *next;
/* Tracks unused trailing space. */
extent_t extent;
};
struct base_s {
/* Associated arena's index within the arenas array. */
unsigned ind;
/*
* User-configurable extent hook functions. Points to an
* extent_hooks_t.
*/
atomic_p_t extent_hooks;
/* Protects base_alloc() and base_stats_get() operations. */
malloc_mutex_t mtx;
/* Using THP when true (metadata_thp auto mode). */
bool auto_thp_switched;
/*
* Most recent size class in the series of increasingly large base
* extents. Logarithmic spacing between subsequent allocations ensures
* that the total number of distinct mappings remains small.
*/
pszind_t pind_last;
/* Serial number generation state. */
size_t extent_sn_next;
/* Chain of all blocks associated with base. */
base_block_t *blocks;
/* Heap of extents that track unused trailing space within blocks. */
extent_heap_t avail[SC_NSIZES];
/* Stats, only maintained if config_stats. */
size_t allocated;
size_t resident;
size_t mapped;
/* Number of THP regions touched. */
size_t n_thp;
};
#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */
#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
#define JEMALLOC_INTERNAL_BASE_TYPES_H
typedef struct base_block_s base_block_t;
typedef struct base_s base_t;
#define METADATA_THP_DEFAULT metadata_thp_disabled
/*
* In auto mode, arenas switch to huge pages for the base allocator on the
* second base block. a0 switches to thp on the 5th block (after 20 megabytes
* of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
*/
#define BASE_AUTO_THP_THRESHOLD 2
#define BASE_AUTO_THP_THRESHOLD_A0 5
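/*
 * Sketch of the auto-mode switch described above (simplified; the helper
 * name here is illustrative, not the real base.c function): flip to THP once
 * a base has allocated enough blocks, with a higher threshold for a0.
 */
static inline bool
base_should_switch_to_thp_sketch(base_t *base, size_t n_blocks) {
	size_t threshold = (base_ind_get(base) == 0) ?
	    BASE_AUTO_THP_THRESHOLD_A0 : BASE_AUTO_THP_THRESHOLD;
	return opt_metadata_thp == metadata_thp_auto &&
	    !base->auto_thp_switched && n_blocks >= threshold;
}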
typedef enum {
metadata_thp_disabled = 0,
/*
* Lazily enable hugepage for metadata. To avoid high RSS caused by THP
* + low usage arena (i.e. THP becomes a significant percentage), the
* "auto" option only starts using THP after a base allocator used up
* the first THP region. Starting from the second hugepage (in a single
* arena), "auto" behaves the same as "always", i.e. madvise hugepage
* right away.
*/
metadata_thp_auto = 1,
metadata_thp_always = 2,
metadata_thp_mode_limit = 3
} metadata_thp_mode_t;
#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
@@ -3,8 +3,7 @@

#include "jemalloc/internal/bin_stats.h"
#include "jemalloc/internal/bin_types.h"
-#include "jemalloc/internal/extent_types.h"
-#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sc.h"

@@ -12,74 +11,34 @@
/*
 * A bin contains a set of extents that are currently being used for slab
 * allocations.
 */
/*
* Read-only information associated with each element of arena_t's bins array
* is stored separately, partly to reduce memory usage (only one copy, rather
* than one per arena), but mainly to avoid false cacheline sharing.
*
* Each slab has the following layout:
*
* /--------------------\
* | region 0 |
* |--------------------|
* | region 1 |
* |--------------------|
* | ... |
* | ... |
* | ... |
* |--------------------|
* | region nregs-1 |
* \--------------------/
*/
typedef struct bin_info_s bin_info_t;
struct bin_info_s {
/* Size of regions in a slab for this bin's size class. */
size_t reg_size;
/* Total size of a slab for this bin's size class. */
size_t slab_size;
/* Total number of regions in a slab for this bin's size class. */
uint32_t nregs;
/* Number of sharded bins in each arena for this size class. */
uint32_t n_shards;
/*
* Metadata used to manipulate bitmaps for slabs associated with this
* bin.
*/
bitmap_info_t bitmap_info;
};
extern bin_info_t bin_infos[SC_NBINS];
typedef struct bin_s bin_t;
struct bin_s {
	/* All operations on bin_t fields require lock ownership. */
	malloc_mutex_t		lock;

-	/*
-	 * Bin statistics.  These get touched every time the lock is acquired,
-	 * so put them close by in the hopes of getting some cache locality.
-	 */
-	bin_stats_t		stats;
-
	/*
	 * Current slab being used to service allocations of this bin's size
	 * class.  slabcur is independent of slabs_{nonfull,full}; whenever
	 * slabcur is reassigned, the previous slab must be deallocated or
	 * inserted into slabs_{nonfull,full}.
	 */
-	extent_t		*slabcur;
	edata_t			*slabcur;

	/*
	 * Heap of non-full slabs.  This heap is used to assure that new
	 * allocations come from the non-full slab that is oldest/lowest in
	 * memory.
	 */
-	extent_heap_t		slabs_nonfull;
	edata_heap_t		slabs_nonfull;

	/* List used to track full slabs. */
-	extent_list_t		slabs_full;
	edata_list_active_t	slabs_full;

	/* Bin statistics. */
	bin_stats_t		stats;
};
/* A set of sharded bins of the same size class. */
@@ -92,7 +51,6 @@ struct bins_s {
void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]);
bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size,
    size_t end_size, size_t nshards);
-void bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);

/* Initializes a bin to empty.  Returns true on error. */
bool bin_init(bin_t *bin);
@@ -104,19 +62,20 @@ void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
/* Stats. */
static inline void
-bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
bin_stats_merge(tsdn_t *tsdn, bin_stats_data_t *dst_bin_stats, bin_t *bin) {
	malloc_mutex_lock(tsdn, &bin->lock);
	malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
-	dst_bin_stats->nmalloc += bin->stats.nmalloc;
-	dst_bin_stats->ndalloc += bin->stats.ndalloc;
-	dst_bin_stats->nrequests += bin->stats.nrequests;
-	dst_bin_stats->curregs += bin->stats.curregs;
-	dst_bin_stats->nfills += bin->stats.nfills;
-	dst_bin_stats->nflushes += bin->stats.nflushes;
-	dst_bin_stats->nslabs += bin->stats.nslabs;
-	dst_bin_stats->reslabs += bin->stats.reslabs;
-	dst_bin_stats->curslabs += bin->stats.curslabs;
-	dst_bin_stats->nonfull_slabs += bin->stats.nonfull_slabs;
	bin_stats_t *stats = &dst_bin_stats->stats_data;
	stats->nmalloc += bin->stats.nmalloc;
	stats->ndalloc += bin->stats.ndalloc;
	stats->nrequests += bin->stats.nrequests;
	stats->curregs += bin->stats.curregs;
	stats->nfills += bin->stats.nfills;
	stats->nflushes += bin->stats.nflushes;
	stats->nslabs += bin->stats.nslabs;
	stats->reslabs += bin->stats.reslabs;
	stats->curslabs += bin->stats.curslabs;
	stats->nonfull_slabs += bin->stats.nonfull_slabs;
	malloc_mutex_unlock(tsdn, &bin->lock);
}
#ifndef JEMALLOC_INTERNAL_BIN_INFO_H
#define JEMALLOC_INTERNAL_BIN_INFO_H
#include "jemalloc/internal/bitmap.h"
/*
* Read-only information associated with each element of arena_t's bins array
* is stored separately, partly to reduce memory usage (only one copy, rather
* than one per arena), but mainly to avoid false cacheline sharing.
*
* Each slab has the following layout:
*
* /--------------------\
* | region 0 |
* |--------------------|
* | region 1 |
* |--------------------|
* | ... |
* | ... |
* | ... |
* |--------------------|
* | region nregs-1 |
* \--------------------/
*/
typedef struct bin_info_s bin_info_t;
struct bin_info_s {
/* Size of regions in a slab for this bin's size class. */
size_t reg_size;
/* Total size of a slab for this bin's size class. */
size_t slab_size;
/* Total number of regions in a slab for this bin's size class. */
uint32_t nregs;
/* Number of sharded bins in each arena for this size class. */
uint32_t n_shards;
/*
* Metadata used to manipulate bitmaps for slabs associated with this
* bin.
*/
bitmap_info_t bitmap_info;
};
extern bin_info_t bin_infos[SC_NBINS];
void bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);
#endif /* JEMALLOC_INTERNAL_BIN_INFO_H */
@@ -47,8 +47,11 @@ struct bin_stats_s {

	/* Current size of nonfull slabs heap in this bin. */
	size_t nonfull_slabs;
};

typedef struct bin_stats_data_s bin_stats_data_t;
struct bin_stats_data_s {
	bin_stats_t stats_data;
	mutex_prof_data_t mutex_data;
};

#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */
@@ -3,7 +3,7 @@

#include "jemalloc/internal/sc.h"

-#define BIN_SHARDS_MAX	(1 << EXTENT_BITS_BINSHARD_WIDTH)
#define BIN_SHARDS_MAX	(1 << EDATA_BITS_BINSHARD_WIDTH)
#define N_BIN_SHARDS_DEFAULT	1

/* Used in TSD static initializer only.  Real init in arena_bind(). */
@@ -3,144 +3,383 @@

#include "jemalloc/internal/assert.h"

-#define BIT_UTIL_INLINE static inline
-
/* Sanity check. */
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
    || !defined(JEMALLOC_INTERNAL_FFS)
# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
#endif
/*
 * Unlike the builtins and posix ffs functions, our ffs requires a non-zero
 * input, and returns the position of the lowest bit set (as opposed to the
 * posix versions, which return 1 larger than that position and use a return
 * value of zero as a sentinel).  This tends to simplify logic in callers, and
 * allows for consistency with the builtins we build fls on top of.
 */
static inline unsigned
ffs_llu(unsigned long long x) {
util_assume(x != 0);
return JEMALLOC_INTERNAL_FFSLL(x) - 1;
}
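/* Worked example of the zero-based convention (illustration only). */
static inline void
ffs_semantics_example(void) {
	assert(ffs_llu(0x18) == 3);	/* POSIX ffsll(0x18) returns 4 */
	assert(ffs_lu(1UL) == 0);	/* bit 0, not 1 */
	/* ffs_llu(0) is undefined here; the POSIX functions return 0 instead. */
}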
-BIT_UTIL_INLINE unsigned
-ffs_llu(unsigned long long bitmap) {
-	return JEMALLOC_INTERNAL_FFSLL(bitmap);
-}
-
-BIT_UTIL_INLINE unsigned
-ffs_lu(unsigned long bitmap) {
-	return JEMALLOC_INTERNAL_FFSL(bitmap);
-}
-
-BIT_UTIL_INLINE unsigned
-ffs_u(unsigned bitmap) {
-	return JEMALLOC_INTERNAL_FFS(bitmap);
-}

static inline unsigned
ffs_lu(unsigned long x) {
	util_assume(x != 0);
	return JEMALLOC_INTERNAL_FFSL(x) - 1;
}

static inline unsigned
ffs_u(unsigned x) {
	util_assume(x != 0);
	return JEMALLOC_INTERNAL_FFS(x) - 1;
}

#define DO_FLS_SLOW(x, suffix) do {					\
	util_assume(x != 0);						\
	x |= (x >> 1);							\
x |= (x >> 2); \
x |= (x >> 4); \
x |= (x >> 8); \
x |= (x >> 16); \
if (sizeof(x) > 4) { \
/* \
* If sizeof(x) is 4, then the expression "x >> 32" \
* will generate compiler warnings even if the code \
* never executes. This circumvents the warning, and \
* gets compiled out in optimized builds. \
*/ \
int constant_32 = sizeof(x) * 4; \
x |= (x >> constant_32); \
} \
x++; \
if (x == 0) { \
return 8 * sizeof(x) - 1; \
} \
return ffs_##suffix(x) - 1; \
} while(0)
static inline unsigned
fls_llu_slow(unsigned long long x) {
DO_FLS_SLOW(x, llu);
}

-#ifdef JEMALLOC_INTERNAL_POPCOUNTL
-BIT_UTIL_INLINE unsigned
-popcount_lu(unsigned long bitmap) {
-	return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
-}
-#endif

static inline unsigned
fls_lu_slow(unsigned long x) {
DO_FLS_SLOW(x, lu);
}
static inline unsigned
fls_u_slow(unsigned x) {
DO_FLS_SLOW(x, u);
}
#undef DO_FLS_SLOW
#ifdef JEMALLOC_HAVE_BUILTIN_CLZ
static inline unsigned
fls_llu(unsigned long long x) {
util_assume(x != 0);
/*
* Note that the xor here is more naturally written as subtraction; the
* last bit set is the number of bits in the type minus the number of
* leading zero bits. But GCC implements that as:
* bsr edi, edi
* mov eax, 31
* xor edi, 31
* sub eax, edi
* If we write it as xor instead, then we get
* bsr eax, edi
* as desired.
*/
return (8 * sizeof(x) - 1) ^ __builtin_clzll(x);
}
static inline unsigned
fls_lu(unsigned long x) {
util_assume(x != 0);
return (8 * sizeof(x) - 1) ^ __builtin_clzl(x);
}
static inline unsigned
fls_u(unsigned x) {
util_assume(x != 0);
return (8 * sizeof(x) - 1) ^ __builtin_clz(x);
}
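/*
 * Worked instance of the xor identity (illustration only): for a 32-bit
 * unsigned, fls_u(0x10) = 31 ^ clz(0x10) = 31 ^ 27 = 4.
 */
static inline void
fls_example(void) {
	assert(fls_u(0x10) == 4);
	assert(fls_u(1) == 0);
	assert(fls_llu(1ULL << 63) == 63);
}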
#elif defined(_MSC_VER)
#if LG_SIZEOF_PTR == 3
#define DO_BSR64(bit, x) _BitScanReverse64(&bit, x)
#else
/*
* This never actually runs; we're just dodging a compiler error for the
* never-taken branch where sizeof(void *) == 8.
*/
#define DO_BSR64(bit, x) bit = 0; unreachable()
#endif
#define DO_FLS(x) do { \
if (x == 0) { \
return 8 * sizeof(x); \
} \
unsigned long bit; \
if (sizeof(x) == 4) { \
_BitScanReverse(&bit, (unsigned)x); \
return (unsigned)bit; \
} \
if (sizeof(x) == 8 && sizeof(void *) == 8) { \
DO_BSR64(bit, x); \
return (unsigned)bit; \
} \
if (sizeof(x) == 8 && sizeof(void *) == 4) { \
/* Dodge a compiler warning, as above. */ \
int constant_32 = sizeof(x) * 4; \
if (_BitScanReverse(&bit, \
(unsigned)(x >> constant_32))) { \
return 32 + (unsigned)bit; \
} else { \
_BitScanReverse(&bit, (unsigned)x); \
return (unsigned)bit; \
} \
} \
unreachable(); \
} while (0)
static inline unsigned
fls_llu(unsigned long long x) {
DO_FLS(x);
}
static inline unsigned
fls_lu(unsigned long x) {
DO_FLS(x);
}
static inline unsigned
fls_u(unsigned x) {
DO_FLS(x);
}
#undef DO_FLS
#undef DO_BSR64
#else
static inline unsigned
fls_llu(unsigned long long x) {
return fls_llu_slow(x);
}
static inline unsigned
fls_lu(unsigned long x) {
return fls_lu_slow(x);
}
static inline unsigned
fls_u(unsigned x) {
return fls_u_slow(x);
}
#endif
#if LG_SIZEOF_LONG_LONG > 3
# error "Haven't implemented popcount for 16-byte ints."
#endif
#define DO_POPCOUNT(x, type) do { \
/* \
* Algorithm from an old AMD optimization reference manual. \
* We're putting a little bit more work than you might expect \
	 * into the no-intrinsic case, since we only support the	\
	 * GCC intrinsics spelling of popcount (for now).  Detecting	\
	 * whether or not the popcount builtin is actually usable in	\
* MSVC is nontrivial. \
*/ \
\
type bmul = (type)0x0101010101010101ULL; \
\
/* \
* Replace each 2 bits with the sideways sum of the original \
* values. 0x5 = 0b0101. \
* \
* You might expect this to be: \
* x = (x & 0x55...) + ((x >> 1) & 0x55...). \
* That costs an extra mask relative to this, though. \
*/ \
x = x - ((x >> 1) & (0x55U * bmul)); \
	/* Replace each 4 bits with their sideways sum.  0x3 = 0b0011. */\
x = (x & (bmul * 0x33U)) + ((x >> 2) & (bmul * 0x33U)); \
/* \
* Replace each 8 bits with their sideways sum. Note that we \
* can't overflow within each 4-bit sum here, so we can skip \
* the initial mask. \
*/ \
x = (x + (x >> 4)) & (bmul * 0x0FU); \
/* \
* None of the partial sums in this multiplication (viewed in \
* base-256) can overflow into the next digit. So the least \
* significant byte of the product will be the least \
* significant byte of the original value, the second least \
* significant byte will be the sum of the two least \
* significant bytes of the original value, and so on. \
* Importantly, the high byte will be the byte-wise sum of all \
* the bytes of the original value. \
*/ \
x = x * bmul; \
x >>= ((sizeof(x) - 1) * 8); \
return (unsigned)x; \
} while(0)
static inline unsigned
popcount_u_slow(unsigned bitmap) {
DO_POPCOUNT(bitmap, unsigned);
}
static inline unsigned
popcount_lu_slow(unsigned long bitmap) {
DO_POPCOUNT(bitmap, unsigned long);
}
static inline unsigned
popcount_llu_slow(unsigned long long bitmap) {
DO_POPCOUNT(bitmap, unsigned long long);
}
#undef DO_POPCOUNT
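/* Sanity-check sketch (illustration only): the SWAR fallback's expected values. */
static inline void
popcount_slow_example(void) {
	assert(popcount_lu_slow(0UL) == 0);
	assert(popcount_lu_slow(0xFFUL) == 8);
	assert(popcount_llu_slow(0x8000000000000001ULL) == 2);
}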
static inline unsigned
popcount_u(unsigned bitmap) {
#ifdef JEMALLOC_INTERNAL_POPCOUNT
return JEMALLOC_INTERNAL_POPCOUNT(bitmap);
#else
return popcount_u_slow(bitmap);
#endif
}
static inline unsigned
popcount_lu(unsigned long bitmap) {
#ifdef JEMALLOC_INTERNAL_POPCOUNTL
	return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
#else
	return popcount_lu_slow(bitmap);
#endif
}

static inline unsigned
popcount_llu(unsigned long long bitmap) {
#ifdef JEMALLOC_INTERNAL_POPCOUNTLL
	return JEMALLOC_INTERNAL_POPCOUNTLL(bitmap);
#else
	return popcount_llu_slow(bitmap);
#endif
}
/*
 * Clears first unset bit in bitmap, and returns
 * place of bit.  bitmap *must not* be 0.
 */
static inline size_t
cfs_lu(unsigned long* bitmap) {
-	size_t bit = ffs_lu(*bitmap) - 1;
	util_assume(*bitmap != 0);
	size_t bit = ffs_lu(*bitmap);
	*bitmap ^= ZU(1) << bit;
	return bit;
}

static inline unsigned
ffs_zu(size_t x) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
	return ffs_u(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
	return ffs_lu(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
	return ffs_llu(x);
#else
#error No implementation for size_t ffs()
#endif
}
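/* Example (illustration only): cfs_lu clears and reports the lowest set bit. */
static inline void
cfs_example(void) {
	unsigned long bits = 0xAUL;	/* 0b1010 */
	size_t bit = cfs_lu(&bits);	/* bits becomes 0b1000 */
	assert(bit == 1 && bits == 0x8UL);
	bit = cfs_lu(&bits);
	assert(bit == 3 && bits == 0);
}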
static inline unsigned
fls_zu(size_t x) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
	return fls_u(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
	return fls_lu(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
	return fls_llu(x);
#else
#error No implementation for size_t fls()
#endif
}

static inline unsigned
ffs_u64(uint64_t x) {
#if LG_SIZEOF_LONG == 3
	return ffs_lu(x);
#elif LG_SIZEOF_LONG_LONG == 3
	return ffs_llu(x);
#else
#error No implementation for 64-bit ffs()
#endif
}
static inline unsigned
fls_u64(uint64_t x) {
#if LG_SIZEOF_LONG == 3
	return fls_lu(x);
#elif LG_SIZEOF_LONG_LONG == 3
	return fls_llu(x);
#else
#error No implementation for 64-bit fls()
#endif
}

static inline unsigned
ffs_u32(uint32_t x) {
#if LG_SIZEOF_INT == 2
	return ffs_u(x);
#else
#error No implementation for 32-bit ffs()
#endif
	return ffs_u(x);
}

static inline unsigned
fls_u32(uint32_t x) {
#if LG_SIZEOF_INT == 2
	return fls_u(x);
#else
#error No implementation for 32-bit fls()
#endif
	return fls_u(x);
}
static inline uint64_t
pow2_ceil_u64(uint64_t x) {
-#if (defined(__amd64__) || defined(__x86_64__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ))
	if (unlikely(x <= 1)) {
		return x;
	}
-	size_t msb_on_index;
-#if (defined(__amd64__) || defined(__x86_64__))
-	asm ("bsrq %1, %0"
-	    : "=r"(msb_on_index) // Outputs.
-	    : "r"(x-1) // Inputs.
-	    );
-#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
-	msb_on_index = (63 ^ __builtin_clzll(x - 1));
-#endif
	size_t msb_on_index = fls_u64(x - 1);
	/*
	 * Range-check; it's on the callers to ensure that the result of this
	 * call won't overflow.
	 */
	assert(msb_on_index < 63);
	return 1ULL << (msb_on_index + 1);
-#else
-	x--;
-	x |= x >> 1;
-	x |= x >> 2;
-	x |= x >> 4;
-	x |= x >> 8;
-	x |= x >> 16;
-	x |= x >> 32;
-	x++;
-	return x;
-#endif
}
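/* Example values (illustration only). */
static inline void
pow2_ceil_example(void) {
	assert(pow2_ceil_u64(17) == 32);
	assert(pow2_ceil_u64(32) == 32);	/* powers of two are fixed points */
	assert(pow2_ceil_u64(0) == 0);		/* x <= 1 is returned unchanged */
}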
static inline uint32_t
pow2_ceil_u32(uint32_t x) {
-#if ((defined(__i386__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ)) && (!defined(__s390__)))
	if (unlikely(x <= 1)) {
		return x;
	}
-	size_t msb_on_index;
-#if (defined(__i386__))
-	asm ("bsr %1, %0"
-	    : "=r"(msb_on_index) // Outputs.
-	    : "r"(x-1) // Inputs.
-	    );
-#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
-	msb_on_index = (31 ^ __builtin_clz(x - 1));
-#endif
	size_t msb_on_index = fls_u32(x - 1);
	/* As above. */
	assert(msb_on_index < 31);
	return 1U << (msb_on_index + 1);
-#else
-	x--;
-	x |= x >> 1;
-	x |= x >> 2;
-	x |= x >> 4;
-	x |= x >> 8;
-	x |= x >> 16;
-	x++;
-	return x;
-#endif
}
/* Compute the smallest power of 2 that is >= x. */
static inline size_t
pow2_ceil_zu(size_t x) {
#if (LG_SIZEOF_PTR == 3)
	return pow2_ceil_u64(x);
@@ -149,77 +388,21 @@ pow2_ceil_zu(size_t x) {
#endif
}
-#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-BIT_UTIL_INLINE unsigned
-lg_floor(size_t x) {
-	size_t ret;
-	assert(x != 0);
-
-	asm ("bsr %1, %0"
-	    : "=r"(ret) // Outputs.
-	    : "r"(x) // Inputs.
-	    );
-	assert(ret < UINT_MAX);
-	return (unsigned)ret;
-}
-#elif (defined(_MSC_VER))
-BIT_UTIL_INLINE unsigned
-lg_floor(size_t x) {
-	unsigned long ret;
-	assert(x != 0);
-#if (LG_SIZEOF_PTR == 3)
-	_BitScanReverse64(&ret, x);
-#elif (LG_SIZEOF_PTR == 2)
-	_BitScanReverse(&ret, x);
-#else
-#  error "Unsupported type size for lg_floor()"
-#endif
-	assert(ret < UINT_MAX);
-	return (unsigned)ret;
-}
-#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
-BIT_UTIL_INLINE unsigned
-lg_floor(size_t x) {
-	assert(x != 0);
-#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
-	return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x);
-#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
-	return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x);
-#else
-#  error "Unsupported type size for lg_floor()"
-#endif
-}
-#else
-BIT_UTIL_INLINE unsigned
-lg_floor(size_t x) {
-	assert(x != 0);
-	x |= (x >> 1);
-	x |= (x >> 2);
-	x |= (x >> 4);
-	x |= (x >> 8);
-	x |= (x >> 16);
-#if (LG_SIZEOF_PTR == 3)
-	x |= (x >> 32);
-#endif
-	if (x == SIZE_T_MAX) {
-		return (8 << LG_SIZEOF_PTR) - 1;
-	}
-	x++;
-	return ffs_zu(x) - 2;
-}
-#endif
static inline unsigned
lg_floor(size_t x) {
	util_assume(x != 0);
#if (LG_SIZEOF_PTR == 3)
	return fls_u64(x);
#else
	return fls_u32(x);
#endif
}

static inline unsigned
lg_ceil(size_t x) {
	return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1);
}
-
-#undef BIT_UTIL_INLINE
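/* Example values (illustration only): 2^3 <= 10 < 2^4. */
static inline void
lg_example(void) {
	assert(lg_floor(10) == 3);
	assert(lg_ceil(10) == 4);
	assert(lg_ceil(8) == 3);	/* exact powers: floor == ceil */
}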
/* A compile-time version of lg_floor and lg_ceil. */
#define LG_FLOOR_1(x) 0
#define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1))
#ifndef JEMALLOC_INTERNAL_BITMAP_H
#define JEMALLOC_INTERNAL_BITMAP_H

-#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/sc.h"

@@ -9,9 +8,9 @@ typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG

/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
-#if LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
#if SC_LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
/* Maximum bitmap bit count is determined by maximum regions per slab. */
-#  define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
#  define LG_BITMAP_MAXBITS SC_LG_SLAB_MAXREGS
#else
/* Maximum bitmap bit count is determined by number of extent size classes. */
#  define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES)
@@ -273,7 +272,7 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
		}
		return bitmap_ffu(bitmap, binfo, sib_base);
	}
-	bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
	bit += ((size_t)ffs_lu(group_masked)) <<
	    (lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
}
assert(bit >= min_bit);
@@ -285,9 +284,9 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
	    - 1);
	size_t bit;
	do {
-		bit = ffs_lu(g);
-		if (bit != 0) {
-			return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
		if (g != 0) {
			bit = ffs_lu(g);
			return (i << LG_BITMAP_GROUP_NBITS) + bit;
		}
		i++;
		g = bitmap[i];
@@ -308,20 +307,20 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
#ifdef BITMAP_USE_TREE
	i = binfo->nlevels - 1;
	g = bitmap[binfo->levels[i].group_offset];
-	bit = ffs_lu(g) - 1;
	bit = ffs_lu(g);
	while (i > 0) {
		i--;
		g = bitmap[binfo->levels[i].group_offset + bit];
-		bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
		bit = (bit << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
	}
#else
	i = 0;
	g = bitmap[0];
-	while ((bit = ffs_lu(g)) == 0) {
	while (g == 0) {
		i++;
		g = bitmap[i];
	}
-	bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
	bit = (i << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
#endif
	bitmap_set(bitmap, binfo, bit);
	return bit;
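/*
 * Usage sketch (assumes the bitmap_info_init/bitmap_init/bitmap_sfu API of
 * this header): hand out the lowest free index and mark it used.
 */
static inline void
bitmap_usage_sketch(void) {
	bitmap_info_t binfo;
	bitmap_info_init(&binfo, 100);		/* track 100 items */
	bitmap_t bits[BITMAP_GROUPS_MAX];
	bitmap_init(bits, &binfo, /* fill */ false);
	size_t first = bitmap_sfu(bits, &binfo);	/* 0 */
	size_t second = bitmap_sfu(bits, &binfo);	/* 1 */
	(void)first; (void)second;
}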
#ifndef JEMALLOC_INTERNAL_BUF_WRITER_H
#define JEMALLOC_INTERNAL_BUF_WRITER_H
/*
 * Note: when using the buffered writer, cbopaque is passed to write_cb only
 * when the buffer is flushed.  It would make a difference if cbopaque points
 * to something that's changing for each write_cb call, or something that
 * affects write_cb in a way dependent on the content of the output string.
 * However, the most typical usage in practice is that cbopaque points to
 * some option-like content for the write_cb, so it doesn't matter.
 */
typedef struct {
write_cb_t *write_cb;
void *cbopaque;
char *buf;
size_t buf_size;
size_t buf_end;
bool internal_buf;
} buf_writer_t;
bool buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer,
write_cb_t *write_cb, void *cbopaque, char *buf, size_t buf_len);
void buf_writer_flush(buf_writer_t *buf_writer);
write_cb_t buf_writer_cb;
void buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer);
typedef ssize_t (read_cb_t)(void *read_cbopaque, void *buf, size_t limit);
void buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
void *read_cbopaque);
#endif /* JEMALLOC_INTERNAL_BUF_WRITER_H */
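/*
 * Usage sketch (illustration only; a stdio callback is chosen for the
 * example): batch many small writes into one flush via buf_writer_cb, whose
 * opaque argument is the buf_writer_t itself.
 */
#include <stdio.h>

static void
example_write_cb(void *cbopaque, const char *s) {
	fputs(s, (FILE *)cbopaque);	/* cbopaque only seen at flush time */
}

static void
buf_writer_usage_sketch(tsdn_t *tsdn) {
	static char buf[4096];
	buf_writer_t bw;
	if (!buf_writer_init(tsdn, &bw, example_write_cb, stderr, buf,
	    sizeof(buf))) {
		buf_writer_cb(&bw, "hello ");
		buf_writer_cb(&bw, "world\n");
		buf_writer_terminate(tsdn, &bw);	/* flushes */
	}
}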