Commit b8beda3c authored by Oran Agra

Merge commit jemalloc 5.3.0

parents d659c734 6d23d3ac
#ifndef JEMALLOC_INTERNAL_HPA_H
#define JEMALLOC_INTERNAL_HPA_H
#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/hpa_hooks.h"
#include "jemalloc/internal/hpa_opts.h"
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/psset.h"
typedef struct hpa_central_s hpa_central_t;
struct hpa_central_s {
/*
* The mutex guarding most of the operations on the central data
* structure.
*/
malloc_mutex_t mtx;
/*
* Guards expansion of eden. We separate this from the regular mutex so
* that cheaper operations can still continue while we're doing the OS
* call.
*/
malloc_mutex_t grow_mtx;
/*
* Either NULL (if empty), or a hugepage-aligned region spanning some
* integer number of hugepages. We carve them off one at a time to
* satisfy new pageslab requests.
*
* Guarded by grow_mtx.
*/
void *eden;
size_t eden_len;
/* Source for metadata. */
base_t *base;
/* Number of grow operations done on this hpa_central_t. */
uint64_t age_counter;
/* The HPA hooks. */
hpa_hooks_t hooks;
};
typedef struct hpa_shard_nonderived_stats_s hpa_shard_nonderived_stats_t;
struct hpa_shard_nonderived_stats_s {
/*
* The number of times we've purged within a hugepage.
*
* Guarded by mtx.
*/
uint64_t npurge_passes;
/*
* The number of individual purge calls we perform (which should always
* be bigger than npurge_passes, since each pass purges at least one
* extent within a hugepage).
*
* Guarded by mtx.
*/
uint64_t npurges;
/*
* The number of times we've hugified a pageslab.
*
* Guarded by mtx.
*/
uint64_t nhugifies;
/*
* The number of times we've dehugified a pageslab.
*
* Guarded by mtx.
*/
uint64_t ndehugifies;
};
/* Completely derived; only used by CTL. */
typedef struct hpa_shard_stats_s hpa_shard_stats_t;
struct hpa_shard_stats_s {
psset_stats_t psset_stats;
hpa_shard_nonderived_stats_t nonderived_stats;
};
typedef struct hpa_shard_s hpa_shard_t;
struct hpa_shard_s {
/*
* pai must be the first member; we cast from a pointer to it to a
* pointer to the hpa_shard_t.
*/
pai_t pai;
/* The central allocator we get our hugepages from. */
hpa_central_t *central;
/* Protects most of this shard's state. */
malloc_mutex_t mtx;
/*
* Guards the shard's access to the central allocator (preventing
* multiple threads operating on this shard from accessing the central
* allocator).
*/
malloc_mutex_t grow_mtx;
/* The base metadata allocator. */
base_t *base;
/*
* This edata cache is the one we use when allocating a small extent
* from a pageslab. The pageslab itself comes from the centralized
* allocator, and so will use its edata_cache.
*/
edata_cache_fast_t ecf;
psset_t psset;
/*
* How many grow operations have occurred.
*
* Guarded by grow_mtx.
*/
uint64_t age_counter;
/* The arena ind we're associated with. */
unsigned ind;
/*
* Our emap. This is just a cache of the emap pointer in the associated
* hpa_central.
*/
emap_t *emap;
/* The configuration choices for this hpa shard. */
hpa_shard_opts_t opts;
/*
* How many pages have we started but not yet finished purging in this
* hpa shard.
*/
size_t npending_purge;
/*
* Those stats which are copied directly into the CTL-centric hpa shard
* stats.
*/
hpa_shard_nonderived_stats_t stats;
/*
* Last time we performed purge on this shard.
*/
nstime_t last_purge;
};
/*
* Whether or not the HPA can be used given the current configuration. This is
* not necessarily a guarantee that it backs its allocations by hugepages,
* just that it can function properly given the system it's running on.
*/
bool hpa_supported();
bool hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks);
bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
base_t *base, edata_cache_t *edata_cache, unsigned ind,
const hpa_shard_opts_t *opts);
void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
hpa_shard_stats_t *dst);
/*
* Notify the shard that we won't use it for allocations much longer. Due to
* the possibility of races, we don't actually prevent allocations; just flush
* and disable the embedded edata_cache_fast.
*/
void hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
bool deferral_allowed);
void hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard);
/*
* We share the fork ordering with the PA and arena prefork handling; that's why
* these are 3 and 4 rather than 0 and 1.
*/
void hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard);
#endif /* JEMALLOC_INTERNAL_HPA_H */
#ifndef JEMALLOC_INTERNAL_HPA_HOOKS_H
#define JEMALLOC_INTERNAL_HPA_HOOKS_H
typedef struct hpa_hooks_s hpa_hooks_t;
struct hpa_hooks_s {
void *(*map)(size_t size);
void (*unmap)(void *ptr, size_t size);
void (*purge)(void *ptr, size_t size);
void (*hugify)(void *ptr, size_t size);
void (*dehugify)(void *ptr, size_t size);
void (*curtime)(nstime_t *r_time, bool first_reading);
uint64_t (*ms_since)(nstime_t *r_time);
};
extern hpa_hooks_t hpa_hooks_default;
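/*
 * To make the hook surface concrete, here is an illustrative sketch (not part
 * of jemalloc) of what a Linux-flavored hooks table for the interface above
 * could look like. All names below are hypothetical, error handling is
 * elided, and the two time hooks are left NULL, so this is not usable as-is.
 */
#include <sys/mman.h>
static void *
example_map(size_t size) {
void *ret = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
return (ret == MAP_FAILED) ? NULL : ret;
}
static void
example_unmap(void *ptr, size_t size) {
munmap(ptr, size);
}
static void
example_purge(void *ptr, size_t size) {
/* Hand the pages back to the OS but keep the mapping. */
madvise(ptr, size, MADV_DONTNEED);
}
static void
example_hugify(void *ptr, size_t size) {
madvise(ptr, size, MADV_HUGEPAGE);
}
static void
example_dehugify(void *ptr, size_t size) {
madvise(ptr, size, MADV_NOHUGEPAGE);
}
static const hpa_hooks_t example_hooks = {
example_map, example_unmap, example_purge, example_hugify,
example_dehugify, NULL /* curtime */, NULL /* ms_since */
};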
#endif /* JEMALLOC_INTERNAL_HPA_HOOKS_H */
#ifndef JEMALLOC_INTERNAL_HPA_OPTS_H
#define JEMALLOC_INTERNAL_HPA_OPTS_H
#include "jemalloc/internal/fxp.h"
/*
* This file is morally part of hpa.h, but is split out for header-ordering
* reasons.
*/
typedef struct hpa_shard_opts_s hpa_shard_opts_t;
struct hpa_shard_opts_s {
/*
* The largest size we'll allocate out of the shard. For those
* allocations refused, the caller (in practice, the PA module) will
* fall back to the more general (for now) PAC, which can always handle
* any allocation request.
*/
size_t slab_max_alloc;
/*
* When the number of active bytes in a hugepage is >=
* hugification_threshold, we force hugify it.
*/
size_t hugification_threshold;
/*
* The HPA purges whenever the number of dirty pages exceeds dirty_mult *
* active_pages. This may be set to (fxp_t)-1 to disable purging.
*/
fxp_t dirty_mult;
/*
* Whether or not the PAI methods are allowed to defer work to a
* subsequent hpa_shard_do_deferred_work() call. Practically, this
* corresponds to background threads being enabled. We track this
* ourselves for encapsulation purposes.
*/
bool deferral_allowed;
/*
* How long a hugepage has to be a hugification candidate before it will
* actually get hugified.
*/
uint64_t hugify_delay_ms;
/*
* Minimum amount of time between purges.
*/
uint64_t min_purge_interval_ms;
};
#define HPA_SHARD_OPTS_DEFAULT { \
/* slab_max_alloc */ \
64 * 1024, \
/* hugification_threshold */ \
HUGEPAGE * 95 / 100, \
/* dirty_mult */ \
FXP_INIT_PERCENT(25), \
/* \
* deferral_allowed \
* \
* Really, this is always set by the arena during creation \
* or by an hpa_shard_set_deferral_allowed call, so the value \
* we put here doesn't matter. \
*/ \
false, \
/* hugify_delay_ms */ \
10 * 1000, \
/* min_purge_interval_ms */ \
5 * 1000 \
}
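/*
 * Worked example (illustrative only, assuming a typical 2 MiB HUGEPAGE): how
 * the defaults above translate into concrete numbers. The helper below is
 * hypothetical and exists only to show the arithmetic.
 */
static void
example_default_thresholds(void) {
size_t hugepage = 2 * 1024 * 1024;
/* hugification_threshold: hugify once >= 95% of the slab is active. */
size_t hugify_at = hugepage * 95 / 100; /* 1,992,294 bytes */
/* slab_max_alloc: requests larger than 64 KiB bypass the HPA shard. */
size_t slab_max = 64 * 1024;
/* dirty_mult = 25%: e.g. with 400 active pages, purge once ndirty > 100. */
size_t purge_at = 400 * 25 / 100;
(void)hugify_at; (void)slab_max; (void)purge_at;
}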
#endif /* JEMALLOC_INTERNAL_HPA_OPTS_H */
#ifndef JEMALLOC_INTERNAL_HPDATA_H
#define JEMALLOC_INTERNAL_HPDATA_H
#include "jemalloc/internal/fb.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/typed_list.h"
/*
* The metadata representation we use for extents in hugepages. While the PAC
* uses the edata_t to represent both active and inactive extents, the HP only
* uses the edata_t for active ones; instead, inactive extent state is tracked
* within hpdata associated with the enclosing hugepage-sized, hugepage-aligned
* region of virtual address space.
*
* An hpdata need not be "truly" backed by a hugepage (which is not necessarily
* an observable property of any given region of address space). It's just
* hugepage-sized and hugepage-aligned; it's *potentially* huge.
*/
typedef struct hpdata_s hpdata_t;
ph_structs(hpdata_age_heap, hpdata_t);
struct hpdata_s {
/*
* We likewise follow the edata convention of mangling names and forcing
* the use of accessors -- this lets us add some consistency checks on
* access.
*/
/*
* The address of the hugepage in question. This can't be named h_addr,
* since that conflicts with a macro defined in Windows headers.
*/
void *h_address;
/* Its age (measured in psset operations). */
uint64_t h_age;
/* Whether or not we think the hugepage is mapped that way by the OS. */
bool h_huge;
/*
* For some properties, we keep parallel sets of bools; h_foo_allowed
* and h_in_psset_foo_container. This is a decoupling mechanism to
* avoid bothering the hpa (which manages policies) from the psset
* (which is the mechanism used to enforce those policies). This allows
* all the container management logic to live in one place, without the
* HPA needing to know or care how that happens.
*/
/*
* Whether or not the hpdata is allowed to be used to serve allocations,
* and whether or not the psset is currently tracking it as such.
*/
bool h_alloc_allowed;
bool h_in_psset_alloc_container;
/*
* The same, but with purging. There's no corresponding
* h_in_psset_purge_container, because the psset (currently) always
* removes hpdatas from their containers during updates (to implement
* LRU for purging).
*/
bool h_purge_allowed;
/* And with hugifying. */
bool h_hugify_allowed;
/* When we became a hugification candidate. */
nstime_t h_time_hugify_allowed;
bool h_in_psset_hugify_container;
/* Whether or not a purge or hugify is currently happening. */
bool h_mid_purge;
bool h_mid_hugify;
/*
* Whether or not the hpdata is being updated in the psset (i.e. if
* there has been a psset_update_begin call issued without a matching
* psset_update_end call). Eventually this will expand to other types
* of updates.
*/
bool h_updating;
/* Whether or not the hpdata is in a psset. */
bool h_in_psset;
union {
/* When nonempty (and also nonfull), used by the psset bins. */
hpdata_age_heap_link_t age_link;
/*
* When empty (or not corresponding to any hugepage), list
* linkage.
*/
ql_elm(hpdata_t) ql_link_empty;
};
/*
* Linkage for the psset to track candidates for purging and hugifying.
*/
ql_elm(hpdata_t) ql_link_purge;
ql_elm(hpdata_t) ql_link_hugify;
/* The length of the largest contiguous sequence of inactive pages. */
size_t h_longest_free_range;
/* Number of active pages. */
size_t h_nactive;
/* A bitmap with bits set in the active pages. */
fb_group_t active_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
/*
* Number of dirty or active pages, and a bitmap tracking them. One
* way to think of this is as which pages are dirty from the OS's
* perspective.
*/
size_t h_ntouched;
/* The touched pages (using the same definition as above). */
fb_group_t touched_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
};
TYPED_LIST(hpdata_empty_list, hpdata_t, ql_link_empty)
TYPED_LIST(hpdata_purge_list, hpdata_t, ql_link_purge)
TYPED_LIST(hpdata_hugify_list, hpdata_t, ql_link_hugify)
ph_proto(, hpdata_age_heap, hpdata_t);
static inline void *
hpdata_addr_get(const hpdata_t *hpdata) {
return hpdata->h_address;
}
static inline void
hpdata_addr_set(hpdata_t *hpdata, void *addr) {
assert(HUGEPAGE_ADDR2BASE(addr) == addr);
hpdata->h_address = addr;
}
static inline uint64_t
hpdata_age_get(const hpdata_t *hpdata) {
return hpdata->h_age;
}
static inline void
hpdata_age_set(hpdata_t *hpdata, uint64_t age) {
hpdata->h_age = age;
}
static inline bool
hpdata_huge_get(const hpdata_t *hpdata) {
return hpdata->h_huge;
}
static inline bool
hpdata_alloc_allowed_get(const hpdata_t *hpdata) {
return hpdata->h_alloc_allowed;
}
static inline void
hpdata_alloc_allowed_set(hpdata_t *hpdata, bool alloc_allowed) {
hpdata->h_alloc_allowed = alloc_allowed;
}
static inline bool
hpdata_in_psset_alloc_container_get(const hpdata_t *hpdata) {
return hpdata->h_in_psset_alloc_container;
}
static inline void
hpdata_in_psset_alloc_container_set(hpdata_t *hpdata, bool in_container) {
assert(in_container != hpdata->h_in_psset_alloc_container);
hpdata->h_in_psset_alloc_container = in_container;
}
static inline bool
hpdata_purge_allowed_get(const hpdata_t *hpdata) {
return hpdata->h_purge_allowed;
}
static inline void
hpdata_purge_allowed_set(hpdata_t *hpdata, bool purge_allowed) {
assert(purge_allowed == false || !hpdata->h_mid_purge);
hpdata->h_purge_allowed = purge_allowed;
}
static inline bool
hpdata_hugify_allowed_get(const hpdata_t *hpdata) {
return hpdata->h_hugify_allowed;
}
static inline void
hpdata_allow_hugify(hpdata_t *hpdata, nstime_t now) {
assert(!hpdata->h_mid_hugify);
hpdata->h_hugify_allowed = true;
hpdata->h_time_hugify_allowed = now;
}
static inline nstime_t
hpdata_time_hugify_allowed(hpdata_t *hpdata) {
return hpdata->h_time_hugify_allowed;
}
static inline void
hpdata_disallow_hugify(hpdata_t *hpdata) {
hpdata->h_hugify_allowed = false;
}
static inline bool
hpdata_in_psset_hugify_container_get(const hpdata_t *hpdata) {
return hpdata->h_in_psset_hugify_container;
}
static inline void
hpdata_in_psset_hugify_container_set(hpdata_t *hpdata, bool in_container) {
assert(in_container != hpdata->h_in_psset_hugify_container);
hpdata->h_in_psset_hugify_container = in_container;
}
static inline bool
hpdata_mid_purge_get(const hpdata_t *hpdata) {
return hpdata->h_mid_purge;
}
static inline void
hpdata_mid_purge_set(hpdata_t *hpdata, bool mid_purge) {
assert(mid_purge != hpdata->h_mid_purge);
hpdata->h_mid_purge = mid_purge;
}
static inline bool
hpdata_mid_hugify_get(const hpdata_t *hpdata) {
return hpdata->h_mid_hugify;
}
static inline void
hpdata_mid_hugify_set(hpdata_t *hpdata, bool mid_hugify) {
assert(mid_hugify != hpdata->h_mid_hugify);
hpdata->h_mid_hugify = mid_hugify;
}
static inline bool
hpdata_changing_state_get(const hpdata_t *hpdata) {
return hpdata->h_mid_purge || hpdata->h_mid_hugify;
}
static inline bool
hpdata_updating_get(const hpdata_t *hpdata) {
return hpdata->h_updating;
}
static inline void
hpdata_updating_set(hpdata_t *hpdata, bool updating) {
assert(updating != hpdata->h_updating);
hpdata->h_updating = updating;
}
static inline bool
hpdata_in_psset_get(const hpdata_t *hpdata) {
return hpdata->h_in_psset;
}
static inline void
hpdata_in_psset_set(hpdata_t *hpdata, bool in_psset) {
assert(in_psset != hpdata->h_in_psset);
hpdata->h_in_psset = in_psset;
}
static inline size_t
hpdata_longest_free_range_get(const hpdata_t *hpdata) {
return hpdata->h_longest_free_range;
}
static inline void
hpdata_longest_free_range_set(hpdata_t *hpdata, size_t longest_free_range) {
assert(longest_free_range <= HUGEPAGE_PAGES);
hpdata->h_longest_free_range = longest_free_range;
}
static inline size_t
hpdata_nactive_get(hpdata_t *hpdata) {
return hpdata->h_nactive;
}
static inline size_t
hpdata_ntouched_get(hpdata_t *hpdata) {
return hpdata->h_ntouched;
}
static inline size_t
hpdata_ndirty_get(hpdata_t *hpdata) {
return hpdata->h_ntouched - hpdata->h_nactive;
}
static inline size_t
hpdata_nretained_get(hpdata_t *hpdata) {
return HUGEPAGE_PAGES - hpdata->h_ntouched;
}
static inline void
hpdata_assert_empty(hpdata_t *hpdata) {
assert(fb_empty(hpdata->active_pages, HUGEPAGE_PAGES));
assert(hpdata->h_nactive == 0);
}
/*
* Only used in tests, and in hpdata_assert_consistent, below. Verifies some
* consistency properties of the hpdata (e.g. that cached counts of page stats
* match computed ones).
*/
static inline bool
hpdata_consistent(hpdata_t *hpdata) {
if (fb_urange_longest(hpdata->active_pages, HUGEPAGE_PAGES)
!= hpdata_longest_free_range_get(hpdata)) {
return false;
}
if (fb_scount(hpdata->active_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)
!= hpdata->h_nactive) {
return false;
}
if (fb_scount(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)
!= hpdata->h_ntouched) {
return false;
}
if (hpdata->h_ntouched < hpdata->h_nactive) {
return false;
}
if (hpdata->h_huge && hpdata->h_ntouched != HUGEPAGE_PAGES) {
return false;
}
if (hpdata_changing_state_get(hpdata)
&& ((hpdata->h_purge_allowed) || hpdata->h_hugify_allowed)) {
return false;
}
if (hpdata_hugify_allowed_get(hpdata)
!= hpdata_in_psset_hugify_container_get(hpdata)) {
return false;
}
return true;
}
static inline void
hpdata_assert_consistent(hpdata_t *hpdata) {
assert(hpdata_consistent(hpdata));
}
static inline bool
hpdata_empty(hpdata_t *hpdata) {
return hpdata->h_nactive == 0;
}
static inline bool
hpdata_full(hpdata_t *hpdata) {
return hpdata->h_nactive == HUGEPAGE_PAGES;
}
void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age);
/*
* Given an hpdata which can serve an allocation request, pick and reserve an
* offset within that allocation.
*/
void *hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz);
void hpdata_unreserve(hpdata_t *hpdata, void *begin, size_t sz);
/*
* The hpdata_purge_state_t allows grabbing the metadata required to purge
* subranges of a hugepage while holding a lock, dropping the lock during the
* actual purging of them, and reacquiring it to update the metadata again.
*/
typedef struct hpdata_purge_state_s hpdata_purge_state_t;
struct hpdata_purge_state_s {
size_t npurged;
size_t ndirty_to_purge;
fb_group_t to_purge[FB_NGROUPS(HUGEPAGE_PAGES)];
size_t next_purge_search_begin;
};
/*
* Initializes purge state. The access to hpdata must be externally
* synchronized with other hpdata_* calls.
*
* You can tell whether or not a thread is purging or hugifying a given hpdata
* via hpdata_changing_state_get(hpdata). Racing hugification or purging
* operations aren't allowed.
*
* Once you begin purging, you have to follow through and call hpdata_purge_next
* until you're done, and then call hpdata_purge_end. Allocating out of an
* hpdata undergoing purging is not allowed.
*
* Returns the number of dirty pages that will be purged.
*/
size_t hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state);
/*
* If there are more extents to purge, sets *r_purge_addr and *r_purge_size to
* the address and size of the next range to purge, and returns true.
* Otherwise, returns false to indicate that we're done.
*
* This requires exclusive access to the purge state, but *not* to the hpdata.
* In particular, unreserve calls are allowed while purging (i.e. you can dalloc
* into one part of the hpdata while purging a different part).
*/
bool hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
void **r_purge_addr, size_t *r_purge_size);
/*
* Updates the hpdata metadata after all purging is done. Needs external
* synchronization.
*/
void hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state);
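/*
 * A minimal sketch (illustrative, not jemalloc code) of the calling sequence
 * the three purge functions above expect; locking and the actual OS purge
 * call are elided, and the function name is hypothetical.
 */
static void
example_purge_everything(hpdata_t *hpdata) {
hpdata_purge_state_t purge_state;
size_t ndirty = hpdata_purge_begin(hpdata, &purge_state);
(void)ndirty;
void *purge_addr;
size_t purge_size;
while (hpdata_purge_next(hpdata, &purge_state, &purge_addr, &purge_size)) {
/* e.g. hooks->purge(purge_addr, purge_size), with the lock dropped. */
}
hpdata_purge_end(hpdata, &purge_state);
}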
void hpdata_hugify(hpdata_t *hpdata);
void hpdata_dehugify(hpdata_t *hpdata);
#endif /* JEMALLOC_INTERNAL_HPDATA_H */
#ifndef JEMALLOC_INTERNAL_INSPECT_H
#define JEMALLOC_INTERNAL_INSPECT_H
/*
* This module contains the heap introspection capabilities. For now they are
* exposed purely through mallctl APIs in the experimental namespace, but this
* may change over time.
*/
/*
* The following two structs are for experimental purposes. See
* experimental_utilization_query_ctl and
* experimental_utilization_batch_query_ctl in src/ctl.c.
*/
typedef struct inspect_extent_util_stats_s inspect_extent_util_stats_t;
struct inspect_extent_util_stats_s {
size_t nfree;
size_t nregs;
size_t size;
};
typedef struct inspect_extent_util_stats_verbose_s
inspect_extent_util_stats_verbose_t;
struct inspect_extent_util_stats_verbose_s {
void *slabcur_addr;
size_t nfree;
size_t nregs;
size_t size;
size_t bin_nfree;
size_t bin_nregs;
};
void inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
size_t *nfree, size_t *nregs, size_t *size);
void inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
size_t *nfree, size_t *nregs, size_t *size,
size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr);
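/*
 * Illustrative sketch of a direct caller of the utilization query above; in
 * practice these are reached through the experimental mallctl namespace. The
 * wrapper name is hypothetical.
 */
static void
example_inspect(tsdn_t *tsdn, const void *ptr) {
size_t nfree, nregs, size;
inspect_extent_util_stats_get(tsdn, ptr, &nfree, &nregs, &size);
/* nregs - nfree regions of the extent containing ptr are in use. */
(void)nfree; (void)nregs; (void)size;
}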
#endif /* JEMALLOC_INTERNAL_INSPECT_H */
@@ -5,6 +5,7 @@
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
# include "msvc_compat/strings.h"
# ifdef _WIN64
# if LG_VADDR <= 32
# error Generate the headers using x64 vcargs
@@ -31,8 +32,12 @@
# include <sys/uio.h>
# endif
# include <pthread.h>
# if defined(__FreeBSD__) || defined(__DragonFly__)
# include <pthread_np.h>
# include <sched.h>
# if defined(__FreeBSD__)
# define cpu_set_t cpuset_t
# endif
# endif
# include <signal.h>
# ifdef JEMALLOC_OS_UNFAIR_LOCK
@@ -91,4 +96,13 @@ isblank(int c) {
#endif
#include <fcntl.h>
/*
* The Win32 midl compiler has #define small char; we don't use midl, but
* "small" is a nice identifier to have available when talking about size
* classes.
*/
#ifdef small
# undef small
#endif
#endif /* JEMALLOC_INTERNAL_H */
@@ -85,6 +85,12 @@
/* Defined if pthread_setname_np(3) is available. */
#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
/* Defined if pthread_getname_np(3) is available. */
#undef JEMALLOC_HAVE_PTHREAD_GETNAME_NP
/* Defined if pthread_get_name_np(3) is available. */
#undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
@@ -100,6 +106,11 @@
*/
#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
/*
* Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
*/
#undef JEMALLOC_HAVE_CLOCK_REALTIME
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
@@ -162,6 +173,9 @@
/* Support utrace(2)-based tracing. */
#undef JEMALLOC_UTRACE
/* Support utrace(2)-based tracing (label based signature). */
#undef JEMALLOC_UTRACE_LABEL
/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC
@@ -177,6 +191,9 @@
/* One page is 2^LG_PAGE bytes. */
#undef LG_PAGE
/* Maximum number of regions in a slab. */
#undef CONFIG_LG_SLAB_MAXREGS
/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
@@ -290,12 +307,41 @@
*/
#undef JEMALLOC_MADVISE_DONTDUMP
/*
* Defined if MADV_[NO]CORE is supported as an argument to madvise.
*/
#undef JEMALLOC_MADVISE_NOCORE
/* Defined if mprotect(2) is available. */
#undef JEMALLOC_HAVE_MPROTECT
/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
#undef JEMALLOC_THP
/* Defined if posix_madvise is available. */
#undef JEMALLOC_HAVE_POSIX_MADVISE
/*
* Method for purging unused pages using posix_madvise.
*
* posix_madvise(..., POSIX_MADV_DONTNEED)
*/
#undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS
/*
* Defined if memcntl page admin call is supported
*/
#undef JEMALLOC_HAVE_MEMCNTL
/*
* Defined if malloc_size is supported
*/
#undef JEMALLOC_HAVE_MALLOC_SIZE
/* Define if operating system has alloca.h header. */
#undef JEMALLOC_HAS_ALLOCA_H
@@ -363,4 +409,19 @@
/* Performs additional safety checks when defined. */
#undef JEMALLOC_OPT_SAFETY_CHECKS
/* Is C++ support being built? */
#undef JEMALLOC_ENABLE_CXX
/* Performs additional size checks when defined. */
#undef JEMALLOC_OPT_SIZE_CHECKS
/* Allows sampled junk and stash for checking use-after-free when defined. */
#undef JEMALLOC_UAF_DETECTION
/* Darwin VM_MAKE_TAG support */
#undef JEMALLOC_HAVE_VM_MAKE_TAG
/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
#undef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@@ -2,7 +2,10 @@
#define JEMALLOC_INTERNAL_EXTERNS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/hpa_opts.h"
#include "jemalloc/internal/sec_opts.h"
#include "jemalloc/internal/tsd_types.h" #include "jemalloc/internal/tsd_types.h"
#include "jemalloc/internal/nstime.h"
/* TSD checks this to set thread local slow state accordingly. */
extern bool malloc_slow;
@@ -10,14 +13,30 @@ extern bool malloc_slow;
/* Run-time options. */
extern bool opt_abort;
extern bool opt_abort_conf;
extern bool opt_trust_madvise;
extern bool opt_confirm_conf;
extern bool opt_hpa;
extern hpa_shard_opts_t opt_hpa_opts;
extern sec_opts_t opt_hpa_sec_opts;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern void (*junk_free_callback)(void *ptr, size_t size);
extern void (*junk_alloc_callback)(void *ptr, size_t size);
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_experimental_infallible_new;
extern bool opt_zero;
extern unsigned opt_narenas;
extern zero_realloc_action_t opt_zero_realloc_action;
extern malloc_init_t malloc_init_state;
extern const char *zero_realloc_mode_names[];
extern atomic_zu_t zero_realloc_count;
extern bool opt_cache_oblivious;
/* Escape free-fastpath when ptr & mask == 0 (for sanitization purpose). */
extern uintptr_t san_cache_bin_nonfast_mask;
/* Number of CPUs. */
extern unsigned ncpus;
@@ -41,17 +60,16 @@ void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
void arena_set(unsigned ind, arena_t *arena);
unsigned narenas_total_get(void);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
size_t batch_alloc(void **ptrs, size_t num, size_t size, int flags);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
bool malloc_initialized(void);
void je_sdallocx_noflags(void *ptr, size_t size);
void *malloc_default(size_t size);
#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
@@ -10,7 +10,7 @@
* structs, externs, and inlines), and included each header file multiple times
* in this file, picking out the portion we want on each pass using the
* following #defines:
* JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
* types.
* JEMALLOC_H_STRUCTS : Data structures.
* JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
@@ -40,8 +40,6 @@
/* TYPES */
/******************************************************************************/
#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/base_types.h"
#include "jemalloc/internal/arena_types.h" #include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/tcache_types.h" #include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/prof_types.h" #include "jemalloc/internal/prof_types.h"
...@@ -50,11 +48,8 @@ ...@@ -50,11 +48,8 @@
/* STRUCTS */ /* STRUCTS */
/******************************************************************************/ /******************************************************************************/
#include "jemalloc/internal/arena_structs_a.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/base_structs.h"
#include "jemalloc/internal/prof_structs.h" #include "jemalloc/internal/prof_structs.h"
#include "jemalloc/internal/arena_structs_b.h" #include "jemalloc/internal/arena_structs.h"
#include "jemalloc/internal/tcache_structs.h" #include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/background_thread_structs.h" #include "jemalloc/internal/background_thread_structs.h"
...@@ -63,8 +58,6 @@ ...@@ -63,8 +58,6 @@
/******************************************************************************/ /******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_externs.h" #include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/extent_externs.h"
#include "jemalloc/internal/base_externs.h"
#include "jemalloc/internal/arena_externs.h" #include "jemalloc/internal/arena_externs.h"
#include "jemalloc/internal/large_externs.h" #include "jemalloc/internal/large_externs.h"
#include "jemalloc/internal/tcache_externs.h" #include "jemalloc/internal/tcache_externs.h"
...@@ -76,19 +69,16 @@ ...@@ -76,19 +69,16 @@
/******************************************************************************/ /******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_inlines_a.h" #include "jemalloc/internal/jemalloc_internal_inlines_a.h"
#include "jemalloc/internal/base_inlines.h"
/*
* Include portions of arena code interleaved with tcache code in order to
* resolve circular dependencies.
*/
#include "jemalloc/internal/prof_inlines_a.h"
#include "jemalloc/internal/arena_inlines_a.h" #include "jemalloc/internal/arena_inlines_a.h"
#include "jemalloc/internal/extent_inlines.h"
#include "jemalloc/internal/jemalloc_internal_inlines_b.h" #include "jemalloc/internal/jemalloc_internal_inlines_b.h"
#include "jemalloc/internal/tcache_inlines.h" #include "jemalloc/internal/tcache_inlines.h"
#include "jemalloc/internal/arena_inlines_b.h" #include "jemalloc/internal/arena_inlines_b.h"
#include "jemalloc/internal/jemalloc_internal_inlines_c.h" #include "jemalloc/internal/jemalloc_internal_inlines_c.h"
#include "jemalloc/internal/prof_inlines_b.h" #include "jemalloc/internal/prof_inlines.h"
#include "jemalloc/internal/background_thread_inlines.h" #include "jemalloc/internal/background_thread_inlines.h"
#endif /* JEMALLOC_INTERNAL_INCLUDES_H */ #endif /* JEMALLOC_INTERNAL_INCLUDES_H */
@@ -56,31 +56,6 @@ percpu_arena_ind_limit(percpu_arena_mode_t mode) {
}
}
static inline arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
arena_tdata_t *tdata;
arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
if (unlikely(arenas_tdata == NULL)) {
/* arenas_tdata hasn't been initialized yet. */
return arena_tdata_get_hard(tsd, ind);
}
if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
/*
* ind is invalid, cache is old (too small), or tdata to be
* initialized.
*/
return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
NULL);
}
tdata = &arenas_tdata[ind];
if (likely(tdata != NULL) || !refresh_if_missing) {
return tdata;
}
return arena_tdata_get_hard(tsd, ind);
}
static inline arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
arena_t *ret;
@@ -90,36 +65,12 @@ arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
if (unlikely(ret == NULL)) {
if (init_if_missing) {
ret = arena_init(tsdn, ind, &arena_config_default);
}
}
return ret;
}
static inline ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind) {
arena_tdata_t *tdata;
tdata = arena_tdata_get(tsd, ind, true);
if (unlikely(tdata == NULL)) {
return NULL;
}
return &tdata->decay_ticker;
}
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
assert(binind < SC_NBINS);
return &tcache->bins_small[binind];
}
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
assert(binind >= SC_NBINS &&binind < nhbins);
return &tcache->bins_large[binind - SC_NBINS];
}
JEMALLOC_ALWAYS_INLINE bool
tcache_available(tsd_t *tsd) {
/*
@@ -129,9 +80,9 @@ tcache_available(tsd_t *tsd) {
*/
if (likely(tsd_tcache_enabled_get(tsd))) {
/* Associated arena == NULL implies tcache init in progress. */
if (config_debug && tsd_tcache_slowp_get(tsd)->arena != NULL) {
tcache_assert_initialized(tsd_tcachep_get(tsd));
}
return true;
}
@@ -147,28 +98,25 @@ tcache_get(tsd_t *tsd) {
return tsd_tcachep_get(tsd);
}
JEMALLOC_ALWAYS_INLINE tcache_slow_t *
tcache_slow_get(tsd_t *tsd) {
if (!tcache_available(tsd)) {
return NULL;
}
return tsd_tcache_slowp_get(tsd);
}
static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
/* arena is the current context. Reentry from a0 is not allowed. */
assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
tsd_pre_reentrancy_raw(tsd);
bool fast = tsd_fast(tsd);
assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
++*tsd_reentrancy_levelp_get(tsd);
if (fast) {
/* Prepare slow path for reentrancy. */
tsd_slow_update(tsd);
assert(tsd_state_get(tsd) == tsd_state_nominal_slow);
}
}
static inline void
post_reentrancy(tsd_t *tsd) {
tsd_post_reentrancy_raw(tsd);
assert(*reentrancy_level > 0);
if (--*reentrancy_level == 0) {
tsd_slow_update(tsd);
}
}
#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
#ifndef JEMALLOC_INTERNAL_INLINES_B_H
#define JEMALLOC_INTERNAL_INLINES_B_H
#include "jemalloc/internal/extent.h"
static inline void
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
assert(have_percpu_arena);
arena_t *oldarena = tsd_arena_get(tsd);
assert(oldarena != NULL);
unsigned oldind = arena_ind_get(oldarena);
if (oldind != cpu) {
unsigned newind = cpu;
arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
assert(newarena != NULL);
/* Set new arena/tcache associations. */
arena_migrate(tsd, oldarena, newarena);
tcache_t *tcache = tcache_get(tsd);
if (tcache != NULL) {
tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow,
tcache, newarena);
}
}
}
/* Choose an arena based on a per-thread value. */
static inline arena_t *
@@ -22,18 +46,19 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
ret = arena_choose_hard(tsd, internal);
assert(ret);
if (tcache_available(tsd)) {
tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
tcache_t *tcache = tsd_tcachep_get(tsd);
if (tcache_slow->arena != NULL) {
/* See comments in tsd_tcache_data_init().*/
assert(tcache_slow->arena ==
arena_get(tsd_tsdn(tsd), 0, false));
if (tcache_slow->arena != ret) {
tcache_arena_reassociate(tsd_tsdn(tsd),
tcache_slow, tcache, ret);
}
} else {
tcache_arena_associate(tsd_tsdn(tsd),
tcache_slow, tcache, ret);
}
}
}
@@ -75,13 +100,4 @@ arena_is_auto(arena_t *arena) {
return (arena_ind_get(arena) < manual_arena_base);
}
JEMALLOC_ALWAYS_INLINE extent_t *
iealloc(tsdn_t *tsdn, const void *ptr) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
}
#endif /* JEMALLOC_INTERNAL_INLINES_B_H */
@@ -3,7 +3,9 @@
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/log.h"
#include "jemalloc/internal/sz.h" #include "jemalloc/internal/sz.h"
#include "jemalloc/internal/thread_event.h"
#include "jemalloc/internal/witness.h" #include "jemalloc/internal/witness.h"
/* /*
...@@ -101,8 +103,8 @@ ivsalloc(tsdn_t *tsdn, const void *ptr) { ...@@ -101,8 +103,8 @@ ivsalloc(tsdn_t *tsdn, const void *ptr) {
} }
JEMALLOC_ALWAYS_INLINE void JEMALLOC_ALWAYS_INLINE void
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx, idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
bool is_internal, bool slow_path) { emap_alloc_ctx_t *alloc_ctx, bool is_internal, bool slow_path) {
assert(ptr != NULL); assert(ptr != NULL);
assert(!is_internal || tcache == NULL); assert(!is_internal || tcache == NULL);
assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr))); assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
...@@ -125,7 +127,7 @@ idalloc(tsd_t *tsd, void *ptr) { ...@@ -125,7 +127,7 @@ idalloc(tsd_t *tsd, void *ptr) {
JEMALLOC_ALWAYS_INLINE void JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
alloc_ctx_t *alloc_ctx, bool slow_path) { emap_alloc_ctx_t *alloc_ctx, bool slow_path) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0); WITNESS_RANK_CORE, 0);
arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path); arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
...@@ -219,6 +221,122 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, ...@@ -219,6 +221,122 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
newsize); newsize);
} }
JEMALLOC_ALWAYS_INLINE void
fastpath_success_finish(tsd_t *tsd, uint64_t allocated_after,
cache_bin_t *bin, void *ret) {
thread_allocated_set(tsd, allocated_after);
if (config_stats) {
bin->tstats.nrequests++;
}
LOG("core.malloc.exit", "result: %p", ret);
}
JEMALLOC_ALWAYS_INLINE bool
malloc_initialized(void) {
return (malloc_init_state == malloc_init_initialized);
}
/*
* malloc() fastpath. Included here so that we can inline it into operator new;
* function call overhead there is non-negligible as a fraction of total CPU in
* allocation-heavy C++ programs. We take the fallback alloc to allow malloc
* (which can return NULL) to differ in its behavior from operator new (which
* can't). It matches the signature of malloc / operator new so that we can
* tail-call the fallback allocator, allowing us to avoid setting up the call
* frame in the common case.
*
* Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit
* tcache. If either of these is false, we tail-call to the slowpath,
* malloc_default(). Tail-calling is used to avoid any caller-saved
* registers.
*
* fastpath supports ticker and profiling, both of which will also
* tail-call to the slowpath if they fire.
*/
JEMALLOC_ALWAYS_INLINE void *
imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
LOG("core.malloc.entry", "size: %zu", size);
if (tsd_get_allocates() && unlikely(!malloc_initialized())) {
return fallback_alloc(size);
}
tsd_t *tsd = tsd_get(false);
if (unlikely((size > SC_LOOKUP_MAXCLASS) || tsd == NULL)) {
return fallback_alloc(size);
}
/*
* The code below, up to the branch checking the next_event threshold, may
* execute before malloc_init(), in which case the threshold is 0 to
* trigger slow path and initialization.
*
* Note that when uninitialized, only the fast-path variants of the sz /
* tsd facilities may be called.
*/
szind_t ind;
/*
* The thread_allocated counter in tsd serves as a general purpose
* accumulator for bytes of allocation to trigger different types of
* events. usize is always needed to advance thread_allocated, though
* it's not always needed in the core allocation logic.
*/
size_t usize;
sz_size2index_usize_fastpath(size, &ind, &usize);
/* Fast path relies on size being a bin. */
assert(ind < SC_NBINS);
assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS) &&
(size <= SC_SMALL_MAXCLASS));
uint64_t allocated, threshold;
te_malloc_fastpath_ctx(tsd, &allocated, &threshold);
uint64_t allocated_after = allocated + usize;
/*
* The ind and usize might be uninitialized (or partially) before
* malloc_init(). The assertions check for: 1) full correctness (usize
* & ind) when initialized; and 2) guaranteed slow-path (threshold == 0)
* when !initialized.
*/
if (!malloc_initialized()) {
assert(threshold == 0);
} else {
assert(ind == sz_size2index(size));
assert(usize > 0 && usize == sz_index2size(ind));
}
/*
* Check for events and tsd non-nominal (fast_threshold will be set to
* 0) in a single branch.
*/
if (unlikely(allocated_after >= threshold)) {
return fallback_alloc(size);
}
assert(tsd_fast(tsd));
tcache_t *tcache = tsd_tcachep_get(tsd);
assert(tcache == tcache_get(tsd));
cache_bin_t *bin = &tcache->bins[ind];
bool tcache_success;
void *ret;
/*
* We split up the code this way so that redundant low-water
* computation doesn't happen on the (more common) case in which we
* don't touch the low water mark. The compiler won't do this
* duplication on its own.
*/
ret = cache_bin_alloc_easy(bin, &tcache_success);
if (tcache_success) {
fastpath_success_finish(tsd, allocated_after, bin, ret);
return ret;
}
ret = cache_bin_alloc(bin, &tcache_success);
if (tcache_success) {
fastpath_success_finish(tsd, allocated_after, bin, ret);
return ret;
}
return fallback_alloc(size);
}
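/*
 * Sketch of the intended call pattern: a public malloc-style entry point
 * simply forwards to the fastpath and lets it tail-call malloc_default()
 * (declared in jemalloc_internal_externs.h) when any fast-path condition
 * fails. The wrapper name here is illustrative.
 */
JEMALLOC_ALWAYS_INLINE void *
example_malloc(size_t size) {
return imalloc_fastpath(size, &malloc_default);
}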
JEMALLOC_ALWAYS_INLINE int
iget_defrag_hint(tsdn_t *tsdn, void* ptr) {
int defrag = 0;
...
@@ -4,7 +4,11 @@
#ifdef JEMALLOC_DEBUG
# define JEMALLOC_ALWAYS_INLINE static inline
#else
# ifdef _MSC_VER
# define JEMALLOC_ALWAYS_INLINE static __forceinline
# else
# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
# endif
#endif
#ifdef _MSC_VER
# define inline _inline
@@ -40,13 +44,6 @@
#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__
#if (defined(__GNUC__) || defined(__GNUG__)) && !defined(__clang__) \
&& defined(JEMALLOC_HAVE_ATTR) && (__GNUC__ >= 7)
#define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough);
#else
#define JEMALLOC_FALLTHROUGH /* falls through */
#endif
/* Diagnostic suppression macros */
#if defined(_MSC_VER) && !defined(__clang__)
# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push))
...
@@ -3,15 +3,31 @@
#include "jemalloc/internal/quantum.h"
/* Page size index type. */
typedef unsigned pszind_t;
/* Size class index type. */
typedef unsigned szind_t;
/* Processor / core id type. */
typedef int malloc_cpuid_t;
/* When realloc(non-null-ptr, 0) is called, what happens? */
enum zero_realloc_action_e {
/* Realloc(ptr, 0) is free(ptr); return malloc(0); */
zero_realloc_action_alloc = 0,
/* Realloc(ptr, 0) is free(ptr); */
zero_realloc_action_free = 1,
/* Realloc(ptr, 0) aborts. */
zero_realloc_action_abort = 2
};
typedef enum zero_realloc_action_e zero_realloc_action_t;
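/*
 * Hedged sketch (not the actual jemalloc implementation) of how a realloc
 * wrapper might dispatch on this option when handed a non-NULL ptr and size
 * 0. The function and the do_alloc/do_free callbacks are hypothetical, and
 * abort() (from <stdlib.h>) stands in for jemalloc's real error path.
 */
static void *
example_zero_realloc(void *ptr, zero_realloc_action_t action,
void *(*do_alloc)(size_t), void (*do_free)(void *)) {
switch (action) {
case zero_realloc_action_alloc:
do_free(ptr);
return do_alloc(0);
case zero_realloc_action_free:
do_free(ptr);
return NULL;
case zero_realloc_action_abort:
default:
abort();
}
return NULL;
}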
/* Signature of write callback. */
typedef void (write_cb_t)(void *, const char *);
enum malloc_init_e {
malloc_init_uninitialized = 3,
malloc_init_a0_initialized = 2,
malloc_init_recursible = 1,
malloc_init_initialized = 0 /* Common case --> jnz. */
};
typedef enum malloc_init_e malloc_init_t;
/*
* Flags bits:
*
...
@@ -4,8 +4,14 @@
#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"
#if defined(JEMALLOC_UTRACE) || defined(JEMALLOC_UTRACE_LABEL)
#include <sys/ktrace.h>
# if defined(JEMALLOC_UTRACE)
# define UTRACE_CALL(p, l) utrace(p, l)
# else
# define UTRACE_CALL(p, l) utrace("jemalloc_process", p, l)
# define JEMALLOC_UTRACE
# endif
#endif
#define JEMALLOC_NO_DEMANGLE
@@ -180,6 +186,35 @@ static const bool config_opt_safety_checks =
#endif
;
/*
* Extra debugging of sized deallocations too onerous to be included in the
* general safety checks.
*/
static const bool config_opt_size_checks =
#if defined(JEMALLOC_OPT_SIZE_CHECKS) || defined(JEMALLOC_DEBUG)
true
#else
false
#endif
;
static const bool config_uaf_detection =
#if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG)
true
#else
false
#endif
;
/* Whether or not the C++ extensions are enabled. */
static const bool config_enable_cxx =
#ifdef JEMALLOC_ENABLE_CXX
true
#else
false
#endif
;
#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
@@ -209,5 +244,20 @@ static const bool have_background_thread =
false
#endif
;
static const bool config_high_res_timer =
#ifdef JEMALLOC_HAVE_CLOCK_REALTIME
true
#else
false
#endif
;
static const bool have_memcntl =
#ifdef JEMALLOC_HAVE_MEMCNTL
true
#else
false
#endif
;
#endif /* JEMALLOC_PREAMBLE_H */
@@ -6,27 +6,19 @@
void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero);
bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
size_t usize_max, bool zero);
void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args);
void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata);
void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
void large_dalloc(tsdn_t *tsdn, edata_t *edata);
size_t large_salloc(tsdn_t *tsdn, const edata_t *edata);
void large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
bool reset_recent);
void large_prof_tctx_reset(edata_t *edata);
void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size);
void large_dalloc(tsdn_t *tsdn, extent_t *extent);
size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);
nstime_t large_prof_alloc_time_get(const extent_t *extent);
void large_prof_alloc_time_set(extent_t *extent, nstime_t time);
#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
#ifndef JEMALLOC_INTERNAL_LOCKEDINT_H
#define JEMALLOC_INTERNAL_LOCKEDINT_H
/*
* On architectures that support 64-bit atomics, we use atomic updates for
* our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
* externally.
*/
typedef struct locked_u64_s locked_u64_t;
#ifdef JEMALLOC_ATOMIC_U64
struct locked_u64_s {
atomic_u64_t val;
};
#else
/* Must hold the associated mutex. */
struct locked_u64_s {
uint64_t val;
};
#endif
typedef struct locked_zu_s locked_zu_t;
struct locked_zu_s {
atomic_zu_t val;
};
#ifndef JEMALLOC_ATOMIC_U64
# define LOCKEDINT_MTX_DECLARE(name) malloc_mutex_t name;
# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) \
malloc_mutex_init(&(mu), name, rank, rank_mode)
# define LOCKEDINT_MTX(mtx) (&(mtx))
# define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu))
# define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu))
# define LOCKEDINT_MTX_PREFORK(tsdn, mu) malloc_mutex_prefork(tsdn, &(mu))
# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) \
malloc_mutex_postfork_parent(tsdn, &(mu))
# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) \
malloc_mutex_postfork_child(tsdn, &(mu))
#else
# define LOCKEDINT_MTX_DECLARE(name)
# define LOCKEDINT_MTX(mtx) NULL
# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) false
# define LOCKEDINT_MTX_LOCK(tsdn, mu)
# define LOCKEDINT_MTX_UNLOCK(tsdn, mu)
# define LOCKEDINT_MTX_PREFORK(tsdn, mu)
# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu)
# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu)
#endif
#ifdef JEMALLOC_ATOMIC_U64
# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) assert((mtx) == NULL)
#else
# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) \
malloc_mutex_assert_owner(tsdn, (mtx))
#endif
static inline uint64_t
locked_read_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p) {
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_u64(&p->val, ATOMIC_RELAXED);
#else
return p->val;
#endif
}
static inline void
locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
uint64_t x) {
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_u64(&p->val, x, ATOMIC_RELAXED);
#else
p->val += x;
#endif
}
static inline void
locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
uint64_t x) {
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
uint64_t r = atomic_fetch_sub_u64(&p->val, x, ATOMIC_RELAXED);
assert(r - x <= r);
#else
p->val -= x;
assert(p->val + x >= p->val);
#endif
}
/* Increment and take modulus. Returns whether the modulo made any change. */
static inline bool
locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
const uint64_t x, const uint64_t modulus) {
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
uint64_t before, after;
bool overflow;
#ifdef JEMALLOC_ATOMIC_U64
before = atomic_load_u64(&p->val, ATOMIC_RELAXED);
do {
after = before + x;
assert(after >= before);
overflow = (after >= modulus);
if (overflow) {
after %= modulus;
}
} while (!atomic_compare_exchange_weak_u64(&p->val, &before, after,
ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
before = p->val;
after = before + x;
overflow = (after >= modulus);
if (overflow) {
after %= modulus;
}
p->val = after;
#endif
return overflow;
}
/*
* Non-atomically sets *dst += src. *dst needs external synchronization.
* This lets us avoid the cost of a fetch_add when it's unnecessary (note that
* the types here are atomic).
*/
static inline void
locked_inc_u64_unsynchronized(locked_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
uint64_t cur_dst = atomic_load_u64(&dst->val, ATOMIC_RELAXED);
atomic_store_u64(&dst->val, src + cur_dst, ATOMIC_RELAXED);
#else
dst->val += src;
#endif
}
static inline uint64_t
locked_read_u64_unsynchronized(locked_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_u64(&p->val, ATOMIC_RELAXED);
#else
return p->val;
#endif
}
static inline void
locked_init_u64_unsynchronized(locked_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
atomic_store_u64(&p->val, x, ATOMIC_RELAXED);
#else
p->val = x;
#endif
}
static inline size_t
locked_read_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p) {
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_zu(&p->val, ATOMIC_RELAXED);
#else
return atomic_load_zu(&p->val, ATOMIC_RELAXED);
#endif
}
static inline void
locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
size_t x) {
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_zu(&p->val, x, ATOMIC_RELAXED);
#else
size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
atomic_store_zu(&p->val, cur + x, ATOMIC_RELAXED);
#endif
}
static inline void
locked_dec_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
size_t x) {
LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
size_t r = atomic_fetch_sub_zu(&p->val, x, ATOMIC_RELAXED);
assert(r - x <= r);
#else
size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
atomic_store_zu(&p->val, cur - x, ATOMIC_RELAXED);
#endif
}
/* Like the _u64 variant, needs an externally synchronized *dst. */
static inline void
locked_inc_zu_unsynchronized(locked_zu_t *dst, size_t src) {
size_t cur_dst = atomic_load_zu(&dst->val, ATOMIC_RELAXED);
atomic_store_zu(&dst->val, src + cur_dst, ATOMIC_RELAXED);
}
/*
* Unlike the _u64 variant, this is safe to call unconditionally.
*/
static inline size_t
locked_read_atomic_zu(locked_zu_t *p) {
return atomic_load_zu(&p->val, ATOMIC_RELAXED);
}
#endif /* JEMALLOC_INTERNAL_LOCKEDINT_H */
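The helpers above are intended to be used through the LOCKEDINT_MTX_* wrappers, so that the mutex and the locking itself compile away entirely when JEMALLOC_ATOMIC_U64 is available. A minimal usage sketch follows; it assumes the companion LOCKEDINT_MTX_DECLARE/LOCKEDINT_MTX/LOCKEDINT_MTX_LOCK macros defined alongside LOCKEDINT_MTX_UNLOCK above, and the my_stats_t type and its fields are purely illustrative, not part of this header.

/* Hypothetical stats blob protected by the lockedint scheme. */
typedef struct my_stats_s {
	LOCKEDINT_MTX_DECLARE(mtx)	/* no-op when 64-bit atomics exist */
	locked_u64_t nrequests;		/* running total */
	locked_u64_t accumbytes;	/* bytes since the last interval tick */
} my_stats_t;

/*
 * Record an allocation of size bytes; returns true whenever another
 * `interval` bytes have accumulated (e.g. to drive a periodic event).
 */
static bool
my_stats_record(tsdn_t *tsdn, my_stats_t *stats, uint64_t size,
    uint64_t interval) {
	LOCKEDINT_MTX_LOCK(tsdn, stats->mtx);
	locked_inc_u64(tsdn, LOCKEDINT_MTX(stats->mtx), &stats->nrequests, 1);
	bool ticked = locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(stats->mtx),
	    &stats->accumbytes, size, interval);
	LOCKEDINT_MTX_UNLOCK(tsdn, stats->mtx);
	return ticked;
}

In the atomic configuration the lock/unlock lines expand to nothing and LOCKEDINT_MTX(...) expands to NULL, which is what the assert((mtx) == NULL) in LOCKEDINT_MTX_ASSERT_INTERNAL checks for.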
 #ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
 #define JEMALLOC_INTERNAL_MALLOC_IO_H
+#include "jemalloc/internal/jemalloc_internal_types.h"
 #ifdef _WIN32
 # ifdef _WIN64
 # define FMT64_PREFIX "ll"
@@ -40,6 +42,7 @@
  */
 #define MALLOC_PRINTF_BUFSIZE 4096
+write_cb_t wrtmessage;
 int buferror(int err, char *buf, size_t buflen);
 uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
     int base);
@@ -57,10 +60,10 @@ size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
  * The caller can set write_cb to null to choose to print with the
  * je_malloc_message hook.
  */
-void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
-    const char *format, va_list ap);
-void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
-    const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
+void malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
+    va_list ap);
+void malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
+    ...) JEMALLOC_FORMAT_PRINTF(3, 4);
 void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
 static inline ssize_t
...
#ifndef JEMALLOC_INTERNAL_MPSC_QUEUE_H
#define JEMALLOC_INTERNAL_MPSC_QUEUE_H
#include "jemalloc/internal/atomic.h"
/*
* A concurrent implementation of a multi-producer, single-consumer queue. It
* supports three concurrent operations:
* - Push
* - Push batch
* - Pop batch
*
* These operations are all lock-free.
*
* The implementation is the simple two-stack queue built on a Treiber stack.
* It's not terribly efficient, but it isn't expected to be used in any hot
* code paths. In fact, we don't really even need queue semantics in any
* anticipated use cases; we could get away with just the stack. But this way
* lets us frame the API in terms of the existing list types, which is a nice
* convenience. We can save on cache misses by introducing our own (parallel)
* single-linked list type here, and dropping FIFO semantics, if we need this to
* get faster. Since we're currently providing queue semantics though, we use
* the prev field in the link rather than the next field for Treiber-stack
* linkage, so that we can preserve order for batch-pushed lists (recall that
* the two-stack trick reverses order in the lock-free first stack).
*/
#define mpsc_queue(a_type) \
struct { \
atomic_p_t tail; \
}
#define mpsc_queue_proto(a_attr, a_prefix, a_queue_type, a_type, \
a_list_type) \
/* Initialize a queue. */ \
a_attr void \
a_prefix##new(a_queue_type *queue); \
/* Insert all items in src into the queue, clearing src. */ \
a_attr void \
a_prefix##push_batch(a_queue_type *queue, a_list_type *src); \
/* Insert node into the queue. */ \
a_attr void \
a_prefix##push(a_queue_type *queue, a_type *node); \
/* \
* Pop all items in the queue into the list at dst. dst should already \
* be initialized (and may contain existing items, which then remain \
* in dst). \
*/ \
a_attr void \
a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst);
#define mpsc_queue_gen(a_attr, a_prefix, a_queue_type, a_type, \
a_list_type, a_link) \
a_attr void \
a_prefix##new(a_queue_type *queue) { \
atomic_store_p(&queue->tail, NULL, ATOMIC_RELAXED); \
} \
a_attr void \
a_prefix##push_batch(a_queue_type *queue, a_list_type *src) { \
/* \
* Reuse the ql list prev field as the Treiber stack next \
* field. \
*/ \
a_type *first = ql_first(src); \
a_type *last = ql_last(src, a_link); \
void* cur_tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \
do { \
/* \
* Note that this breaks the queue ring structure; \
* it's not a ring any more! \
*/ \
first->a_link.qre_prev = cur_tail; \
/* \
* Note: the upcoming CAS doesn't need to be an acquire; every \
* push only needs to synchronize with the next pop, \
* which we get from the release sequence rules. \
*/ \
} while (!atomic_compare_exchange_weak_p(&queue->tail, \
&cur_tail, last, ATOMIC_RELEASE, ATOMIC_RELAXED)); \
ql_new(src); \
} \
a_attr void \
a_prefix##push(a_queue_type *queue, a_type *node) { \
ql_elm_new(node, a_link); \
a_list_type list; \
ql_new(&list); \
ql_head_insert(&list, node, a_link); \
a_prefix##push_batch(queue, &list); \
} \
a_attr void \
a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst) { \
a_type *tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \
if (tail == NULL) { \
/* \
* In the common special case where there are no \
* pending elements, bail early without a costly RMW. \
*/ \
return; \
} \
tail = atomic_exchange_p(&queue->tail, NULL, ATOMIC_ACQUIRE); \
/* \
* It's a single-consumer queue, so if tail started non-NULL, \
* it'd better stay non-NULL. \
*/ \
assert(tail != NULL); \
/* \
* We iterate through the stack and fix up the link structure \
* (stack insertion broke the list requirement that the list be \
* circularly linked). It's just as efficient at this point to \
* make the queue a "real" queue, so we do that as well. \
* If this ever gets to be a hot spot, we can omit this fixup \
* and make the queue a bag (i.e. not necessarily ordered), but \
* that would mean jettisoning the existing list API as the \
* batch pushing/popping interface. \
*/ \
a_list_type reversed; \
ql_new(&reversed); \
while (tail != NULL) { \
/* \
* Pop an item off the stack, prepend it onto the list \
* (reversing the order). Recall that we use the \
* list prev field as the Treiber stack next field to \
* preserve order of batch-pushed items when reversed. \
*/ \
a_type *next = tail->a_link.qre_prev; \
ql_elm_new(tail, a_link); \
ql_head_insert(&reversed, tail, a_link); \
tail = next; \
} \
ql_concat(dst, &reversed, a_link); \
}
#endif /* JEMALLOC_INTERNAL_MPSC_QUEUE_H */
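A rough sketch of how the two macros might be instantiated for a concrete element type. The elem_t/elem_list_t names are illustrative only; the ql_head/ql_elm linkage follows jemalloc's existing ql.h list conventions, which this header builds on.

#include "jemalloc/internal/ql.h"

typedef struct elem_s elem_t;
typedef ql_head(elem_t) elem_list_t;
typedef mpsc_queue(elem_t) elem_mpsc_queue_t;
struct elem_s {
	int payload;
	ql_elm(elem_t) link;	/* reused as the Treiber-stack pointer */
};

/*
 * Declares and defines static elem_mpsc_queue_new / _push / _push_batch /
 * _pop_batch, operating on elem_mpsc_queue_t and draining into elem_list_t.
 */
mpsc_queue_proto(static, elem_mpsc_queue_, elem_mpsc_queue_t, elem_t,
    elem_list_t);
mpsc_queue_gen(static, elem_mpsc_queue_, elem_mpsc_queue_t, elem_t,
    elem_list_t, link);

Producers then call elem_mpsc_queue_push() (or _push_batch()) concurrently, while the single consumer periodically drains everything in FIFO order with elem_mpsc_queue_pop_batch() into an ordinary ql list.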
@@ -6,6 +6,8 @@
 #include "jemalloc/internal/tsd.h"
 #include "jemalloc/internal/witness.h"
+extern int64_t opt_mutex_max_spin;
 typedef enum {
 	/* Can only acquire one mutex of a given witness rank at a time. */
 	malloc_mutex_rank_exclusive,
@@ -67,12 +69,6 @@ struct malloc_mutex_s {
 #endif
 };
-/*
- * Based on benchmark results, a fixed spin with this amount of retries works
- * well for our critical sections.
- */
-#define MALLOC_MUTEX_MAX_SPIN 250
 #ifdef _WIN32
 # if _WIN32_WINNT >= 0x0600
 # define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
@@ -245,22 +241,25 @@ malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
 	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
 }
-/* Copy the prof data from mutex for processing. */
 static inline void
-malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
-    malloc_mutex_t *mutex) {
-	mutex_prof_data_t *source = &mutex->prof_data;
-	/* Can only read holding the mutex. */
-	malloc_mutex_assert_owner(tsdn, mutex);
+malloc_mutex_prof_copy(mutex_prof_data_t *dst, mutex_prof_data_t *source) {
 	/*
 	 * Not *really* allowed (we shouldn't be doing non-atomic loads of
 	 * atomic data), but the mutex protection makes this safe, and writing
 	 * a member-for-member copy is tedious for this situation.
 	 */
-	*data = *source;
+	*dst = *source;
 	/* n_wait_thds is not reported (modified w/o locking). */
-	atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
+	atomic_store_u32(&dst->n_waiting_thds, 0, ATOMIC_RELAXED);
+}
+
+/* Copy the prof data from mutex for processing. */
+static inline void
+malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
+    malloc_mutex_t *mutex) {
+	/* Can only read holding the mutex. */
+	malloc_mutex_assert_owner(tsdn, mutex);
+	malloc_mutex_prof_copy(data, &mutex->prof_data);
 }
 static inline void
@@ -285,4 +284,36 @@ malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
 	data->n_lock_ops += source->n_lock_ops;
 }
+/* Compare the prof data and update to the maximum. */
+static inline void
+malloc_mutex_prof_max_update(tsdn_t *tsdn, mutex_prof_data_t *data,
+    malloc_mutex_t *mutex) {
+	mutex_prof_data_t *source = &mutex->prof_data;
+	/* Can only read holding the mutex. */
+	malloc_mutex_assert_owner(tsdn, mutex);
+
+	if (nstime_compare(&source->tot_wait_time, &data->tot_wait_time) > 0) {
+		nstime_copy(&data->tot_wait_time, &source->tot_wait_time);
+	}
+	if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
+		nstime_copy(&data->max_wait_time, &source->max_wait_time);
+	}
+	if (source->n_wait_times > data->n_wait_times) {
+		data->n_wait_times = source->n_wait_times;
+	}
+	if (source->n_spin_acquired > data->n_spin_acquired) {
+		data->n_spin_acquired = source->n_spin_acquired;
+	}
+	if (source->max_n_thds > data->max_n_thds) {
+		data->max_n_thds = source->max_n_thds;
+	}
+	if (source->n_owner_switches > data->n_owner_switches) {
+		data->n_owner_switches = source->n_owner_switches;
+	}
+	if (source->n_lock_ops > data->n_lock_ops) {
+		data->n_lock_ops = source->n_lock_ops;
+	}
+	/* n_wait_thds is not reported. */
+}
+
 #endif /* JEMALLOC_INTERNAL_MUTEX_H */
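The new malloc_mutex_prof_max_update() complements malloc_mutex_prof_accum(): the latter sums counters across mutexes, while the former keeps component-wise maxima. A hedged sketch of merging a group of related mutexes into a single "worst case" row; the merge_mutex_prof_max helper and its signature are hypothetical, not part of this change.

#include <string.h>

static void
merge_mutex_prof_max(tsdn_t *tsdn, mutex_prof_data_t *out,
    malloc_mutex_t *mutexes, size_t n) {
	memset(out, 0, sizeof(*out));	/* start from all-zero maxima */
	for (size_t i = 0; i < n; i++) {
		malloc_mutex_lock(tsdn, &mutexes[i]);
		/* Requires holding the mutex, as asserted above. */
		malloc_mutex_prof_max_update(tsdn, out, &mutexes[i]);
		malloc_mutex_unlock(tsdn, &mutexes[i]);
	}
}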