ruanhaishen/redis, commit b8beda3c
Authored May 01, 2023 by Oran Agra
Parents: d659c734, 6d23d3ac

    Merge commit jemalloc 5.3.0

Changes: 195 files (the listing is truncated to 195 of 195+ files to preserve performance).
deps/jemalloc/include/jemalloc/internal/prof_sys.h (new file, mode 100644)

#ifndef JEMALLOC_INTERNAL_PROF_SYS_H
#define JEMALLOC_INTERNAL_PROF_SYS_H

extern malloc_mutex_t prof_dump_filename_mtx;
extern base_t *prof_base;

void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(tsd_t *tsd, prof_bt_t *bt);
void prof_hooks_init();
void prof_unwind_init();
void prof_sys_thread_name_fetch(tsd_t *tsd);
int prof_getpid(void);
void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind);
bool prof_prefix_set(tsdn_t *tsdn, const char *prefix);
void prof_fdump_impl(tsd_t *tsd);
void prof_idump_impl(tsd_t *tsd);
bool prof_mdump_impl(tsd_t *tsd, const char *filename);
void prof_gdump_impl(tsd_t *tsd);

/* Used in unit tests. */
typedef int (prof_sys_thread_name_read_t)(char *buf, size_t limit);
extern prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read;
typedef int (prof_dump_open_file_t)(const char *, int);
extern prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file;
typedef ssize_t (prof_dump_write_file_t)(int, const void *, size_t);
extern prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file;
typedef int (prof_dump_open_maps_t)();
extern prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps;

#endif /* JEMALLOC_INTERNAL_PROF_SYS_H */
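The JET_MUTABLE function pointers above are test seams: unit tests can point the profiler's file-opening and file-writing hooks at in-memory doubles instead of the real filesystem. A minimal sketch of that idea, assuming a testing build in which JET_MUTABLE leaves the pointers writable; the capture buffer and helper names below are hypothetical, not jemalloc code:

#include <string.h>
#include <sys/types.h>  /* ssize_t */

/* Hypothetical in-memory sink standing in for a real dump file. */
static char captured_dump[1 << 16];
static size_t captured_len;

static int
test_dump_open_file(const char *filename, int mode) {
    (void)filename; (void)mode;
    captured_len = 0;
    return 123;  /* Fake descriptor; only ever seen by the write hook below. */
}

static ssize_t
test_dump_write_file(int fd, const void *buf, size_t len) {
    (void)fd;
    memcpy(captured_dump + captured_len, buf, len);
    captured_len += len;
    return (ssize_t)len;
}

/*
 * In a test, installed before triggering a profile dump:
 *     prof_dump_open_file = test_dump_open_file;
 *     prof_dump_write_file = test_dump_write_file;
 */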
deps/jemalloc/include/jemalloc/internal/prof_types.h

@@ -2,11 +2,12 @@
 #define JEMALLOC_INTERNAL_PROF_TYPES_H
 
 typedef struct prof_bt_s prof_bt_t;
-typedef struct prof_accum_s prof_accum_t;
 typedef struct prof_cnt_s prof_cnt_t;
 typedef struct prof_tctx_s prof_tctx_t;
+typedef struct prof_info_s prof_info_t;
 typedef struct prof_gctx_s prof_gctx_t;
 typedef struct prof_tdata_s prof_tdata_t;
+typedef struct prof_recent_s prof_recent_t;
 
 /* Option defaults. */
 #ifdef JEMALLOC_PROF
@@ -28,7 +29,23 @@ typedef struct prof_tdata_s prof_tdata_t;
 #define PROF_CKH_MINITEMS 64
 
 /* Size of memory buffer to use when writing dump files. */
-#define PROF_DUMP_BUFSIZE 65536
+#ifndef JEMALLOC_PROF
+/* Minimize memory bloat for non-prof builds. */
+#  define PROF_DUMP_BUFSIZE 1
+#elif defined(JEMALLOC_DEBUG)
+/* Use a small buffer size in debug build, mainly to facilitate testing. */
+#  define PROF_DUMP_BUFSIZE 16
+#else
+#  define PROF_DUMP_BUFSIZE 65536
+#endif
+
+/* Size of size class related tables */
+#ifdef JEMALLOC_PROF
+#  define PROF_SC_NSIZES SC_NSIZES
+#else
+/* Minimize memory bloat for non-prof builds. */
+#  define PROF_SC_NSIZES 1
+#endif
 
 /* Size of stack-allocated buffer used by prof_printf(). */
 #define PROF_PRINTF_BUFSIZE 128
@@ -45,12 +62,14 @@ typedef struct prof_tdata_s prof_tdata_t;
  */
 #define PROF_NTDATA_LOCKS 256
 
-/*
- * prof_tdata pointers close to NULL are used to encode state information that
- * is used for cleaning up during thread shutdown.
- */
-#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
-#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
-#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
+/* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+#  define PROF_DUMP_FILENAME_LEN (PATH_MAX + 1)
+#else
+#  define PROF_DUMP_FILENAME_LEN 1
+#endif
+
+/* Default number of recent allocations to record. */
+#define PROF_RECENT_ALLOC_MAX_DEFAULT 0
 
 #endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
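The new PROF_DUMP_BUFSIZE block selects the dump-buffer size from the build configuration rather than hard-coding 65536. A tiny standalone check of which branch a given configuration takes (the defines here only mimic the logic above; they are not jemalloc's own build flags):

#include <stdio.h>

/* Mimic a prof-enabled, non-debug build. */
#define JEMALLOC_PROF
#ifndef JEMALLOC_PROF
#  define PROF_DUMP_BUFSIZE 1
#elif defined(JEMALLOC_DEBUG)
#  define PROF_DUMP_BUFSIZE 16
#else
#  define PROF_DUMP_BUFSIZE 65536
#endif

int
main(void) {
    printf("PROF_DUMP_BUFSIZE = %d\n", PROF_DUMP_BUFSIZE);  /* 65536 */
    return 0;
}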
deps/jemalloc/include/jemalloc/internal/psset.h (new file, mode 100644)

#ifndef JEMALLOC_INTERNAL_PSSET_H
#define JEMALLOC_INTERNAL_PSSET_H

#include "jemalloc/internal/hpdata.h"

/*
 * A page-slab set. What the eset is to PAC, the psset is to HPA. It maintains
 * a collection of page-slabs (the intent being that they are backed by
 * hugepages, or at least could be), and handles allocation and deallocation
 * requests.
 */

/*
 * One more than the maximum pszind_t we will serve out of the HPA.
 * Practically, we expect only the first few to be actually used. This
 * corresponds to a maximum size of 512MB on systems with 4k pages and
 * SC_NGROUP == 4, which is already an unreasonably large maximum. Morally, you
 * can think of this as being SC_NPSIZES, but there's no sense in wasting that
 * much space in the arena, making bitmaps that much larger, etc.
 */
#define PSSET_NPSIZES 64

/*
 * We keep two purge lists per page size class; one for hugified hpdatas (at
 * index 2*pszind), and one for the non-hugified hpdatas (at index 2*pszind +
 * 1). This lets us implement a preference for purging non-hugified hpdatas
 * among similarly-dirty ones.
 * We reserve the last two indices for empty slabs, in that case purging
 * hugified ones (which are definitionally all waste) before non-hugified ones
 * (i.e. reversing the order).
 */
#define PSSET_NPURGE_LISTS (2 * PSSET_NPSIZES)

typedef struct psset_bin_stats_s psset_bin_stats_t;
struct psset_bin_stats_s {
    /* How many pageslabs are in this bin? */
    size_t npageslabs;
    /* Of them, how many pages are active? */
    size_t nactive;
    /* And how many are dirty? */
    size_t ndirty;
};

typedef struct psset_stats_s psset_stats_t;
struct psset_stats_s {
    /*
     * The second index is huge stats; nonfull_slabs[pszind][0] contains
     * stats for the non-huge slabs in bucket pszind, while
     * nonfull_slabs[pszind][1] contains stats for the huge slabs.
     */
    psset_bin_stats_t nonfull_slabs[PSSET_NPSIZES][2];

    /*
     * Full slabs don't live in any edata heap, but we still track their
     * stats.
     */
    psset_bin_stats_t full_slabs[2];

    /* Empty slabs are similar. */
    psset_bin_stats_t empty_slabs[2];
};

typedef struct psset_s psset_t;
struct psset_s {
    /*
     * The pageslabs, quantized by the size class of the largest contiguous
     * free run of pages in a pageslab.
     */
    hpdata_age_heap_t pageslabs[PSSET_NPSIZES];
    /* Bitmap for which set bits correspond to non-empty heaps. */
    fb_group_t pageslab_bitmap[FB_NGROUPS(PSSET_NPSIZES)];
    /*
     * The sum of all bin stats in stats. This lets us quickly answer
     * queries for the number of dirty, active, and retained pages in the
     * entire set.
     */
    psset_bin_stats_t merged_stats;
    psset_stats_t stats;
    /*
     * Slabs with no active allocations, but which are allowed to serve new
     * allocations.
     */
    hpdata_empty_list_t empty;
    /*
     * Slabs which are available to be purged, ordered by how much we want
     * to purge them (with later indices indicating slabs we want to purge
     * more).
     */
    hpdata_purge_list_t to_purge[PSSET_NPURGE_LISTS];
    /* Bitmap for which set bits correspond to non-empty purge lists. */
    fb_group_t purge_bitmap[FB_NGROUPS(PSSET_NPURGE_LISTS)];
    /* Slabs which are available to be hugified. */
    hpdata_hugify_list_t to_hugify;
};

void psset_init(psset_t *psset);
void psset_stats_accum(psset_stats_t *dst, psset_stats_t *src);

/*
 * Begin or end updating the given pageslab's metadata. While the pageslab is
 * being updated, it won't be returned from psset_fit calls.
 */
void psset_update_begin(psset_t *psset, hpdata_t *ps);
void psset_update_end(psset_t *psset, hpdata_t *ps);

/* Analogous to the eset_fit; pick a hpdata to serve the request. */
hpdata_t *psset_pick_alloc(psset_t *psset, size_t size);
/* Pick one to purge. */
hpdata_t *psset_pick_purge(psset_t *psset);
/* Pick one to hugify. */
hpdata_t *psset_pick_hugify(psset_t *psset);

void psset_insert(psset_t *psset, hpdata_t *ps);
void psset_remove(psset_t *psset, hpdata_t *ps);

static inline size_t
psset_npageslabs(psset_t *psset) {
    return psset->merged_stats.npageslabs;
}

static inline size_t
psset_nactive(psset_t *psset) {
    return psset->merged_stats.nactive;
}

static inline size_t
psset_ndirty(psset_t *psset) {
    return psset->merged_stats.ndirty;
}

#endif /* JEMALLOC_INTERNAL_PSSET_H */
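The purge-list layout described in the comments above (two lists per size class, later indices purged first, and the last two indices reserved for empty slabs with the huge/non-huge preference reversed) boils down to a small index computation when a slab is enqueued. A hypothetical sketch of that mapping; the real logic lives in the psset implementation, which is not part of this diff and may differ in detail:

#include <stdbool.h>
#include <stddef.h>

#define PSSET_NPSIZES 64
#define PSSET_NPURGE_LISTS (2 * PSSET_NPSIZES)

/*
 * Illustrative only: map (size class, hugified?, empty?) to a purge-list
 * index. Non-hugified slabs land at 2*pszind + 1, the later index, so they
 * are preferred for purging over similarly-dirty hugified ones. Empty slabs
 * use the last two indices, with the preference reversed: hugified empties
 * (pure waste) get the very last index and are purged first.
 */
static inline size_t
purge_list_index(size_t pszind, bool hugified, bool empty) {
    if (empty) {
        return hugified ? PSSET_NPURGE_LISTS - 1 : PSSET_NPURGE_LISTS - 2;
    }
    return 2 * pszind + (hugified ? 0 : 1);
}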
deps/jemalloc/include/jemalloc/internal/ql.h

@@ -3,37 +3,85 @@
 
 #include "jemalloc/internal/qr.h"
 
+/*
+ * A linked-list implementation.
+ *
+ * This is built on top of the ring implementation, but that can be viewed as an
+ * implementation detail (i.e. trying to advance past the tail of the list
+ * doesn't wrap around).
+ *
+ * You define a struct like so:
+ * typedef struct my_s my_t;
+ * struct my_s {
+ *     int data;
+ *     ql_elm(my_t) my_link;
+ * };
+ *
+ * // We wobble between "list" and "head" for this type; we're now mostly
+ * // heading towards "list".
+ * typedef ql_head(my_t) my_list_t;
+ *
+ * You then pass a my_list_t * for a_head arguments, a my_t * for a_elm
+ * arguments, the token "my_link" for a_field arguments, and the token "my_t"
+ * for a_type arguments.
+ */
+
 /* List definitions. */
 #define ql_head(a_type) \
 struct { \
     a_type *qlh_first; \
 }
 
+/* Static initializer for an empty list. */
 #define ql_head_initializer(a_head) {NULL}
 
+/* The field definition. */
 #define ql_elm(a_type) qr(a_type)
 
-/* List functions. */
+/* A pointer to the first element in the list, or NULL if the list is empty. */
+#define ql_first(a_head) ((a_head)->qlh_first)
+
+/* Dynamically initializes a list. */
 #define ql_new(a_head) do { \
-    (a_head)->qlh_first = NULL; \
+    ql_first(a_head) = NULL; \
 } while (0)
 
-#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
+/*
+ * Sets dest to be the contents of src (overwriting any elements there), leaving
+ * src empty.
+ */
+#define ql_move(a_head_dest, a_head_src) do { \
+    ql_first(a_head_dest) = ql_first(a_head_src); \
+    ql_new(a_head_src); \
+} while (0)
 
-#define ql_first(a_head) ((a_head)->qlh_first)
+/* True if the list is empty, otherwise false. */
+#define ql_empty(a_head) (ql_first(a_head) == NULL)
 
+/*
+ * Initializes a ql_elm. Must be called even if the field is about to be
+ * overwritten.
+ */
+#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
+
+/*
+ * Obtains the last item in the list.
+ */
 #define ql_last(a_head, a_field) \
-    ((ql_first(a_head) != NULL) \
-        ? qr_prev(ql_first(a_head), a_field) : NULL)
+    (ql_empty(a_head) ? NULL : qr_prev(ql_first(a_head), a_field))
 
+/*
+ * Gets a pointer to the next/prev element in the list. Trying to advance past
+ * the end or retreat before the beginning of the list returns NULL.
+ */
 #define ql_next(a_head, a_elm, a_field) \
     ((ql_last(a_head, a_field) != (a_elm)) \
         ? qr_next((a_elm), a_field) : NULL)
 
 #define ql_prev(a_head, a_elm, a_field) \
     ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
         : NULL)
 
+/* Inserts a_elm before a_qlelm in the list. */
 #define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
     qr_before_insert((a_qlelm), (a_elm), a_field); \
     if (ql_first(a_head) == (a_qlelm)) { \
@@ -41,23 +89,41 @@ struct { \
     } \
 } while (0)
 
+/* Inserts a_elm after a_qlelm in the list. */
 #define ql_after_insert(a_qlelm, a_elm, a_field) \
     qr_after_insert((a_qlelm), (a_elm), a_field)
 
+/* Inserts a_elm as the first item in the list. */
 #define ql_head_insert(a_head, a_elm, a_field) do { \
-    if (ql_first(a_head) != NULL) { \
+    if (!ql_empty(a_head)) { \
         qr_before_insert(ql_first(a_head), (a_elm), a_field); \
     } \
     ql_first(a_head) = (a_elm); \
 } while (0)
 
+/* Inserts a_elm as the last item in the list. */
 #define ql_tail_insert(a_head, a_elm, a_field) do { \
-    if (ql_first(a_head) != NULL) { \
+    if (!ql_empty(a_head)) { \
         qr_before_insert(ql_first(a_head), (a_elm), a_field); \
     } \
     ql_first(a_head) = qr_next((a_elm), a_field); \
 } while (0)
 
+/*
+ * Given lists a = [a_1, ..., a_n] and [b_1, ..., b_n], results in:
+ * a = [a1, ..., a_n, b_1, ..., b_n] and b = [].
+ */
+#define ql_concat(a_head_a, a_head_b, a_field) do { \
+    if (ql_empty(a_head_a)) { \
+        ql_move(a_head_a, a_head_b); \
+    } else if (!ql_empty(a_head_b)) { \
+        qr_meld(ql_first(a_head_a), ql_first(a_head_b), \
+            a_field); \
+        ql_new(a_head_b); \
+    } \
+} while (0)
+
+/* Removes a_elm from the list. */
 #define ql_remove(a_head, a_elm, a_field) do { \
     if (ql_first(a_head) == (a_elm)) { \
         ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
@@ -65,20 +131,63 @@ struct { \
     if (ql_first(a_head) != (a_elm)) { \
         qr_remove((a_elm), a_field); \
     } else { \
-        ql_first(a_head) = NULL; \
+        ql_new(a_head); \
     } \
 } while (0)
 
+/* Removes the first item in the list. */
 #define ql_head_remove(a_head, a_type, a_field) do { \
     a_type *t = ql_first(a_head); \
     ql_remove((a_head), t, a_field); \
 } while (0)
 
+/* Removes the last item in the list. */
 #define ql_tail_remove(a_head, a_type, a_field) do { \
     a_type *t = ql_last(a_head, a_field); \
     ql_remove((a_head), t, a_field); \
 } while (0)
 
+/*
+ * Given a = [a_1, a_2, ..., a_n-1, a_n, a_n+1, ...],
+ * ql_split(a, a_n, b, some_field) results in
+ * a = [a_1, a_2, ..., a_n-1]
+ * and replaces b's contents with:
+ * b = [a_n, a_n+1, ...]
+ */
+#define ql_split(a_head_a, a_elm, a_head_b, a_field) do { \
+    if (ql_first(a_head_a) == (a_elm)) { \
+        ql_move(a_head_b, a_head_a); \
+    } else { \
+        qr_split(ql_first(a_head_a), (a_elm), a_field); \
+        ql_first(a_head_b) = (a_elm); \
+    } \
+} while (0)
+
+/*
+ * An optimized version of:
+ *     a_type *t = ql_first(a_head);
+ *     ql_remove((a_head), t, a_field);
+ *     ql_tail_insert((a_head), t, a_field);
+ */
+#define ql_rotate(a_head, a_field) do { \
+    ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
+} while (0)
+
+/*
+ * Helper macro to iterate over each element in a list in order, starting from
+ * the head (or in reverse order, starting from the tail). The usage is
+ * (assuming my_t and my_list_t defined as above).
+ *
+ * int sum(my_list_t *list) {
+ *     int sum = 0;
+ *     my_t *iter;
+ *     ql_foreach(iter, list, my_link) {
+ *         sum += iter->data;
+ *     }
+ *     return sum;
+ * }
+ */
 #define ql_foreach(a_var, a_head, a_field) \
     qr_foreach((a_var), ql_first(a_head), a_field)
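The expanded header comment documents a complete intrusive-list API. As a quick illustration of the documented usage (a standalone sketch, not part of the diff; it assumes the jemalloc include directory is on the include path and mirrors the my_t/my_list_t names from the comment):

#include <stdio.h>
#include "jemalloc/internal/ql.h"

typedef struct my_s my_t;
struct my_s {
    int data;
    ql_elm(my_t) my_link;   /* Intrusive linkage. */
};
typedef ql_head(my_t) my_list_t;

int
main(void) {
    my_list_t list = ql_head_initializer(list);
    my_t a = {1}, b = {2}, c = {3};

    /* Every element's linkage must be initialized before use. */
    ql_elm_new(&a, my_link);
    ql_elm_new(&b, my_link);
    ql_elm_new(&c, my_link);

    ql_tail_insert(&list, &a, my_link);
    ql_tail_insert(&list, &b, my_link);
    ql_head_insert(&list, &c, my_link);   /* list is now: c, a, b */

    int sum = 0;
    my_t *iter;
    ql_foreach(iter, &list, my_link) {
        sum += iter->data;
    }
    printf("%d\n", sum);   /* 6 */
    return 0;
}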
deps/jemalloc/include/jemalloc/internal/qr.h

 #ifndef JEMALLOC_INTERNAL_QR_H
 #define JEMALLOC_INTERNAL_QR_H
 
+/*
+ * A ring implementation based on an embedded circular doubly-linked list.
+ *
+ * You define your struct like so:
+ *
+ * typedef struct my_s my_t;
+ * struct my_s {
+ *     int data;
+ *     qr(my_t) my_link;
+ * };
+ *
+ * And then pass a my_t * into macros for a_qr arguments, and the token
+ * "my_link" into a_field fields.
+ */
+
 /* Ring definitions. */
 #define qr(a_type) \
 struct { \
@@ -8,61 +23,114 @@ struct { \
     a_type *qre_prev; \
 }
 
-/* Ring functions. */
+/*
+ * Initialize a qr link. Every link must be initialized before being used, even
+ * if that initialization is going to be immediately overwritten (say, by being
+ * passed into an insertion macro).
+ */
 #define qr_new(a_qr, a_field) do { \
     (a_qr)->a_field.qre_next = (a_qr); \
     (a_qr)->a_field.qre_prev = (a_qr); \
 } while (0)
 
+/*
+ * Go forwards or backwards in the ring. Note that (the ring being circular), this
+ * always succeeds -- you just keep looping around and around the ring if you
+ * chase pointers without end.
+ */
 #define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
 #define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
 
-#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
-    (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
-    (a_qr)->a_field.qre_next = (a_qrelm); \
-    (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
-    (a_qrelm)->a_field.qre_prev = (a_qr); \
-} while (0)
-
-#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
-    (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
-    (a_qr)->a_field.qre_prev = (a_qrelm); \
-    (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
-    (a_qrelm)->a_field.qre_next = (a_qr); \
-} while (0)
-
-#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
-    a_type *t; \
-    (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
-    (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
-    t = (a_qr_a)->a_field.qre_prev; \
-    (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
-    (a_qr_b)->a_field.qre_prev = t; \
+/*
+ * Given two rings:
+ *     a -> a_1 -> ... -> a_n --
+ *     ^                       |
+ *     |------------------------
+ *
+ *     b -> b_1 -> ... -> b_n --
+ *     ^                       |
+ *     |------------------------
+ *
+ * Results in the ring:
+ *     a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n --
+ *     ^                                                 |
+ *     |-------------------------------------------------|
+ *
+ * a_qr_a can directly be a qr_next() macro, but a_qr_b cannot.
+ */
+#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
+    (a_qr_b)->a_field.qre_prev->a_field.qre_next = \
+        (a_qr_a)->a_field.qre_prev; \
+    (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
+    (a_qr_b)->a_field.qre_prev = \
+        (a_qr_b)->a_field.qre_prev->a_field.qre_next; \
+    (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
+    (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
 } while (0)
 
+/*
+ * Logically, this is just a meld. The intent, though, is that a_qrelm is a
+ * single-element ring, so that "before" has a more obvious interpretation than
+ * meld.
+ */
+#define qr_before_insert(a_qrelm, a_qr, a_field) \
+    qr_meld((a_qrelm), (a_qr), a_field)
+
+/* Ditto, but inserting after rather than before. */
+#define qr_after_insert(a_qrelm, a_qr, a_field) \
+    qr_before_insert(qr_next(a_qrelm, a_field), (a_qr), a_field)
+
 /*
+ * Inverts meld; given the ring:
+ *     a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n --
+ *     ^                                                 |
+ *     |-------------------------------------------------|
+ *
+ * Results in two rings:
+ *     a -> a_1 -> ... -> a_n --
+ *     ^                       |
+ *     |------------------------
+ *
+ *     b -> b_1 -> ... -> b_n --
+ *     ^                       |
+ *     |------------------------
+ *
  * qr_meld() and qr_split() are functionally equivalent, so there's no need to
  * have two copies of the code.
  */
-#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
-    qr_meld((a_qr_a), (a_qr_b), a_type, a_field)
+#define qr_split(a_qr_a, a_qr_b, a_field) \
+    qr_meld((a_qr_a), (a_qr_b), a_field)
 
-#define qr_remove(a_qr, a_field) do { \
-    (a_qr)->a_field.qre_prev->a_field.qre_next \
-        = (a_qr)->a_field.qre_next; \
-    (a_qr)->a_field.qre_next->a_field.qre_prev \
-        = (a_qr)->a_field.qre_prev; \
-    (a_qr)->a_field.qre_next = (a_qr); \
-    (a_qr)->a_field.qre_prev = (a_qr); \
-} while (0)
+/*
+ * Splits off a_qr from the rest of its ring, so that it becomes a
+ * single-element ring.
+ */
+#define qr_remove(a_qr, a_field) \
+    qr_split(qr_next(a_qr, a_field), (a_qr), a_field)
 
+/*
+ * Helper macro to iterate over each element in a ring exactly once, starting
+ * with a_qr. The usage is (assuming my_t defined as above):
+ *
+ * int sum(my_t *item) {
+ *     int sum = 0;
+ *     my_t *iter;
+ *     qr_foreach(iter, item, my_link) {
+ *         sum += iter->data;
+ *     }
+ *     return sum;
+ * }
+ */
 #define qr_foreach(var, a_qr, a_field) \
     for ((var) = (a_qr); \
         (var) != NULL; \
         (var) = (((var)->a_field.qre_next != (a_qr)) \
         ? (var)->a_field.qre_next : NULL))
 
+/*
+ * The same (and with the same usage) as qr_foreach, but in the opposite order,
+ * ending with a_qr.
+ */
 #define qr_reverse_foreach(var, a_qr, a_field) \
     for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
         (var) != NULL; \
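With this change qr_meld() and qr_split() become the primitives, and the insert/remove macros are thin wrappers over them. A standalone sketch of the meld/split round trip described by the ASCII diagrams above (element and field names mirror the header comment; not part of the diff):

#include <assert.h>
#include "jemalloc/internal/qr.h"

typedef struct my_s my_t;
struct my_s {
    int data;
    qr(my_t) my_link;
};

int
main(void) {
    my_t a = {1}, b = {2}, c = {3}, d = {4};
    qr_new(&a, my_link);
    qr_new(&b, my_link);
    qr_new(&c, my_link);
    qr_new(&d, my_link);

    /* Build two rings: (a, b) and (c, d). */
    qr_after_insert(&a, &b, my_link);
    qr_after_insert(&c, &d, my_link);

    /* Meld them into one ring: a -> b -> c -> d -> a. */
    qr_meld(&a, &c, my_link);
    assert(qr_next(&b, my_link) == &c);
    assert(qr_next(&d, my_link) == &a);

    /* Splitting at the same element undoes the meld. */
    qr_split(&a, &c, my_link);
    assert(qr_next(&b, my_link) == &a);
    assert(qr_next(&d, my_link) == &c);
    return 0;
}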
deps/jemalloc/include/jemalloc/internal/quantum.h

@@ -30,11 +30,18 @@
 #  ifdef __hppa__
 #    define LG_QUANTUM 4
 #  endif
+#  ifdef __loongarch__
+#    define LG_QUANTUM 4
+#  endif
 #  ifdef __m68k__
 #    define LG_QUANTUM 3
 #  endif
 #  ifdef __mips__
-#    define LG_QUANTUM 3
+#    if defined(__mips_n32) || defined(__mips_n64)
+#      define LG_QUANTUM 4
+#    else
+#      define LG_QUANTUM 3
+#    endif
 #  endif
 #  ifdef __nios2__
 #    define LG_QUANTUM 3
@@ -61,6 +68,9 @@
 #  ifdef __le32__
 #    define LG_QUANTUM 4
 #  endif
+#  ifdef __arc__
+#    define LG_QUANTUM 3
+#  endif
 #  ifndef LG_QUANTUM
 #    error "Unknown minimum alignment for architecture; specify via "
         "--with-lg-quantum"
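LG_QUANTUM is the base-2 log of the minimum alignment jemalloc guarantees on the architecture, so the new MIPS n32/n64 value of 4 means a 16-byte quantum there. A small standalone illustration of the relationship (the rounding helper below is a local sketch, not quantum.h's own macro):

#include <stdio.h>
#include <stddef.h>

/* E.g. the n32/n64 MIPS ABIs above: a 16-byte quantum. */
#define LG_QUANTUM 4
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)

/* Round a request size up to a multiple of the quantum. */
static size_t
quantum_ceiling(size_t sz) {
    return (sz + QUANTUM_MASK) & ~QUANTUM_MASK;
}

int
main(void) {
    printf("%zu %zu %zu\n",
        QUANTUM,               /* 16 */
        quantum_ceiling(1),    /* 16 */
        quantum_ceiling(17));  /* 32 */
    return 0;
}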
deps/jemalloc/include/jemalloc/internal/rb.h

+#ifndef JEMALLOC_INTERNAL_RB_H
+#define JEMALLOC_INTERNAL_RB_H
+
 /*-
  *******************************************************************************
  *
@@ -19,13 +22,19 @@
  *******************************************************************************
  */
 
-#ifndef RB_H_
-#define RB_H_
-
 #ifndef __PGI
 #define RB_COMPACT
 #endif
 
+/*
+ * Each node in the RB tree consumes at least 1 byte of space (for the linkage
+ * if nothing else), so there are a maximum of sizeof(void *) << 3 rb tree nodes
+ * in any process (and thus, at most sizeof(void *) << 3 nodes in any rb tree).
+ * The choice of algorithm bounds the depth of a tree to twice the binary log of
+ * the number of elements in the tree; the following bound follows.
+ */
+#define RB_MAX_DEPTH (sizeof(void *) << 4)
+
 #ifdef RB_COMPACT
 /* Node structure. */
 #define rb_node(a_type) \
@@ -159,12 +168,22 @@ struct { \
     rbtn_right_set(a_type, a_field, (r_node), (a_node)); \
 } while (0)
 
+#define rb_summarized_only_false(...)
+#define rb_summarized_only_true(...) __VA_ARGS__
+#define rb_empty_summarize(a_node, a_lchild, a_rchild) false
+
 /*
- * The rb_proto() macro generates function prototypes that correspond to the
- * functions generated by an equivalently parameterized call to rb_gen().
+ * The rb_proto() and rb_summarized_proto() macros generate function prototypes
+ * that correspond to the functions generated by an equivalently parameterized
+ * call to rb_gen() or rb_summarized_gen(), respectively.
  */
 
 #define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
+    rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, false)
+#define rb_summarized_proto(a_attr, a_prefix, a_rbt_type, a_type) \
+    rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, true)
+#define rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, \
+    a_is_summarized) \
 a_attr void \
 a_prefix##new(a_rbt_type *rbtree); \
 a_attr bool \
@@ -195,31 +214,94 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
     a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
 a_attr void \
 a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
-    void *arg);
+    void *arg); \
+/* Extended API */ \
+rb_summarized_only_##a_is_summarized( \
+a_attr void \
+a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node); \
+a_attr bool \
+a_prefix##empty_filtered(a_rbt_type *rbtree, \
+    bool (*filter_node)(void *, a_type *), \
+    bool (*filter_subtree)(void *, a_type *), \
+    void *filter_ctx); \
+a_attr a_type * \
+a_prefix##first_filtered(a_rbt_type *rbtree, \
+    bool (*filter_node)(void *, a_type *), \
+    bool (*filter_subtree)(void *, a_type *), \
+    void *filter_ctx); \
+a_attr a_type * \
+a_prefix##last_filtered(a_rbt_type *rbtree, \
+    bool (*filter_node)(void *, a_type *), \
+    bool (*filter_subtree)(void *, a_type *), \
+    void *filter_ctx); \
+a_attr a_type * \
+a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node, \
+    bool (*filter_node)(void *, a_type *), \
+    bool (*filter_subtree)(void *, a_type *), \
+    void *filter_ctx); \
+a_attr a_type * \
+a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node, \
+    bool (*filter_node)(void *, a_type *), \
+    bool (*filter_subtree)(void *, a_type *), \
+    void *filter_ctx); \
+a_attr a_type * \
+a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key, \
+    bool (*filter_node)(void *, a_type *), \
+    bool (*filter_subtree)(void *, a_type *), \
+    void *filter_ctx); \
+a_attr a_type * \
+a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key, \
+    bool (*filter_node)(void *, a_type *), \
+    bool (*filter_subtree)(void *, a_type *), \
+    void *filter_ctx); \
+a_attr a_type * \
+a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key, \
+    bool (*filter_node)(void *, a_type *), \
+    bool (*filter_subtree)(void *, a_type *), \
+    void *filter_ctx); \
+a_attr a_type * \
+a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start, \
+    a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
+    bool (*filter_node)(void *, a_type *), \
+    bool (*filter_subtree)(void *, a_type *), \
+    void *filter_ctx); \
+a_attr a_type * \
+a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \
+    a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
+    bool (*filter_node)(void *, a_type *), \
+    bool (*filter_subtree)(void *, a_type *), \
+    void *filter_ctx); \
+)
 
 /*
  * The rb_gen() macro generates a type-specific red-black tree implementation,
  * based on the above cpp macros.
- *
  * Arguments:
  *
- *   a_attr    : Function attribute for generated functions (ex: static).
- *   a_prefix  : Prefix for generated functions (ex: ex_).
- *   a_rb_type : Type for red-black tree data structure (ex: ex_t).
- *   a_type    : Type for red-black tree node data structure (ex: ex_node_t).
- *   a_field   : Name of red-black tree node linkage (ex: ex_link).
- *   a_cmp     : Node comparison function name, with the following prototype:
- *                 int (a_cmp *)(a_type *a_node, a_type *a_other);
- *                                       ^^^^^^
- *                                    or a_key
- *               Interpretation of comparison function return values:
- *                 -1 : a_node <  a_other
- *                  0 : a_node == a_other
- *                  1 : a_node >  a_other
- *               In all cases, the a_node or a_key macro argument is the first
- *               argument to the comparison function, which makes it possible
- *               to write comparison functions that treat the first argument
- *               specially.
+ *   a_attr:
+ *     Function attribute for generated functions (ex: static).
+ *   a_prefix:
+ *     Prefix for generated functions (ex: ex_).
+ *   a_rb_type:
+ *     Type for red-black tree data structure (ex: ex_t).
+ *   a_type:
+ *     Type for red-black tree node data structure (ex: ex_node_t).
+ *   a_field:
+ *     Name of red-black tree node linkage (ex: ex_link).
+ *   a_cmp:
+ *     Node comparison function name, with the following prototype:
+ *
+ *     int a_cmp(a_type *a_node, a_type *a_other);
+ *                       ^^^^^^
+ *                    or a_key
+ *     Interpretation of comparison function return values:
+ *       -1 : a_node <  a_other
+ *        0 : a_node == a_other
+ *        1 : a_node >  a_other
+ *     In all cases, the a_node or a_key macro argument is the first argument to
+ *     the comparison function, which makes it possible to write comparison
+ *     functions that treat the first argument specially. a_cmp must be a total
+ *     order on values inserted into the tree -- duplicates are not allowed.
  *
  * Assuming the following setup:
  *
@@ -338,8 +420,193 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
  *             during iteration.  There is no way to stop iteration once it
  *             has begun.
  *   arg : Opaque pointer passed to cb().
+ *
+ * The rb_summarized_gen() macro generates all the functions above, but has an
+ * expanded interface. It introduces the notion of summarizing subtrees, and of
+ * filtering searches in the tree according to the information contained in
+ * those summaries.
+ * The extra macro argument is:
+ *   a_summarize:
+ *     Tree summarization function name, with the following prototype:
+ *
+ *     bool a_summarize(a_type *a_node, const a_type *a_left_child,
+ *         const a_type *a_right_child);
+ *
+ *     This function should update a_node with the summary of the subtree rooted
+ *     there, using the data contained in it and the summaries in a_left_child
+ *     and a_right_child. One or both of them may be NULL. When the tree
+ *     changes due to an insertion or removal, it updates the summaries of all
+ *     nodes whose subtrees have changed (always updating the summaries of
+ *     children before their parents). If the user alters a node in the tree in
+ *     a way that may change its summary, they can call the generated
+ *     update_summaries function to bubble up the summary changes to the root.
+ *     It should return true if the summary changed (or may have changed), and
+ *     false if it didn't (which will allow the implementation to terminate
+ *     "bubbling up" the summaries early).
+ *     As the parameter names indicate, the children are ordered as they are in
+ *     the tree, a_left_child, if it is not NULL, compares less than a_node,
+ *     which in turn compares less than a_right_child (if a_right_child is not
+ *     NULL).
+ *
+ * Using the same setup as above but replacing the macro with
+ *   rb_summarized_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp,
+ *       ex_summarize)
+ *
+ * Generates all the previous functions, but adds some more:
+ *
+ *   static void
+ *   ex_update_summaries(ex_t *tree, ex_node_t *node);
+ *       Description: Recompute all summaries of ancestors of node.
+ *       Args:
+ *         tree: Pointer to an initialized red-black tree object.
+ *         node: The element of the tree whose summary may have changed.
+ *
+ * For each of ex_empty, ex_first, ex_last, ex_next, ex_prev, ex_search,
+ * ex_nsearch, ex_psearch, ex_iter, and ex_reverse_iter, an additional function
+ * is generated as well, with the suffix _filtered (e.g. ex_empty_filtered,
+ * ex_first_filtered, etc.). These use the concept of a "filter"; a binary
+ * property some node either satisfies or does not satisfy. Clever use of the
+ * a_summarize argument to rb_summarized_gen can allow efficient computation of
+ * these predicates across whole subtrees of the tree.
+ * The extended API functions accept three additional arguments after the
+ * arguments to the corresponding non-extended equivalent.
+ *
+ *   ex_fn(..., bool (*filter_node)(void *, ex_node_t *),
+ *       bool (*filter_subtree)(void *, ex_node_t *), void *filter_ctx);
+ *     filter_node    : Returns true if the node passes the filter.
+ *     filter_subtree : Returns true if some node in the subtree rooted at
+ *                      node passes the filter.
+ *     filter_ctx     : A context argument passed to the filters.
+ *
+ * For a more concrete example of summarizing and filtering, suppose we're using
+ * the red-black tree to track a set of integers:
+ *
+ *   struct ex_node_s {
+ *       rb_node(ex_node_t) ex_link;
+ *       unsigned data;
+ *   };
+ *
+ * Suppose, for some application-specific reason, we want to be able to quickly
+ * find numbers in the set which are divisible by large powers of 2 (say, for
+ * aligned allocation purposes). We augment the node with a summary field:
+ *
+ *   struct ex_node_s {
+ *       rb_node(ex_node_t) ex_link;
+ *       unsigned data;
+ *       unsigned max_subtree_ffs;
+ *   }
+ *
+ * and define our summarization function as follows:
+ *
+ *   bool
+ *   ex_summarize(ex_node_t *node, const ex_node_t *lchild,
+ *       const ex_node_t *rchild) {
+ *       unsigned new_max_subtree_ffs = ffs(node->data);
+ *       if (lchild != NULL && lchild->max_subtree_ffs > new_max_subtree_ffs) {
+ *           new_max_subtree_ffs = lchild->max_subtree_ffs;
+ *       }
+ *       if (rchild != NULL && rchild->max_subtree_ffs > new_max_subtree_ffs) {
+ *           new_max_subtree_ffs = rchild->max_subtree_ffs;
+ *       }
+ *       bool changed = (node->max_subtree_ffs != new_max_subtree_ffs);
+ *       node->max_subtree_ffs = new_max_subtree_ffs;
+ *       // This could be "return true" without any correctness or big-O
+ *       // performance changes; but practically, precisely reporting summary
+ *       // changes reduces the amount of work that has to be done when "bubbling
+ *       // up" summary changes.
+ *       return changed;
+ *   }
+ *
+ * We can now implement our filter functions as follows:
+ *   bool
+ *   ex_filter_node(void *filter_ctx, ex_node_t *node) {
+ *       unsigned required_ffs = *(unsigned *)filter_ctx;
+ *       return ffs(node->data) >= required_ffs;
+ *   }
+ *   bool
+ *   ex_filter_subtree(void *filter_ctx, ex_node_t *node) {
+ *       unsigned required_ffs = *(unsigned *)filter_ctx;
+ *       return node->max_subtree_ffs >= required_ffs;
+ *   }
+ *
+ * We can now easily search for, e.g., the smallest integer in the set that's
+ * divisible by 128:
+ *   ex_node_t *
+ *   find_div_128(ex_tree_t *tree) {
+ *       unsigned min_ffs = 7;
+ *       return ex_first_filtered(tree, &ex_filter_node, &ex_filter_subtree,
+ *           &min_ffs);
+ *   }
+ *
+ * We could with similar ease:
+ * - Find the next multiple of 128 in the set that's larger than 12345 (with
+ *   ex_nsearch_filtered)
+ * - Iterate over just those multiples of 64 that are in the set (with
+ *   ex_iter_filtered)
+ * - Determine if the set contains any multiples of 1024 (with
+ *   ex_empty_filtered).
+ *
+ * Some possibly subtle API notes:
+ * - The node argument to ex_next_filtered and ex_prev_filtered need not pass
+ *   the filter; it will find the next/prev node that passes the filter.
+ * - ex_search_filtered will fail even for a node in the tree, if that node does
+ *   not pass the filter. ex_psearch_filtered and ex_nsearch_filtered behave
+ *   similarly; they may return a node larger/smaller than the key, even if a
+ *   node equivalent to the key is in the tree (but does not pass the filter).
+ * - Similarly, if the start argument to a filtered iteration function does not
+ *   pass the filter, the callback won't be invoked on it.
+ *
+ * These should make sense after a moment's reflection; each post-condition is
+ * the same as with the unfiltered version, with the added constraint that the
+ * returned node must pass the filter.
  */
 #define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
+    rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \
+        rb_empty_summarize, false)
+#define rb_summarized_gen(a_attr, a_prefix, a_rbt_type, a_type, \
+    a_field, a_cmp, a_summarize) \
+    rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \
+        a_summarize, true)
+
+#define rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, \
+    a_field, a_cmp, a_summarize, a_is_summarized) \
+typedef struct { \
+    a_type *node; \
+    int cmp; \
+} a_prefix##path_entry_t; \
+static inline void \
+a_prefix##summarize_range(a_prefix##path_entry_t *rfirst, \
+    a_prefix##path_entry_t *rlast) { \
+    while ((uintptr_t)rlast >= (uintptr_t)rfirst) { \
+        a_type *node = rlast->node; \
+        /* Avoid a warning when a_summarize is rb_empty_summarize. */ \
+        (void)node; \
+        bool changed = a_summarize(node, rbtn_left_get(a_type, a_field, \
+            node), rbtn_right_get(a_type, a_field, node)); \
+        if (!changed) { \
+            break; \
+        } \
+        rlast--; \
+    } \
+} \
+/* On the remove pathways, we sometimes swap the node being removed */ \
+/* and its first successor; in such cases we need to do two range */ \
+/* updates; one from the node to its (former) swapped successor, the */ \
+/* next from that successor to the root (with either allowed to */ \
+/* bail out early if appropriate. */ \
+static inline void \
+a_prefix##summarize_swapped_range(a_prefix##path_entry_t *rfirst, \
+    a_prefix##path_entry_t *rlast, a_prefix##path_entry_t *swap_loc) { \
+    if (swap_loc == NULL || rlast <= swap_loc) { \
+        a_prefix##summarize_range(rfirst, rlast); \
+    } else { \
+        a_prefix##summarize_range(swap_loc + 1, rlast); \
+        (void)a_summarize(swap_loc->node, \
+            rbtn_left_get(a_type, a_field, swap_loc->node), \
+            rbtn_right_get(a_type, a_field, swap_loc->node)); \
+        a_prefix##summarize_range(rfirst, swap_loc - 1); \
+    } \
+} \
 a_attr void \
 a_prefix##new(a_rbt_type *rbtree) { \
     rb_new(a_type, a_field, rbtree); \
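The example setup that the comment above refers to is elided in this truncated listing. Purely as a reminder of how the generator macro is driven, here is a hypothetical minimal rb_gen() instantiation in the spirit of the ex_ names used throughout the comment (not part of the diff; building it outside the jemalloc tree may need additional scaffolding such as the internal assert/unreachable definitions, and compilers may warn about unused generated functions):

#include <assert.h>
#include <stddef.h>
#include "jemalloc/internal/rb.h"

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
    rb_node(ex_node_t) ex_link;   /* Intrusive red-black linkage. */
    unsigned key;
};
typedef rb_tree(ex_node_t) ex_t;

static int
ex_cmp(ex_node_t *a, ex_node_t *b) {
    return (a->key > b->key) - (a->key < b->key);
}

/* Generates static ex_new(), ex_insert(), ex_remove(), ex_search(), ... */
rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)

int
main(void) {
    ex_t tree;
    ex_node_t n1 = {.key = 1}, n2 = {.key = 2};
    ex_new(&tree);
    ex_insert(&tree, &n2);
    ex_insert(&tree, &n1);
    return ex_first(&tree) == &n1 ? 0 : 1;
}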
@@ -465,10 +732,8 @@ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
 } \
 a_attr void \
 a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
-    struct { \
-        a_type *node; \
-        int cmp; \
-    } path[sizeof(void *) << 4], *pathp; \
+    a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
+    a_prefix##path_entry_t *pathp; \
     rbt_node_new(a_type, a_field, rbtree, node); \
     /* Wind. */ \
     path->node = rbtree->rbt_root; \
@@ -484,6 +749,13 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
         } \
     } \
     pathp->node = node; \
+    /* A loop invariant we maintain is that all nodes with */ \
+    /* out-of-date summaries live in path[0], path[1], ..., *pathp. */ \
+    /* To maintain this, we have to summarize node, since we */ \
+    /* decrement pathp before the first iteration. */ \
+    assert(rbtn_left_get(a_type, a_field, node) == NULL); \
+    assert(rbtn_right_get(a_type, a_field, node) == NULL); \
+    (void)a_summarize(node, NULL, NULL); \
     /* Unwind. */ \
     for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
         a_type *cnode = pathp->node; \
@@ -498,9 +770,13 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
                 a_type *tnode; \
                 rbtn_black_set(a_type, a_field, leftleft); \
                 rbtn_rotate_right(a_type, a_field, cnode, tnode); \
+                (void)a_summarize(cnode, \
+                    rbtn_left_get(a_type, a_field, cnode), \
+                    rbtn_right_get(a_type, a_field, cnode)); \
                 cnode = tnode; \
             } \
         } else { \
+            a_prefix##summarize_range(path, pathp); \
             return; \
         } \
     } else { \
@@ -521,13 +797,20 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
                 rbtn_rotate_left(a_type, a_field, cnode, tnode); \
                 rbtn_color_set(a_type, a_field, tnode, tred); \
                 rbtn_red_set(a_type, a_field, cnode); \
+                (void)a_summarize(cnode, \
+                    rbtn_left_get(a_type, a_field, cnode), \
+                    rbtn_right_get(a_type, a_field, cnode)); \
                 cnode = tnode; \
             } \
         } else { \
+            a_prefix##summarize_range(path, pathp); \
             return; \
         } \
     } \
     pathp->node = cnode; \
+    (void)a_summarize(cnode, \
+        rbtn_left_get(a_type, a_field, cnode), \
+        rbtn_right_get(a_type, a_field, cnode)); \
     } \
     /* Set root, and make it black. */ \
     rbtree->rbt_root = path->node; \
@@ -535,12 +818,18 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
 } \
 a_attr void \
 a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
-    struct { \
-        a_type *node; \
-        int cmp; \
-    } *pathp, *nodep, path[sizeof(void *) << 4]; \
+    a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
+    a_prefix##path_entry_t *pathp; \
+    a_prefix##path_entry_t *nodep; \
+    a_prefix##path_entry_t *swap_loc; \
+    /* This is a "real" sentinel -- NULL means we didn't swap the */ \
+    /* node to be pruned with one of its successors, and so */ \
+    /* summarization can terminate early whenever some summary */ \
+    /* doesn't change. */ \
+    swap_loc = NULL; \
+    /* This is just to silence a compiler warning. */ \
+    nodep = NULL; \
     /* Wind. */ \
-    nodep = NULL; /* Silence compiler warning. */ \
     path->node = rbtree->rbt_root; \
     for (pathp = path; pathp->node != NULL; pathp++) { \
         int cmp = pathp->cmp = a_cmp(node, pathp->node); \
@@ -567,6 +856,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
     pathp--; \
     if (pathp->node != node) { \
         /* Swap node with its successor. */ \
+        swap_loc = nodep; \
         bool tred = rbtn_red_get(a_type, a_field, pathp->node); \
         rbtn_color_set(a_type, a_field, pathp->node, \
             rbtn_red_get(a_type, a_field, node)); \
@@ -604,6 +894,9 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
         rbtn_black_set(a_type, a_field, left); \
         if (pathp == path) { \
             rbtree->rbt_root = left; \
+            /* Nothing to summarize -- the subtree rooted at the */ \
+            /* node's left child hasn't changed, and it's now the */ \
+            /* root. */ \
         } else { \
             if (pathp[-1].cmp < 0) { \
                 rbtn_left_set(a_type, a_field, pathp[-1].node, \
@@ -612,6 +905,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                 rbtn_right_set(a_type, a_field, pathp[-1].node, \
                     left); \
             } \
+            a_prefix##summarize_swapped_range(path, &pathp[-1], \
+                swap_loc); \
         } \
         return; \
     } else if (pathp == path) { \
@@ -620,10 +915,15 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
         return; \
     } \
     } \
+    /* We've now established the invariant that the node has no right */ \
+    /* child (well, morally; we didn't bother nulling it out if we */ \
+    /* swapped it with its successor), and that the only nodes with */ \
+    /* out-of-date summaries live in path[0], path[1], ..., pathp[-1].*/ \
     if (rbtn_red_get(a_type, a_field, pathp->node)) { \
         /* Prune red node, which requires no fixup. */ \
         assert(pathp[-1].cmp < 0); \
         rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
+        a_prefix##summarize_swapped_range(path, &pathp[-1], swap_loc); \
         return; \
     } \
     /* The node to be pruned is black, so unwind until balance is */ \
@@ -657,6 +957,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                 rbtn_right_set(a_type, a_field, pathp->node, tnode);\
                 rbtn_rotate_left(a_type, a_field, pathp->node, \
                     tnode); \
+                (void)a_summarize(pathp->node, \
+                    rbtn_left_get(a_type, a_field, pathp->node), \
+                    rbtn_right_get(a_type, a_field, pathp->node)); \
+                (void)a_summarize(right, \
+                    rbtn_left_get(a_type, a_field, right), \
+                    rbtn_right_get(a_type, a_field, right)); \
             } else { \
                 /* || */ \
                 /* pathp(r) */ \
@@ -667,7 +973,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                 /* */ \
                 rbtn_rotate_left(a_type, a_field, pathp->node, \
                     tnode); \
+                (void)a_summarize(pathp->node, \
+                    rbtn_left_get(a_type, a_field, pathp->node), \
+                    rbtn_right_get(a_type, a_field, pathp->node)); \
             } \
+            (void)a_summarize(tnode, rbtn_left_get(a_type, a_field, \
+                tnode), rbtn_right_get(a_type, a_field, tnode)); \
             /* Balance restored, but rotation modified subtree */ \
             /* root. */ \
             assert((uintptr_t)pathp > (uintptr_t)path); \
@@ -678,6 +989,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                 rbtn_right_set(a_type, a_field, pathp[-1].node, \
                     tnode); \
             } \
+            a_prefix##summarize_swapped_range(path, &pathp[-1], \
+                swap_loc); \
             return; \
         } else { \
             a_type *right = rbtn_right_get(a_type, a_field, \
...
@@ -698,6 +1011,15 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(right, \
rbtn_left_get(a_type, a_field, right), \
rbtn_right_get(a_type, a_field, right)); \
(void)a_summarize(tnode, \
rbtn_left_get(a_type, a_field, tnode), \
rbtn_right_get(a_type, a_field, tnode)); \
/* Balance restored, but rotation modified */
\
/* Balance restored, but rotation modified */
\
/* subtree root, which may actually be the tree */
\
/* subtree root, which may actually be the tree */
\
/* root. */
\
/* root. */
\
...
@@ -712,6 +1034,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
...
@@ -712,6 +1034,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
pathp[-1].node, tnode); \
} \
} \
a_prefix##summarize_swapped_range(path, \
&pathp[-1], swap_loc); \
} \
} \
return; \
return; \
} else { \
} else { \
...
@@ -725,6 +1049,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
...
@@ -725,6 +1049,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_red_set(a_type, a_field, pathp->node); \
rbtn_red_set(a_type, a_field, pathp->node); \
rbtn_rotate_left(a_type, a_field, pathp->node, \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(tnode, \
rbtn_left_get(a_type, a_field, tnode), \
rbtn_right_get(a_type, a_field, tnode)); \
pathp->node = tnode; \
pathp->node = tnode; \
} \
} \
} \
} \
...
@@ -757,6 +1087,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
...
@@ -757,6 +1087,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
tnode); \
tnode); \
rbtn_right_set(a_type, a_field, unode, tnode); \
rbtn_right_set(a_type, a_field, unode, tnode); \
rbtn_rotate_left(a_type, a_field, unode, tnode); \
rbtn_rotate_left(a_type, a_field, unode, tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(unode, \
rbtn_left_get(a_type, a_field, unode), \
rbtn_right_get(a_type, a_field, unode)); \
} else { \
} else { \
/* || */
\
/* || */
\
/* pathp(b) */
\
/* pathp(b) */
\
...
@@ -771,7 +1107,13 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
...
@@ -771,7 +1107,13 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_rotate_right(a_type, a_field, pathp->node, \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
tnode); \
rbtn_black_set(a_type, a_field, tnode); \
rbtn_black_set(a_type, a_field, tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
} \
} \
(void)a_summarize(tnode, \
rbtn_left_get(a_type, a_field, tnode), \
rbtn_right_get(a_type, a_field, tnode)); \
/* Balance restored, but rotation modified subtree */
\
/* Balance restored, but rotation modified subtree */
\
/* root, which may actually be the tree root. */
\
/* root, which may actually be the tree root. */
\
if (pathp == path) { \
if (pathp == path) { \
...
@@ -785,6 +1127,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
...
@@ -785,6 +1127,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
tnode); \
} \
} \
a_prefix##summarize_swapped_range(path, &pathp[-1], \
swap_loc); \
} \
} \
return; \
return; \
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
...
@@ -803,6 +1147,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                 rbtn_black_set(a_type, a_field, leftleft); \
                 rbtn_rotate_right(a_type, a_field, pathp->node, \
                     tnode); \
+                (void)a_summarize(pathp->node, \
+                    rbtn_left_get(a_type, a_field, pathp->node), \
+                    rbtn_right_get(a_type, a_field, pathp->node)); \
+                (void)a_summarize(tnode, \
+                    rbtn_left_get(a_type, a_field, tnode), \
+                    rbtn_right_get(a_type, a_field, tnode)); \
                 /* Balance restored, but rotation modified */ \
                 /* subtree root. */ \
                 assert((uintptr_t)pathp > (uintptr_t)path); \
@@ -813,6 +1163,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                     rbtn_right_set(a_type, a_field, pathp[-1].node, \
                         tnode); \
                 } \
+                a_prefix##summarize_swapped_range(path, &pathp[-1], \
+                    swap_loc); \
                 return; \
             } else { \
                 /* || */ \
@@ -824,6 +1176,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                 rbtn_red_set(a_type, a_field, left); \
                 rbtn_black_set(a_type, a_field, pathp->node); \
                 /* Balance restored. */ \
+                a_prefix##summarize_swapped_range(path, pathp, \
+                    swap_loc); \
                 return; \
             } \
         } else { \
@@ -840,6 +1194,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                 rbtn_black_set(a_type, a_field, leftleft); \
                 rbtn_rotate_right(a_type, a_field, pathp->node, \
                     tnode); \
+                (void)a_summarize(pathp->node, \
+                    rbtn_left_get(a_type, a_field, pathp->node), \
+                    rbtn_right_get(a_type, a_field, pathp->node)); \
+                (void)a_summarize(tnode, \
+                    rbtn_left_get(a_type, a_field, tnode), \
+                    rbtn_right_get(a_type, a_field, tnode)); \
                 /* Balance restored, but rotation modified */ \
                 /* subtree root, which may actually be the tree */ \
                 /* root. */ \
@@ -854,6 +1214,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                         rbtn_right_set(a_type, a_field, \
                             pathp[-1].node, tnode); \
                     } \
+                    a_prefix##summarize_swapped_range(path, \
+                        &pathp[-1], swap_loc); \
                 } \
                 return; \
             } else { \
@@ -864,6 +1226,9 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                 /* / */ \
                 /* (b) */ \
                 rbtn_red_set(a_type, a_field, left); \
+                (void)a_summarize(pathp->node, \
+                    rbtn_left_get(a_type, a_field, pathp->node), \
+                    rbtn_right_get(a_type, a_field, pathp->node)); \
             } \
         } \
     } \
@@ -1001,6 +1366,491 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
...
@@ -1001,6 +1366,491 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
void *arg) { \
void *arg) { \
a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
rbtree->rbt_root = NULL; \
rbtree->rbt_root = NULL; \
}
} \
/* BEGIN SUMMARIZED-ONLY IMPLEMENTATION */
\
rb_summarized_only_##a_is_summarized( \
static inline a_prefix##path_entry_t * \
a_prefix##wind(a_rbt_type *rbtree, \
a_prefix##path_entry_t path[RB_MAX_DEPTH], a_type *node) { \
a_prefix##path_entry_t *pathp; \
path->node = rbtree->rbt_root; \
for (pathp = path; ; pathp++) { \
assert((size_t)(pathp - path) < RB_MAX_DEPTH); \
pathp->cmp = a_cmp(node, pathp->node); \
if (pathp->cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} else if (pathp->cmp == 0) { \
return pathp; \
} else { \
pathp[1].node = rbtn_right_get(a_type, a_field, \
pathp->node); \
} \
} \
unreachable(); \
} \
a_attr void \
a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node) { \
a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
a_prefix##path_entry_t *pathp = a_prefix##wind(rbtree, path, node); \
a_prefix##summarize_range(path, pathp); \
} \
a_attr bool \
a_prefix##empty_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node = rbtree->rbt_root; \
return node == NULL || !filter_subtree(filter_ctx, node); \
} \
static inline a_type * \
a_prefix##first_filtered_from_node(a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
assert(node != NULL && filter_subtree(filter_ctx, node)); \
while (true) { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
if (left != NULL && filter_subtree(filter_ctx, left)) { \
node = left; \
} else if (filter_node(filter_ctx, node)) { \
return node; \
} else { \
assert(right != NULL \
&& filter_subtree(filter_ctx, right)); \
node = right; \
} \
} \
unreachable(); \
} \
a_attr a_type * \
a_prefix##first_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node = rbtree->rbt_root; \
if (node == NULL || !filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
return a_prefix##first_filtered_from_node(node, filter_node, \
filter_subtree, filter_ctx); \
} \
static inline a_type * \
a_prefix##last_filtered_from_node(a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
assert(node != NULL && filter_subtree(filter_ctx, node)); \
while (true) { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
if (right != NULL && filter_subtree(filter_ctx, right)) { \
node = right; \
} else if (filter_node(filter_ctx, node)) { \
return node; \
} else { \
assert(left != NULL \
&& filter_subtree(filter_ctx, left)); \
node = left; \
} \
} \
unreachable(); \
} \
a_attr a_type * \
a_prefix##last_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node = rbtree->rbt_root; \
if (node == NULL || !filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
return a_prefix##last_filtered_from_node(node, filter_node, \
filter_subtree, filter_ctx); \
} \
/* Internal implementation function.  Search for a node comparing */ \
/* equal to key matching the filter.  If such a node is in the tree, */ \
/* return it.  Additionally, the caller has the option to ask for */ \
/* bounds on the next / prev node in the tree passing the filter. */ \
/* If nextbound is true, then this function will do one of the */ \
/* following: */ \
/* - Fill in *nextbound_node with the smallest node in the tree */ \
/*   greater than key passing the filter, and NULL-out */ \
/*   *nextbound_subtree. */ \
/* - Fill in *nextbound_subtree with a parent of that node which is */ \
/*   not a parent of the searched-for node, and NULL-out */ \
/*   *nextbound_node. */ \
/* - NULL-out both *nextbound_node and *nextbound_subtree, in which */ \
/*   case no node greater than key but passing the filter is in the */ \
/*   tree. */ \
/* The prevbound case is similar.  If the caller knows that key is in */ \
/* the tree and that the subtree rooted at key does not contain a */ \
/* node satisfying the bound being searched for, then they can pass */ \
/* false for include_subtree, in which case we won't bother searching */ \
/* there (risking a cache miss). */ \
/* */ \
/* This API is unfortunately complex; but the logic for filtered */ \
/* searches is very subtle, and otherwise we would have to repeat it */ \
/* multiple times for filtered search, nsearch, psearch, next, and */ \
/* prev. */ \
static inline a_type * \
a_prefix##search_with_filter_bounds(a_rbt_type *rbtree, \
const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx, \
bool include_subtree, \
bool nextbound, a_type **nextbound_node, a_type **nextbound_subtree, \
bool prevbound, a_type **prevbound_node, a_type **prevbound_subtree) {\
if (nextbound) { \
*nextbound_node = NULL; \
*nextbound_subtree = NULL; \
} \
if (prevbound) { \
*prevbound_node = NULL; \
*prevbound_subtree = NULL; \
} \
a_type *tnode = rbtree->rbt_root; \
while (tnode != NULL && filter_subtree(filter_ctx, tnode)) { \
int cmp = a_cmp(key, tnode); \
a_type *tleft = rbtn_left_get(a_type, a_field, tnode); \
a_type *tright = rbtn_right_get(a_type, a_field, tnode); \
if (cmp < 0) { \
if (nextbound) { \
if (filter_node(filter_ctx, tnode)) { \
*nextbound_node = tnode; \
*nextbound_subtree = NULL; \
} else if (tright != NULL && filter_subtree( \
filter_ctx, tright)) { \
*nextbound_node = NULL; \
*nextbound_subtree = tright; \
} \
} \
tnode = tleft; \
} else if (cmp > 0) { \
if (prevbound) { \
if (filter_node(filter_ctx, tnode)) { \
*prevbound_node = tnode; \
*prevbound_subtree = NULL; \
} else if (tleft != NULL && filter_subtree( \
filter_ctx, tleft)) { \
*prevbound_node = NULL; \
*prevbound_subtree = tleft; \
} \
} \
tnode = tright; \
} else { \
if (filter_node(filter_ctx, tnode)) { \
return tnode; \
} \
if (include_subtree) { \
if (prevbound && tleft != NULL && filter_subtree( \
filter_ctx, tleft)) { \
*prevbound_node = NULL; \
*prevbound_subtree = tleft; \
} \
if (nextbound && tright != NULL && filter_subtree( \
filter_ctx, tright)) { \
*nextbound_node = NULL; \
*nextbound_subtree = tright; \
} \
} \
return NULL; \
} \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *nright = rbtn_right_get(a_type, a_field, node); \
if (nright != NULL && filter_subtree(filter_ctx, nright)) { \
return a_prefix##first_filtered_from_node(nright, filter_node, \
filter_subtree, filter_ctx); \
} \
a_type *node_candidate; \
a_type *subtree_candidate; \
a_type *search_result = a_prefix##search_with_filter_bounds( \
rbtree, node, filter_node, filter_subtree, filter_ctx, \
	    /* include_subtree */ false, \
	    /* nextbound */ true, &node_candidate, &subtree_candidate, \
	    /* prevbound */ false, NULL, NULL); \
assert(node == search_result \
|| !filter_node(filter_ctx, node)); \
if (node_candidate != NULL) { \
return node_candidate; \
} \
if (subtree_candidate != NULL) { \
return a_prefix##first_filtered_from_node( \
subtree_candidate, filter_node, filter_subtree, \
filter_ctx); \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *nleft = rbtn_left_get(a_type, a_field, node); \
if (nleft != NULL && filter_subtree(filter_ctx, nleft)) { \
return a_prefix##last_filtered_from_node(nleft, filter_node, \
filter_subtree, filter_ctx); \
} \
a_type *node_candidate; \
a_type *subtree_candidate; \
a_type *search_result = a_prefix##search_with_filter_bounds( \
rbtree, node, filter_node, filter_subtree, filter_ctx, \
	    /* include_subtree */ false, \
	    /* nextbound */ false, NULL, NULL, \
	    /* prevbound */ true, &node_candidate, &subtree_candidate); \
assert(node == search_result \
|| !filter_node(filter_ctx, node)); \
if (node_candidate != NULL) { \
return node_candidate; \
} \
if (subtree_candidate != NULL) { \
return a_prefix##last_filtered_from_node( \
subtree_candidate, filter_node, filter_subtree, \
filter_ctx); \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
filter_node, filter_subtree, filter_ctx, \
	    /* include_subtree */ false, \
	    /* nextbound */ false, NULL, NULL, \
	    /* prevbound */ false, NULL, NULL); \
return result; \
} \
a_attr a_type * \
a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node_candidate; \
a_type *subtree_candidate; \
a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
filter_node, filter_subtree, filter_ctx, \
	    /* include_subtree */ true, \
	    /* nextbound */ true, &node_candidate, &subtree_candidate, \
	    /* prevbound */ false, NULL, NULL); \
if (result != NULL) { \
return result; \
} \
if (node_candidate != NULL) { \
return node_candidate; \
} \
if (subtree_candidate != NULL) { \
return a_prefix##first_filtered_from_node( \
subtree_candidate, filter_node, filter_subtree, \
filter_ctx); \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node_candidate; \
a_type *subtree_candidate; \
a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
filter_node, filter_subtree, filter_ctx, \
	    /* include_subtree */ true, \
	    /* nextbound */ false, NULL, NULL, \
	    /* prevbound */ true, &node_candidate, &subtree_candidate); \
if (result != NULL) { \
return result; \
} \
if (node_candidate != NULL) { \
return node_candidate; \
} \
if (subtree_candidate != NULL) { \
return a_prefix##last_filtered_from_node( \
subtree_candidate, filter_node, filter_subtree, \
filter_ctx); \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##iter_recurse_filtered(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
if (node == NULL || !filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
a_type *ret; \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
ret = a_prefix##iter_recurse_filtered(rbtree, left, cb, arg, \
filter_node, filter_subtree, filter_ctx); \
if (ret != NULL) { \
return ret; \
} \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
} \
if (ret != NULL) { \
return ret; \
} \
return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
filter_node, filter_subtree, filter_ctx); \
} \
a_attr a_type * \
a_prefix##iter_start_filtered(a_rbt_type *rbtree, a_type *start, \
a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
void *arg, bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
if (!filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
int cmp = a_cmp(start, node); \
a_type *ret; \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
if (cmp < 0) { \
ret = a_prefix##iter_start_filtered(rbtree, start, left, cb, \
arg, filter_node, filter_subtree, filter_ctx); \
if (ret != NULL) { \
return ret; \
} \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
if (ret != NULL) { \
return ret; \
} \
} \
return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
filter_node, filter_subtree, filter_ctx); \
} else if (cmp > 0) { \
return a_prefix##iter_start_filtered(rbtree, start, right, \
cb, arg, filter_node, filter_subtree, filter_ctx); \
} else { \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
if (ret != NULL) { \
return ret; \
} \
} \
return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
filter_node, filter_subtree, filter_ctx); \
} \
} \
a_attr a_type * \
a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *ret; \
if (start != NULL) { \
ret = a_prefix##iter_start_filtered(rbtree, start, \
rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
filter_ctx); \
} else { \
ret = a_prefix##iter_recurse_filtered(rbtree, rbtree->rbt_root, \
cb, arg, filter_node, filter_subtree, filter_ctx); \
} \
return ret; \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse_filtered(a_rbt_type *rbtree, \
a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
if (node == NULL || !filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
a_type *ret; \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
ret = a_prefix##reverse_iter_recurse_filtered(rbtree, right, cb, \
arg, filter_node, filter_subtree, filter_ctx); \
if (ret != NULL) { \
return ret; \
} \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
} \
if (ret != NULL) { \
return ret; \
} \
return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb, \
arg, filter_node, filter_subtree, filter_ctx); \
} \
a_attr a_type * \
a_prefix##reverse_iter_start_filtered(a_rbt_type *rbtree, a_type *start,\
a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
void *arg, bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
if (!filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
int cmp = a_cmp(start, node); \
a_type *ret; \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
if (cmp > 0) { \
ret = a_prefix##reverse_iter_start_filtered(rbtree, start, \
right, cb, arg, filter_node, filter_subtree, filter_ctx); \
if (ret != NULL) { \
return ret; \
} \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
if (ret != NULL) { \
return ret; \
} \
} \
return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\
arg, filter_node, filter_subtree, filter_ctx); \
} else if (cmp < 0) { \
return a_prefix##reverse_iter_start_filtered(rbtree, start, \
left, cb, arg, filter_node, filter_subtree, filter_ctx); \
} else { \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
if (ret != NULL) { \
return ret; \
} \
} \
return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\
arg, filter_node, filter_subtree, filter_ctx); \
} \
} \
a_attr a_type * \
a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *ret; \
if (start != NULL) { \
ret = a_prefix##reverse_iter_start_filtered(rbtree, start, \
rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
filter_ctx); \
} else { \
ret = a_prefix##reverse_iter_recurse_filtered(rbtree, \
rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
filter_ctx); \
} \
return ret; \
} \
)  /* end rb_summarized_only */

#endif /* JEMALLOC_INTERNAL_RB_H */
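The filtered lookup helpers generated above all rely on one contract: filter_subtree(ctx, node) must return true whenever any node in node's subtree could pass filter_node(ctx, node), which is what the per-node summaries make cheap to answer. The standalone sketch below is not jemalloc's API; it is a minimal plain-C illustration (hypothetical node layout and names) of the same descend-left / test-self / descend-right logic used by the generated first_filtered_from_node.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal BST node; each node caches the maximum value in its subtree. */
typedef struct node_s {
	int value;
	int subtree_max; /* summary: max value at or under this node */
	struct node_s *left, *right;
} node_t;

/* filter_node: does this node itself qualify? */
static bool filter_node(void *ctx, node_t *n) {
	return n->value >= *(int *)ctx;
}

/* filter_subtree: could anything at or under this node qualify? */
static bool filter_subtree(void *ctx, node_t *n) {
	return n->subtree_max >= *(int *)ctx;
}

/* Same shape as the generated first_filtered_from_node: leftmost match wins. */
static node_t *
first_filtered(node_t *node, void *ctx) {
	if (node == NULL || !filter_subtree(ctx, node)) {
		return NULL;
	}
	while (true) {
		if (node->left != NULL && filter_subtree(ctx, node->left)) {
			node = node->left;
		} else if (filter_node(ctx, node)) {
			return node;
		} else {
			/* Subtree filter passed, so the match must be to the right. */
			node = node->right;
		}
	}
}

int main(void) {
	node_t leaves[3] = {
		{1, 1, NULL, NULL}, {5, 5, NULL, NULL}, {9, 9, NULL, NULL}
	};
	node_t root = {4, 9, &leaves[0], &leaves[1]};
	leaves[1].right = &leaves[2];
	leaves[1].subtree_max = 9;

	int threshold = 5;
	node_t *hit = first_filtered(&root, &threshold);
	printf("first node >= %d: %d\n", threshold, hit ? hit->value : -1);
	return 0;
}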
deps/jemalloc/include/jemalloc/internal/rtree.h
...
@@ -35,33 +35,52 @@
#  define RTREE_LEAF_COMPACT
#endif

typedef struct rtree_node_elm_s rtree_node_elm_t;
struct rtree_node_elm_s {
	atomic_p_t	child; /* (rtree_{node,leaf}_elm_t *) */
};

typedef struct rtree_metadata_s rtree_metadata_t;
struct rtree_metadata_s {
	szind_t szind;
	extent_state_t state; /* Mirrors edata->state. */
	bool is_head; /* Mirrors edata->is_head. */
	bool slab;
};

typedef struct rtree_contents_s rtree_contents_t;
struct rtree_contents_s {
	edata_t *edata;
	rtree_metadata_t metadata;
};

#define RTREE_LEAF_STATE_WIDTH EDATA_BITS_STATE_WIDTH
#define RTREE_LEAF_STATE_SHIFT 2
#define RTREE_LEAF_STATE_MASK MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT)

struct rtree_leaf_elm_s {
#ifdef RTREE_LEAF_COMPACT
	/*
	 * Single pointer-width field containing all three leaf element fields.
	 * For example, on a 64-bit x64 system with 48 significant virtual
	 * memory address bits, the index, edata, and slab fields are packed as
	 * such:
	 * x: index
	 * e: edata
	 * s: state
	 * h: is_head
	 * b: slab
	 *
	 *   00000000 xxxxxxxx eeeeeeee [...] eeeeeeee e00ssshb
	 */
	atomic_p_t	le_bits;
#else
	atomic_p_t	le_edata; /* (edata_t *) */
	/*
	 * From high to low bits: szind (8 bits), state (4 bits), is_head, slab
	 */
	atomic_u_t	le_metadata;
#endif
};
@@ -78,6 +97,7 @@ struct rtree_level_s {
...
@@ -78,6 +97,7 @@ struct rtree_level_s {
typedef
struct
rtree_s
rtree_t
;
typedef
struct
rtree_s
rtree_t
;
struct
rtree_s
{
struct
rtree_s
{
base_t
*
base
;
malloc_mutex_t
init_lock
;
malloc_mutex_t
init_lock
;
/* Number of elements based on rtree_levels[0].bits. */
/* Number of elements based on rtree_levels[0].bits. */
#if RTREE_HEIGHT > 1
#if RTREE_HEIGHT > 1
...
@@ -109,42 +129,29 @@ static const rtree_level_t rtree_levels[] = {
#endif
};

bool rtree_new(rtree_t *rtree, base_t *base, bool zeroed);
#ifdef JEMALLOC_JET
void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
#endif
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);

JEMALLOC_ALWAYS_INLINE unsigned
rtree_leaf_maskbits(void) {
	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR + 3);
	unsigned cumbits = (rtree_levels[RTREE_HEIGHT - 1].cumbits -
	    rtree_levels[RTREE_HEIGHT - 1].bits);
	return ptrbits - cumbits;
}

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leafkey(uintptr_t key) {
	uintptr_t mask = ~((ZU(1) << rtree_leaf_maskbits()) - 1);
	return (key & mask);
}

JEMALLOC_ALWAYS_INLINE size_t
rtree_cache_direct_map(uintptr_t key) {
	return (size_t)((key >> rtree_leaf_maskbits()) &
	    (RTREE_CTX_NCACHE - 1));
}

JEMALLOC_ALWAYS_INLINE uintptr_t
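rtree_leaf_maskbits() above is simply "pointer bits minus the key bits consumed by all non-leaf levels"; rtree_leafkey() and rtree_cache_direct_map() both reuse that single quantity. The standalone sketch below uses assumed parameters only (roughly the 64-bit defaults: 48 significant address bits, 4 KiB pages, a two-level tree, 16-entry L1 cache), so one leaf spans the low 30 bits, which matches the "each covers 1 GiB of vaddr" remark later in this header; it is an illustration, not the header's exact arithmetic.

#include <stdint.h>
#include <stdio.h>

#define LEAF_MASKBITS 30 /* assumed: 18 leaf-index bits + 12 page-offset bits */
#define NCACHE        16 /* assumed L1 cache size */

/* Identifies the region handled by one leaf node. */
static uintptr_t leafkey(uintptr_t key) {
	uintptr_t mask = ~(((uintptr_t)1 << LEAF_MASKBITS) - 1);
	return key & mask;
}

/* Direct-map slot in the L1 cache for that region. */
static size_t cache_slot(uintptr_t key) {
	return (size_t)((key >> LEAF_MASKBITS) & (NCACHE - 1));
}

int main(void) {
	uintptr_t a = (uintptr_t)0x7f1234567000ULL;
	uintptr_t b = a + 4096;                 /* same leaf, same slot */
	uintptr_t c = a + ((uintptr_t)1 << 30); /* next region, next slot */

	printf("a: leafkey=%#llx slot=%zu\n", (unsigned long long)leafkey(a), cache_slot(a));
	printf("b: leafkey=%#llx slot=%zu\n", (unsigned long long)leafkey(b), cache_slot(b));
	printf("c: leafkey=%#llx slot=%zu\n", (unsigned long long)leafkey(c), cache_slot(c));
	return 0;
}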
...
@@ -176,151 +183,174 @@ rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_encode(rtree_contents_t contents) {
	assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
	uintptr_t edata_bits = (uintptr_t)contents.edata
	    & (((uintptr_t)1 << LG_VADDR) - 1);

	uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR;
	uintptr_t slab_bits = (uintptr_t)contents.metadata.slab;
	uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1;
	uintptr_t state_bits = (uintptr_t)contents.metadata.state <<
	    RTREE_LEAF_STATE_SHIFT;
	uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits |
	    slab_bits;
	assert((edata_bits & metadata_bits) == 0);

	return edata_bits | metadata_bits;
}

JEMALLOC_ALWAYS_INLINE rtree_contents_t
rtree_leaf_elm_bits_decode(uintptr_t bits) {
	rtree_contents_t contents;
	/* Do the easy things first. */
	contents.metadata.szind = bits >> LG_VADDR;
	contents.metadata.slab = (bool)(bits & 1);
	contents.metadata.is_head = (bool)(bits & (1 << 1));

	uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) >>
	    RTREE_LEAF_STATE_SHIFT;
	assert(state_bits <= extent_state_max);
	contents.metadata.state = (extent_state_t)state_bits;

	uintptr_t low_bit_mask = ~((uintptr_t)EDATA_ALIGNMENT - 1);
#    ifdef __aarch64__
	/*
	 * aarch64 doesn't sign extend the highest virtual address bit to set
	 * the higher ones.  Instead, the high bits get zeroed.
	 */
	uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
	/* Mask off metadata. */
	uintptr_t mask = high_bit_mask & low_bit_mask;
	contents.edata = (edata_t *)(bits & mask);
#    else
	/* Restore sign-extended high bits, mask metadata bits. */
	contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB)
	    >> RTREE_NHIB) & low_bit_mask);
#    endif
	assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
	return contents;
}

#  endif /* RTREE_LEAF_COMPACT */

JEMALLOC_ALWAYS_INLINE rtree_contents_t
rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
	rtree_contents_t contents = rtree_leaf_elm_bits_decode(bits);
	return contents;
#else
	rtree_contents_t contents;
	unsigned metadata_bits = atomic_load_u(&elm->le_metadata, dependent
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
	contents.metadata.slab = (bool)(metadata_bits & 1);
	contents.metadata.is_head = (bool)(metadata_bits & (1 << 1));

	uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) >>
	    RTREE_LEAF_STATE_SHIFT;
	assert(state_bits <= extent_state_max);
	contents.metadata.state = (extent_state_t)state_bits;
	contents.metadata.szind = metadata_bits >> (RTREE_LEAF_STATE_SHIFT +
	    RTREE_LEAF_STATE_WIDTH);

	contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);

	return contents;
#endif
}

JEMALLOC_ALWAYS_INLINE void
rtree_contents_encode(rtree_contents_t contents, void **bits,
    unsigned *additional) {
#ifdef RTREE_LEAF_COMPACT
	*bits = (void *)rtree_leaf_elm_bits_encode(contents);
#else
	*additional = (unsigned)contents.metadata.slab
	    | ((unsigned)contents.metadata.is_head << 1)
	    | ((unsigned)contents.metadata.state << RTREE_LEAF_STATE_SHIFT)
	    | ((unsigned)contents.metadata.szind << (RTREE_LEAF_STATE_SHIFT +
	    RTREE_LEAF_STATE_WIDTH));
	*bits = contents.edata;
#endif
}

JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, void *bits, unsigned additional) {
#ifdef RTREE_LEAF_COMPACT
	atomic_store_p(&elm->le_bits, bits, ATOMIC_RELEASE);
#else
	atomic_store_u(&elm->le_metadata, additional, ATOMIC_RELEASE);
	/*
	 * Write edata last, since the element is atomically considered valid
	 * as soon as the edata field is non-NULL.
	 */
	atomic_store_p(&elm->le_edata, bits, ATOMIC_RELEASE);
#endif
}

JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, rtree_contents_t contents) {
	assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0);
	void *bits;
	unsigned additional;

	rtree_contents_encode(contents, &bits, &additional);
	rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
}

/* The state field can be updated independently (and more frequently). */
JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm1, rtree_leaf_elm_t *elm2, extent_state_t state) {
	assert(elm1 != NULL);
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm1,
	    /* dependent */ true);
	bits &= ~RTREE_LEAF_STATE_MASK;
	bits |= state << RTREE_LEAF_STATE_SHIFT;
	atomic_store_p(&elm1->le_bits, (void *)bits, ATOMIC_RELEASE);
	if (elm2 != NULL) {
		atomic_store_p(&elm2->le_bits, (void *)bits, ATOMIC_RELEASE);
	}
#else
	unsigned bits = atomic_load_u(&elm1->le_metadata, ATOMIC_RELAXED);
	bits &= ~RTREE_LEAF_STATE_MASK;
	bits |= state << RTREE_LEAF_STATE_SHIFT;
	atomic_store_u(&elm1->le_metadata, bits, ATOMIC_RELEASE);
	if (elm2 != NULL) {
		atomic_store_u(&elm2->le_metadata, bits, ATOMIC_RELEASE);
	}
#endif
}

/*
 * Tries to look up the key in the L1 cache, returning false if there's a hit, or
 * true if there's a miss.
 * Key is allowed to be NULL; returns true in this case.
 */
JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, rtree_leaf_elm_t **elm) {
	size_t slot = rtree_cache_direct_map(key);
	uintptr_t leafkey = rtree_leafkey(key);
	assert(leafkey != RTREE_LEAFKEY_INVALID);

	if (unlikely(rtree_ctx->cache[slot].leafkey != leafkey)) {
		return true;
	}

	rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
	assert(leaf != NULL);
	uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT - 1);
	*elm = &leaf[subkey];

	return false;
}

JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
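The compact leaf representation above squeezes the edata pointer, szind, state, is_head, and slab flags into one word by exploiting pointer alignment (free low bits) and unused high address bits. The standalone round-trip below illustrates that packing idea only; its widths and field names (48 address bits, 64-byte-aligned payload pointers, an 8-bit index in the top byte, flags in the low bits) are assumptions for the sketch, not the header's exact layout.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LG_VADDR    48 /* assumed significant address bits */
#define ALIGNMENT   64 /* assumed payload alignment: 6 free low bits */
#define STATE_SHIFT 2

typedef struct {
	void     *ptr;   /* occupies the middle address bits */
	unsigned  index; /* stored in the top (64 - LG_VADDR) bits */
	unsigned  state; /* small enum, stored above the is_head/slab bits */
	bool      is_head;
	bool      slab;
} contents_t;

static uintptr_t pack(contents_t c) {
	assert(((uintptr_t)c.ptr % ALIGNMENT) == 0);
	uintptr_t addr = (uintptr_t)c.ptr & (((uintptr_t)1 << LG_VADDR) - 1);
	return addr
	    | ((uintptr_t)c.index << LG_VADDR)
	    | ((uintptr_t)c.state << STATE_SHIFT)
	    | ((uintptr_t)c.is_head << 1)
	    | (uintptr_t)c.slab;
}

static contents_t unpack(uintptr_t bits) {
	contents_t c;
	c.index = (unsigned)(bits >> LG_VADDR);
	c.slab = (bool)(bits & 1);
	c.is_head = (bool)(bits & (1 << 1));
	c.state = (unsigned)((bits >> STATE_SHIFT) & 0x7);
	/* Drop metadata bits; assume addresses fit in LG_VADDR bits unsigned. */
	c.ptr = (void *)(bits & (((uintptr_t)1 << LG_VADDR) - 1)
	    & ~((uintptr_t)ALIGNMENT - 1));
	return c;
}

int main(void) {
	void *p = aligned_alloc(ALIGNMENT, 128);
	contents_t in = {p, 42, 3, true, false};
	contents_t out = unpack(pack(in));
	printf("ptr ok=%d index=%u state=%u is_head=%d slab=%d\n",
	    out.ptr == in.ptr, out.index, out.state, out.is_head, out.slab);
	free(p);
	return 0;
}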
...
@@ -382,147 +412,143 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
	    dependent, init_missing);
}

/*
 * Returns true on lookup failure.
 */
static inline bool
rtree_read_independent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, rtree_contents_t *r_contents) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ false, /* init_missing */ false);
	if (elm == NULL) {
		return true;
	}
	*r_contents = rtree_leaf_elm_read(tsdn, rtree, elm,
	    /* dependent */ false);
	return false;
}

static inline rtree_contents_t
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ true, /* init_missing */ false);
	assert(elm != NULL);
	return rtree_leaf_elm_read(tsdn, rtree, elm, /* dependent */ true);
}

static inline rtree_metadata_t
rtree_metadata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ true, /* init_missing */ false);
	assert(elm != NULL);
	return rtree_leaf_elm_read(tsdn, rtree, elm,
	    /* dependent */ true).metadata;
}

/*
 * Returns true when the request cannot be fulfilled by fastpath.
 */
JEMALLOC_ALWAYS_INLINE bool
rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, rtree_metadata_t *r_rtree_metadata) {
	rtree_leaf_elm_t *elm;
	/*
	 * Should check the bool return value (lookup success or not) instead of
	 * elm == NULL (which will result in an extra branch).  This is because
	 * when the cache lookup succeeds, there will never be a NULL pointer
	 * returned (which is unknown to the compiler).
	 */
	if (rtree_leaf_elm_lookup_fast(tsdn, rtree, rtree_ctx, key, &elm)) {
		return true;
	}
	assert(elm != NULL);
	*r_rtree_metadata = rtree_leaf_elm_read(tsdn, rtree, elm,
	    /* dependent */ true).metadata;
	return false;
}

JEMALLOC_ALWAYS_INLINE void
rtree_write_range_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t base, uintptr_t end, rtree_contents_t contents, bool clearing) {
	assert((base & PAGE_MASK) == 0 && (end & PAGE_MASK) == 0);
	/*
	 * Only used for emap_(de)register_interior, which implies the
	 * boundaries have been registered already.  Therefore all the lookups
	 * are dependent w/o init_missing, assuming the range spans across at
	 * most 2 rtree leaf nodes (each covers 1 GiB of vaddr).
	 */
	void *bits;
	unsigned additional;
	rtree_contents_encode(contents, &bits, &additional);

	rtree_leaf_elm_t *elm = NULL; /* Dead store. */
	for (uintptr_t addr = base; addr <= end; addr += PAGE) {
		if (addr == base ||
		    (addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) {
			elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
			    addr, /* dependent */ true,
			    /* init_missing */ false);
			assert(elm != NULL);
		}
		assert(elm == rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
		    addr, /* dependent */ true, /* init_missing */ false));
		assert(!clearing || rtree_leaf_elm_read(tsdn, rtree, elm,
		    /* dependent */ true).edata != NULL);
		rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
		elm++;
	}
}

JEMALLOC_ALWAYS_INLINE void
rtree_write_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t base, uintptr_t end, rtree_contents_t contents) {
	rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
	    /* clearing */ false);
}

JEMALLOC_ALWAYS_INLINE bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    rtree_contents_t contents) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ false, /* init_missing */ true);
	if (elm == NULL) {
		return true;
	}

	rtree_leaf_elm_write(tsdn, rtree, elm, contents);

	return false;
}

static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ true, /* init_missing */ false);
	assert(elm != NULL);
	assert(rtree_leaf_elm_read(tsdn, rtree, elm,
	    /* dependent */ true).edata != NULL);
	rtree_contents_t contents;
	contents.edata = NULL;
	contents.metadata.szind = SC_NSIZES;
	contents.metadata.slab = false;
	contents.metadata.is_head = false;
	contents.metadata.state = (extent_state_t)0;
	rtree_leaf_elm_write(tsdn, rtree, elm, contents);
}

static inline void
rtree_clear_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t base, uintptr_t end) {
	rtree_contents_t contents;
	contents.edata = NULL;
	contents.metadata.szind = SC_NSIZES;
	contents.metadata.slab = false;
	contents.metadata.is_head = false;
	contents.metadata.state = (extent_state_t)0;
	rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
	    /* clearing */ true);
}

#endif /* JEMALLOC_INTERNAL_RTREE_H */
deps/jemalloc/include/jemalloc/internal/rtree_tsd.h
...
@@ -18,16 +18,28 @@
 * cache misses if made overly large, plus the cost of linear search in the LRU
 * cache.
 */
#define RTREE_CTX_LG_NCACHE 4
#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
#define RTREE_CTX_NCACHE_L2 8

/* Needed for initialization only. */
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
#define RTREE_CTX_CACHE_ELM_INVALID {RTREE_LEAFKEY_INVALID, NULL}

#define RTREE_CTX_INIT_ELM_1 RTREE_CTX_CACHE_ELM_INVALID
#define RTREE_CTX_INIT_ELM_2 RTREE_CTX_INIT_ELM_1, RTREE_CTX_INIT_ELM_1
#define RTREE_CTX_INIT_ELM_4 RTREE_CTX_INIT_ELM_2, RTREE_CTX_INIT_ELM_2
#define RTREE_CTX_INIT_ELM_8 RTREE_CTX_INIT_ELM_4, RTREE_CTX_INIT_ELM_4
#define RTREE_CTX_INIT_ELM_16 RTREE_CTX_INIT_ELM_8, RTREE_CTX_INIT_ELM_8

#define _RTREE_CTX_INIT_ELM_DATA(n) RTREE_CTX_INIT_ELM_##n
#define RTREE_CTX_INIT_ELM_DATA(n) _RTREE_CTX_INIT_ELM_DATA(n)

/*
 * Static initializer (to invalidate the cache entries) is required because the
 * free fastpath may access the rtree cache before a full tsd initialization.
 */
#define RTREE_CTX_INITIALIZER {{RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE)}, \
    {RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE_L2)}}

typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;
...
View file @
b8beda3c
#ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H
#ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H
#define JEMALLOC_INTERNAL_SAFETY_CHECK_H
#define JEMALLOC_INTERNAL_SAFETY_CHECK_H
void
safety_check_fail_sized_dealloc
(
bool
current_dealloc
,
const
void
*
ptr
,
size_t
true_size
,
size_t
input_size
);
void
safety_check_fail
(
const
char
*
format
,
...);
void
safety_check_fail
(
const
char
*
format
,
...);
typedef
void
(
*
safety_check_abort_hook_t
)(
const
char
*
message
);
/* Can set to NULL for a default. */
/* Can set to NULL for a default. */
void
safety_check_set_abort
(
void
(
*
abort_fn
)
())
;
void
safety_check_set_abort
(
safety_check_abort_hook_t
abort_fn
);
JEMALLOC_ALWAYS_INLINE
void
JEMALLOC_ALWAYS_INLINE
void
safety_check_set_redzone
(
void
*
ptr
,
size_t
usize
,
size_t
bumped_usize
)
{
safety_check_set_redzone
(
void
*
ptr
,
size_t
usize
,
size_t
bumped_usize
)
{
...
...
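safety_check_fail_sized_dealloc() above is the failure path taken when a sized deallocation is given a size that does not match the allocation's true size. The minimal standalone sketch below only illustrates that comparison; the function name, message, and driver are made up for illustration and are not jemalloc's implementation.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in: compare the caller-supplied size against the true size. */
static void
check_sized_dealloc(const void *ptr, size_t true_size, size_t input_size) {
	if (true_size != input_size) {
		fprintf(stderr,
		    "invalid sized dealloc of %p: true size %zu, input size %zu\n",
		    ptr, true_size, input_size);
		abort();
	}
}

int main(void) {
	char buf[32];
	check_sized_dealloc(buf, sizeof(buf), 32); /* ok */
	check_sized_dealloc(buf, sizeof(buf), 16); /* mismatch: aborts */
	return 0;
}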
deps/jemalloc/include/jemalloc/internal/san.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_GUARD_H
#define JEMALLOC_INTERNAL_GUARD_H

#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/emap.h"

#define SAN_PAGE_GUARD PAGE
#define SAN_PAGE_GUARDS_SIZE (SAN_PAGE_GUARD * 2)

#define SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT 0
#define SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT 0

#define SAN_LG_UAF_ALIGN_DEFAULT (-1)
#define SAN_CACHE_BIN_NONFAST_MASK_DEFAULT (uintptr_t)(-1)

static const uintptr_t uaf_detect_junk = (uintptr_t)0x5b5b5b5b5b5b5b5bULL;

/* 0 means disabled, i.e. never guarded. */
extern size_t opt_san_guard_large;
extern size_t opt_san_guard_small;
/* -1 means disabled, i.e. never check for use-after-free. */
extern ssize_t opt_lg_san_uaf_align;

void san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    emap_t *emap, bool left, bool right, bool remap);
void san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    emap_t *emap, bool left, bool right);
/*
 * Unguard the extent, but don't modify emap boundaries. Must be called on an
 * extent that has been erased from emap and shouldn't be placed back.
 */
void san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks,
    edata_t *edata, emap_t *emap);
void san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize);

void tsd_san_init(tsd_t *tsd);
void san_init(ssize_t lg_san_uaf_align);

static inline void
san_guard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    emap_t *emap, bool remap) {
	san_guard_pages(tsdn, ehooks, edata, emap, true, true, remap);
}

static inline void
san_unguard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    emap_t *emap) {
	san_unguard_pages(tsdn, ehooks, edata, emap, true, true);
}

static inline size_t
san_two_side_unguarded_sz(size_t size) {
	assert(size % PAGE == 0);
	assert(size >= SAN_PAGE_GUARDS_SIZE);
	return size - SAN_PAGE_GUARDS_SIZE;
}

static inline size_t
san_two_side_guarded_sz(size_t size) {
	assert(size % PAGE == 0);
	return size + SAN_PAGE_GUARDS_SIZE;
}

static inline size_t
san_one_side_unguarded_sz(size_t size) {
	assert(size % PAGE == 0);
	assert(size >= SAN_PAGE_GUARD);
	return size - SAN_PAGE_GUARD;
}

static inline size_t
san_one_side_guarded_sz(size_t size) {
	assert(size % PAGE == 0);
	return size + SAN_PAGE_GUARD;
}

static inline bool
san_guard_enabled(void) {
	return (opt_san_guard_large != 0 || opt_san_guard_small != 0);
}

static inline bool
san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size,
    size_t alignment) {
	if (opt_san_guard_large == 0 || ehooks_guard_will_fail(ehooks) ||
	    tsdn_null(tsdn)) {
		return false;
	}

	tsd_t *tsd = tsdn_tsd(tsdn);
	uint64_t n = tsd_san_extents_until_guard_large_get(tsd);
	assert(n >= 1);
	if (n > 1) {
		/*
		 * Subtract conditionally because the guard may not happen due
		 * to alignment or size restriction below.
		 */
		*tsd_san_extents_until_guard_largep_get(tsd) = n - 1;
	}

	if (n == 1 && (alignment <= PAGE) &&
	    (san_two_side_guarded_sz(size) <= SC_LARGE_MAXCLASS)) {
		*tsd_san_extents_until_guard_largep_get(tsd) =
		    opt_san_guard_large;
		return true;
	} else {
		assert(tsd_san_extents_until_guard_large_get(tsd) >= 1);
		return false;
	}
}

static inline bool
san_slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) {
	if (opt_san_guard_small == 0 || ehooks_guard_will_fail(ehooks) ||
	    tsdn_null(tsdn)) {
		return false;
	}

	tsd_t *tsd = tsdn_tsd(tsdn);
	uint64_t n = tsd_san_extents_until_guard_small_get(tsd);
	assert(n >= 1);
	if (n == 1) {
		*tsd_san_extents_until_guard_smallp_get(tsd) =
		    opt_san_guard_small;
		return true;
	} else {
		*tsd_san_extents_until_guard_smallp_get(tsd) = n - 1;
		assert(tsd_san_extents_until_guard_small_get(tsd) >= 1);
		return false;
	}
}

static inline void
san_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid,
    void **last) {
	size_t ptr_sz = sizeof(void *);

	*first = ptr;

	*mid = (void *)((uintptr_t)ptr + ((usize >> 1) & ~(ptr_sz - 1)));
	assert(*first != *mid || usize == ptr_sz);
	assert((uintptr_t)*first <= (uintptr_t)*mid);

	/*
	 * When usize > 32K, the gap between requested_size and usize might be
	 * greater than 4K -- this means the last write may access an
	 * likely-untouched page (default settings w/ 4K pages).  However by
	 * default the tcache only goes up to the 32K size class, and is usually
	 * tuned lower instead of higher, which makes it less of a concern.
	 */
	*last = (void *)((uintptr_t)ptr + usize - sizeof(uaf_detect_junk));
	assert(*first != *last || usize == ptr_sz);
	assert(*mid != *last || usize <= ptr_sz * 2);
	assert((uintptr_t)*mid <= (uintptr_t)*last);
}

static inline bool
san_junk_ptr_should_slow(void) {
	/*
	 * The latter condition (pointer size greater than the min size class)
	 * is not expected -- fall back to the slow path for simplicity.
	 */
	return config_debug || (LG_SIZEOF_PTR > SC_LG_TINY_MIN);
}

static inline void
san_junk_ptr(void *ptr, size_t usize) {
	if (san_junk_ptr_should_slow()) {
		memset(ptr, (char)uaf_detect_junk, usize);
		return;
	}

	void *first, *mid, *last;
	san_junk_ptr_locations(ptr, usize, &first, &mid, &last);
	*(uintptr_t *)first = uaf_detect_junk;
	*(uintptr_t *)mid = uaf_detect_junk;
	*(uintptr_t *)last = uaf_detect_junk;
}

static inline bool
san_uaf_detection_enabled(void) {
	bool ret = config_uaf_detection && (opt_lg_san_uaf_align != -1);
	if (config_uaf_detection && ret) {
		assert(san_cache_bin_nonfast_mask == ((uintptr_t)1 <<
		    opt_lg_san_uaf_align) - 1);
	}

	return ret;
}

#endif /* JEMALLOC_INTERNAL_GUARD_H */
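san_junk_ptr_locations() above touches only three word-sized spots (the start, a pointer-aligned midpoint, and the last full word) instead of junk-filling the whole region. The standalone sketch below reproduces just that offset arithmetic for a few sample sizes; the 0x5b pattern comes from the header, while the function name and driver are illustrative.

#include <stdint.h>
#include <stdio.h>

static const uintptr_t junk = (uintptr_t)0x5b5b5b5b5b5b5b5bULL;

static void
junk_locations(uintptr_t ptr, size_t usize, uintptr_t *first, uintptr_t *mid,
    uintptr_t *last) {
	size_t ptr_sz = sizeof(void *);
	*first = ptr;
	/* Midpoint of the region, rounded down to pointer alignment. */
	*mid = ptr + ((usize >> 1) & ~(ptr_sz - 1));
	/* Last full word of the region. */
	*last = ptr + usize - sizeof(junk);
}

int main(void) {
	size_t sizes[] = {8, 48, 4096};
	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		uintptr_t first, mid, last;
		junk_locations(0x1000, sizes[i], &first, &mid, &last);
		printf("usize=%4zu -> offsets %zu, %zu, %zu\n", sizes[i],
		    (size_t)(first - 0x1000), (size_t)(mid - 0x1000),
		    (size_t)(last - 0x1000));
	}
	return 0;
}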
deps/jemalloc/include/jemalloc/internal/san_bump.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_SAN_BUMP_H
#define JEMALLOC_INTERNAL_SAN_BUMP_H

#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/mutex.h"

#define SBA_RETAINED_ALLOC_SIZE ((size_t)4 << 20)

extern bool opt_retain;

typedef struct ehooks_s ehooks_t;
typedef struct pac_s pac_t;

typedef struct san_bump_alloc_s san_bump_alloc_t;
struct san_bump_alloc_s {
	malloc_mutex_t mtx;

	edata_t *curr_reg;
};

static inline bool
san_bump_enabled() {
	/*
	 * We enable san_bump allocator only when it's possible to break up a
	 * mapping and unmap a part of it (maps_coalesce). This is needed to
	 * ensure the arena destruction process can destroy all retained guarded
	 * extents one by one and to unmap a trailing part of a retained guarded
	 * region when it's too small to fit a pending allocation.
	 * opt_retain is required, because this allocator retains a large
	 * virtual memory mapping and returns smaller parts of it.
	 */
	return maps_coalesce && opt_retain;
}

static inline bool
san_bump_alloc_init(san_bump_alloc_t *sba) {
	bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator",
	    WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive);
	if (err) {
		return true;
	}
	sba->curr_reg = NULL;

	return false;
}

edata_t *
san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
    ehooks_t *ehooks, size_t size, bool zero);

#endif /* JEMALLOC_INTERNAL_SAN_BUMP_H */
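san_bump_alloc(), declared above, carves guarded extents out of one large retained region rather than mapping each one separately. The toy bump allocator below shows only that carve-out pattern over a plain malloc'd region; the names, the fixed region size, and the absence of locking and remapping are simplifications for illustration, not the real implementation.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define REGION_SIZE ((size_t)1 << 20) /* stand-in for a retained region */

typedef struct {
	unsigned char *base;
	size_t len;
	size_t used;
} bump_t;

static int bump_init(bump_t *b) {
	b->base = malloc(REGION_SIZE);
	b->len = REGION_SIZE;
	b->used = 0;
	return b->base == NULL;
}

/* Hand out the next `size` bytes of the retained region; refuse if full. */
static void *bump_alloc(bump_t *b, size_t size) {
	if (b->used + size > b->len) {
		return NULL; /* the real allocator would map a fresh region */
	}
	void *ret = b->base + b->used;
	b->used += size;
	return ret;
}

int main(void) {
	bump_t b;
	if (bump_init(&b)) {
		return 1;
	}
	void *a = bump_alloc(&b, 16 * 4096);
	void *c = bump_alloc(&b, 16 * 4096);
	printf("a=%p c=%p (adjacent carve-outs of one region)\n", a, c);
	free(b.base);
	return 0;
}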
deps/jemalloc/include/jemalloc/internal/sc.h
...
@@ -197,30 +197,34 @@
	(SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR)

/*
 * The number of size classes that are a multiple of the page size.
 *
 * Here are the first few bases that have a page-sized SC.
 *
 * lg(base)      | base       | highest SC | page-multiple SCs
 * --------------|------------|------------|------------------
 * LG_PAGE - 1   | PAGE / 2   | PAGE       | 1
 * LG_PAGE       | PAGE       | 2 * PAGE   | 1
 * LG_PAGE + 1   | 2 * PAGE   | 4 * PAGE   | 2
 * LG_PAGE + 2   | 4 * PAGE   | 8 * PAGE   | 4
 *
 * The number of page-multiple SCs continues to grow in powers of two, up until
 * lg_delta == lg_page, which corresponds to setting lg_base to lg_page +
 * SC_LG_NGROUP.  So the number of size classes that are multiples of the page
 * size whose lg_delta is less than the page size is
 * 1 + (2**0 + 2**1 + ... + 2**(lg_ngroup - 1)) == 2**lg_ngroup.
 *
 * For each base with lg_base in [lg_page + lg_ngroup, lg_base_max), there are
 * NGROUP page-sized size classes, and when lg_base == lg_base_max, there are
 * NGROUP - 1.
 *
 * This gives us the quantity we seek.
 */
#define SC_NPSIZES (						\
    SC_NGROUP							\
    + (SC_LG_BASE_MAX - (LG_PAGE + SC_LG_NGROUP)) * SC_NGROUP	\
    + SC_NGROUP - 1)

/*
 * We declare a size class is binnable if size < page size * group. Or, in other
...
@@ -242,17 +246,23 @@
#  error "Too many small size classes"
#endif

/* The largest size class in the lookup table, and its binary log. */
#define SC_LG_MAX_LOOKUP 12
#define SC_LOOKUP_MAXCLASS (1 << SC_LG_MAX_LOOKUP)

/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */
#define SC_SMALL_MAX_BASE (1 << (LG_PAGE + SC_LG_NGROUP - 1))
#define SC_SMALL_MAX_DELTA (1 << (LG_PAGE - 1))

/* The largest size class allocated out of a slab. */
#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \
    + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)

/* The fastpath assumes all lookup-able sizes are small. */
#if (SC_SMALL_MAXCLASS < SC_LOOKUP_MAXCLASS)
#  error "Lookup table sizes must be small"
#endif

/* The smallest size class not allocated out of a slab. */
#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP))
#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP)
...
@@ -264,6 +274,19 @@
/* The largest size class supported. */
#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)

/* Maximum number of regions in one slab. */
#ifndef CONFIG_LG_SLAB_MAXREGS
#  define SC_LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
#else
#  if CONFIG_LG_SLAB_MAXREGS < (LG_PAGE - SC_LG_TINY_MIN)
#    error "Unsupported SC_LG_SLAB_MAXREGS"
#  else
#    define SC_LG_SLAB_MAXREGS CONFIG_LG_SLAB_MAXREGS
#  endif
#endif
#define SC_SLAB_MAXREGS (1U << SC_LG_SLAB_MAXREGS)

typedef struct sc_s sc_t;
struct sc_s {
	/* Size class index, or -1 if not a valid size class. */
...
@@ -321,10 +344,11 @@ struct sc_data_s {
	sc_t sc[SC_NSIZES];
};

size_t reg_size_compute(int lg_base, int lg_delta, int ndelta);
void sc_data_init(sc_data_t *data);
/*
 * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
 * Otherwise, does its best to accommodate the request.
 */
void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
    int pgs);
...
deps/jemalloc/include/jemalloc/internal/sec.h
0 → 100644
View file @
b8beda3c
#ifndef JEMALLOC_INTERNAL_SEC_H
#define JEMALLOC_INTERNAL_SEC_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/pai.h"
/*
* Small extent cache.
*
* This includes some utilities to cache small extents. We have a per-pszind
* bin with its own list of extents of that size. We don't try to do any
* coalescing of extents (since it would in general require cross-shard locks or
* knowledge of the underlying PAI implementation).
*/
/*
* For now, this is just one field; eventually, we'll probably want to get more
* fine-grained data out (like per-size class statistics).
*/
typedef struct sec_stats_s sec_stats_t;
struct sec_stats_s {
    /* Sum of bytes_cur across all shards. */
    size_t bytes;
};

static inline void
sec_stats_accum(sec_stats_t *dst, sec_stats_t *src) {
    dst->bytes += src->bytes;
}
/* A collection of free extents, all of the same size. */
typedef struct sec_bin_s sec_bin_t;
struct sec_bin_s {
    /*
     * When we fail to fulfill an allocation, we do a batch-alloc on the
     * underlying allocator to fill extra items, as well. We drop the SEC
     * lock while doing so, to allow operations on other bins to succeed.
     * That introduces the possibility of other threads also trying to
     * allocate out of this bin, failing, and also going to the backing
     * allocator. To avoid a thundering herd problem in which lots of
     * threads do batch allocs and overfill this bin as a result, we only
     * allow one batch allocation at a time for a bin. This bool tracks
     * whether or not some thread is already batch allocating.
     *
     * Eventually, the right answer may be a smarter sharding policy for the
     * bins (e.g. a mutex per bin, which would also be more scalable
     * generally; the batch-allocating thread could hold it while
     * batch-allocating).
     */
    bool being_batch_filled;
    /*
     * Number of bytes in this particular bin (as opposed to the
     * sec_shard_t's bytes_cur). This isn't user visible or reported in
     * stats; rather, it allows us to quickly determine the change in the
     * centralized counter when flushing.
     */
    size_t bytes_cur;
    edata_list_active_t freelist;
};
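The being_batch_filled flag above is easier to see in stripped-down form. The toy below is a self-contained illustration of the idea only (it is not sec.c; the fixed-size array and malloc() fallback are stand-ins): the thread that flips the flag is the only one allowed to drop the lock and batch-refill, so an empty bin does not trigger a thundering herd of refills.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

/* Toy bin; illustrative only, not the jemalloc sec_bin_t. */
typedef struct {
    pthread_mutex_t mtx;
    bool being_batch_filled;
    void *items[64];
    int nitems;
} toy_bin_t;

static void *
toy_bin_alloc(toy_bin_t *bin, size_t size) {
    void *ret;
    pthread_mutex_lock(&bin->mtx);
    if (bin->nitems > 0) {
        ret = bin->items[--bin->nitems];
    } else if (!bin->being_batch_filled) {
        /* This thread becomes the sole batch filler for the bin. */
        bin->being_batch_filled = true;
        pthread_mutex_unlock(&bin->mtx);
        void *batch[8];
        for (int i = 0; i < 8; i++) {
            batch[i] = malloc(size); /* stand-in for the fallback allocator */
        }
        pthread_mutex_lock(&bin->mtx);
        ret = batch[0];
        for (int i = 1; i < 8; i++) {
            if (bin->nitems < 64) {
                bin->items[bin->nitems++] = batch[i];
            } else {
                free(batch[i]);
            }
        }
        bin->being_batch_filled = false;
    } else {
        /* Someone else is already refilling; go straight to the fallback. */
        ret = malloc(size);
    }
    pthread_mutex_unlock(&bin->mtx);
    return ret;
}

int
main(void) {
    toy_bin_t bin = { PTHREAD_MUTEX_INITIALIZER, false, {0}, 0 };
    void *p = toy_bin_alloc(&bin, 32);
    free(p);
    return 0;
}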
typedef struct sec_shard_s sec_shard_t;
struct sec_shard_s {
    /*
     * We don't keep per-bin mutexes, even though that would allow more
     * sharding; this allows global cache-eviction, which in turn allows for
     * better balancing across free lists.
     */
    malloc_mutex_t mtx;
    /*
     * A SEC may need to be shut down (i.e. flushed of its contents and
     * prevented from further caching). To avoid tricky synchronization
     * issues, we just track enabled-status in each shard, guarded by a
     * mutex. In practice, this is only ever checked during brief races,
     * since the arena-level atomic boolean tracking HPA enabled-ness means
     * that we won't go down these pathways very often after custom extent
     * hooks are installed.
     */
    bool enabled;
    sec_bin_t *bins;
    /* Number of bytes in all bins in the shard. */
    size_t bytes_cur;
    /* The next pszind to flush in the flush-some pathways. */
    pszind_t to_flush_next;
};
typedef struct sec_s sec_t;
struct sec_s {
    pai_t pai;
    pai_t *fallback;

    sec_opts_t opts;
    sec_shard_t *shards;
    pszind_t npsizes;
};
bool sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
    const sec_opts_t *opts);
void sec_flush(tsdn_t *tsdn, sec_t *sec);
void sec_disable(tsdn_t *tsdn, sec_t *sec);
/*
 * Morally, these two stats methods probably ought to be a single one (and the
 * mutex_prof_data ought to live in the sec_stats_t). But splitting them apart
 * lets them fit easily into the pa_shard stats framework (which also has this
 * split), which simplifies the stats management.
 */
void sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats);
void sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec,
    mutex_prof_data_t *mutex_prof_data);
/*
* We use the arena lock ordering; these are acquired in phase 2 of forking, but
* should be acquired before the underlying allocator mutexes.
*/
void sec_prefork2(tsdn_t *tsdn, sec_t *sec);
void sec_postfork_parent(tsdn_t *tsdn, sec_t *sec);
void sec_postfork_child(tsdn_t *tsdn, sec_t *sec);
#endif
/* JEMALLOC_INTERNAL_SEC_H */
deps/jemalloc/include/jemalloc/internal/sec_opts.h
0 → 100644
View file @
b8beda3c
#ifndef JEMALLOC_INTERNAL_SEC_OPTS_H
#define JEMALLOC_INTERNAL_SEC_OPTS_H
/*
* The configuration settings used by an sec_t. Morally, this is part of the
* SEC interface, but we put it here for header-ordering reasons.
*/
typedef struct sec_opts_s sec_opts_t;
struct sec_opts_s {
    /*
     * We don't necessarily always use all the shards; requests are
     * distributed across shards [0, nshards - 1).
     */
    size_t nshards;
    /*
     * We'll automatically refuse to cache any objects in this sec if
     * they're larger than max_alloc bytes, instead forwarding such objects
     * directly to the fallback.
     */
    size_t max_alloc;
    /*
     * Exceeding this amount of cached extents in a shard causes us to start
     * flushing bins in that shard until we fall below bytes_after_flush.
     */
    size_t max_bytes;
    /*
     * The number of bytes (in all bins) we flush down to when we exceed
     * bytes_cur. We want this to be less than bytes_cur, because
     * otherwise we could get into situations where a shard undergoing
     * net-deallocation keeps bytes_cur very near to max_bytes, so that
     * most deallocations get immediately forwarded to the underlying PAI
     * implementation, defeating the point of the SEC.
     */
    size_t bytes_after_flush;
    /*
     * When we can't satisfy an allocation out of the SEC because there are
     * no available ones cached, we allocate multiple of that size out of
     * the fallback allocator. Eventually we might want to do something
     * cleverer, but for now we just grab a fixed number.
     */
    size_t batch_fill_extra;
};
#define SEC_OPTS_DEFAULT { \
    /* nshards */ \
    4, \
    /* max_alloc */ \
    (32 * 1024) < PAGE ? PAGE : (32 * 1024), \
    /* max_bytes */ \
    256 * 1024, \
    /* bytes_after_flush */ \
    128 * 1024, \
    /* batch_fill_extra */ \
    0 \
}
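A hypothetical call site (illustration only; jemalloc wires the SEC up internally during shard initialization) would combine these defaults with the sec_init() declaration from sec.h, presumably following the usual jemalloc convention of returning true on error.

/* Hypothetical helper, not part of jemalloc; assumes the internal headers. */
static bool
example_sec_setup(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback) {
    sec_opts_t opts = SEC_OPTS_DEFAULT;
    /* opts.max_alloc, opts.max_bytes, etc. could be tuned here. */
    return sec_init(tsdn, sec, base, fallback, &opts);
}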
#endif
/* JEMALLOC_INTERNAL_SEC_OPTS_H */
deps/jemalloc/include/jemalloc/internal/slab_data.h
0 → 100644
View file @
b8beda3c
#ifndef JEMALLOC_INTERNAL_SLAB_DATA_H
#define JEMALLOC_INTERNAL_SLAB_DATA_H
#include "jemalloc/internal/bitmap.h"
typedef struct slab_data_s slab_data_t;
struct slab_data_s {
    /* Per region allocated/deallocated bitmap. */
    bitmap_t bitmap[BITMAP_GROUPS_MAX];
};
#endif
/* JEMALLOC_INTERNAL_SLAB_DATA_H */
deps/jemalloc/include/jemalloc/internal/stats.h
View file @
b8beda3c
...
@@ -11,7 +11,8 @@
    OPTION('b', bins, true, false) \
    OPTION('l', large, true, false) \
    OPTION('x', mutex, true, false) \
    OPTION('e', extents, true, false) \
    OPTION('h', hpa, config_stats, false)
enum {
#define OPTION(o, v, d, s) stats_print_option_num_##v,
...
@@ -24,8 +25,30 @@ enum {
extern bool opt_stats_print;
extern char opt_stats_print_opts[stats_print_tot_num_options+1];
/* Utilities for stats_interval. */
extern int64_t opt_stats_interval;
extern char opt_stats_interval_opts[stats_print_tot_num_options+1];
#define STATS_INTERVAL_DEFAULT -1
/*
* Batch-increment the counter to reduce synchronization overhead. Each thread
* merges after (interval >> LG_BATCH_SIZE) bytes of allocations; also limit the
* BATCH_MAX for accuracy when the interval is huge (which is expected).
*/
#define STATS_INTERVAL_ACCUM_LG_BATCH_SIZE 6
#define STATS_INTERVAL_ACCUM_BATCH_MAX (4 << 20)
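The batching scheme reads as follows in a self-contained sketch (not the jemalloc implementation; it assumes a positive interval, i.e. stats_interval enabled): each thread accumulates bytes locally and merges into the shared counter only once the local batch reaches min(interval >> LG_BATCH_SIZE, BATCH_MAX).

#include <stdatomic.h>
#include <stdint.h>

#define EX_LG_BATCH_SIZE 6        /* mirrors STATS_INTERVAL_ACCUM_LG_BATCH_SIZE */
#define EX_BATCH_MAX (4 << 20)    /* mirrors STATS_INTERVAL_ACCUM_BATCH_MAX */

static _Atomic uint64_t ex_global_allocated;
static _Thread_local uint64_t ex_local_batch;

/* Sketch only: charge `bytes` of allocation against a positive interval. */
static void
ex_stats_interval_accum(uint64_t bytes, int64_t interval) {
    ex_local_batch += bytes;
    uint64_t limit = (uint64_t)interval >> EX_LG_BATCH_SIZE;
    if (limit > EX_BATCH_MAX) {
        limit = EX_BATCH_MAX; /* cap the batch so huge intervals stay accurate */
    }
    if (ex_local_batch >= limit) {
        atomic_fetch_add_explicit(&ex_global_allocated, ex_local_batch,
            memory_order_relaxed);
        ex_local_batch = 0;
    }
}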
/* Only accessed by thread event. */
uint64_t stats_interval_new_event_wait(tsd_t *tsd);
uint64_t stats_interval_postponed_event_wait(tsd_t *tsd);
void stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed);
/* Implements je_malloc_stats_print. */
void stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts);
bool stats_boot(void);
void stats_prefork(tsdn_t *tsdn);
void stats_postfork_parent(tsdn_t *tsdn);
void stats_postfork_child(tsdn_t *tsdn);

#endif
/* JEMALLOC_INTERNAL_STATS_H */
deps/jemalloc/include/jemalloc/internal/sz.h
View file @
b8beda3c
...
@@ -22,6 +22,12 @@
 * size that would result from such an allocation.
 */
/* Page size index type. */
typedef unsigned pszind_t;

/* Size class index type. */
typedef unsigned szind_t;
/*
 * sz_pind2sz_tab encodes the same information as could be computed by
 * sz_pind2sz_compute().
...
@@ -39,34 +45,62 @@ extern size_t sz_index2size_tab[SC_NSIZES];
 */
extern uint8_t sz_size2index_tab[];
/*
 * Padding for large allocations: PAGE when opt_cache_oblivious == true (to
 * enable cache index randomization); 0 otherwise.
 */
extern size_t sz_large_pad;

extern void sz_boot(const sc_data_t *sc_data, bool cache_oblivious);
JEMALLOC_ALWAYS_INLINE pszind_t
sz_psz2ind(size_t psz) {
    assert(psz > 0);
    if (unlikely(psz > SC_LARGE_MAXCLASS)) {
        return SC_NPSIZES;
    }
    /* x is the lg of the first base >= psz. */
    pszind_t x = lg_ceil(psz);
    /*
     * sc.h introduces a lot of size classes. These size classes are divided
     * into different size class groups. There is a very special size class
     * group, each size class in or after it is an integer multiple of PAGE.
     * We call it first_ps_rg. It means first page size regular group. The
     * range of first_ps_rg is (base, base * 2], and base == PAGE *
     * SC_NGROUP. off_to_first_ps_rg begins from 1, instead of 0. e.g.
     * off_to_first_ps_rg is 1 when psz is (PAGE * SC_NGROUP + 1).
     */
    pszind_t off_to_first_ps_rg = (x < SC_LG_NGROUP + LG_PAGE) ?
        0 : x - (SC_LG_NGROUP + LG_PAGE);

    /*
     * Same as sc_s::lg_delta.
     * Delta for off_to_first_ps_rg == 1 is PAGE,
     * for each increase in offset, it's multiplied by two.
     * Therefore, lg_delta = LG_PAGE + (off_to_first_ps_rg - 1).
     */
    pszind_t lg_delta = (off_to_first_ps_rg == 0) ?
        LG_PAGE : LG_PAGE + (off_to_first_ps_rg - 1);

    /*
     * Let's write psz in binary, e.g. 0011 for 0x3, 0111 for 0x7.
     * The leftmost bits whose len is lg_base decide the base of psz.
     * The rightmost bits whose len is lg_delta decide (psz % PAGE).
     * The middle bits whose len is SC_LG_NGROUP decide ndelta.
     * ndelta is offset to the first size class in the size class group,
     * starts from 1.
     * If you don't know lg_base, ndelta or lg_delta, see sc.h.
     * |xxxxxxxxxxxxxxxxxxxx|------------------------|yyyyyyyyyyyyyyyyyyyyy|
     * |<-- len: lg_base -->|<-- len: SC_LG_NGROUP-->|<-- len: lg_delta -->|
     *                      |<--      ndelta      -->|
     * rg_inner_off = ndelta - 1
     * Why use (psz - 1)?
     * To handle case: psz % (1 << lg_delta) == 0.
     */
    pszind_t rg_inner_off = (((psz - 1)) >> lg_delta) & (SC_NGROUP - 1);

    pszind_t base_ind = off_to_first_ps_rg << SC_LG_NGROUP;
    pszind_t ind = base_ind + rg_inner_off;
    return ind;
}
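A concrete walkthrough helps here. The standalone program below is not jemalloc code; it re-implements the same index computation with assumed parameters LG_PAGE == 12 and SC_LG_NGROUP == 2 (4 KiB pages, four classes per doubling), so the page-size classes run 4K, 8K, 12K, 16K, 20K, ...

#include <assert.h>
#include <stddef.h>

#define EX_LG_PAGE 12
#define EX_LG_NGROUP 2

static int
ex_lg_ceil(size_t x) {
    int lg = 0;
    while (((size_t)1 << lg) < x) {
        lg++;
    }
    return lg;
}

/* Mirrors the computation above with the assumed parameters. */
static int
ex_psz2ind(size_t psz) {
    int x = ex_lg_ceil(psz);
    int off = (x < EX_LG_NGROUP + EX_LG_PAGE) ? 0 : x - (EX_LG_NGROUP + EX_LG_PAGE);
    int lg_delta = (off == 0) ? EX_LG_PAGE : EX_LG_PAGE + (off - 1);
    int inner = (int)(((psz - 1) >> lg_delta) & ((1 << EX_LG_NGROUP) - 1));
    return (off << EX_LG_NGROUP) + inner;
}

int
main(void) {
    assert(ex_psz2ind(4096) == 0);      /* exactly one page */
    assert(ex_psz2ind(4097) == 1);      /* rounds up to the 8K class */
    /* 5 pages: lg_ceil == 15, off == 1, lg_delta == 12, inner == 0 -> index 4. */
    assert(ex_psz2ind(5 * 4096) == 4);
    return 0;
}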
...
@@ -152,10 +186,15 @@ sz_size2index_compute(size_t size) {
}

JEMALLOC_ALWAYS_INLINE szind_t
sz_size2index_lookup_impl(size_t size) {
    assert(size <= SC_LOOKUP_MAXCLASS);
    return sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1)
        >> SC_LG_TINY_MIN];
}

JEMALLOC_ALWAYS_INLINE szind_t
sz_size2index_lookup(size_t size) {
    szind_t ret = sz_size2index_lookup_impl(size);
    assert(ret == sz_size2index_compute(size));
    return ret;
}
...
@@ -194,9 +233,14 @@ sz_index2size_compute(szind_t index) {
    }
}
JEMALLOC_ALWAYS_INLINE size_t
sz_index2size_lookup_impl(szind_t index) {
    return sz_index2size_tab[index];
}

JEMALLOC_ALWAYS_INLINE size_t
sz_index2size_lookup(szind_t index) {
    size_t ret = sz_index2size_lookup_impl(index);
    assert(ret == sz_index2size_compute(index));
    return ret;
}
...
@@ -207,6 +251,12 @@ sz_index2size(szind_t index) {
    return sz_index2size_lookup(index);
}
JEMALLOC_ALWAYS_INLINE void
sz_size2index_usize_fastpath(size_t size, szind_t *ind, size_t *usize) {
    *ind = sz_size2index_lookup_impl(size);
    *usize = sz_index2size_lookup_impl(*ind);
}
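The fastpath above is just two table lookups: one maps a request size to an index at TINY_MIN granularity, the other maps the index back to the usable size. The self-contained toy below is not jemalloc's real tables; it uses an assumed 8-byte granule and made-up classes (8, 16, 32, 48, 64) purely to show the rounding-shift-then-lookup pattern.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define EX_LG_TINY_MIN 3 /* assumed 8-byte granule */

/* Toy classes: 8, 16, 32, 48, 64 bytes. */
static const size_t ex_index2size[] = {8, 16, 32, 48, 64};
/* One entry per granule up to 64 bytes: granule 1..8 -> class index. */
static const uint8_t ex_size2index[] = {0, 0, 1, 2, 2, 3, 3, 4, 4};

static void
ex_size2index_usize_fastpath(size_t size, unsigned *ind, size_t *usize) {
    size_t granule = (size + (1u << EX_LG_TINY_MIN) - 1) >> EX_LG_TINY_MIN;
    *ind = ex_size2index[granule];
    *usize = ex_index2size[*ind];
}

int
main(void) {
    unsigned ind;
    size_t usize;
    ex_size2index_usize_fastpath(24, &ind, &usize);
    assert(ind == 2 && usize == 32);
    ex_size2index_usize_fastpath(33, &ind, &usize);
    assert(ind == 3 && usize == 48);
    return 0;
}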
JEMALLOC_ALWAYS_INLINE size_t
sz_s2u_compute(size_t size) {
    if (unlikely(size > SC_LARGE_MAXCLASS)) {
...
@@ -266,7 +316,7 @@ sz_sa2u(size_t size, size_t alignment) {
    assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

    /* Try for a small size class. */
    if (size <= SC_SMALL_MAXCLASS && alignment <= PAGE) {
        /*
         * Round size up to the nearest multiple of alignment.
         *
...
@@ -315,4 +365,7 @@ sz_sa2u(size_t size, size_t alignment) {
    return usize;
}
size_t sz_psz_quantize_floor(size_t size);
size_t sz_psz_quantize_ceil(size_t size);

#endif
/* JEMALLOC_INTERNAL_SIZE_H */
deps/jemalloc/include/jemalloc/internal/tcache_externs.h
View file @
b8beda3c
#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
extern bool opt_tcache;
extern size_t opt_tcache_max;
extern ssize_t opt_lg_tcache_nslots_mul;
extern unsigned opt_tcache_nslots_small_min;
extern unsigned opt_tcache_nslots_small_max;
extern unsigned opt_tcache_nslots_large;
extern ssize_t opt_lg_tcache_shift;
extern size_t opt_tcache_gc_incr_bytes;
extern size_t opt_tcache_gc_delay_bytes;
extern unsigned opt_lg_tcache_flush_small_div;
extern unsigned opt_lg_tcache_flush_large_div;
/*
 * Number of tcache bins. There are SC_NBINS small-object bins, plus 0 or more
...
@@ -15,6 +22,8 @@ extern unsigned nhbins;
/* Maximum cached size class. */
extern size_t tcache_maxclass;

extern cache_bin_info_t *tcache_bin_info;

/*
 * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
 * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
...
@@ -25,24 +34,27 @@ extern size_t tcache_maxclass;
 */
extern tcaches_t *tcaches;
size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
    szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
    szind_t binind, unsigned rem);
void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *bin,
    szind_t binind, bool is_small);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
    tcache_t *tcache, arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);
void tcache_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn, base_t *base);
void tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
    tcache_t *tcache, arena_t *arena);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);
...
@@ -50,4 +62,14 @@ void tcache_flush(tsd_t *tsd);
bool tsd_tcache_data_init(tsd_t *tsd);
bool tsd_tcache_enabled_data_init(tsd_t *tsd);

void tcache_assert_initialized(tcache_t *tcache);

/* Only accessed by thread event. */
uint64_t tcache_gc_new_event_wait(tsd_t *tsd);
uint64_t tcache_gc_postponed_event_wait(tsd_t *tsd);
void tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed);
uint64_t tcache_gc_dalloc_new_event_wait(tsd_t *tsd);
uint64_t tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd);
void tcache_gc_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);

#endif
/* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/tcache_inlines.h
View file @
b8beda3c
...
@@ -3,9 +3,9 @@
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/util.h"
static inline bool
...
@@ -27,28 +27,29 @@ tcache_enabled_set(tsd_t *tsd, bool enabled) {
    tsd_slow_update(tsd);
}
JEMALLOC_ALWAYS_INLINE bool
tcache_small_bin_disabled(szind_t ind, cache_bin_t *bin) {
    assert(ind < SC_NBINS);
    bool ret = (cache_bin_info_ncached_max(&tcache_bin_info[ind]) == 0);
    if (ret && bin != NULL) {
        /* small size class but cache bin disabled. */
        assert(ind >= nhbins);
        assert((uintptr_t)(*bin->stack_head) ==
            cache_bin_preceding_junk);
    }

    return ret;
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t binind, bool zero, bool slow_path) {
    void *ret;
    bool tcache_success;

    assert(binind < SC_NBINS);
    cache_bin_t *bin = &tcache->bins[binind];
    ret = cache_bin_alloc(bin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        bool tcache_hard_success;
...
@@ -56,6 +57,13 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
        if (unlikely(arena == NULL)) {
            return NULL;
        }
        if (unlikely(tcache_small_bin_disabled(binind, bin))) {
            /* stats and zero are handled directly by the arena. */
            return arena_malloc_hard(tsd_tsdn(tsd), arena, size,
                binind, zero);
        }
        tcache_bin_flush_stashed(tsd, tcache, bin, binind,
            /* is_small */ true);

        ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
            bin, binind, &tcache_hard_success);
...
@@ -65,38 +73,14 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    }

    assert(ret);
    if (unlikely(zero)) {
        size_t usize = sz_index2size(binind);
        assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
        memset(ret, 0, usize);
    }
    if (config_stats) {
        bin->tstats.nrequests++;
    }
    return ret;
}
...
@@ -104,12 +88,11 @@ JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
    void *ret;
    bool tcache_success;

    assert(binind >= SC_NBINS && binind < nhbins);
    cache_bin_t *bin = &tcache->bins[binind];
    ret = cache_bin_alloc(bin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        /*
...
@@ -120,96 +103,79 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
        if (unlikely(arena == NULL)) {
            return NULL;
        }
        tcache_bin_flush_stashed(tsd, tcache, bin, binind,
            /* is_small */ false);

        ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
        if (ret == NULL) {
            return NULL;
        }
    } else {
        if (unlikely(zero)) {
            size_t usize = sz_index2size(binind);
            assert(usize <= tcache_maxclass);
            memset(ret, 0, usize);
        }

        if (config_stats) {
            bin->tstats.nrequests++;
        }
    }

    return ret;
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
    assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS);

    cache_bin_t *bin = &tcache->bins[binind];
    /*
     * Not marking the branch unlikely because this is past free_fastpath()
     * (which handles the most common cases), i.e. at this point it's often
     * uncommon cases.
     */
    if (cache_bin_nonfast_aligned(ptr)) {
        /* Junk unconditionally, even if bin is full. */
        san_junk_ptr(ptr, sz_index2size(binind));
        if (cache_bin_stash(bin, ptr)) {
            return;
        }
        assert(cache_bin_full(bin));
        /* Bin full; fall through into the flush branch. */
    }

    if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
        if (unlikely(tcache_small_bin_disabled(binind, bin))) {
            arena_dalloc_small(tsd_tsdn(tsd), ptr);
            return;
        }
        cache_bin_sz_t max = cache_bin_info_ncached_max(
            &tcache_bin_info[binind]);
        unsigned remain = max >> opt_lg_tcache_flush_small_div;
        tcache_bin_flush_small(tsd, tcache, bin, binind, remain);
        bool ret = cache_bin_dalloc_easy(bin, ptr);
        assert(ret);
    }
}
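The flush amount is a simple shift: `remain` is how many objects stay cached after a full bin is flushed. A quick arithmetic sketch, with the capacity and option value below being assumed illustration figures rather than jemalloc defaults:

#include <assert.h>

int
main(void) {
    unsigned ncached_max = 64;       /* assumed bin capacity */
    unsigned lg_flush_small_div = 1; /* assumed opt_lg_tcache_flush_small_div */
    /* Flush down to half the bin; the other half stays hot for future frees. */
    unsigned remain = ncached_max >> lg_flush_small_div;
    assert(remain == 32);
    return 0;
}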
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
    assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS);
    assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

    cache_bin_t *bin = &tcache->bins[binind];
    if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
        unsigned remain = cache_bin_info_ncached_max(
            &tcache_bin_info[binind]) >> opt_lg_tcache_flush_large_div;
        tcache_bin_flush_large(tsd, tcache, bin, binind, remain);
        bool ret = cache_bin_dalloc_easy(bin, ptr);
        assert(ret);
    }
}
JEMALLOC_ALWAYS_INLINE tcache_t *
...