ruanhaishen / redis · Commits · 4d5911b4

Commit 4d5911b4, authored Oct 10, 2021 by Yoav Steinberg
Merge commit '220a0f08' as 'deps/jemalloc'
Parents: 4a884343, 220a0f08
Changes: 163 files shown (163+ changed in total; the listing is truncated to preserve performance)
deps/jemalloc/include/jemalloc/internal/pages.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
/* Page size. LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the page base address for the page containing address a. */
#define PAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~PAGE_MASK))
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
/* Huge page size. LG_HUGEPAGE is determined by the configure script. */
#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
/* Return the huge page base address for the huge page containing address a. */
#define HUGEPAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
/* Return the smallest pagesize multiple that is >= s. */
#define HUGEPAGE_CEILING(s) \
(((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
# define PAGES_CAN_PURGE_LAZY
#endif
/*
* PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
*
* The only supported way to hard-purge on Windows is to decommit and then
* re-commit, but doing so is racy, and if re-commit fails it's a pain to
* propagate the "poisoned" memory state. Since we typically decommit as the
* next step after purging on Windows anyway, there's no point in adding such
* complexity.
*/
#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
defined(JEMALLOC_MAPS_COALESCE))
# define PAGES_CAN_PURGE_FORCED
#endif
static const bool pages_can_purge_lazy =
#ifdef PAGES_CAN_PURGE_LAZY
    true
#else
    false
#endif
    ;
static const bool pages_can_purge_forced =
#ifdef PAGES_CAN_PURGE_FORCED
    true
#else
    false
#endif
    ;

typedef enum {
	thp_mode_default       = 0, /* Do not change hugepage settings. */
	thp_mode_always        = 1, /* Always set MADV_HUGEPAGE. */
	thp_mode_never         = 2, /* Always set MADV_NOHUGEPAGE. */

	thp_mode_names_limit   = 3, /* Used for option processing. */
	thp_mode_not_supported = 3  /* No THP support detected. */
} thp_mode_t;

#define THP_MODE_DEFAULT thp_mode_default
extern thp_mode_t opt_thp;
extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
extern const char *thp_mode_names[];
void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
void pages_unmap(void *addr, size_t size);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge_lazy(void *addr, size_t size);
bool pages_purge_forced(void *addr, size_t size);
bool pages_huge(void *addr, size_t size);
bool pages_nohuge(void *addr, size_t size);
bool pages_dontdump(void *addr, size_t size);
bool pages_dodump(void *addr, size_t size);
bool pages_boot(void);
void pages_set_thp_state(void *ptr, size_t size);

#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
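The PAGE_CEILING and PAGE_ADDR2BASE macros above are plain mask arithmetic on a power-of-two page size. A minimal standalone sketch (illustrative only, not part of this diff), assuming a hypothetical LG_PAGE of 12, i.e. 4 KiB pages; in the real build LG_PAGE is chosen by the configure script:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in; the macros themselves are copied from pages.h above. */
#define LG_PAGE 12
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
#define PAGE_ADDR2BASE(a) ((void *)((uintptr_t)(a) & ~PAGE_MASK))
#define PAGE_CEILING(s) (((s) + PAGE_MASK) & ~PAGE_MASK)

int main(void) {
	/* 5000 bytes rounds up to the next 4096-byte multiple: 8192. */
	assert(PAGE_CEILING(5000) == 8192);
	/* An exact multiple is left unchanged. */
	assert(PAGE_CEILING(8192) == 8192);
	/* Address 0x12345 lies in the page whose base is 0x12000. */
	assert(PAGE_ADDR2BASE((void *)0x12345) == (void *)0x12000);
	return 0;
}

The same add-mask/and-not-mask trick is reused for HUGEPAGE_CEILING above and for QUANTUM_CEILING in quantum.h further down.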
deps/jemalloc/include/jemalloc/internal/ph.h (new file, mode 100644)
/*
* A Pairing Heap implementation.
*
* "The Pairing Heap: A New Form of Self-Adjusting Heap"
* https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
*
* With auxiliary twopass list, described in a follow on paper.
*
* "Pairing Heaps: Experiments and Analysis"
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
*
*******************************************************************************
*/
#ifndef PH_H_
#define PH_H_
/* Node structure. */
#define phn(a_type) \
struct { \
a_type *phn_prev; \
a_type *phn_next; \
a_type *phn_lchild; \
}
/* Root structure. */
#define ph(a_type) \
struct { \
a_type *ph_root; \
}
/* Internal utility macros. */
#define phn_lchild_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_lchild)
#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
a_phn->a_field.phn_lchild = a_lchild; \
} while (0)
#define phn_next_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_next)
#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
a_phn->a_field.phn_prev = a_prev; \
} while (0)
#define phn_prev_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_prev)
#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
a_phn->a_field.phn_next = a_next; \
} while (0)
#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
a_type *phn0child; \
\
assert(a_phn0 != NULL); \
assert(a_phn1 != NULL); \
assert(a_cmp(a_phn0, a_phn1) <= 0); \
\
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
phn_next_set(a_type, a_field, a_phn1, phn0child); \
if (phn0child != NULL) { \
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
} \
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)
#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
if (a_phn0 == NULL) { \
r_phn = a_phn1; \
} else if (a_phn1 == NULL) { \
r_phn = a_phn0; \
} else if (a_cmp(a_phn0, a_phn1) < 0) { \
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
a_cmp); \
r_phn = a_phn0; \
} else { \
phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
a_cmp); \
r_phn = a_phn1; \
} \
} while (0)
#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *head = NULL; \
a_type *tail = NULL; \
a_type *phn0 = a_phn; \
a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
\
/* \
* Multipass merge, wherein the first two elements of a FIFO \
* are repeatedly merged, and each result is appended to the \
* singly linked FIFO, until the FIFO contains only a single \
* element. We start with a sibling list but no reference to \
* its tail, so we do a single pass over the sibling list to \
* populate the FIFO. \
	 */ \
if (phn1 != NULL) { \
a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, NULL); \
phn_next_set(a_type, a_field, phn0, NULL); \
phn_prev_set(a_type, a_field, phn1, NULL); \
phn_next_set(a_type, a_field, phn1, NULL); \
phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
head = tail = phn0; \
phn0 = phnrest; \
while (phn0 != NULL) { \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
phnrest = phn_next_get(a_type, a_field, \
phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, \
phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, \
NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
phn_prev_set(a_type, a_field, phn1, \
NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = phnrest; \
} else { \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = NULL; \
} \
} \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
while (true) { \
head = phn_next_get(a_type, a_field, \
phn1); \
assert(phn_prev_get(a_type, a_field, \
phn0) == NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
assert(phn_prev_get(a_type, a_field, \
phn1) == NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
if (head == NULL) { \
break; \
} \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, \
phn0); \
} \
} \
} \
r_phn = phn0; \
} while (0)
#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
if (phn != NULL) { \
phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_prev_set(a_type, a_field, phn, NULL); \
ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
assert(phn_next_get(a_type, a_field, phn) == NULL); \
phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
a_ph->ph_root); \
} \
} while (0)
#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
if (lchild == NULL) { \
r_phn = NULL; \
} else { \
ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
r_phn); \
} \
} while (0)
/*
* The ph_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to ph_gen().
*/
#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
a_attr void a_prefix##new(a_ph_type *ph); \
a_attr bool a_prefix##empty(a_ph_type *ph); \
a_attr a_type *a_prefix##first(a_ph_type *ph); \
a_attr a_type *a_prefix##any(a_ph_type *ph); \
a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \
a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
/*
* The ph_gen() macro generates a type-specific pairing heap implementation,
* based on the above cpp macros.
*/
#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_ph_type *ph) { \
memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
a_prefix##empty(a_ph_type *ph) { \
return (ph->ph_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_ph_type *ph) { \
if (ph->ph_root == NULL) { \
return NULL; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
return ph->ph_root; \
} \
a_attr a_type * \
a_prefix##any(a_ph_type *ph) { \
if (ph->ph_root == NULL) { \
return NULL; \
} \
a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \
if (aux != NULL) { \
return aux; \
} \
return ph->ph_root; \
} \
a_attr void \
a_prefix##insert(a_ph_type *ph, a_type *phn) { \
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
/* \
* Treat the root as an aux list during insertion, and lazily \
* merge during a_prefix##remove_first(). For elements that \
* are inserted, then removed via a_prefix##remove() before the \
* aux list is ever processed, this makes insert/remove \
* constant-time, whereas eager merging would make insert \
* O(log n). \
	 */ \
if (ph->ph_root == NULL) { \
ph->ph_root = phn; \
} else { \
phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
a_field, ph->ph_root)); \
if (phn_next_get(a_type, a_field, ph->ph_root) != \
NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, ph->ph_root), \
phn); \
} \
phn_prev_set(a_type, a_field, phn, ph->ph_root); \
phn_next_set(a_type, a_field, ph->ph_root, phn); \
} \
} \
a_attr a_type * \
a_prefix##remove_first(a_ph_type *ph) { \
a_type *ret; \
\
if (ph->ph_root == NULL) { \
return NULL; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
\
ret = ph->ph_root; \
\
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
\
return ret; \
} \
a_attr a_type * \
a_prefix##remove_any(a_ph_type *ph) { \
/* \
* Remove the most recently inserted aux list element, or the \
* root if the aux list is empty. This has the effect of \
* behaving as a LIFO (and insertion/removal is therefore \
* constant-time) if a_prefix##[remove_]first() are never \
* called. \
	 */ \
if (ph->ph_root == NULL) { \
return NULL; \
} \
a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \
if (ret != NULL) { \
a_type *aux = phn_next_get(a_type, a_field, ret); \
phn_next_set(a_type, a_field, ph->ph_root, aux); \
if (aux != NULL) { \
phn_prev_set(a_type, a_field, aux, \
ph->ph_root); \
} \
return ret; \
} \
ret = ph->ph_root; \
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
return ret; \
} \
a_attr void \
a_prefix##remove(a_ph_type *ph, a_type *phn) { \
a_type *replace, *parent; \
\
if (ph->ph_root == phn) { \
/* \
* We can delete from aux list without merging it, but \
* we need to merge if we are dealing with the root \
* node and it has children. \
		 */ \
if (phn_lchild_get(a_type, a_field, phn) == NULL) { \
ph->ph_root = phn_next_get(a_type, a_field, \
phn); \
if (ph->ph_root != NULL) { \
phn_prev_set(a_type, a_field, \
ph->ph_root, NULL); \
} \
return; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
if (ph->ph_root == phn) { \
ph_merge_children(a_type, a_field, ph->ph_root, \
a_cmp, ph->ph_root); \
return; \
} \
} \
\
	/* Get parent (if phn is leftmost child) before mutating. */ \
if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
if (phn_lchild_get(a_type, a_field, parent) != phn) { \
parent = NULL; \
} \
} \
	/* Find a possible replacement node, and link to parent. */ \
ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
	/* Set next/prev for sibling linked list. */ \
if (replace != NULL) { \
if (parent != NULL) { \
phn_prev_set(a_type, a_field, replace, parent); \
phn_lchild_set(a_type, a_field, parent, \
replace); \
} else { \
phn_prev_set(a_type, a_field, replace, \
phn_prev_get(a_type, a_field, phn)); \
if (phn_prev_get(a_type, a_field, phn) != \
NULL) { \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
replace); \
} \
} \
phn_next_set(a_type, a_field, replace, \
phn_next_get(a_type, a_field, phn)); \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
replace); \
} \
} else { \
if (parent != NULL) { \
a_type *next = phn_next_get(a_type, a_field, \
phn); \
phn_lchild_set(a_type, a_field, parent, next); \
if (next != NULL) { \
phn_prev_set(a_type, a_field, next, \
parent); \
} \
} else { \
assert(phn_prev_get(a_type, a_field, phn) != \
NULL); \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
phn_next_get(a_type, a_field, phn)); \
} \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
phn_prev_get(a_type, a_field, phn)); \
} \
} \
}
#endif
/* PH_H_ */
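The ph_proto()/ph_gen() pair above is consumed by embedding phn(...) linkage in a node type and instantiating the heap functions with a comparison callback. A minimal usage sketch (illustrative only, not part of this diff; node_t, node_cmp and the heap_ prefix are hypothetical, and the internal ph.h is assumed to be reachable on the include path):

#include <assert.h>
#include <string.h>
#include "ph.h"	/* the header introduced above */

typedef struct node_s node_t;
struct node_s {
	int key;
	phn(node_t) link;	/* Pairing-heap linkage. */
};

/* Three-way comparison on key; smaller keys sort first (min-heap). */
static int
node_cmp(const node_t *a, const node_t *b) {
	return (a->key > b->key) - (a->key < b->key);
}

typedef ph(node_t) heap_t;

/* Generates heap_new(), heap_insert(), heap_remove_first(), etc. */
ph_gen(static inline, heap_, heap_t, node_t, link, node_cmp)

int main(void) {
	heap_t heap;
	node_t a = {3}, b = {1}, c = {2};

	heap_new(&heap);
	heap_insert(&heap, &a);
	heap_insert(&heap, &b);
	heap_insert(&heap, &c);
	/* Lazy aux-list merging happens inside remove_first. */
	assert(heap_remove_first(&heap)->key == 1);
	return 0;
}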
deps/jemalloc/include/jemalloc/internal/private_namespace.sh (new file, mode 100755)
#!/bin/sh
for symbol in `cat "$@"` ; do
  echo "#define ${symbol} JEMALLOC_N(${symbol})"
done
deps/jemalloc/include/jemalloc/internal/private_symbols.sh (new file, mode 100755)
#!/bin/sh
#
# Generate private_symbols[_jet].awk.
#
# Usage: private_symbols.sh <sym_prefix> <sym>*
#
# <sym_prefix> is typically "" or "_".
sym_prefix=$1
shift

cat <<EOF
#!/usr/bin/env awk -f

BEGIN {
  sym_prefix = "${sym_prefix}"
  split("\\
EOF

for public_sym in "$@" ; do
  cat <<EOF
${sym_prefix}${public_sym} \\
EOF
done

cat <<"EOF"
", exported_symbol_names)
  # Store exported symbol names as keys in exported_symbols.
  for (i in exported_symbol_names) {
    exported_symbols[exported_symbol_names[i]] = 1
  }
}

# Process 'nm -a <c_source.o>' output.
#
# Handle lines like:
#   0000000000000008 D opt_junk
#   0000000000007574 T malloc_initialized
(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~
    /^[A-Za-z0-9_]+$/) {
  print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
}

# Process 'dumpbin /SYMBOLS <c_source.obj>' output.
#
# Handle lines like:
#   353 00008098 SECT4  notype       External     | opt_junk
#   3F1 00000000 SECT7  notype ()    External     | malloc_initialized
($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
  print $NF
}
EOF
deps/jemalloc/include/jemalloc/internal/prng.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_PRNG_H
#define JEMALLOC_INTERNAL_PRNG_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
/*
* Simple linear congruential pseudo-random number generator:
*
* prng(y) = (a*x + c) % m
*
* where the following constants ensure maximal period:
*
* a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
* c == Odd number (relatively prime to 2^n).
* m == 2^32
*
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
* proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*/
/******************************************************************************/
/* INTERNAL DEFINITIONS -- IGNORE */
/******************************************************************************/
#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)
#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)
JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state) {
	return (state * PRNG_A_32) + PRNG_C_32;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state) {
	return (state * PRNG_A_64) + PRNG_C_64;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state) {
#if LG_SIZEOF_PTR == 2
	return (state * PRNG_A_32) + PRNG_C_32;
#elif LG_SIZEOF_PTR == 3
	return (state * PRNG_A_64) + PRNG_C_64;
#else
#error Unsupported pointer size
#endif
}
/******************************************************************************/
/* BEGIN PUBLIC API */
/******************************************************************************/
/*
* The prng_lg_range functions give a uniform int in the half-open range [0,
* 2**lg_range). If atomic is true, they do so safely from multiple threads.
* Multithreaded 64-bit prngs aren't supported.
*/
JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
	uint32_t ret, state0, state1;

	assert(lg_range > 0);
	assert(lg_range <= 32);

	state0 = atomic_load_u32(state, ATOMIC_RELAXED);

	if (atomic) {
		do {
			state1 = prng_state_next_u32(state0);
		} while (!atomic_compare_exchange_weak_u32(state, &state0,
		    state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
	} else {
		state1 = prng_state_next_u32(state0);
		atomic_store_u32(state, state1, ATOMIC_RELAXED);
	}
	ret = state1 >> (32 - lg_range);

	return ret;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
	uint64_t ret, state1;

	assert(lg_range > 0);
	assert(lg_range <= 64);

	state1 = prng_state_next_u64(*state);
	*state = state1;
	ret = state1 >> (64 - lg_range);

	return ret;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
	size_t ret, state0, state1;

	assert(lg_range > 0);
	assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));

	state0 = atomic_load_zu(state, ATOMIC_RELAXED);

	if (atomic) {
		do {
			state1 = prng_state_next_zu(state0);
		} while (atomic_compare_exchange_weak_zu(state, &state0,
		    state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
	} else {
		state1 = prng_state_next_zu(state0);
		atomic_store_zu(state, state1, ATOMIC_RELAXED);
	}
	ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);

	return ret;
}
/*
* The prng_range functions behave like the prng_lg_range, but return a result
* in [0, range) instead of [0, 2**lg_range).
*/
JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
	uint32_t ret;
	unsigned lg_range;

	assert(range > 1);

	/* Compute the ceiling of lg(range). */
	lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;

	/* Generate a result in [0..range) via repeated trial. */
	do {
		ret = prng_lg_range_u32(state, lg_range, atomic);
	} while (ret >= range);

	return ret;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range) {
	uint64_t ret;
	unsigned lg_range;

	assert(range > 1);

	/* Compute the ceiling of lg(range). */
	lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

	/* Generate a result in [0..range) via repeated trial. */
	do {
		ret = prng_lg_range_u64(state, lg_range);
	} while (ret >= range);

	return ret;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
	size_t ret;
	unsigned lg_range;

	assert(range > 1);

	/* Compute the ceiling of lg(range). */
	lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

	/* Generate a result in [0..range) via repeated trial. */
	do {
		ret = prng_lg_range_zu(state, lg_range, atomic);
	} while (ret >= range);

	return ret;
}

#endif /* JEMALLOC_INTERNAL_PRNG_H */
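For the non-atomic 64-bit path above, the generator is just the LCG step followed by taking the upper bits, with rejection sampling for bounds that are not a power of two. A self-contained sketch (illustrative only, not part of this diff) that copies PRNG_A_64/PRNG_C_64 from this header instead of pulling in the jemalloc-internal atomics:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Constants copied from prng.h above. */
#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)

/* Same recurrence as prng_state_next_u64(). */
static uint64_t
state_next(uint64_t state) {
	return (state * PRNG_A_64) + PRNG_C_64;
}

/* Upper lg_range bits of the advanced state, as in prng_lg_range_u64(). */
static uint64_t
lg_range_draw(uint64_t *state, unsigned lg_range) {
	assert(lg_range > 0 && lg_range <= 64);
	*state = state_next(*state);
	return *state >> (64 - lg_range);
}

int main(void) {
	uint64_t state = 42;	/* Arbitrary seed. */
	uint64_t range = 10, ret;
	unsigned lg_range = 4;	/* Smallest lg such that 2^lg >= 10. */

	/* Rejection loop, mirroring prng_range_u64(). */
	do {
		ret = lg_range_draw(&state, lg_range);
	} while (ret >= range);
	printf("draw in [0, %llu): %llu\n",
	    (unsigned long long)range, (unsigned long long)ret);
	return 0;
}

Taking the upper bits matters because, as the header comment explains, the low bits of this LCG have short cycles (the lowest bit alternates with period 2).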
deps/jemalloc/include/jemalloc/internal/prof_externs.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
#define JEMALLOC_INTERNAL_PROF_EXTERNS_H
#include "jemalloc/internal/mutex.h"
extern malloc_mutex_t bt2gctx_mtx;

extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample;    /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump;          /* High-water memory dumping. */
extern bool opt_prof_final;          /* Final profile dumping. */
extern bool opt_prof_leak;           /* Dump leak summary at exit. */
extern bool opt_prof_accum;          /* Report cumulative bytes. */
extern bool opt_prof_log;            /* Turn logging on at boot. */
extern char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;

/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;

/*
 * Profile dump interval, measured in bytes allocated. Each arena triggers a
 * profile dump when it reaches this threshold. The effect is that the
 * interval between profile dumps averages prof_interval, though the actual
 * interval between dumps will tend to be sporadic, and the interval will be a
 * maximum of approximately (prof_interval * narenas).
 */
extern uint64_t prof_interval;

/*
 * Initialized as opt_lg_prof_sample, and potentially modified during profiling
 * resets.
 */
extern size_t lg_prof_sample;

void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
#endif
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *JET_MUTABLE prof_dump_open;

typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *JET_MUTABLE prof_dump_header;
#ifdef JEMALLOC_JET
void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
    uint64_t *accumbytes);
#endif
bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum);
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);

bool prof_log_start(tsdn_t *tsdn, const char *filename);
bool prof_log_stop(tsdn_t *tsdn);
#ifdef JEMALLOC_JET
size_t prof_log_bt_count(void);
size_t prof_log_alloc_count(void);
size_t prof_log_thr_count(void);
bool prof_log_is_logging(void);
bool prof_log_rep_check(void);
void prof_log_dummy_set(bool new_value);
#endif

#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H
#include "jemalloc/internal/mutex.h"
static inline bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
	cassert(config_prof);

	bool overflow;
	uint64_t a0, a1;

	/*
	 * If the application allocates fast enough (and/or if idump is slow
	 * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
	 * idump trigger coalescing. This is an intentional mechanism that
	 * avoids rate-limiting allocation.
	 */
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
	do {
		a1 = a0 + accumbytes;
		assert(a1 >= a0);
		overflow = (a1 >= prof_interval);
		if (overflow) {
			a1 %= prof_interval;
		}
	} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &prof_accum->mtx);
	a0 = prof_accum->accumbytes;
	a1 = a0 + accumbytes;
	overflow = (a1 >= prof_interval);
	if (overflow) {
		a1 %= prof_interval;
	}
	prof_accum->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
	return overflow;
}

static inline void
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
	cassert(config_prof);

	/*
	 * Cancel out as much of the excessive prof_accumbytes increase as
	 * possible without underflowing. Interval-triggered dumps occur
	 * slightly more often than intended as a result of incomplete
	 * canceling.
	 */
	uint64_t a0, a1;
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
	do {
		a1 = (a0 >= SC_LARGE_MINCLASS - usize)
		    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
	} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &prof_accum->mtx);
	a0 = prof_accum->accumbytes;
	a1 = (a0 >= SC_LARGE_MINCLASS - usize)
	    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
	prof_accum->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
}

JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
	/*
	 * Even if opt_prof is true, sampling can be temporarily disabled by
	 * setting prof_active to false. No locking is used when reading
	 * prof_active in the fast path, so there are no guarantees regarding
	 * how long it will take for all threads to notice state changes.
	 */
	return prof_active;
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
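The interval accumulator above reduces to: add, test against prof_interval, and wrap with %=, so that any number of crossed intervals triggers at most one dump. A toy standalone version of the non-atomic branch (illustrative only, not part of this diff; the 1000-byte interval and accum_add name are hypothetical):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint64_t accumbytes = 0;
static const uint64_t interval = 1000;	/* Hypothetical prof_interval. */

/* Mirrors the non-atomic branch of prof_accum_add(). */
static bool
accum_add(uint64_t bytes) {
	uint64_t a1 = accumbytes + bytes;
	bool overflow = (a1 >= interval);
	if (overflow) {
		a1 %= interval;	/* Coalesce however many intervals were crossed. */
	}
	accumbytes = a1;
	return overflow;	/* true means the caller should trigger an idump. */
}

int main(void) {
	assert(!accum_add(600));	/* 600 accumulated, no dump yet. */
	assert(accum_add(2500));	/* Crosses three interval boundaries, triggers once... */
	assert(accumbytes == 100);	/* ...and only the remainder is carried forward. */
	return 0;
}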
deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
#define JEMALLOC_INTERNAL_PROF_INLINES_B_H
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sz.h"
JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void) {
	/*
	 * No locking is used when reading prof_gdump_val in the fast path, so
	 * there are no guarantees regarding how long it will take for all
	 * threads to notice state changes.
	 */
	return prof_gdump_val;
}

JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	tdata = tsd_prof_tdata_get(tsd);
	if (create) {
		if (unlikely(tdata == NULL)) {
			if (tsd_nominal(tsd)) {
				tdata = prof_tdata_init(tsd);
				tsd_prof_tdata_set(tsd, tdata);
			}
		} else if (unlikely(tdata->expired)) {
			tdata = prof_tdata_reinit(tsd, tdata);
			tsd_prof_tdata_set(tsd, tdata);
		}
		assert(tdata == NULL || tdata->attached);
	}

	return tdata;
}

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_reset(tsdn, ptr, tctx);
}

JEMALLOC_ALWAYS_INLINE nstime_t
prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	return arena_prof_alloc_time_get(tsdn, ptr, alloc_ctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
    nstime_t t) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_alloc_time_set(tsdn, ptr, alloc_ctx, t);
}

JEMALLOC_ALWAYS_INLINE bool
prof_sample_check(tsd_t *tsd, size_t usize, bool update) {
	ssize_t check = update ? 0 : usize;

	int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
	if (update) {
		bytes_until_sample -= usize;
		if (tsd_nominal(tsd)) {
			tsd_bytes_until_sample_set(tsd, bytes_until_sample);
		}
	}
	if (likely(bytes_until_sample >= check)) {
		return true;
	}

	return false;
}

JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    prof_tdata_t **tdata_out) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	/* Fastpath: no need to load tdata */
	if (likely(prof_sample_check(tsd, usize, update))) {
		return true;
	}

	bool booted = tsd_prof_tdata_get(tsd);
	tdata = prof_tdata_get(tsd, true);
	if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
		tdata = NULL;
	}

	if (tdata_out != NULL) {
		*tdata_out = tdata;
	}

	if (unlikely(tdata == NULL)) {
		return true;
	}

	/*
	 * If this was the first creation of tdata, then
	 * prof_tdata_get() reset bytes_until_sample, so decrement and
	 * check it again
	 */
	if (!booted && prof_sample_check(tsd, usize, update)) {
		return true;
	}

	if (tsd_reentrancy_level_get(tsd) > 0) {
		return true;
	}
	/* Compute new sample threshold. */
	if (update) {
		prof_sample_threshold_update(tdata);
	}
	return !tdata->active;
}

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
	prof_tctx_t *ret;
	prof_tdata_t *tdata;
	prof_bt_t bt;

	assert(usize == sz_s2u(usize));

	if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
	    &tdata))) {
		ret = (prof_tctx_t *)(uintptr_t)1U;
	} else {
		bt_init(&bt, tdata->vec);
		prof_backtrace(&bt);
		ret = prof_lookup(tsd, &bt);
	}

	return ret;
}

JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(usize == isalloc(tsdn, ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_malloc_sample_object(tsdn, ptr, usize, tctx);
	} else {
		prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
		    (prof_tctx_t *)(uintptr_t)1U);
	}
}

JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
    prof_tctx_t *old_tctx) {
	bool sampled, old_sampled, moved;

	cassert(config_prof);
	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

	if (prof_active && !updated && ptr != NULL) {
		assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		if (prof_sample_accum_update(tsd, usize, true, NULL)) {
			/*
			 * Don't sample. The usize passed to prof_alloc_prep()
			 * was larger than what actually got allocated, so a
			 * backtrace was captured for this allocation, even
			 * though its actual usize was insufficient to cross the
			 * sample threshold.
			 */
			prof_alloc_rollback(tsd, tctx, true);
			tctx = (prof_tctx_t *)(uintptr_t)1U;
		}
	}

	sampled = ((uintptr_t)tctx > (uintptr_t)1U);
	old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
	moved = (ptr != old_ptr);

	if (unlikely(sampled)) {
		prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
	} else if (moved) {
		prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
		    (prof_tctx_t *)(uintptr_t)1U);
	} else if (unlikely(old_sampled)) {
		/*
		 * prof_tctx_set() would work for the !moved case as well, but
		 * prof_tctx_reset() is slightly cheaper, and the proper thing
		 * to do here in the presence of explicit knowledge re: moved
		 * state.
		 */
		prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
	} else {
		assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
		    (uintptr_t)1U);
	}

	/*
	 * The prof_free_sampled_object() call must come after the
	 * prof_malloc_sample_object() call, because tctx and old_tctx may be
	 * the same, in which case reversing the call order could cause the tctx
	 * to be prematurely destroyed as a side effect of momentarily zeroed
	 * counters.
	 */
	if (unlikely(old_sampled)) {
		prof_free_sampled_object(tsd, ptr, old_usize, old_tctx);
	}
}

JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
	prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);

	cassert(config_prof);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_free_sampled_object(tsd, ptr, usize, tctx);
	}
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */
deps/jemalloc/include/jemalloc/internal/prof_structs.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
#define JEMALLOC_INTERNAL_PROF_STRUCTS_H
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/rb.h"
struct prof_bt_s {
	/* Backtrace, stored as len program counters. */
	void **vec;
	unsigned len;
};

#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
	prof_bt_t *bt;
	unsigned max;
} prof_unwind_data_t;
#endif

struct prof_accum_s {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_t mtx;
	uint64_t accumbytes;
#else
	atomic_u64_t accumbytes;
#endif
};

struct prof_cnt_s {
	/* Profiling counters. */
	uint64_t curobjs;
	uint64_t curbytes;
	uint64_t accumobjs;
	uint64_t accumbytes;
};

typedef enum {
	prof_tctx_state_initializing,
	prof_tctx_state_nominal,
	prof_tctx_state_dumping,
	prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;

struct prof_tctx_s {
	/* Thread data for thread that performed the allocation. */
	prof_tdata_t *tdata;

	/*
	 * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
	 * defunct during teardown.
	 */
	uint64_t thr_uid;
	uint64_t thr_discrim;

	/* Profiling counters, protected by tdata->lock. */
	prof_cnt_t cnts;

	/* Associated global context. */
	prof_gctx_t *gctx;

	/*
	 * UID that distinguishes multiple tctx's created by the same thread,
	 * but coexisting in gctx->tctxs. There are two ways that such
	 * coexistence can occur:
	 * - A dumper thread can cause a tctx to be retained in the purgatory
	 *   state.
	 * - Although a single "producer" thread must create all tctx's which
	 *   share the same thr_uid, multiple "consumers" can each concurrently
	 *   execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
	 *   gets called once each time cnts.cur{objs,bytes} drop to 0, but this
	 *   threshold can be hit again before the first consumer finishes
	 *   executing prof_tctx_destroy().
	 */
	uint64_t tctx_uid;

	/* Linkage into gctx's tctxs. */
	rb_node(prof_tctx_t) tctx_link;

	/*
	 * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
	 * sample vs destroy race.
	 */
	bool prepared;

	/* Current dump-related state, protected by gctx->lock. */
	prof_tctx_state_t state;

	/*
	 * Copy of cnts snapshotted during early dump phase, protected by
	 * dump_mtx.
	 */
	prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;

struct prof_gctx_s {
	/* Protects nlimbo, cnt_summed, and tctxs. */
	malloc_mutex_t *lock;

	/*
	 * Number of threads that currently cause this gctx to be in a state of
	 * limbo due to one of:
	 *   - Initializing this gctx.
	 *   - Initializing per thread counters associated with this gctx.
	 *   - Preparing to destroy this gctx.
	 *   - Dumping a heap profile that includes this gctx.
	 * nlimbo must be 1 (single destroyer) in order to safely destroy the
	 * gctx.
	 */
	unsigned nlimbo;

	/*
	 * Tree of profile counters, one for each thread that has allocated in
	 * this context.
	 */
	prof_tctx_tree_t tctxs;

	/* Linkage for tree of contexts to be dumped. */
	rb_node(prof_gctx_t) dump_link;

	/* Temporary storage for summation during dump. */
	prof_cnt_t cnt_summed;

	/* Associated backtrace. */
	prof_bt_t bt;

	/* Backtrace vector, variable size, referred to by bt. */
	void *vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;

struct prof_tdata_s {
	malloc_mutex_t *lock;

	/* Monotonically increasing unique thread identifier. */
	uint64_t thr_uid;

	/*
	 * Monotonically increasing discriminator among tdata structures
	 * associated with the same thr_uid.
	 */
	uint64_t thr_discrim;

	/* Included in heap profile dumps if non-NULL. */
	char *thread_name;

	bool attached;
	bool expired;

	rb_node(prof_tdata_t) tdata_link;

	/*
	 * Counter used to initialize prof_tctx_t's tctx_uid. No locking is
	 * necessary when incrementing this field, because only one thread ever
	 * does so.
	 */
	uint64_t tctx_uid_next;

	/*
	 * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
	 * backtraces for which it has non-zero allocation/deallocation counters
	 * associated with thread-specific prof_tctx_t objects. Other threads
	 * may write to prof_tctx_t contents when freeing associated objects.
	 */
	ckh_t bt2tctx;

	/* Sampling state. */
	uint64_t prng_state;

	/* State used to avoid dumping while operating on prof internals. */
	bool enq;
	bool enq_idump;
	bool enq_gdump;

	/*
	 * Set to true during an early dump phase for tdata's which are
	 * currently being dumped. New threads' tdata's have this initialized
	 * to false so that they aren't accidentally included in later dump
	 * phases.
	 */
	bool dumping;

	/*
	 * True if profiling is active for this tdata's thread
	 * (thread.prof.active mallctl).
	 */
	bool active;

	/* Temporary storage for summation during dump. */
	prof_cnt_t cnt_summed;

	/* Backtrace vector, used for calls to prof_backtrace(). */
	void *vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;

#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
deps/jemalloc/include/jemalloc/internal/prof_types.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
#define JEMALLOC_INTERNAL_PROF_TYPES_H
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_accum_s prof_accum_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
/*
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
* is based on __builtin_return_address() necessarily has a hard-coded number
* of backtrace frame handlers, and should be kept in sync with this setting.
*/
#define PROF_BT_MAX 128
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
#endif
/* JEMALLOC_INTERNAL_PROF_TYPES_H */
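The PROF_TDATA_STATE_* values work because genuine prof_tdata_t pointers are never 1 or 2, so callers such as prof_sample_accum_update() (in prof_inlines_b.h above) only need a single comparison against PROF_TDATA_STATE_MAX to reject both sentinels and NULL. A minimal sketch of that check (illustrative only, not part of this diff; tdata_is_real is a hypothetical helper):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct prof_tdata_s prof_tdata_t;	/* Opaque here. */

/* Sentinel encodings copied from prof_types.h above. */
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY

/* Same filter used in prof_sample_accum_update(): anything <= MAX is not usable. */
static int
tdata_is_real(const prof_tdata_t *tdata) {
	return (uintptr_t)tdata > (uintptr_t)PROF_TDATA_STATE_MAX;
}

int main(void) {
	assert(!tdata_is_real(NULL));
	assert(!tdata_is_real(PROF_TDATA_STATE_REINCARNATED));
	assert(!tdata_is_real(PROF_TDATA_STATE_PURGATORY));
	return 0;
}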
deps/jemalloc/include/jemalloc/internal/public_namespace.sh (new file, mode 100755)
#!/bin/sh
for nm in `cat $1` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "#define je_${n} JEMALLOC_N(${n})"
done
deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh (new file, mode 100755)
#!/bin/sh
for nm in `cat $1` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "#undef je_${n}"
done
deps/jemalloc/include/jemalloc/internal/ql.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_QL_H
#define JEMALLOC_INTERNAL_QL_H
#include "jemalloc/internal/qr.h"
/* List definitions. */
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
#define ql_head_initializer(a_head) {NULL}
#define ql_elm(a_type) qr(a_type)
/* List functions. */
#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
#define ql_first(a_head) ((a_head)->qlh_first)
#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
if (ql_first(a_head) != (a_elm)) { \
qr_remove((a_elm), a_field); \
} else { \
ql_first(a_head) = NULL; \
} \
} while (0)
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
#endif
/* JEMALLOC_INTERNAL_QL_H */
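ql.h layers a list interface over the qr ring in the next file: the head stores only the first element, and last/prev/next fall out of the ring linkage. A minimal usage sketch (illustrative only, not part of this diff; entry_t is hypothetical, and the internal ql.h/qr.h are assumed to be on the include path):

#include <assert.h>
#include <stddef.h>
#include "ql.h"	/* pulls in qr.h */

typedef struct entry_s entry_t;
struct entry_s {
	int value;
	ql_elm(entry_t) link;	/* List linkage (a qr ring element). */
};

typedef ql_head(entry_t) entry_list_t;

int main(void) {
	entry_list_t list = ql_head_initializer(list);
	entry_t a = {1}, b = {2}, c = {3};
	entry_t *it;
	int sum = 0;

	ql_elm_new(&a, link);
	ql_elm_new(&b, link);
	ql_elm_new(&c, link);
	ql_tail_insert(&list, &a, link);
	ql_tail_insert(&list, &b, link);
	ql_tail_insert(&list, &c, link);

	ql_foreach(it, &list, link) {
		sum += it->value;
	}
	assert(sum == 6);
	assert(ql_first(&list) == &a && ql_last(&list, link) == &c);
	return 0;
}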
deps/jemalloc/include/jemalloc/internal/qr.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_QR_H
#define JEMALLOC_INTERNAL_QR_H
/* Ring definitions. */
#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/* Ring functions. */
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
a_type *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
/*
* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code.
*/
#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_type, a_field)
#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
= (a_qr)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
? (var)->a_field.qre_prev : NULL))
#endif
/* JEMALLOC_INTERNAL_QR_H */
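As the comment above notes, qr_meld() and qr_split() are the same pointer swap: applied to two disjoint rings it joins them, applied again it separates them. A small sketch (illustrative only, not part of this diff; ring_node_t is hypothetical, and the internal qr.h is assumed to be on the include path):

#include <assert.h>
#include "qr.h"

typedef struct ring_node_s ring_node_t;
struct ring_node_s {
	int id;
	qr(ring_node_t) link;
};

int main(void) {
	ring_node_t a = {1}, b = {2};

	qr_new(&a, link);	/* Two singleton rings. */
	qr_new(&b, link);

	/* Meld joins the rings: a and b now point at each other. */
	qr_meld(&a, &b, ring_node_t, link);
	assert(qr_next(&a, link) == &b && qr_prev(&a, link) == &b);

	/* The identical swap, spelled qr_split(), separates them again. */
	qr_split(&a, &b, ring_node_t, link);
	assert(qr_next(&a, link) == &a && qr_next(&b, link) == &b);
	return 0;
}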
deps/jemalloc/include/jemalloc/internal/quantum.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_QUANTUM_H
#define JEMALLOC_INTERNAL_QUANTUM_H
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
# define LG_QUANTUM 4
# endif
# ifdef __ia64__
# define LG_QUANTUM 4
# endif
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
# define LG_QUANTUM 4
# endif
# ifdef __arm__
# define LG_QUANTUM 3
# endif
# ifdef __aarch64__
# define LG_QUANTUM 4
# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __m68k__
# define LG_QUANTUM 3
# endif
# ifdef __mips__
# define LG_QUANTUM 3
# endif
# ifdef __nios2__
# define LG_QUANTUM 3
# endif
# ifdef __or1k__
# define LG_QUANTUM 3
# endif
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
# if defined(__riscv) || defined(__riscv__)
# define LG_QUANTUM 4
# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
# if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \
defined(__SH4_SINGLE_ONLY__))
# define LG_QUANTUM 4
# endif
# ifdef __tile__
# define LG_QUANTUM 4
# endif
# ifdef __le32__
# define LG_QUANTUM 4
# endif
# ifndef LG_QUANTUM
# error "Unknown minimum alignment for architecture; specify via "
"--with-lg-quantum"
# endif
#endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)
/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
#endif
/* JEMALLOC_INTERNAL_QUANTUM_H */
deps/jemalloc/include/jemalloc/internal/rb.h (new file, mode 100644)
/*-
*******************************************************************************
*
* cpp macro implementation of left-leaning 2-3 red-black trees. Parent
* pointers are not used, and color bits are stored in the least significant
* bit of right-child pointers (if RB_COMPACT is defined), thus making node
* linkage as compact as is possible for red-black trees.
*
* Usage:
*
* #include <stdint.h>
* #include <stdbool.h>
* #define NDEBUG // (Optional, see assert(3).)
* #include <assert.h>
* #define RB_COMPACT // (Optional, embed color bits in right-child pointers.)
* #include <rb.h>
* ...
*
*******************************************************************************
*/
#ifndef RB_H_
#define RB_H_
#ifndef __PGI
#define RB_COMPACT
#endif
#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right_red; \
}
#else
#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right; \
bool rbn_red; \
}
#endif
/* Root structure. */
#define rb_tree(a_type) \
struct { \
a_type *rbt_root; \
}
/* Left accessors. */
#define rbtn_left_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_left)
#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
(a_node)->a_field.rbn_left = a_left; \
} while (0)
#ifdef RB_COMPACT
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
& ((ssize_t)-2)))
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
| (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
} while (0)
/* Color accessors. */
#define rbtn_red_get(a_type, a_field, a_node) \
((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
& ((size_t)1)))
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
| ((ssize_t)a_red)); \
} while (0)
#define rbtn_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
(a_node)->a_field.rbn_right_red) | ((size_t)1)); \
} while (0)
#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)
/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
	/* Bookkeeping bit cannot be used by node pointer. */ \
assert(((uintptr_t)(a_node) & 0x1) == 0); \
rbtn_left_set(a_type, a_field, (a_node), NULL); \
rbtn_right_set(a_type, a_field, (a_node), NULL); \
rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
#else
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_right)
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right = a_right; \
} while (0)
/* Color accessors. */
#define rbtn_red_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_red)
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_red = (a_red); \
} while (0)
#define rbtn_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = true; \
} while (0)
#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = false; \
} while (0)
/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
rbtn_left_set(a_type, a_field, (a_node), NULL); \
rbtn_right_set(a_type, a_field, (a_node), NULL); \
rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
#endif
/* Tree initializer. */
#define rb_new(a_type, a_field, a_rbt) do { \
(a_rbt)->rbt_root = NULL; \
} while (0)
/* Internal utility macros. */
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != NULL) { \
for (; \
rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
(r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
} \
} \
} while (0)
#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != NULL) { \
for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
(r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
} \
} \
} while (0)
#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
(r_node) = rbtn_right_get(a_type, a_field, (a_node)); \
rbtn_right_set(a_type, a_field, (a_node), \
rbtn_left_get(a_type, a_field, (r_node))); \
rbtn_left_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
(r_node) = rbtn_left_get(a_type, a_field, (a_node)); \
rbtn_left_set(a_type, a_field, (a_node), \
rbtn_right_get(a_type, a_field, (r_node))); \
rbtn_right_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
/*
* The rb_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to rb_gen().
*/
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
a_attr bool \
a_prefix##empty(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
a_attr void \
a_prefix##remove(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
a_rbt_type *, a_type *, void *), void *arg); \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
a_attr void \
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
void *arg);
/*
* The rb_gen() macro generates a type-specific red-black tree implementation,
* based on the above cpp macros.
*
* Arguments:
*
* a_attr : Function attribute for generated functions (ex: static).
* a_prefix : Prefix for generated functions (ex: ex_).
* a_rb_type : Type for red-black tree data structure (ex: ex_t).
* a_type : Type for red-black tree node data structure (ex: ex_node_t).
* a_field : Name of red-black tree node linkage (ex: ex_link).
* a_cmp : Node comparison function name, with the following prototype:
* int (a_cmp *)(a_type *a_node, a_type *a_other);
* ^^^^^^
* or a_key
* Interpretation of comparison function return values:
* -1 : a_node < a_other
* 0 : a_node == a_other
* 1 : a_node > a_other
* In all cases, the a_node or a_key macro argument is the first
* argument to the comparison function, which makes it possible
* to write comparison functions that treat the first argument
* specially.
*
* Assuming the following setup:
*
* typedef struct ex_node_s ex_node_t;
* struct ex_node_s {
* rb_node(ex_node_t) ex_link;
* };
* typedef rb_tree(ex_node_t) ex_t;
* rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)
*
* The following API is generated:
*
* static void
* ex_new(ex_t *tree);
* Description: Initialize a red-black tree structure.
* Args:
* tree: Pointer to an uninitialized red-black tree object.
*
* static bool
* ex_empty(ex_t *tree);
* Description: Determine whether tree is empty.
* Args:
* tree: Pointer to an initialized red-black tree object.
* Ret: True if tree is empty, false otherwise.
*
* static ex_node_t *
* ex_first(ex_t *tree);
* static ex_node_t *
* ex_last(ex_t *tree);
* Description: Get the first/last node in tree.
* Args:
* tree: Pointer to an initialized red-black tree object.
* Ret: First/last node in tree, or NULL if tree is empty.
*
* static ex_node_t *
* ex_next(ex_t *tree, ex_node_t *node);
* static ex_node_t *
* ex_prev(ex_t *tree, ex_node_t *node);
* Description: Get node's successor/predecessor.
* Args:
* tree: Pointer to an initialized red-black tree object.
* node: A node in tree.
* Ret: node's successor/predecessor in tree, or NULL if node is
* last/first.
*
* static ex_node_t *
* ex_search(ex_t *tree, const ex_node_t *key);
* Description: Search for node that matches key.
* Args:
* tree: Pointer to an initialized red-black tree object.
* key : Search key.
* Ret: Node in tree that matches key, or NULL if no match.
*
* static ex_node_t *
* ex_nsearch(ex_t *tree, const ex_node_t *key);
* static ex_node_t *
* ex_psearch(ex_t *tree, const ex_node_t *key);
* Description: Search for node that matches key. If no match is found,
* return what would be key's successor/predecessor, were
* key in tree.
* Args:
* tree: Pointer to an initialized red-black tree object.
* key : Search key.
* Ret: Node in tree that matches key, or if no match, hypothetical node's
* successor/predecessor (NULL if no successor/predecessor).
*
* static void
* ex_insert(ex_t *tree, ex_node_t *node);
* Description: Insert node into tree.
* Args:
* tree: Pointer to an initialized red-black tree object.
* node: Node to be inserted into tree.
*
* static void
* ex_remove(ex_t *tree, ex_node_t *node);
* Description: Remove node from tree.
* Args:
* tree: Pointer to an initialized red-black tree object.
* node: Node in tree to be removed.
*
* static ex_node_t *
* ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
* ex_node_t *, void *), void *arg);
* static ex_node_t *
* ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
* ex_node_t *, void *), void *arg);
* Description: Iterate forward/backward over tree, starting at node. If
* tree is modified, iteration must be immediately
* terminated by the callback function that causes the
* modification.
* Args:
* tree : Pointer to an initialized red-black tree object.
* start: Node at which to start iteration, or NULL to start at
* first/last node.
* cb : Callback function, which is called for each node during
* iteration. Under normal circumstances the callback function
* should return NULL, which causes iteration to continue. If a
* callback function returns non-NULL, iteration is immediately
* terminated and the non-NULL return value is returned by the
* iterator. This is useful for re-starting iteration after
* modifying tree.
* arg : Opaque pointer passed to cb().
* Ret: NULL if iteration completed, or the non-NULL callback return value
* that caused termination of the iteration.
*
* static void
* ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
* Description: Iterate over the tree with post-order traversal, remove
* each node, and run the callback if non-null. This is
* used for destroying a tree without paying the cost to
* rebalance it. The tree must not be otherwise altered
* during traversal.
* Args:
* tree: Pointer to an initialized red-black tree object.
* cb : Callback function, which, if non-null, is called for each node
* during iteration. There is no way to stop iteration once it
* has begun.
* arg : Opaque pointer passed to cb().
*/
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
rb_new(a_type, a_field, rbtree); \
} \
a_attr bool \
a_prefix##empty(a_rbt_type *rbtree) { \
return (rbtree->rbt_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
if (rbtn_right_get(a_type, a_field, node) != NULL) { \
rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
assert(tnode != NULL); \
ret = NULL; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
ret = tnode; \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
break; \
} \
assert(tnode != NULL); \
} \
} \
return ret; \
} \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
if (rbtn_left_get(a_type, a_field, node) != NULL) { \
rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
assert(tnode != NULL); \
ret = NULL; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
ret = tnode; \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
break; \
} \
assert(tnode != NULL); \
} \
} \
return ret; \
} \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
a_type *ret; \
int cmp; \
ret = rbtree->rbt_root; \
while (ret != NULL \
&& (cmp = (a_cmp)(key, ret)) != 0) { \
if (cmp < 0) { \
ret = rbtn_left_get(a_type, a_field, ret); \
} else { \
ret = rbtn_right_get(a_type, a_field, ret); \
} \
} \
return ret; \
} \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
ret = NULL; \
while (tnode != NULL) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
ret = tnode; \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
ret = tnode; \
break; \
} \
} \
return ret; \
} \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
ret = NULL; \
while (tnode != NULL) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
ret = tnode; \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
ret = tnode; \
break; \
} \
} \
return ret; \
} \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
struct { \
a_type *node; \
int cmp; \
} path[sizeof(void *) << 4], *pathp; \
rbt_node_new(a_type, a_field, rbtree, node); \
/* Wind. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != NULL; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
assert(cmp != 0); \
if (cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} else { \
pathp[1].node = rbtn_right_get(a_type, a_field, \
pathp->node); \
} \
} \
pathp->node = node; \
/* Unwind. */ \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
a_type *cnode = pathp->node; \
if (pathp->cmp < 0) { \
a_type *left = pathp[1].node; \
rbtn_left_set(a_type, a_field, cnode, left); \
if (rbtn_red_get(a_type, a_field, left)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
leftleft)) { \
/* Fix up 4-node. */ \
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, cnode, tnode); \
cnode = tnode; \
} \
} else { \
return; \
} \
} else { \
a_type *right = pathp[1].node; \
rbtn_right_set(a_type, a_field, cnode, right); \
if (rbtn_red_get(a_type, a_field, right)) { \
a_type *left = rbtn_left_get(a_type, a_field, cnode); \
if (left != NULL && rbtn_red_get(a_type, a_field, \
left)) { \
/* Split 4-node. */ \
rbtn_black_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, right); \
rbtn_red_set(a_type, a_field, cnode); \
} else { \
/* Lean left. */ \
a_type *tnode; \
bool tred = rbtn_red_get(a_type, a_field, cnode); \
rbtn_rotate_left(a_type, a_field, cnode, tnode); \
rbtn_color_set(a_type, a_field, tnode, tred); \
rbtn_red_set(a_type, a_field, cnode); \
cnode = tnode; \
} \
} else { \
return; \
} \
} \
pathp->node = cnode; \
} \
/* Set root, and make it black. */ \
rbtree->rbt_root = path->node; \
rbtn_black_set(a_type, a_field, rbtree->rbt_root); \
} \
a_attr void \
a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
struct { \
a_type *node; \
int cmp; \
} *pathp, *nodep, path[sizeof(void *) << 4]; \
/* Wind. */ \
nodep = NULL; /* Silence compiler warning. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != NULL; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
if (cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} else { \
pathp[1].node = rbtn_right_get(a_type, a_field, \
pathp->node); \
if (cmp == 0) { \
/* Find node's successor, in preparation for swap. */ \
pathp->cmp = 1; \
nodep = pathp; \
for (pathp++; pathp->node != NULL; pathp++) { \
pathp->cmp = -1; \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} \
break; \
} \
} \
} \
assert(nodep->node == node); \
pathp--; \
if (pathp->node != node) { \
/* Swap node with its successor. */ \
bool tred = rbtn_red_get(a_type, a_field, pathp->node); \
rbtn_color_set(a_type, a_field, pathp->node, \
rbtn_red_get(a_type, a_field, node)); \
rbtn_left_set(a_type, a_field, pathp->node, \
rbtn_left_get(a_type, a_field, node)); \
/* If node's successor is its right child, the following code */ \
/* will do the wrong thing for the right child pointer. */ \
/* However, it doesn't matter, because the pointer will be */ \
/* properly set when the successor is pruned. */ \
rbtn_right_set(a_type, a_field, pathp->node, \
rbtn_right_get(a_type, a_field, node)); \
rbtn_color_set(a_type, a_field, node, tred); \
/* The pruned leaf node's child pointers are never accessed */ \
/* again, so don't bother setting them to nil. */ \
nodep->node = pathp->node; \
pathp->node = node; \
if (nodep == path) { \
rbtree->rbt_root = nodep->node; \
} else { \
if (nodep[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, nodep[-1].node, \
nodep->node); \
} else { \
rbtn_right_set(a_type, a_field, nodep[-1].node, \
nodep->node); \
} \
} \
} else { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
if (left != NULL) { \
/* node has no successor, but it has a left child. */ \
/* Splice node out, without losing the left child. */ \
assert(!rbtn_red_get(a_type, a_field, node)); \
assert(rbtn_red_get(a_type, a_field, left)); \
rbtn_black_set(a_type, a_field, left); \
if (pathp == path) { \
rbtree->rbt_root = left; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
left); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
left); \
} \
} \
return; \
} else if (pathp == path) { \
/* The tree only contained one node. */ \
rbtree->rbt_root = NULL; \
return; \
} \
} \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
/* Prune red node, which requires no fixup. */ \
assert(pathp[-1].cmp < 0); \
rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
return; \
} \
/* The node to be pruned is black, so unwind until balance is */ \
/* restored. */ \
pathp->node = NULL; \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
assert(pathp->cmp != 0); \
if (pathp->cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp->node, \
pathp[1].node); \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *right = rbtn_right_get(a_type, a_field, \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
a_type *tnode; \
if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
rightleft)) { \
/* In the following diagrams, ||, //, and \\ */ \
/* indicate the path to the removed node.    */ \
/*                                            */ \
/*      ||                                    */ \
/*    pathp(r)                                */ \
/*  //        \                               */ \
/* (b)        (b)                             */ \
/*           /                                */ \
/*         (r)                                */ \
/*                                            */ \
rbtn_black_set(a_type, a_field, pathp->node); \
rbtn_rotate_right(a_type, a_field, right, tnode); \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
} else { \
/*      ||                                    */ \
/*    pathp(r)                                */ \
/*  //        \                               */ \
/* (b)        (b)                             */ \
/*           /                                */ \
/*         (b)                                */ \
/*                                            */ \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
} \
/* Balance restored, but rotation modified subtree */ \
/* root. */ \
assert((uintptr_t)pathp > (uintptr_t)path); \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
tnode); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
return; \
} else { \
a_type *right = rbtn_right_get(a_type, a_field, \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
rightleft)) { \
/*      ||                                    */ \
/*    pathp(b)                                */ \
/*  //        \                               */ \
/* (b)        (b)                             */ \
/*           /                                */ \
/*         (r)                                */ \
a_type *tnode; \
rbtn_black_set(a_type, a_field, rightleft); \
rbtn_rotate_right(a_type, a_field, right, tnode); \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */ \
/* subtree root, which may actually be the tree */ \
/* root. */ \
if (pathp == path) { \
/* Set root. */ \
rbtree->rbt_root = tnode; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, \
pathp[-1].node, tnode); \
} else { \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
} \
} \
return; \
} else { \
/*      ||                                    */ \
/*    pathp(b)                                */ \
/*  //        \                               */ \
/* (b)        (b)                             */ \
/*           /                                */ \
/*         (b)                                */ \
a_type *tnode; \
rbtn_red_set(a_type, a_field, pathp->node); \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
pathp->node = tnode; \
} \
} \
} else { \
a_type *left; \
rbtn_right_set(a_type, a_field, pathp->node, \
pathp[1].node); \
left = rbtn_left_get(a_type, a_field, pathp->node); \
if (rbtn_red_get(a_type, a_field, left)) { \
a_type *tnode; \
a_type *leftright = rbtn_right_get(a_type, a_field, \
left); \
a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
leftright); \
if (leftrightleft != NULL && rbtn_red_get(a_type, \
a_field, leftrightleft)) { \
/*      ||                                    */ \
/*    pathp(b)                                */ \
/*   /        \\                              */ \
/* (r)        (b)                             */ \
/*   \                                        */ \
/*   (b)                                      */ \
/*   /                                        */ \
/* (r)                                        */ \
a_type *unode; \
rbtn_black_set(a_type, a_field, leftrightleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
unode); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
rbtn_right_set(a_type, a_field, unode, tnode); \
rbtn_rotate_left(a_type, a_field, unode, tnode); \
} else { \
/*      ||                                    */ \
/*    pathp(b)                                */ \
/*   /        \\                              */ \
/* (r)        (b)                             */ \
/*   \                                        */ \
/*   (b)                                      */ \
/*   /                                        */ \
/* (b)                                        */ \
assert(leftright != NULL); \
rbtn_red_set(a_type, a_field, leftright); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
rbtn_black_set(a_type, a_field, tnode); \
} \
/* Balance restored, but rotation modified subtree */ \
/* root, which may actually be the tree root. */ \
if (pathp == path) { \
/* Set root. */ \
rbtree->rbt_root = tnode; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
tnode); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
} \
return; \
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
leftleft)) { \
/*      ||                                    */ \
/*    pathp(r)                                */ \
/*   /        \\                              */ \
/* (b)        (b)                             */ \
/*   /                                        */ \
/* (r)                                        */ \
a_type *tnode; \
rbtn_black_set(a_type, a_field, pathp->node); \
rbtn_red_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */ \
/* subtree root. */ \
assert((uintptr_t)pathp > (uintptr_t)path); \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
tnode); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
return; \
} else { \
/*      ||                                    */ \
/*    pathp(r)                                */ \
/*   /        \\                              */ \
/* (b)        (b)                             */ \
/*   /                                        */ \
/* (b)                                        */ \
rbtn_red_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, pathp->node); \
/* Balance restored. */ \
return; \
} \
} else { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
leftleft)) { \
/*      ||                                    */ \
/*    pathp(b)                                */ \
/*   /        \\                              */ \
/* (b)        (b)                             */ \
/*   /                                        */ \
/* (r)                                        */ \
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */ \
/* subtree root, which may actually be the tree */ \
/* root. */ \
if (pathp == path) { \
/* Set root. */ \
rbtree->rbt_root = tnode; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, \
pathp[-1].node, tnode); \
} else { \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
} \
} \
return; \
} else { \
/*      ||                                    */ \
/*    pathp(b)                                */ \
/*   /        \\                              */ \
/* (b)        (b)                             */ \
/*   /                                        */ \
/* (b)                                        */ \
rbtn_red_set(a_type, a_field, left); \
} \
} \
} \
} \
/* Set root. */ \
rbtree->rbt_root = path->node; \
assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
} \
a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == NULL) { \
return NULL; \
} else { \
a_type *ret; \
if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
arg)) != NULL) { \
return ret; \
} \
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
int cmp = a_cmp(start, node); \
if (cmp < 0) { \
a_type *ret; \
if ((ret = a_prefix##iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
return ret; \
} \
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg); \
} else if (cmp > 0) { \
return a_prefix##iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
return ret; \
} \
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
a_rbt_type *, a_type *, void *), void *arg) { \
a_type *ret; \
if (start != NULL) { \
ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \
cb, arg); \
} else { \
ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
} \
return ret; \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == NULL) { \
return NULL; \
} else { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
return ret; \
} \
return a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
void *arg) { \
int cmp = a_cmp(start, node); \
if (cmp > 0) { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
return ret; \
} \
return a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} else if (cmp < 0) { \
return a_prefix##reverse_iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
return ret; \
} \
return a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
a_type *ret; \
if (start != NULL) { \
ret = a_prefix##reverse_iter_start(rbtree, start, \
rbtree->rbt_root, cb, arg); \
} else { \
ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
cb, arg); \
} \
return ret; \
} \
a_attr void \
a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
a_type *, void *), void *arg) { \
if (node == NULL) { \
return; \
} \
a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
node), cb, arg); \
rbtn_left_set(a_type, a_field, (node), NULL); \
a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
node), cb, arg); \
rbtn_right_set(a_type, a_field, (node), NULL); \
if (cb) { \
cb(node, arg); \
} \
} \
a_attr void \
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
void *arg) { \
a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
rbtree->rbt_root = NULL; \
}
#endif /* RB_H_ */
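For orientation, the following is a minimal usage sketch of the generated API documented above. The ex_* names mirror the example in the comment; the val payload field and the contents of main() are illustrative assumptions, not part of this header, and the include path is assumed to resolve to this file.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include "rb.h" /* assumed include path for this header */

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
	int val;                    /* assumed payload field */
	rb_node(ex_node_t) ex_link; /* intrusive red-black linkage */
};
typedef rb_tree(ex_node_t) ex_t;

/* Comparison callback: returns -1, 0, or 1, as documented above. */
static int
ex_cmp(const ex_node_t *a, const ex_node_t *b) {
	return (a->val > b->val) - (a->val < b->val);
}

/* Generates static ex_new(), ex_insert(), ex_search(), ex_destroy(), ... */
rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)

int
main(void) {
	ex_t tree;
	ex_node_t nodes[3] = {{.val = 2}, {.val = 1}, {.val = 3}};

	ex_new(&tree);
	for (size_t i = 0; i < 3; i++) {
		ex_insert(&tree, &nodes[i]);
	}
	ex_node_t key = {.val = 2};
	ex_node_t *hit = ex_search(&tree, &key);   /* matches nodes[0] */
	ex_node_t *nxt = ex_next(&tree, hit);      /* successor: val == 3 */
	printf("found %d, next %d\n", hit->val, nxt->val);
	ex_destroy(&tree, NULL, NULL);
	return 0;
}

Note that since the functions are generated with static linkage, a real caller would typically only generate the subset it needs or accept unused-function warnings.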
deps/jemalloc/include/jemalloc/internal/rtree.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_RTREE_H
#define JEMALLOC_INTERNAL_RTREE_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/tsd.h"
/*
* This radix tree implementation is tailored to the singular purpose of
* associating metadata with extents that are currently owned by jemalloc.
*
*******************************************************************************
*/
/* Number of high insignificant bits. */
#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
/* Number of low insignificant bits. */
#define RTREE_NLIB LG_PAGE
/* Number of significant bits. */
#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
/* Number of levels in radix tree. */
#if RTREE_NSB <= 10
# define RTREE_HEIGHT 1
#elif RTREE_NSB <= 36
# define RTREE_HEIGHT 2
#elif RTREE_NSB <= 52
# define RTREE_HEIGHT 3
#else
# error Unsupported number of significant virtual address bits
#endif
/* Use compact leaf representation if virtual address encoding allows. */
#if RTREE_NHIB >= LG_CEIL(SC_NSIZES)
# define RTREE_LEAF_COMPACT
#endif
/* Needed for initialization only. */
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
typedef struct rtree_node_elm_s rtree_node_elm_t;

struct rtree_node_elm_s {
	atomic_p_t	child; /* (rtree_{node,leaf}_elm_t *) */
};

struct rtree_leaf_elm_s {
#ifdef RTREE_LEAF_COMPACT
	/*
	 * Single pointer-width field containing all three leaf element fields.
	 * For example, on a 64-bit x64 system with 48 significant virtual
	 * memory address bits, the index, extent, and slab fields are packed
	 * as such:
	 *
	 * x: index
	 * e: extent
	 * b: slab
	 *
	 *   00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
	 */
	atomic_p_t	le_bits;
#else
	atomic_p_t	le_extent; /* (extent_t *) */
	atomic_u_t	le_szind; /* (szind_t) */
	atomic_b_t	le_slab; /* (bool) */
#endif
};

typedef struct rtree_level_s rtree_level_t;
struct rtree_level_s {
	/* Number of key bits distinguished by this level. */
	unsigned		bits;
	/*
	 * Cumulative number of key bits distinguished by traversing to
	 * corresponding tree level.
	 */
	unsigned		cumbits;
};

typedef struct rtree_s rtree_t;
struct rtree_s {
	malloc_mutex_t		init_lock;
	/* Number of elements based on rtree_levels[0].bits. */
#if RTREE_HEIGHT > 1
	rtree_node_elm_t	root[1U << (RTREE_NSB/RTREE_HEIGHT)];
#else
	rtree_leaf_elm_t	root[1U << (RTREE_NSB/RTREE_HEIGHT)];
#endif
};
/*
* Split the bits into one to three partitions depending on number of
* significant bits. If the number of bits does not divide evenly into the
* number of levels, place one remainder bit per level starting at the leaf
* level.
*/
static const rtree_level_t rtree_levels[] = {
#if RTREE_HEIGHT == 1
	{RTREE_NSB, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 2
	{RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
	{RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 3
	{RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
	{RTREE_NSB/3 + RTREE_NSB%3/2,
	    RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
	{RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
#else
#  error Unsupported rtree height
#endif
};
bool rtree_new(rtree_t *rtree, bool zeroed);

typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;

typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;

typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;

typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;

#ifdef JEMALLOC_JET
void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
#endif
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leafkey(uintptr_t key) {
	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
	unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
	    rtree_levels[RTREE_HEIGHT-1].bits);
	unsigned maskbits = ptrbits - cumbits;
	uintptr_t mask = ~((ZU(1) << maskbits) - 1);
	return (key & mask);
}

JEMALLOC_ALWAYS_INLINE size_t
rtree_cache_direct_map(uintptr_t key) {
	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
	unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
	    rtree_levels[RTREE_HEIGHT-1].bits);
	unsigned maskbits = ptrbits - cumbits;
	return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
}

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(uintptr_t key, unsigned level) {
	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
	unsigned cumbits = rtree_levels[level].cumbits;
	unsigned shiftbits = ptrbits - cumbits;
	unsigned maskbits = rtree_levels[level].bits;
	uintptr_t mask = (ZU(1) << maskbits) - 1;
	return ((key >> shiftbits) & mask);
}
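To see the key splitting in numbers: on a typical x86-64 configuration (assumed values of 64-bit pointers, LG_VADDR = 48, LG_PAGE = 12), RTREE_NSB is 36, the tree height is 2, and each level distinguishes 18 bits. The standalone sketch below extracts the two per-level subkeys of an address by hand, following the same shift-and-mask arithmetic as rtree_subkey(); the constants and names are assumptions for the example.

#include <stdint.h>
#include <stdio.h>

int
main(void) {
	const unsigned ptrbits = 64;        /* assumed pointer width */
	const unsigned nhib = 64 - 48;      /* high insignificant bits */
	const unsigned nsb = 48 - 12;       /* significant bits above the page */
	const unsigned bits_per_level = nsb / 2; /* height 2 -> 18 bits each */

	uintptr_t key = (uintptr_t)0x00007f1234567000ULL; /* example address */

	/* Level 0 subkey: the top 18 significant bits. */
	unsigned cumbits0 = nhib + bits_per_level;
	uintptr_t sub0 = (key >> (ptrbits - cumbits0))
	    & (((uintptr_t)1 << bits_per_level) - 1);

	/* Level 1 subkey: the next 18 bits, just above the page offset. */
	unsigned cumbits1 = nhib + nsb;
	uintptr_t sub1 = (key >> (ptrbits - cumbits1))
	    & (((uintptr_t)1 << bits_per_level) - 1);

	printf("level 0 subkey: %#lx\n", (unsigned long)sub0);
	printf("level 1 subkey: %#lx\n", (unsigned long)sub1);
	return 0;
}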
/*
* Atomic getters.
*
* dependent: Reading a value on behalf of a pointer to a valid allocation
* is guaranteed to be a clean read even without synchronization,
* because the rtree update became visible in memory before the
* pointer came into existence.
* !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
* dependent on a previous rtree write, which means a stale read
* could result if synchronization were omitted here.
*/
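The dependent/!dependent distinction above corresponds to C11 memory orders: a dependent read can use a relaxed load because the pointer the caller already holds was obtained after the rtree update became visible, whereas an arbitrary lookup needs an acquire load to pair with the release store performed when the element was written. The following self-contained C11 sketch illustrates that pairing; the slot_* names are illustrative and not jemalloc's.

#include <stdatomic.h>
#include <stddef.h>

typedef struct {
	_Atomic(void *) value;
} slot_t;

/* Writer: publish with release so readers observe fully-initialized data. */
static void
slot_publish(slot_t *slot, void *data) {
	atomic_store_explicit(&slot->value, data, memory_order_release);
}

/* Arbitrary read (!dependent): acquire, to pair with the release store. */
static void *
slot_read_any(slot_t *slot) {
	return atomic_load_explicit(&slot->value, memory_order_acquire);
}

/*
 * Dependent read: the caller already holds a pointer whose existence implies
 * the earlier write is visible, so a relaxed load suffices.
 */
static void *
slot_read_dependent(slot_t *slot) {
	return atomic_load_explicit(&slot->value, memory_order_relaxed);
}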
#  ifdef RTREE_LEAF_COMPACT
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
	return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
#    ifdef __aarch64__
	/*
	 * aarch64 doesn't sign extend the highest virtual address bit to set
	 * the higher ones.  Instead, the high bits get zeroed.
	 */
	uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
	/* Mask off the slab bit. */
	uintptr_t low_bit_mask = ~(uintptr_t)1;
	uintptr_t mask = high_bit_mask & low_bit_mask;
	return (extent_t *)(bits & mask);
#    else
	/* Restore sign-extended high bits, mask slab bit. */
	return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
	    RTREE_NHIB) & ~((uintptr_t)0x1));
#    endif
}

JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
	return (szind_t)(bits >> LG_VADDR);
}

JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
	return (bool)(bits & (uintptr_t)0x1);
}

#  endif
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
	return rtree_leaf_elm_bits_extent_get(bits);
#else
	extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
	return extent;
#endif
}

JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
	return rtree_leaf_elm_bits_szind_get(bits);
#else
	return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
	    : ATOMIC_ACQUIRE);
#endif
}

JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
	return rtree_leaf_elm_bits_slab_get(bits);
#else
	return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
	    ATOMIC_ACQUIRE);
#endif
}
static inline void
rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    extent_t *extent) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
	uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
	    LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
	    | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
	atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
#endif
}

static inline void
rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    szind_t szind) {
	assert(szind <= SC_NSIZES);

#ifdef RTREE_LEAF_COMPACT
	uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
	uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
	    ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
	    (((uintptr_t)0x1 << LG_VADDR) - 1)) |
	    ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
	atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
#endif
}

static inline void
rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool slab) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
	uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
	    LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
	    (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
	atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
#endif
}
static inline void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    extent_t *extent, szind_t szind, bool slab) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
	    ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
	    ((uintptr_t)slab);
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
	rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
	rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
	/*
	 * Write extent last, since the element is atomically considered valid
	 * as soon as the extent field is non-NULL.
	 */
	rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
#endif
}

static inline void
rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
	assert(!slab || szind < SC_NBINS);

	/*
	 * The caller implicitly assures that it is the only writer to the
	 * szind and slab fields, and that the extent field cannot currently
	 * change.
	 */
	rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
	rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
}
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, bool init_missing) {
	assert(key != 0);
	assert(!dependent || !init_missing);

	size_t slot = rtree_cache_direct_map(key);
	uintptr_t leafkey = rtree_leafkey(key);
	assert(leafkey != RTREE_LEAFKEY_INVALID);

	/* Fast path: L1 direct mapped cache. */
	if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
		rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
		assert(leaf != NULL);
		uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
		return &leaf[subkey];
	}
	/*
	 * Search the L2 LRU cache.  On hit, swap the matching element into the
	 * slot in L1 cache, and move the position in L2 up by 1.
	 */
#define RTREE_CACHE_CHECK_L2(i) do { \
	if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \
		rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \
		assert(leaf != NULL); \
		if (i > 0) { \
			/* Bubble up by one. */ \
			rtree_ctx->l2_cache[i].leafkey = \
			    rtree_ctx->l2_cache[i - 1].leafkey; \
			rtree_ctx->l2_cache[i].leaf = \
			    rtree_ctx->l2_cache[i - 1].leaf; \
			rtree_ctx->l2_cache[i - 1].leafkey = \
			    rtree_ctx->cache[slot].leafkey; \
			rtree_ctx->l2_cache[i - 1].leaf = \
			    rtree_ctx->cache[slot].leaf; \
		} else { \
			rtree_ctx->l2_cache[0].leafkey = \
			    rtree_ctx->cache[slot].leafkey; \
			rtree_ctx->l2_cache[0].leaf = \
			    rtree_ctx->cache[slot].leaf; \
		} \
		rtree_ctx->cache[slot].leafkey = leafkey; \
		rtree_ctx->cache[slot].leaf = leaf; \
		uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \
		return &leaf[subkey]; \
	} \
} while (0)
	/* Check the first cache entry. */
	RTREE_CACHE_CHECK_L2(0);
	/* Search the remaining cache elements. */
	for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
		RTREE_CACHE_CHECK_L2(i);
	}
#undef RTREE_CACHE_CHECK_L2

	return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
	    dependent, init_missing);
}
static inline bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    extent_t *extent, szind_t szind, bool slab) {
	/* Use rtree_clear() to set the extent to NULL. */
	assert(extent != NULL);

	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, false, true);
	if (elm == NULL) {
		return true;
	}

	assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
	rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);

	return false;
}
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    bool dependent) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, dependent, false);
	if (!dependent && elm == NULL) {
		return NULL;
	}
	assert(elm != NULL);
	return elm;
}

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
	    dependent);
	if (!dependent && elm == NULL) {
		return NULL;
	}
	return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
}
JEMALLOC_ALWAYS_INLINE szind_t
rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
	    dependent);
	if (!dependent && elm == NULL) {
		return SC_NSIZES;
	}
	return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
}
/*
* rtree_slab_read() is intentionally omitted because slab is always read in
* conjunction with szind, which makes rtree_szind_slab_read() a better choice.
*/
JEMALLOC_ALWAYS_INLINE bool
rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
	    dependent);
	if (!dependent && elm == NULL) {
		return true;
	}
	*r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
	*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
	return false;
}
/*
* Try to read szind_slab from the L1 cache. Returns true on a hit,
* and fills in r_szind and r_slab. Otherwise returns false.
*
* Key is allowed to be NULL in order to save an extra branch on the
* fast path; false is returned in this case.
*/
JEMALLOC_ALWAYS_INLINE bool
rtree_szind_slab_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, szind_t *r_szind, bool *r_slab) {
	rtree_leaf_elm_t *elm;

	size_t slot = rtree_cache_direct_map(key);
	uintptr_t leafkey = rtree_leafkey(key);
	assert(leafkey != RTREE_LEAFKEY_INVALID);

	if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
		rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
		assert(leaf != NULL);
		uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
		elm = &leaf[subkey];

#ifdef RTREE_LEAF_COMPACT
		uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
		    true);
		*r_szind = rtree_leaf_elm_bits_szind_get(bits);
		*r_slab = rtree_leaf_elm_bits_slab_get(bits);
#else
		*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, true);
		*r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, true);
#endif
		return true;
	} else {
		return false;
	}
}
JEMALLOC_ALWAYS_INLINE bool
rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
	    dependent);
	if (!dependent && elm == NULL) {
		return true;
	}
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
	*r_szind = rtree_leaf_elm_bits_szind_get(bits);
	*r_slab = rtree_leaf_elm_bits_slab_get(bits);
#else
	*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
	*r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
#endif
	return false;
}
static inline void
rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, szind_t szind, bool slab) {
	assert(!slab || szind < SC_NBINS);

	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
	rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
}

static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
	assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) != NULL);
	rtree_leaf_elm_write(tsdn, rtree, elm, NULL, SC_NSIZES, false);
}
#endif /* JEMALLOC_INTERNAL_RTREE_H */
deps/jemalloc/include/jemalloc/internal/rtree_tsd.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H
#define JEMALLOC_INTERNAL_RTREE_CTX_H
/*
* Number of leafkey/leaf pairs to cache in L1 and L2 level respectively. Each
* entry supports an entire leaf, so the cache hit rate is typically high even
* with a small number of entries. In rare cases extent activity will straddle
* the boundary between two leaf nodes. Furthermore, an arena may use a
* combination of dss and mmap. Note that as memory usage grows past the amount
* that this cache can directly cover, the cache will become less effective if
* locality of reference is low, but the consequence is merely cache misses
* while traversing the tree nodes.
*
* The L1 direct mapped cache offers consistent and low cost on cache hit.
* However, collisions could affect the hit rate negatively. This is resolved
* by combining it with an L2 LRU cache, which requires linear search and
* re-ordering on access but suffers no collisions. Note that the cache will
* itself suffer cache misses if made overly large, plus there is the cost of
* linear search in the LRU cache.
*/
#define RTREE_CTX_LG_NCACHE 4
#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
#define RTREE_CTX_NCACHE_L2 8
/*
* Zero initializer required for tsd initialization only. Proper initialization
* done via rtree_ctx_data_init().
*/
#define RTREE_CTX_ZERO_INITIALIZER {{{0, 0}}, {{0, 0}}}
typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;

typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t;
struct rtree_ctx_cache_elm_s {
	uintptr_t	leafkey;
	rtree_leaf_elm_t *leaf;
};

typedef struct rtree_ctx_s rtree_ctx_t;
struct rtree_ctx_s {
	/* Direct mapped cache. */
	rtree_ctx_cache_elm_t	cache[RTREE_CTX_NCACHE];
	/* L2 LRU cache. */
	rtree_ctx_cache_elm_t	l2_cache[RTREE_CTX_NCACHE_L2];
};

void rtree_ctx_data_init(rtree_ctx_t *ctx);
#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */
deps/jemalloc/include/jemalloc/internal/safety_check.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H
#define JEMALLOC_INTERNAL_SAFETY_CHECK_H
void safety_check_fail(const char *format, ...);
/* Can set to NULL for a default. */
void safety_check_set_abort(void (*abort_fn)());
JEMALLOC_ALWAYS_INLINE void
safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) {
	assert(usize < bumped_usize);
	for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
		*((unsigned char *)ptr + i) = 0xBC;
	}
}

JEMALLOC_ALWAYS_INLINE void
safety_check_verify_redzone(const void *ptr, size_t usize, size_t bumped_usize)
{
	for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
		if (unlikely(*((unsigned char *)ptr + i) != 0xBC)) {
			safety_check_fail("Use after free error\n");
		}
	}
}
#endif /* JEMALLOC_INTERNAL_SAFETY_CHECK_H */
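The two inline helpers above implement a simple trailing-redzone scheme: the bytes between the requested usable size and the bumped size (capped at 32 bytes) are filled with 0xBC when the allocation is handed out and re-checked later; a mismatch indicates an out-of-bounds write or use after free. The standalone sketch below reproduces that pattern outside jemalloc; the buffer sizes and helper names are assumptions made for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define REDZONE_BYTE 0xBC
#define REDZONE_MAX  32

/* Fill the trailing slack bytes with the redzone pattern. */
static void
redzone_set(unsigned char *ptr, size_t usize, size_t bumped_usize) {
	for (size_t i = usize; i < bumped_usize && i < usize + REDZONE_MAX; i++) {
		ptr[i] = REDZONE_BYTE;
	}
}

/* Return 1 if the redzone is intact, 0 if any byte was overwritten. */
static int
redzone_ok(const unsigned char *ptr, size_t usize, size_t bumped_usize) {
	for (size_t i = usize; i < bumped_usize && i < usize + REDZONE_MAX; i++) {
		if (ptr[i] != REDZONE_BYTE) {
			return 0;
		}
	}
	return 1;
}

int
main(void) {
	size_t usize = 24, bumped = 32; /* example sizes */
	unsigned char *buf = malloc(bumped);
	if (buf == NULL) {
		return 1;
	}
	redzone_set(buf, usize, bumped);
	memset(buf, 0, usize);          /* in-bounds writes leave it intact */
	printf("redzone intact: %d\n", redzone_ok(buf, usize, bumped));
	buf[usize] = 0;                 /* simulated one-byte overflow */
	printf("redzone intact: %d\n", redzone_ok(buf, usize, bumped));
	free(buf);
	return 0;
}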
deps/jemalloc/include/jemalloc/internal/sc.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_SC_H
#define JEMALLOC_INTERNAL_SC_H
#include "jemalloc/internal/jemalloc_internal_types.h"
/*
* Size class computations:
*
* These are a little tricky; we'll first start by describing how things
* generally work, and then describe some of the details.
*
* Ignore the first few size classes for a moment. We can then split all the
* remaining size classes into groups. The size classes in a group are spaced
* such that they cover allocation request sizes in a power-of-2 range. The
* power of two is called the base of the group, and the size classes in it
* satisfy allocations in the half-open range (base, base * 2]. There are
* SC_NGROUP size classes in each group, equally spaced in the range, so that
* each one covers allocations for base / SC_NGROUP possible allocation sizes.
* We call that value (base / SC_NGROUP) the delta of the group. Each size class
* is delta larger than the one before it (including the initial size class in a
* group, which is delta larger than base, the largest size class in the
* previous group).
* To make the math all work out nicely, we require that SC_NGROUP is a power of
* two, and define it in terms of SC_LG_NGROUP. We'll often talk in terms of
* lg_base and lg_delta. For each of these groups then, we have that
* lg_delta == lg_base - SC_LG_NGROUP.
* The size classes in a group with a given lg_base and lg_delta (which, recall,
* can be computed from lg_base for these groups) are therefore:
* base + 1 * delta
* which covers allocations in (base, base + 1 * delta]
* base + 2 * delta
* which covers allocations in (base + 1 * delta, base + 2 * delta].
* base + 3 * delta
* which covers allocations in (base + 2 * delta, base + 3 * delta].
* ...
* base + SC_NGROUP * delta ( == 2 * base)
* which covers allocations in (base + (SC_NGROUP - 1) * delta, 2 * base].
* (Note that currently SC_NGROUP is always 4, so the "..." is empty in
* practice.)
* Note that the last size class in the group is the next power of two (after
* base), so that we've set up the induction correctly for the next group's
* selection of delta.
*
* Now, let's start considering the first few size classes. Two extra constants
* come into play here: LG_QUANTUM and SC_LG_TINY_MIN. LG_QUANTUM ensures
* correct platform alignment; all objects of size (1 << LG_QUANTUM) or larger
* are at least (1 << LG_QUANTUM) aligned; this can be used to ensure that we
* never return improperly aligned memory, by making (1 << LG_QUANTUM) equal the
* highest required alignment of a platform. For allocation sizes smaller than
* (1 << LG_QUANTUM) though, we can be more relaxed (since we don't support
* platforms with types with alignment larger than their size). To allow such
* allocations (without wasting space unnecessarily), we introduce tiny size
* classes; one per power of two, up until we hit the quantum size. There are
* therefore LG_QUANTUM - SC_LG_TINY_MIN such size classes.
*
* Next, we have a size class of size (1 << LG_QUANTUM). This can't be the
* start of a group in the sense we described above (covering a power of two
* range) since, if we divided into it to pick a value of delta, we'd get a
* delta smaller than (1 << LG_QUANTUM) for sizes >= (1 << LG_QUANTUM), which
* is against the rules.
*
* The first base we can divide by SC_NGROUP while still being at least
* (1 << LG_QUANTUM) is SC_NGROUP * (1 << LG_QUANTUM). We can get there by
* having SC_NGROUP size classes, spaced (1 << LG_QUANTUM) apart. These size
* classes are:
* 1 * (1 << LG_QUANTUM)
* 2 * (1 << LG_QUANTUM)
* 3 * (1 << LG_QUANTUM)
* ... (although, as above, this "..." is empty in practice)
* SC_NGROUP * (1 << LG_QUANTUM).
*
* There are SC_NGROUP of these size classes, so we can regard it as a sort of
* pseudo-group, even though it spans multiple powers of 2, is divided
* differently, and both starts and ends on a power of 2 (as opposed to just
* ending). SC_NGROUP is itself a power of two, so the first group after the
* pseudo-group has the power-of-two base SC_NGROUP * (1 << LG_QUANTUM), for a
* lg_base of LG_QUANTUM + SC_LG_NGROUP. We can divide this base into SC_NGROUP
* sizes without violating our LG_QUANTUM requirements, so we can safely set
* lg_delta = lg_base - SC_LG_NGROUP (== LG_QUANTUM).
*
* So, in order, the size classes are:
*
* Tiny size classes:
* - Count: LG_QUANTUM - SC_LG_TINY_MIN.
* - Sizes:
* 1 << SC_LG_TINY_MIN
* 1 << (SC_LG_TINY_MIN + 1)
* 1 << (SC_LG_TINY_MIN + 2)
* ...
* 1 << (LG_QUANTUM - 1)
*
* Initial pseudo-group:
* - Count: SC_NGROUP
* - Sizes:
* 1 * (1 << LG_QUANTUM)
* 2 * (1 << LG_QUANTUM)
* 3 * (1 << LG_QUANTUM)
* ...
* SC_NGROUP * (1 << LG_QUANTUM)
*
* Regular group 0:
* - Count: SC_NGROUP
* - Sizes:
* (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP and lg_delta of
* lg_base - SC_LG_NGROUP)
* (1 << lg_base) + 1 * (1 << lg_delta)
* (1 << lg_base) + 2 * (1 << lg_delta)
* (1 << lg_base) + 3 * (1 << lg_delta)
* ...
* (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
*
* Regular group 1:
* - Count: SC_NGROUP
* - Sizes:
* (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + 1 and lg_delta of
* lg_base - SC_LG_NGROUP)
* (1 << lg_base) + 1 * (1 << lg_delta)
* (1 << lg_base) + 2 * (1 << lg_delta)
* (1 << lg_base) + 3 * (1 << lg_delta)
* ...
* (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
*
* ...
*
* Regular group N:
* - Count: SC_NGROUP
* - Sizes:
* (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + N and lg_delta of
* lg_base - SC_LG_NGROUP)
* (1 << lg_base) + 1 * (1 << lg_delta)
* (1 << lg_base) + 2 * (1 << lg_delta)
* (1 << lg_base) + 3 * (1 << lg_delta)
* ...
* (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
*
*
* Representation of metadata:
* To make the math easy, we'll mostly work in lg quantities. We record lg_base,
* lg_delta, and ndelta (i.e. number of deltas above the base) on a
* per-size-class basis, and maintain the invariant that, across all size
* classes, size == (1 << lg_base) + ndelta * (1 << lg_delta).
*
* For regular groups (i.e. those with lg_base >= LG_QUANTUM + SC_LG_NGROUP),
* lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP.
*
* For the initial tiny size classes (if any), lg_base is lg(size class size).
* lg_delta is lg_base for the first size class, and lg_base - 1 for all
* subsequent ones. ndelta is always 0.
*
* For the pseudo-group, if there are no tiny size classes, then we set
* lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0
* to SC_NGROUP - 1. (Note that delta == base, so base + (SC_NGROUP - 1) * delta
* is just SC_NGROUP * base, or (1 << (SC_LG_NGROUP + LG_QUANTUM)), so we do
* indeed get a power of two that way). If there *are* tiny size classes, then
* the first size class needs to have lg_delta relative to the largest tiny size
* class. We therefore set lg_base == LG_QUANTUM - 1,
* lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the
* pseudo-group the same.
*
*
* Other terminology:
* "Small" size classes mean those that are allocated out of bins, which is the
* same as those that are slab allocated.
* "Large" size classes are those that are not small. The cutoff for counting as
* large is page size * group size.
*/
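As a worked example of the grouping described above: with assumed values LG_QUANTUM = 4, SC_LG_NGROUP = 2, and SC_LG_TINY_MIN = 3, the tiny classes are just {8}, the pseudo-group is {16, 32, 48, 64}, and the first regular group (lg_base = 6, lg_delta = 4) is {80, 96, 112, 128}. The short sketch below prints the first few classes directly from the invariant size == (1 << lg_base) + ndelta * (1 << lg_delta); the parameter values are assumptions, not a statement about any particular build.

#include <stdio.h>

/* Assumed example parameters (typical of x86-64 builds): */
#define LG_QUANTUM     4
#define SC_LG_NGROUP   2
#define SC_LG_TINY_MIN 3
#define SC_NGROUP      (1 << SC_LG_NGROUP)

int
main(void) {
	/* Tiny classes: one per power of two below the quantum. */
	for (int lg = SC_LG_TINY_MIN; lg < LG_QUANTUM; lg++) {
		printf("tiny:    %d\n", 1 << lg);
	}
	/* Pseudo-group: SC_NGROUP classes spaced one quantum apart. */
	for (int n = 1; n <= SC_NGROUP; n++) {
		printf("pseudo:  %d\n", n * (1 << LG_QUANTUM));
	}
	/* First two regular groups: size = (1<<lg_base) + ndelta*(1<<lg_delta). */
	for (int lg_base = LG_QUANTUM + SC_LG_NGROUP;
	    lg_base < LG_QUANTUM + SC_LG_NGROUP + 2; lg_base++) {
		int lg_delta = lg_base - SC_LG_NGROUP;
		for (int ndelta = 1; ndelta <= SC_NGROUP; ndelta++) {
			printf("regular: %d\n",
			    (1 << lg_base) + ndelta * (1 << lg_delta));
		}
	}
	return 0;
}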
/*
* Size class N + (1 << SC_LG_NGROUP) is twice the size of size class N.
*/
#define SC_LG_NGROUP 2
#define SC_LG_TINY_MIN 3
#if SC_LG_TINY_MIN == 0
/* The div module doesn't support division by 1, which this would require. */
#error "Unsupported LG_TINY_MIN"
#endif
/*
* The definitions below are all determined by the above settings and system
* characteristics.
*/
#define SC_NGROUP (1ULL << SC_LG_NGROUP)
#define SC_PTR_BITS ((1ULL << LG_SIZEOF_PTR) * 8)
#define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN)
#define SC_LG_TINY_MAXCLASS (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1)
#define SC_NPSEUDO SC_NGROUP
#define SC_LG_FIRST_REGULAR_BASE (LG_QUANTUM + SC_LG_NGROUP)
/*
* We cap allocations to be less than 2 ** (ptr_bits - 1), so the highest base
* we need is 2 ** (ptr_bits - 2). (This also means that the last group is 1
* size class shorter than the others).
* We could probably save some space in arenas by capping this at LG_VADDR size.
*/
#define SC_LG_BASE_MAX (SC_PTR_BITS - 2)
#define SC_NREGULAR (SC_NGROUP * \
(SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR)
/* The number of size classes that are a multiple of the page size. */
#define SC_NPSIZES ( \
/* Start with all the size classes. */ \
SC_NSIZES \
/* Subtract out those groups with too small a base. */ \
- (LG_PAGE - 1 - SC_LG_FIRST_REGULAR_BASE) * SC_NGROUP \
/* And the pseudo-group. */ \
- SC_NPSEUDO \
/* And the tiny group. */ \
- SC_NTINY \
/* Sizes where ndelta*delta is not a multiple of the page size. */ \
- (SC_LG_NGROUP * SC_NGROUP))
/*
* Note that the last line is computed as the sum of the second column in the
* following table:
* lg(base) | count of sizes to exclude
* ------------------------------|-----------------------------
* LG_PAGE - 1 | SC_NGROUP - 1
* LG_PAGE | SC_NGROUP - 1
* LG_PAGE + 1 | SC_NGROUP - 2
* LG_PAGE + 2 | SC_NGROUP - 4
* ... | ...
* LG_PAGE + (SC_LG_NGROUP - 1) | SC_NGROUP - (SC_NGROUP / 2)
*/
/*
* We declare a size class is binnable if size < page size * group. Or, in other
* words, lg(size) < lg(page size) + lg(group size).
*/
#define SC_NBINS ( \
/* Sub-regular size classes. */ \
SC_NTINY + SC_NPSEUDO \
/* Groups with lg_regular_min_base <= lg_base <= lg_base_max */ \
+ SC_NGROUP * (LG_PAGE + SC_LG_NGROUP - SC_LG_FIRST_REGULAR_BASE) \
/* Last SC of the last group hits the bound exactly; exclude it. */ \
- 1)
/*
* The size2index_tab lookup table uses uint8_t to encode each bin index, so we
* cannot support more than 256 small size classes.
*/
#if (SC_NBINS > 256)
# error "Too many small size classes"
#endif
/* The largest size class in the lookup table. */
#define SC_LOOKUP_MAXCLASS ((size_t)1 << 12)
/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */
#define SC_SMALL_MAX_BASE ((size_t)1 << (LG_PAGE + SC_LG_NGROUP - 1))
#define SC_SMALL_MAX_DELTA ((size_t)1 << (LG_PAGE - 1))
/* The largest size class allocated out of a slab. */
#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \
+ (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)
/* The smallest size class not allocated out of a slab. */
#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP))
#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP)
/* Internal; only used for the definition of SC_LARGE_MAXCLASS. */
#define SC_MAX_BASE ((size_t)1 << (SC_PTR_BITS - 2))
#define SC_MAX_DELTA ((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP))
/* The largest size class supported. */
#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)
typedef struct sc_s sc_t;
struct sc_s {
	/* Size class index, or -1 if not a valid size class. */
	int index;
	/* Lg group base size (no deltas added). */
	int lg_base;
	/* Lg delta to previous size class. */
	int lg_delta;
	/* Delta multiplier.  size == 1<<lg_base + ndelta<<lg_delta */
	int ndelta;
	/*
	 * True if the size class is a multiple of the page size, false
	 * otherwise.
	 */
	bool psz;
	/*
	 * True if the size class is a small, bin, size class.  False otherwise.
	 */
	bool bin;
	/* The slab page count if a small bin size class, 0 otherwise. */
	int pgs;
	/* Same as lg_delta if a lookup table size class, 0 otherwise. */
	int lg_delta_lookup;
};

typedef struct sc_data_s sc_data_t;
struct sc_data_s {
	/* Number of tiny size classes. */
	unsigned ntiny;
	/* Number of bins supported by the lookup table. */
	int nlbins;
	/* Number of small size class bins. */
	int nbins;
	/* Number of size classes. */
	int nsizes;
	/* Number of bits required to store NSIZES. */
	int lg_ceil_nsizes;
	/* Number of size classes that are a multiple of (1U << LG_PAGE). */
	unsigned npsizes;
	/* Lg of maximum tiny size class (or -1, if none). */
	int lg_tiny_maxclass;
	/* Maximum size class included in lookup table. */
	size_t lookup_maxclass;
	/* Maximum small size class. */
	size_t small_maxclass;
	/* Lg of minimum large size class. */
	int lg_large_minclass;
	/* The minimum large size class. */
	size_t large_minclass;
	/* Maximum (large) size class. */
	size_t large_maxclass;
	/* True if the sc_data_t has been initialized (for debugging only). */
	bool initialized;

	sc_t sc[SC_NSIZES];
};

void sc_data_init(sc_data_t *data);
/*
 * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
 * Otherwise, does its best to accommodate the request.
 */
void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
    int pgs);
void sc_boot(sc_data_t *data);
#endif /* JEMALLOC_INTERNAL_SC_H */