Commit 4a884343 authored by Yoav Steinberg

Delete old jemalloc before pulling in subtree.

parent 7ff7536e
#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
/* Page size. LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the page base address for the page containing address a. */
#define PAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~PAGE_MASK))
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
/* Huge page size. LG_HUGEPAGE is determined by the configure script. */
#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
/* Return the huge page base address for the huge page containing address a. */
#define HUGEPAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
/* Return the smallest huge page size multiple that is >= s. */
#define HUGEPAGE_CEILING(s) \
(((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
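/*
 * Worked example of the masking arithmetic above (illustrative only,
 * assuming LG_PAGE == 12, i.e. 4 KiB pages):
 *
 *   PAGE                    == 0x1000
 *   PAGE_MASK               == 0x0fff
 *   PAGE_ADDR2BASE(0x12345) == 0x12000  (clear the low LG_PAGE bits)
 *   PAGE_CEILING(0x12345)   == 0x13000  (round up to the next page boundary)
 *   PAGE_CEILING(0x12000)   == 0x12000  (page-aligned sizes are unchanged)
 *
 * The HUGEPAGE_* variants are identical with LG_HUGEPAGE in place of LG_PAGE.
 */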
/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
# define PAGES_CAN_PURGE_LAZY
#endif
/*
* PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
*
* The only supported way to hard-purge on Windows is to decommit and then
* re-commit, but doing so is racy, and if re-commit fails it's a pain to
* propagate the "poisoned" memory state. Since we typically decommit as the
* next step after purging on Windows anyway, there's no point in adding such
* complexity.
*/
#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
defined(JEMALLOC_MAPS_COALESCE))
# define PAGES_CAN_PURGE_FORCED
#endif
static const bool pages_can_purge_lazy =
#ifdef PAGES_CAN_PURGE_LAZY
true
#else
false
#endif
;
static const bool pages_can_purge_forced =
#ifdef PAGES_CAN_PURGE_FORCED
true
#else
false
#endif
;
typedef enum {
thp_mode_default = 0, /* Do not change hugepage settings. */
thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */
thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */
thp_mode_names_limit = 3, /* Used for option processing. */
thp_mode_not_supported = 3 /* No THP support detected. */
} thp_mode_t;
#define THP_MODE_DEFAULT thp_mode_default
extern thp_mode_t opt_thp;
extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
extern const char *thp_mode_names[];
void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
void pages_unmap(void *addr, size_t size);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge_lazy(void *addr, size_t size);
bool pages_purge_forced(void *addr, size_t size);
bool pages_huge(void *addr, size_t size);
bool pages_nohuge(void *addr, size_t size);
bool pages_dontdump(void *addr, size_t size);
bool pages_dodump(void *addr, size_t size);
bool pages_boot(void);
void pages_set_thp_state(void *ptr, size_t size);
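/*
 * Minimal usage sketch for the declarations above.  demo_page_roundtrip() is
 * hypothetical; it assumes pages_boot() has already run and that *commit is
 * an in/out hint, which is inferred from the signatures rather than stated
 * in this header.
 */
static inline bool
demo_page_roundtrip(void) {
	bool commit = true;
	void *addr = pages_map(NULL, PAGE, PAGE, &commit);
	if (addr == NULL) {
		return false;
	}
	pages_unmap(addr, PAGE);
	return true;
}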
#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
/*
* A Pairing Heap implementation.
*
* "The Pairing Heap: A New Form of Self-Adjusting Heap"
* https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
*
* With an auxiliary two-pass list, described in a follow-on paper.
*
* "Pairing Heaps: Experiments and Analysis"
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
*
*******************************************************************************
*/
#ifndef PH_H_
#define PH_H_
/* Node structure. */
#define phn(a_type) \
struct { \
a_type *phn_prev; \
a_type *phn_next; \
a_type *phn_lchild; \
}
/* Root structure. */
#define ph(a_type) \
struct { \
a_type *ph_root; \
}
/* Internal utility macros. */
#define phn_lchild_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_lchild)
#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
a_phn->a_field.phn_lchild = a_lchild; \
} while (0)
#define phn_next_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_next)
#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
a_phn->a_field.phn_prev = a_prev; \
} while (0)
#define phn_prev_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_prev)
#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
a_phn->a_field.phn_next = a_next; \
} while (0)
#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
a_type *phn0child; \
\
assert(a_phn0 != NULL); \
assert(a_phn1 != NULL); \
assert(a_cmp(a_phn0, a_phn1) <= 0); \
\
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
phn_next_set(a_type, a_field, a_phn1, phn0child); \
if (phn0child != NULL) { \
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
} \
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)
#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
if (a_phn0 == NULL) { \
r_phn = a_phn1; \
} else if (a_phn1 == NULL) { \
r_phn = a_phn0; \
} else if (a_cmp(a_phn0, a_phn1) < 0) { \
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
a_cmp); \
r_phn = a_phn0; \
} else { \
phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
a_cmp); \
r_phn = a_phn1; \
} \
} while (0)
#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *head = NULL; \
a_type *tail = NULL; \
a_type *phn0 = a_phn; \
a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
\
/* \
* Multipass merge, wherein the first two elements of a FIFO \
* are repeatedly merged, and each result is appended to the \
* singly linked FIFO, until the FIFO contains only a single \
* element. We start with a sibling list but no reference to \
* its tail, so we do a single pass over the sibling list to \
* populate the FIFO. \
*/ \
if (phn1 != NULL) { \
a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, NULL); \
phn_next_set(a_type, a_field, phn0, NULL); \
phn_prev_set(a_type, a_field, phn1, NULL); \
phn_next_set(a_type, a_field, phn1, NULL); \
phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
head = tail = phn0; \
phn0 = phnrest; \
while (phn0 != NULL) { \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
phnrest = phn_next_get(a_type, a_field, \
phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, \
phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, \
NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
phn_prev_set(a_type, a_field, phn1, \
NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = phnrest; \
} else { \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = NULL; \
} \
} \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
while (true) { \
head = phn_next_get(a_type, a_field, \
phn1); \
assert(phn_prev_get(a_type, a_field, \
phn0) == NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
assert(phn_prev_get(a_type, a_field, \
phn1) == NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
if (head == NULL) { \
break; \
} \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, \
phn0); \
} \
} \
} \
r_phn = phn0; \
} while (0)
#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
if (phn != NULL) { \
phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_prev_set(a_type, a_field, phn, NULL); \
ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
assert(phn_next_get(a_type, a_field, phn) == NULL); \
phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
a_ph->ph_root); \
} \
} while (0)
#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
if (lchild == NULL) { \
r_phn = NULL; \
} else { \
ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
r_phn); \
} \
} while (0)
/*
* The ph_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to ph_gen().
*/
#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
a_attr void a_prefix##new(a_ph_type *ph); \
a_attr bool a_prefix##empty(a_ph_type *ph); \
a_attr a_type *a_prefix##first(a_ph_type *ph); \
a_attr a_type *a_prefix##any(a_ph_type *ph); \
a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \
a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
/*
* The ph_gen() macro generates a type-specific pairing heap implementation,
* based on the above cpp macros.
*/
#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_ph_type *ph) { \
memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
a_prefix##empty(a_ph_type *ph) { \
return (ph->ph_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_ph_type *ph) { \
if (ph->ph_root == NULL) { \
return NULL; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
return ph->ph_root; \
} \
a_attr a_type * \
a_prefix##any(a_ph_type *ph) { \
if (ph->ph_root == NULL) { \
return NULL; \
} \
a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \
if (aux != NULL) { \
return aux; \
} \
return ph->ph_root; \
} \
a_attr void \
a_prefix##insert(a_ph_type *ph, a_type *phn) { \
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
/* \
* Treat the root as an aux list during insertion, and lazily \
* merge during a_prefix##remove_first(). For elements that \
* are inserted, then removed via a_prefix##remove() before the \
* aux list is ever processed, this makes insert/remove \
* constant-time, whereas eager merging would make insert \
* O(log n). \
*/ \
if (ph->ph_root == NULL) { \
ph->ph_root = phn; \
} else { \
phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
a_field, ph->ph_root)); \
if (phn_next_get(a_type, a_field, ph->ph_root) != \
NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, ph->ph_root), \
phn); \
} \
phn_prev_set(a_type, a_field, phn, ph->ph_root); \
phn_next_set(a_type, a_field, ph->ph_root, phn); \
} \
} \
a_attr a_type * \
a_prefix##remove_first(a_ph_type *ph) { \
a_type *ret; \
\
if (ph->ph_root == NULL) { \
return NULL; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
\
ret = ph->ph_root; \
\
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
\
return ret; \
} \
a_attr a_type * \
a_prefix##remove_any(a_ph_type *ph) { \
/* \
* Remove the most recently inserted aux list element, or the \
* root if the aux list is empty. This has the effect of \
* behaving as a LIFO (and insertion/removal is therefore \
* constant-time) if a_prefix##[remove_]first() are never \
* called. \
*/ \
if (ph->ph_root == NULL) { \
return NULL; \
} \
a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \
if (ret != NULL) { \
a_type *aux = phn_next_get(a_type, a_field, ret); \
phn_next_set(a_type, a_field, ph->ph_root, aux); \
if (aux != NULL) { \
phn_prev_set(a_type, a_field, aux, \
ph->ph_root); \
} \
return ret; \
} \
ret = ph->ph_root; \
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
return ret; \
} \
a_attr void \
a_prefix##remove(a_ph_type *ph, a_type *phn) { \
a_type *replace, *parent; \
\
if (ph->ph_root == phn) { \
/* \
* We can delete from aux list without merging it, but \
* we need to merge if we are dealing with the root \
* node and it has children. \
*/ \
if (phn_lchild_get(a_type, a_field, phn) == NULL) { \
ph->ph_root = phn_next_get(a_type, a_field, \
phn); \
if (ph->ph_root != NULL) { \
phn_prev_set(a_type, a_field, \
ph->ph_root, NULL); \
} \
return; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
if (ph->ph_root == phn) { \
ph_merge_children(a_type, a_field, ph->ph_root, \
a_cmp, ph->ph_root); \
return; \
} \
} \
\
/* Get parent (if phn is leftmost child) before mutating. */ \
if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
if (phn_lchild_get(a_type, a_field, parent) != phn) { \
parent = NULL; \
} \
} \
/* Find a possible replacement node, and link to parent. */ \
ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
/* Set next/prev for sibling linked list. */ \
if (replace != NULL) { \
if (parent != NULL) { \
phn_prev_set(a_type, a_field, replace, parent); \
phn_lchild_set(a_type, a_field, parent, \
replace); \
} else { \
phn_prev_set(a_type, a_field, replace, \
phn_prev_get(a_type, a_field, phn)); \
if (phn_prev_get(a_type, a_field, phn) != \
NULL) { \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
replace); \
} \
} \
phn_next_set(a_type, a_field, replace, \
phn_next_get(a_type, a_field, phn)); \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
replace); \
} \
} else { \
if (parent != NULL) { \
a_type *next = phn_next_get(a_type, a_field, \
phn); \
phn_lchild_set(a_type, a_field, parent, next); \
if (next != NULL) { \
phn_prev_set(a_type, a_field, next, \
parent); \
} \
} else { \
assert(phn_prev_get(a_type, a_field, phn) != \
NULL); \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
phn_next_get(a_type, a_field, phn)); \
} \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
phn_prev_get(a_type, a_field, phn)); \
} \
} \
}
#endif /* PH_H_ */
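/*
 * Minimal instantiation sketch for the macros above.  node_t, node_cmp and
 * the node_heap_ prefix are hypothetical names chosen for illustration; the
 * standard headers are included for the memset() and assert() calls that
 * ph_gen() expands to.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct node_s node_t;
struct node_s {
	uint64_t key;
	phn(node_t) link;	/* Embedded pairing-heap linkage. */
};

/* Min-heap order on key; returns <0, 0, or >0 as required by a_cmp. */
static int
node_cmp(const node_t *a, const node_t *b) {
	return (a->key > b->key) - (a->key < b->key);
}

typedef ph(node_t) node_heap_t;
ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)

/*
 * Usage:
 *   node_heap_t heap;
 *   node_heap_new(&heap);
 *   node_t n = {.key = 42};
 *   node_heap_insert(&heap, &n);
 *   node_t *min = node_heap_remove_first(&heap);	// min == &n
 */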
#!/bin/sh
for symbol in `cat "$@"` ; do
echo "#define ${symbol} JEMALLOC_N(${symbol})"
done
#!/bin/sh
#
# Generate private_symbols[_jet].awk.
#
# Usage: private_symbols.sh <sym_prefix> <sym>*
#
# <sym_prefix> is typically "" or "_".
sym_prefix=$1
shift
cat <<EOF
#!/usr/bin/env awk -f
BEGIN {
sym_prefix = "${sym_prefix}"
split("\\
EOF
for public_sym in "$@" ; do
cat <<EOF
${sym_prefix}${public_sym} \\
EOF
done
cat <<"EOF"
", exported_symbol_names)
# Store exported symbol names as keys in exported_symbols.
for (i in exported_symbol_names) {
exported_symbols[exported_symbol_names[i]] = 1
}
}
# Process 'nm -a <c_source.o>' output.
#
# Handle lines like:
# 0000000000000008 D opt_junk
# 0000000000007574 T malloc_initialized
(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) {
print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
}
# Process 'dumpbin /SYMBOLS <c_source.obj>' output.
#
# Handle lines like:
# 353 00008098 SECT4 notype External | opt_junk
# 3F1 00000000 SECT7 notype () External | malloc_initialized
($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
print $NF
}
EOF
#ifndef JEMALLOC_INTERNAL_PRNG_H
#define JEMALLOC_INTERNAL_PRNG_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
/*
* Simple linear congruential pseudo-random number generator:
*
* prng(y) = (a*x + c) % m
*
* where the following constants ensure maximal period:
*
* a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
* c == Odd number (relatively prime to 2^n).
* m == 2^32
*
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
* proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*/
/******************************************************************************/
/* INTERNAL DEFINITIONS -- IGNORE */
/******************************************************************************/
#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)
#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)
JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state) {
return (state * PRNG_A_32) + PRNG_C_32;
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state) {
return (state * PRNG_A_64) + PRNG_C_64;
}
JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state) {
#if LG_SIZEOF_PTR == 2
return (state * PRNG_A_32) + PRNG_C_32;
#elif LG_SIZEOF_PTR == 3
return (state * PRNG_A_64) + PRNG_C_64;
#else
#error Unsupported pointer size
#endif
}
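/*
 * Small sketch of why the comment above prefers the upper bits: with a odd
 * and c odd, the lowest bit of the state alternates every step (period 2),
 * bit 1 has period 4, and so on.  demo_low_bit_cycle() is hypothetical and
 * assumes assert() is available.
 */
static inline void
demo_low_bit_cycle(void) {
	uint32_t s = 0;
	int i;

	for (i = 0; i < 8; i++) {
		s = prng_state_next_u32(s);
		/* Starting from state 0, s & 1 is 1, 0, 1, 0, ... */
		assert((s & 1U) == (uint32_t)((i + 1) & 1));
	}
}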
/******************************************************************************/
/* BEGIN PUBLIC API */
/******************************************************************************/
/*
* The prng_lg_range functions give a uniform int in the half-open range [0,
* 2**lg_range). If atomic is true, they do so safely from multiple threads.
* Multithreaded 64-bit prngs aren't supported.
*/
JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
uint32_t ret, state0, state1;
assert(lg_range > 0);
assert(lg_range <= 32);
state0 = atomic_load_u32(state, ATOMIC_RELAXED);
if (atomic) {
do {
state1 = prng_state_next_u32(state0);
} while (!atomic_compare_exchange_weak_u32(state, &state0,
state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
} else {
state1 = prng_state_next_u32(state0);
atomic_store_u32(state, state1, ATOMIC_RELAXED);
}
ret = state1 >> (32 - lg_range);
return ret;
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
uint64_t ret, state1;
assert(lg_range > 0);
assert(lg_range <= 64);
state1 = prng_state_next_u64(*state);
*state = state1;
ret = state1 >> (64 - lg_range);
return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
size_t ret, state0, state1;
assert(lg_range > 0);
assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
state0 = atomic_load_zu(state, ATOMIC_RELAXED);
if (atomic) {
do {
state1 = prng_state_next_zu(state0);
} while (!atomic_compare_exchange_weak_zu(state, &state0,
state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
} else {
state1 = prng_state_next_zu(state0);
atomic_store_zu(state, state1, ATOMIC_RELAXED);
}
ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
return ret;
}
/*
* The prng_range functions behave like the prng_lg_range, but return a result
* in [0, range) instead of [0, 2**lg_range).
*/
JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
uint32_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_u32(state, lg_range, atomic);
} while (ret >= range);
return ret;
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range) {
uint64_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_u64(state, lg_range);
} while (ret >= range);
return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
size_t ret;
unsigned lg_range;
assert(range > 1);
/* Compute the ceiling of lg(range). */
lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
/* Generate a result in [0..range) via repeated trial. */
do {
ret = prng_lg_range_zu(state, lg_range, atomic);
} while (ret >= range);
return ret;
}
#endif /* JEMALLOC_INTERNAL_PRNG_H */
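/*
 * Usage sketch for the public API above.  demo_roll_die() is hypothetical:
 * it draws a uniform value in [0, 6) from the non-atomic 64-bit generator,
 * which simply threads its state through a plain uint64_t.
 *
 *   uint64_t state = 42;			// Any initial seed is acceptable.
 *   uint64_t r = demo_roll_die(&state);	// r in [0, 6)
 */
static inline uint64_t
demo_roll_die(uint64_t *state) {
	return prng_range_u64(state, 6);
}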
#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
#define JEMALLOC_INTERNAL_PROF_EXTERNS_H
#include "jemalloc/internal/mutex.h"
extern malloc_mutex_t bt2gctx_mtx;
extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_accum; /* Report cumulative bytes. */
extern char opt_prof_prefix[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
PATH_MAX +
#endif
1];
/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;
/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
* profile dump when it reaches this threshold. The effect is that the
* interval between profile dumps averages prof_interval, though the actual
* interval between dumps will tend to be sporadic, and the interval will be a
* maximum of approximately (prof_interval * narenas).
*/
extern uint64_t prof_interval;
/*
* Initialized as opt_lg_prof_sample, and potentially modified during profiling
* resets.
*/
extern size_t lg_prof_sample;
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
#endif
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *JET_MUTABLE prof_dump_open;
typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *JET_MUTABLE prof_dump_header;
#ifdef JEMALLOC_JET
void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
uint64_t *accumbytes);
#endif
bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum);
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H
#include "jemalloc/internal/mutex.h"
static inline bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
cassert(config_prof);
bool overflow;
uint64_t a0, a1;
/*
* If the application allocates fast enough (and/or if idump is slow
* enough), extreme overflow here (a1 >= prof_interval * 2) can cause
* idump trigger coalescing. This is an intentional mechanism that
* avoids rate-limiting allocation.
*/
#ifdef JEMALLOC_ATOMIC_U64
a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
do {
a1 = a0 + accumbytes;
assert(a1 >= a0);
overflow = (a1 >= prof_interval);
if (overflow) {
a1 %= prof_interval;
}
} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
malloc_mutex_lock(tsdn, &prof_accum->mtx);
a0 = prof_accum->accumbytes;
a1 = a0 + accumbytes;
overflow = (a1 >= prof_interval);
if (overflow) {
a1 %= prof_interval;
}
prof_accum->accumbytes = a1;
malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
return overflow;
}
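/*
 * The same accumulate-and-wrap pattern as prof_accum_add(), reduced to a
 * single-threaded sketch.  demo_accum_add() and its demo_interval parameter
 * are hypothetical; the point is that crossing the interval reports an
 * overflow exactly once and keeps only the remainder, so several rapid
 * crossings coalesce into a single idump trigger.
 */
static inline bool
demo_accum_add(uint64_t *accumbytes, uint64_t nbytes, uint64_t demo_interval) {
	uint64_t a1 = *accumbytes + nbytes;
	bool overflow = (a1 >= demo_interval);

	if (overflow) {
		a1 %= demo_interval;
	}
	*accumbytes = a1;
	return overflow;
}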
static inline void
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
cassert(config_prof);
/*
* Cancel out as much of the excessive prof_accumbytes increase as
* possible without underflowing. Interval-triggered dumps occur
* slightly more often than intended as a result of incomplete
* canceling.
*/
uint64_t a0, a1;
#ifdef JEMALLOC_ATOMIC_U64
a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
do {
a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS -
usize) : 0;
} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
malloc_mutex_lock(tsdn, &prof_accum->mtx);
a0 = prof_accum->accumbytes;
a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - usize) :
0;
prof_accum->accumbytes = a1;
malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
}
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
/*
* Even if opt_prof is true, sampling can be temporarily disabled by
* setting prof_active to false. No locking is used when reading
* prof_active in the fast path, so there are no guarantees regarding
* how long it will take for all threads to notice state changes.
*/
return prof_active;
}
#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
#define JEMALLOC_INTERNAL_PROF_INLINES_B_H
#include "jemalloc/internal/sz.h"
JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void) {
/*
* No locking is used when reading prof_gdump_val in the fast path, so
* there are no guarantees regarding how long it will take for all
* threads to notice state changes.
*/
return prof_gdump_val;
}
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create) {
prof_tdata_t *tdata;
cassert(config_prof);
tdata = tsd_prof_tdata_get(tsd);
if (create) {
if (unlikely(tdata == NULL)) {
if (tsd_nominal(tsd)) {
tdata = prof_tdata_init(tsd);
tsd_prof_tdata_set(tsd, tdata);
}
} else if (unlikely(tdata->expired)) {
tdata = prof_tdata_reinit(tsd, tdata);
tsd_prof_tdata_set(tsd, tdata);
}
assert(tdata == NULL || tdata->attached);
}
return tdata;
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
cassert(config_prof);
assert(ptr != NULL);
return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_reset(tsdn, ptr, tctx);
}
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
prof_tdata_t **tdata_out) {
prof_tdata_t *tdata;
cassert(config_prof);
tdata = prof_tdata_get(tsd, true);
if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
tdata = NULL;
}
if (tdata_out != NULL) {
*tdata_out = tdata;
}
if (unlikely(tdata == NULL)) {
return true;
}
if (likely(tdata->bytes_until_sample >= usize)) {
if (update) {
tdata->bytes_until_sample -= usize;
}
return true;
} else {
if (tsd_reentrancy_level_get(tsd) > 0) {
return true;
}
/* Compute new sample threshold. */
if (update) {
prof_sample_threshold_update(tdata);
}
return !tdata->active;
}
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
prof_tctx_t *ret;
prof_tdata_t *tdata;
prof_bt_t bt;
assert(usize == sz_s2u(usize));
if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
&tdata))) {
ret = (prof_tctx_t *)(uintptr_t)1U;
} else {
bt_init(&bt, tdata->vec);
prof_backtrace(&bt);
ret = prof_lookup(tsd, &bt);
}
return ret;
}
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
assert(usize == isalloc(tsdn, ptr));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
prof_malloc_sample_object(tsdn, ptr, usize, tctx);
} else {
prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
(prof_tctx_t *)(uintptr_t)1U);
}
}
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
prof_tctx_t *old_tctx) {
bool sampled, old_sampled, moved;
cassert(config_prof);
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (prof_active && !updated && ptr != NULL) {
assert(usize == isalloc(tsd_tsdn(tsd), ptr));
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
* Don't sample. The usize passed to prof_alloc_prep()
* was larger than what actually got allocated, so a
* backtrace was captured for this allocation, even
* though its actual usize was insufficient to cross the
* sample threshold.
*/
prof_alloc_rollback(tsd, tctx, true);
tctx = (prof_tctx_t *)(uintptr_t)1U;
}
}
sampled = ((uintptr_t)tctx > (uintptr_t)1U);
old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
moved = (ptr != old_ptr);
if (unlikely(sampled)) {
prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
} else if (moved) {
prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
(prof_tctx_t *)(uintptr_t)1U);
} else if (unlikely(old_sampled)) {
/*
* prof_tctx_set() would work for the !moved case as well, but
* prof_tctx_reset() is slightly cheaper, and the proper thing
* to do here in the presence of explicit knowledge re: moved
* state.
*/
prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
} else {
assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
(uintptr_t)1U);
}
/*
* The prof_free_sampled_object() call must come after the
* prof_malloc_sample_object() call, because tctx and old_tctx may be
* the same, in which case reversing the call order could cause the tctx
* to be prematurely destroyed as a side effect of momentarily zeroed
* counters.
*/
if (unlikely(old_sampled)) {
prof_free_sampled_object(tsd, old_usize, old_tctx);
}
}
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
cassert(config_prof);
assert(usize == isalloc(tsd_tsdn(tsd), ptr));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
prof_free_sampled_object(tsd, usize, tctx);
}
}
#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */
#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
#define JEMALLOC_INTERNAL_PROF_STRUCTS_H
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/rb.h"
struct prof_bt_s {
/* Backtrace, stored as len program counters. */
void **vec;
unsigned len;
};
#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
prof_bt_t *bt;
unsigned max;
} prof_unwind_data_t;
#endif
struct prof_accum_s {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_t mtx;
uint64_t accumbytes;
#else
atomic_u64_t accumbytes;
#endif
};
struct prof_cnt_s {
/* Profiling counters. */
uint64_t curobjs;
uint64_t curbytes;
uint64_t accumobjs;
uint64_t accumbytes;
};
typedef enum {
prof_tctx_state_initializing,
prof_tctx_state_nominal,
prof_tctx_state_dumping,
prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;
struct prof_tctx_s {
/* Thread data for thread that performed the allocation. */
prof_tdata_t *tdata;
/*
* Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
* defunct during teardown.
*/
uint64_t thr_uid;
uint64_t thr_discrim;
/* Profiling counters, protected by tdata->lock. */
prof_cnt_t cnts;
/* Associated global context. */
prof_gctx_t *gctx;
/*
* UID that distinguishes multiple tctx's created by the same thread,
* but coexisting in gctx->tctxs. There are two ways that such
* coexistence can occur:
* - A dumper thread can cause a tctx to be retained in the purgatory
* state.
* - Although a single "producer" thread must create all tctx's which
* share the same thr_uid, multiple "consumers" can each concurrently
* execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
* gets called once each time cnts.cur{objs,bytes} drop to 0, but this
* threshold can be hit again before the first consumer finishes
* executing prof_tctx_destroy().
*/
uint64_t tctx_uid;
/* Linkage into gctx's tctxs. */
rb_node(prof_tctx_t) tctx_link;
/*
* True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
* sample vs destroy race.
*/
bool prepared;
/* Current dump-related state, protected by gctx->lock. */
prof_tctx_state_t state;
/*
* Copy of cnts snapshotted during early dump phase, protected by
* dump_mtx.
*/
prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
struct prof_gctx_s {
/* Protects nlimbo, cnt_summed, and tctxs. */
malloc_mutex_t *lock;
/*
* Number of threads that currently cause this gctx to be in a state of
* limbo due to one of:
* - Initializing this gctx.
* - Initializing per thread counters associated with this gctx.
* - Preparing to destroy this gctx.
* - Dumping a heap profile that includes this gctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* gctx.
*/
unsigned nlimbo;
/*
* Tree of profile counters, one for each thread that has allocated in
* this context.
*/
prof_tctx_tree_t tctxs;
/* Linkage for tree of contexts to be dumped. */
rb_node(prof_gctx_t) dump_link;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Associated backtrace. */
prof_bt_t bt;
/* Backtrace vector, variable size, referred to by bt. */
void *vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
struct prof_tdata_s {
malloc_mutex_t *lock;
/* Monotonically increasing unique thread identifier. */
uint64_t thr_uid;
/*
* Monotonically increasing discriminator among tdata structures
* associated with the same thr_uid.
*/
uint64_t thr_discrim;
/* Included in heap profile dumps if non-NULL. */
char *thread_name;
bool attached;
bool expired;
rb_node(prof_tdata_t) tdata_link;
/*
* Counter used to initialize prof_tctx_t's tctx_uid. No locking is
* necessary when incrementing this field, because only one thread ever
* does so.
*/
uint64_t tctx_uid_next;
/*
* Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
* backtraces for which it has non-zero allocation/deallocation counters
* associated with thread-specific prof_tctx_t objects. Other threads
* may write to prof_tctx_t contents when freeing associated objects.
*/
ckh_t bt2tctx;
/* Sampling state. */
uint64_t prng_state;
uint64_t bytes_until_sample;
/* State used to avoid dumping while operating on prof internals. */
bool enq;
bool enq_idump;
bool enq_gdump;
/*
* Set to true during an early dump phase for tdata's which are
* currently being dumped. New threads' tdata's have this initialized
* to false so that they aren't accidentally included in later dump
* phases.
*/
bool dumping;
/*
* True if profiling is active for this tdata's thread
* (thread.prof.active mallctl).
*/
bool active;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* Backtrace vector, used for calls to prof_backtrace(). */
void *vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
#define JEMALLOC_INTERNAL_PROF_TYPES_H
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_accum_s prof_accum_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
/*
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
* is based on __builtin_return_address() necessarily has a hard-coded number
* of backtrace frame handlers, and should be kept in sync with this setting.
*/
#define PROF_BT_MAX 128
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
#!/bin/sh
for nm in `cat $1` ; do
n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
echo "#define je_${n} JEMALLOC_N(${n})"
done
#!/bin/sh
for nm in `cat $1` ; do
n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
echo "#undef je_${n}"
done
#ifndef JEMALLOC_INTERNAL_QL_H
#define JEMALLOC_INTERNAL_QL_H
#include "jemalloc/internal/qr.h"
/* List definitions. */
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
#define ql_head_initializer(a_head) {NULL}
#define ql_elm(a_type) qr(a_type)
/* List functions. */
#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
#define ql_first(a_head) ((a_head)->qlh_first)
#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
if (ql_first(a_head) != (a_elm)) { \
qr_remove((a_elm), a_field); \
} else { \
ql_first(a_head) = NULL; \
} \
} while (0)
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
#endif /* JEMALLOC_INTERNAL_QL_H */
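/*
 * Usage sketch for the list macros above.  widget_t and demo_sum_ids() are
 * hypothetical; the linkage is embedded in the element type, so no separate
 * allocation is needed for list nodes.
 */
typedef struct widget_s widget_t;
struct widget_s {
	int id;
	ql_elm(widget_t) link;	/* Embedded linkage (a qr ring). */
};

static inline int
demo_sum_ids(void) {
	ql_head(widget_t) list;
	widget_t a = {.id = 1}, b = {.id = 2};
	widget_t *w;
	int sum = 0;

	ql_new(&list);
	ql_elm_new(&a, link);
	ql_elm_new(&b, link);
	ql_tail_insert(&list, &a, link);
	ql_tail_insert(&list, &b, link);	/* List order: a, b. */
	ql_foreach(w, &list, link) {
		sum += w->id;
	}
	return sum;	/* 3 */
}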
#ifndef JEMALLOC_INTERNAL_QR_H
#define JEMALLOC_INTERNAL_QR_H
/* Ring definitions. */
#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/* Ring functions. */
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
a_type *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
/*
* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code.
*/
#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_type, a_field)
#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
= (a_qr)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
? (var)->a_field.qre_prev : NULL))
#endif /* JEMALLOC_INTERNAL_QR_H */
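/*
 * Usage sketch for the ring macros above.  relm_t and demo_ring_sum() are
 * hypothetical; qr_new() makes each element a ring of one, and
 * qr_after_insert() splices elements into an existing ring.
 */
typedef struct relm_s relm_t;
struct relm_s {
	int val;
	qr(relm_t) link;
};

static inline int
demo_ring_sum(void) {
	relm_t x = {.val = 1}, y = {.val = 2}, z = {.val = 4};
	relm_t *e;
	int sum = 0;

	qr_new(&x, link);
	qr_new(&y, link);
	qr_new(&z, link);
	qr_after_insert(&x, &y, link);	/* Ring: x <-> y. */
	qr_after_insert(&y, &z, link);	/* Ring: x <-> y <-> z. */
	qr_foreach(e, &x, link) {
		sum += e->val;
	}
	return sum;	/* 7 */
}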
/*-
*******************************************************************************
*
* cpp macro implementation of left-leaning 2-3 red-black trees. Parent
* pointers are not used, and color bits are stored in the least significant
* bit of right-child pointers (if RB_COMPACT is defined), thus making node
* linkage as compact as is possible for red-black trees.
*
* Usage:
*
* #include <stdint.h>
* #include <stdbool.h>
* #define NDEBUG // (Optional, see assert(3).)
* #include <assert.h>
* #define RB_COMPACT // (Optional, embed color bits in right-child pointers.)
* #include <rb.h>
* ...
*
*******************************************************************************
*/
#ifndef RB_H_
#define RB_H_
#ifndef __PGI
#define RB_COMPACT
#endif
#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right_red; \
}
#else
#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right; \
bool rbn_red; \
}
#endif
/* Root structure. */
#define rb_tree(a_type) \
struct { \
a_type *rbt_root; \
}
/* Left accessors. */
#define rbtn_left_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_left)
#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
(a_node)->a_field.rbn_left = a_left; \
} while (0)
#ifdef RB_COMPACT
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
& ((ssize_t)-2)))
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
| (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
} while (0)
/* Color accessors. */
#define rbtn_red_get(a_type, a_field, a_node) \
((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
& ((size_t)1)))
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
| ((ssize_t)a_red)); \
} while (0)
#define rbtn_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
(a_node)->a_field.rbn_right_red) | ((size_t)1)); \
} while (0)
#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)
/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
/* Bookkeeping bit cannot be used by node pointer. */ \
assert(((uintptr_t)(a_node) & 0x1) == 0); \
rbtn_left_set(a_type, a_field, (a_node), NULL); \
rbtn_right_set(a_type, a_field, (a_node), NULL); \
rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
#else
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_right)
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right = a_right; \
} while (0)
/* Color accessors. */
#define rbtn_red_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_red)
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_red = (a_red); \
} while (0)
#define rbtn_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = true; \
} while (0)
#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = false; \
} while (0)
/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
rbtn_left_set(a_type, a_field, (a_node), NULL); \
rbtn_right_set(a_type, a_field, (a_node), NULL); \
rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
#endif
/* Tree initializer. */
#define rb_new(a_type, a_field, a_rbt) do { \
(a_rbt)->rbt_root = NULL; \
} while (0)
/* Internal utility macros. */
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != NULL) { \
for (; \
rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
(r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
} \
} \
} while (0)
#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != NULL) { \
for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
(r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
} \
} \
} while (0)
#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
(r_node) = rbtn_right_get(a_type, a_field, (a_node)); \
rbtn_right_set(a_type, a_field, (a_node), \
rbtn_left_get(a_type, a_field, (r_node))); \
rbtn_left_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
(r_node) = rbtn_left_get(a_type, a_field, (a_node)); \
rbtn_left_set(a_type, a_field, (a_node), \
rbtn_right_get(a_type, a_field, (r_node))); \
rbtn_right_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
/*
* The rb_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to rb_gen().
*/
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
a_attr bool \
a_prefix##empty(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
a_attr void \
a_prefix##remove(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
a_rbt_type *, a_type *, void *), void *arg); \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
a_attr void \
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
void *arg);
/*
* The rb_gen() macro generates a type-specific red-black tree implementation,
* based on the above cpp macros.
*
* Arguments:
*
* a_attr : Function attribute for generated functions (ex: static).
* a_prefix : Prefix for generated functions (ex: ex_).
* a_rbt_type : Type for red-black tree data structure (ex: ex_t).
* a_type : Type for red-black tree node data structure (ex: ex_node_t).
* a_field : Name of red-black tree node linkage (ex: ex_link).
* a_cmp : Node comparison function name, with the following prototype:
* int (a_cmp *)(a_type *a_node, a_type *a_other);
* (the first parameter may instead be a_key)
* Interpretation of comparison function return values:
* -1 : a_node < a_other
* 0 : a_node == a_other
* 1 : a_node > a_other
* In all cases, the a_node or a_key macro argument is the first
* argument to the comparison function, which makes it possible
* to write comparison functions that treat the first argument
* specially.
*
* Assuming the following setup:
*
* typedef struct ex_node_s ex_node_t;
* struct ex_node_s {
* rb_node(ex_node_t) ex_link;
* };
* typedef rb_tree(ex_node_t) ex_t;
* rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)
*
* The following API is generated:
*
* static void
* ex_new(ex_t *tree);
* Description: Initialize a red-black tree structure.
* Args:
* tree: Pointer to an uninitialized red-black tree object.
*
* static bool
* ex_empty(ex_t *tree);
* Description: Determine whether tree is empty.
* Args:
* tree: Pointer to an initialized red-black tree object.
* Ret: True if tree is empty, false otherwise.
*
* static ex_node_t *
* ex_first(ex_t *tree);
* static ex_node_t *
* ex_last(ex_t *tree);
* Description: Get the first/last node in tree.
* Args:
* tree: Pointer to an initialized red-black tree object.
* Ret: First/last node in tree, or NULL if tree is empty.
*
* static ex_node_t *
* ex_next(ex_t *tree, ex_node_t *node);
* static ex_node_t *
* ex_prev(ex_t *tree, ex_node_t *node);
* Description: Get node's successor/predecessor.
* Args:
* tree: Pointer to an initialized red-black tree object.
* node: A node in tree.
* Ret: node's successor/predecessor in tree, or NULL if node is
* last/first.
*
* static ex_node_t *
* ex_search(ex_t *tree, const ex_node_t *key);
* Description: Search for node that matches key.
* Args:
* tree: Pointer to an initialized red-black tree object.
* key : Search key.
* Ret: Node in tree that matches key, or NULL if no match.
*
* static ex_node_t *
* ex_nsearch(ex_t *tree, const ex_node_t *key);
* static ex_node_t *
* ex_psearch(ex_t *tree, const ex_node_t *key);
* Description: Search for node that matches key. If no match is found,
* return what would be key's successor/predecessor, were
* key in tree.
* Args:
* tree: Pointer to an initialized red-black tree object.
* key : Search key.
* Ret: Node in tree that matches key, or if no match, hypothetical node's
* successor/predecessor (NULL if no successor/predecessor).
*
* static void
* ex_insert(ex_t *tree, ex_node_t *node);
* Description: Insert node into tree.
* Args:
* tree: Pointer to an initialized red-black tree object.
* node: Node to be inserted into tree.
*
* static void
* ex_remove(ex_t *tree, ex_node_t *node);
* Description: Remove node from tree.
* Args:
* tree: Pointer to an initialized red-black tree object.
* node: Node in tree to be removed.
*
* static ex_node_t *
* ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
* ex_node_t *, void *), void *arg);
* static ex_node_t *
* ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
* ex_node_t *, void *), void *arg);
* Description: Iterate forward/backward over tree, starting at node. If
* tree is modified, iteration must be immediately
* terminated by the callback function that causes the
* modification.
* Args:
* tree : Pointer to an initialized red-black tree object.
* start: Node at which to start iteration, or NULL to start at
* first/last node.
* cb : Callback function, which is called for each node during
* iteration. Under normal circumstances the callback function
* should return NULL, which causes iteration to continue. If a
* callback function returns non-NULL, iteration is immediately
* terminated and the non-NULL return value is returned by the
* iterator. This is useful for re-starting iteration after
* modifying tree.
* arg : Opaque pointer passed to cb().
* Ret: NULL if iteration completed, or the non-NULL callback return value
* that caused termination of the iteration.
*
* static void
* ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
* Description: Iterate over the tree with post-order traversal, remove
* each node, and run the callback if non-null. This is
* used for destroying a tree without paying the cost to
* rebalance it. The tree must not be otherwise altered
* during traversal.
* Args:
* tree: Pointer to an initialized red-black tree object.
* cb : Callback function, which, if non-null, is called for each node
* during iteration. There is no way to stop iteration once it
* has begun.
* arg : Opaque pointer passed to cb().
*/
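/*
 * A concrete version of the ex_cmp comparison function assumed by the setup
 * above (illustrative; `key` is a hypothetical field):
 *
 *   struct ex_node_s {
 *       uint64_t key;
 *       rb_node(ex_node_t) ex_link;
 *   };
 *
 *   static int
 *   ex_cmp(const ex_node_t *a, const ex_node_t *b) {
 *       return (a->key > b->key) - (a->key < b->key);
 *   }
 *
 * After rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp):
 *
 *   ex_t tree;
 *   ex_new(&tree);
 *   ex_node_t n = {.key = 7};
 *   ex_insert(&tree, &n);
 *   ex_node_t *found = ex_search(&tree, &n);   // found == &n
 */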
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
rb_new(a_type, a_field, rbtree); \
} \
a_attr bool \
a_prefix##empty(a_rbt_type *rbtree) { \
return (rbtree->rbt_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
if (rbtn_right_get(a_type, a_field, node) != NULL) { \
rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
assert(tnode != NULL); \
ret = NULL; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
ret = tnode; \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
break; \
} \
assert(tnode != NULL); \
} \
} \
return ret; \
} \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
if (rbtn_left_get(a_type, a_field, node) != NULL) { \
rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
assert(tnode != NULL); \
ret = NULL; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
ret = tnode; \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
break; \
} \
assert(tnode != NULL); \
} \
} \
return ret; \
} \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
a_type *ret; \
int cmp; \
ret = rbtree->rbt_root; \
while (ret != NULL \
&& (cmp = (a_cmp)(key, ret)) != 0) { \
if (cmp < 0) { \
ret = rbtn_left_get(a_type, a_field, ret); \
} else { \
ret = rbtn_right_get(a_type, a_field, ret); \
} \
} \
return ret; \
} \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
ret = NULL; \
while (tnode != NULL) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
ret = tnode; \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
ret = tnode; \
break; \
} \
} \
return ret; \
} \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
ret = NULL; \
while (tnode != NULL) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
ret = tnode; \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
ret = tnode; \
break; \
} \
} \
return ret; \
} \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
struct { \
a_type *node; \
int cmp; \
} path[sizeof(void *) << 4], *pathp; \
rbt_node_new(a_type, a_field, rbtree, node); \
/* Wind. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != NULL; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
assert(cmp != 0); \
if (cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} else { \
pathp[1].node = rbtn_right_get(a_type, a_field, \
pathp->node); \
} \
} \
pathp->node = node; \
/* Unwind. */ \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
a_type *cnode = pathp->node; \
if (pathp->cmp < 0) { \
a_type *left = pathp[1].node; \
rbtn_left_set(a_type, a_field, cnode, left); \
if (rbtn_red_get(a_type, a_field, left)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
leftleft)) { \
/* Fix up 4-node. */ \
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, cnode, tnode); \
cnode = tnode; \
} \
} else { \
return; \
} \
} else { \
a_type *right = pathp[1].node; \
rbtn_right_set(a_type, a_field, cnode, right); \
if (rbtn_red_get(a_type, a_field, right)) { \
a_type *left = rbtn_left_get(a_type, a_field, cnode); \
if (left != NULL && rbtn_red_get(a_type, a_field, \
left)) { \
/* Split 4-node. */ \
rbtn_black_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, right); \
rbtn_red_set(a_type, a_field, cnode); \
} else { \
/* Lean left. */ \
a_type *tnode; \
bool tred = rbtn_red_get(a_type, a_field, cnode); \
rbtn_rotate_left(a_type, a_field, cnode, tnode); \
rbtn_color_set(a_type, a_field, tnode, tred); \
rbtn_red_set(a_type, a_field, cnode); \
cnode = tnode; \
} \
} else { \
return; \
} \
} \
pathp->node = cnode; \
} \
/* Set root, and make it black. */ \
rbtree->rbt_root = path->node; \
rbtn_black_set(a_type, a_field, rbtree->rbt_root); \
} \
a_attr void \
a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
struct { \
a_type *node; \
int cmp; \
} *pathp, *nodep, path[sizeof(void *) << 4]; \
/* Wind. */ \
nodep = NULL; /* Silence compiler warning. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != NULL; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
if (cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} else { \
pathp[1].node = rbtn_right_get(a_type, a_field, \
pathp->node); \
if (cmp == 0) { \
/* Find node's successor, in preparation for swap. */ \
pathp->cmp = 1; \
nodep = pathp; \
for (pathp++; pathp->node != NULL; pathp++) { \
pathp->cmp = -1; \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} \
break; \
} \
} \
} \
assert(nodep->node == node); \
pathp--; \
if (pathp->node != node) { \
/* Swap node with its successor. */ \
bool tred = rbtn_red_get(a_type, a_field, pathp->node); \
rbtn_color_set(a_type, a_field, pathp->node, \
rbtn_red_get(a_type, a_field, node)); \
rbtn_left_set(a_type, a_field, pathp->node, \
rbtn_left_get(a_type, a_field, node)); \
/* If node's successor is its right child, the following code */\
/* will do the wrong thing for the right child pointer. */\
/* However, it doesn't matter, because the pointer will be */\
/* properly set when the successor is pruned. */\
rbtn_right_set(a_type, a_field, pathp->node, \
rbtn_right_get(a_type, a_field, node)); \
rbtn_color_set(a_type, a_field, node, tred); \
/* The pruned leaf node's child pointers are never accessed */\
/* again, so don't bother setting them to nil. */\
nodep->node = pathp->node; \
pathp->node = node; \
if (nodep == path) { \
rbtree->rbt_root = nodep->node; \
} else { \
if (nodep[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, nodep[-1].node, \
nodep->node); \
} else { \
rbtn_right_set(a_type, a_field, nodep[-1].node, \
nodep->node); \
} \
} \
} else { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
if (left != NULL) { \
/* node has no successor, but it has a left child. */\
/* Splice node out, without losing the left child. */\
assert(!rbtn_red_get(a_type, a_field, node)); \
assert(rbtn_red_get(a_type, a_field, left)); \
rbtn_black_set(a_type, a_field, left); \
if (pathp == path) { \
rbtree->rbt_root = left; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
left); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
left); \
} \
} \
return; \
} else if (pathp == path) { \
/* The tree only contained one node. */ \
rbtree->rbt_root = NULL; \
return; \
} \
} \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
/* Prune red node, which requires no fixup. */ \
assert(pathp[-1].cmp < 0); \
rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
return; \
} \
/* The node to be pruned is black, so unwind until balance is */\
/* restored. */\
pathp->node = NULL; \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
assert(pathp->cmp != 0); \
if (pathp->cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp->node, \
pathp[1].node); \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *right = rbtn_right_get(a_type, a_field, \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
a_type *tnode; \
if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
rightleft)) { \
/* In the following diagrams, ||, //, and \\ */\
/* indicate the path to the removed node. */\
/* */\
/* || */\
/* pathp(r) */\
/* // \ */\
/* (b) (b) */\
/* / */\
/* (r) */\
/* */\
rbtn_black_set(a_type, a_field, pathp->node); \
rbtn_rotate_right(a_type, a_field, right, tnode); \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
} else { \
/* || */\
/* pathp(r) */\
/* // \ */\
/* (b) (b) */\
/* / */\
/* (b) */\
/* */\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
} \
/* Balance restored, but rotation modified subtree */\
/* root. */\
assert((uintptr_t)pathp > (uintptr_t)path); \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
tnode); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
return; \
} else { \
a_type *right = rbtn_right_get(a_type, a_field, \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
rightleft)) { \
/* || */\
/* pathp(b) */\
/* // \ */\
/* (b) (b) */\
/* / */\
/* (r) */\
a_type *tnode; \
rbtn_black_set(a_type, a_field, rightleft); \
rbtn_rotate_right(a_type, a_field, right, tnode); \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */\
/* subtree root, which may actually be the tree */\
/* root. */\
if (pathp == path) { \
/* Set root. */ \
rbtree->rbt_root = tnode; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, \
pathp[-1].node, tnode); \
} else { \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
} \
} \
return; \
} else { \
/* || */\
/* pathp(b) */\
/* // \ */\
/* (b) (b) */\
/* / */\
/* (b) */\
a_type *tnode; \
rbtn_red_set(a_type, a_field, pathp->node); \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
pathp->node = tnode; \
} \
} \
} else { \
a_type *left; \
rbtn_right_set(a_type, a_field, pathp->node, \
pathp[1].node); \
left = rbtn_left_get(a_type, a_field, pathp->node); \
if (rbtn_red_get(a_type, a_field, left)) { \
a_type *tnode; \
a_type *leftright = rbtn_right_get(a_type, a_field, \
left); \
a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
leftright); \
if (leftrightleft != NULL && rbtn_red_get(a_type, \
a_field, leftrightleft)) { \
/* || */\
/* pathp(b) */\
/* / \\ */\
/* (r) (b) */\
/* \ */\
/* (b) */\
/* / */\
/* (r) */\
a_type *unode; \
rbtn_black_set(a_type, a_field, leftrightleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
unode); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
rbtn_right_set(a_type, a_field, unode, tnode); \
rbtn_rotate_left(a_type, a_field, unode, tnode); \
} else { \
/* || */\
/* pathp(b) */\
/* / \\ */\
/* (r) (b) */\
/* \ */\
/* (b) */\
/* / */\
/* (b) */\
assert(leftright != NULL); \
rbtn_red_set(a_type, a_field, leftright); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
rbtn_black_set(a_type, a_field, tnode); \
} \
/* Balance restored, but rotation modified subtree */\
/* root, which may actually be the tree root. */\
if (pathp == path) { \
/* Set root. */ \
rbtree->rbt_root = tnode; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
tnode); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
} \
return; \
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
leftleft)) { \
/* || */\
/* pathp(r) */\
/* / \\ */\
/* (b) (b) */\
/* / */\
/* (r) */\
a_type *tnode; \
rbtn_black_set(a_type, a_field, pathp->node); \
rbtn_red_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */\
/* subtree root. */\
assert((uintptr_t)pathp > (uintptr_t)path); \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
tnode); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
return; \
} else { \
/* || */\
/* pathp(r) */\
/* / \\ */\
/* (b) (b) */\
/* / */\
/* (b) */\
rbtn_red_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, pathp->node); \
/* Balance restored. */ \
return; \
} \
} else { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
leftleft)) { \
/* || */\
/* pathp(b) */\
/* / \\ */\
/* (b) (b) */\
/* / */\
/* (r) */\
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */\
/* subtree root, which may actually be the tree */\
/* root. */\
if (pathp == path) { \
/* Set root. */ \
rbtree->rbt_root = tnode; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, \
pathp[-1].node, tnode); \
} else { \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
} \
} \
return; \
} else { \
/* || */\
/* pathp(b) */\
/* / \\ */\
/* (b) (b) */\
/* / */\
/* (b) */\
rbtn_red_set(a_type, a_field, left); \
} \
} \
} \
} \
/* Set root. */ \
rbtree->rbt_root = path->node; \
assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
} \
a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == NULL) { \
return NULL; \
} else { \
a_type *ret; \
if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
arg)) != NULL) { \
return ret; \
} \
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
int cmp = a_cmp(start, node); \
if (cmp < 0) { \
a_type *ret; \
if ((ret = a_prefix##iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
return ret; \
} \
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg); \
} else if (cmp > 0) { \
return a_prefix##iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
return ret; \
} \
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
a_rbt_type *, a_type *, void *), void *arg) { \
a_type *ret; \
if (start != NULL) { \
ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \
cb, arg); \
} else { \
ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
} \
return ret; \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == NULL) { \
return NULL; \
} else { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
return ret; \
} \
return a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
void *arg) { \
int cmp = a_cmp(start, node); \
if (cmp > 0) { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
return ret; \
} \
return a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} else if (cmp < 0) { \
return a_prefix##reverse_iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
return ret; \
} \
return a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
a_type *ret; \
if (start != NULL) { \
ret = a_prefix##reverse_iter_start(rbtree, start, \
rbtree->rbt_root, cb, arg); \
} else { \
ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
cb, arg); \
} \
return ret; \
} \
a_attr void \
a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
a_type *, void *), void *arg) { \
if (node == NULL) { \
return; \
} \
a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
node), cb, arg); \
rbtn_left_set(a_type, a_field, (node), NULL); \
a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
node), cb, arg); \
rbtn_right_set(a_type, a_field, (node), NULL); \
if (cb) { \
cb(node, arg); \
} \
} \
a_attr void \
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
void *arg) { \
a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
rbtree->rbt_root = NULL; \
}
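/*
 * Illustrative usage sketch (not normative). The rb_node()/rb_tree()/rb_gen()
 * wrapper names and the rb_gen() argument order are assumed from the
 * declarations earlier in this header; the ex_* calls are the functions the
 * macro above would generate for a_prefix == ex_.
 *
 *   typedef struct ex_node_s ex_node_t;
 *   struct ex_node_s {
 *       rb_node(ex_node_t) link;   // Embedded red-black tree linkage.
 *       int                key;
 *   };
 *   typedef rb_tree(ex_node_t) ex_tree_t;
 *   static int
 *   ex_cmp(const ex_node_t *a, const ex_node_t *b) {
 *       return (a->key > b->key) - (a->key < b->key);
 *   }
 *   rb_gen(static, ex_, ex_tree_t, ex_node_t, link, ex_cmp)
 *
 *   // Typical calls against the generated functions shown above:
 *   //   ex_insert(&tree, node);
 *   //   ex_node_t *n = ex_search(&tree, &key_node);
 *   //   ex_iter(&tree, NULL, visit_cb, NULL);
 *   //   ex_remove(&tree, node);
 *   // (Tree initialization via an ex_new()-style function is assumed to be
 *   // generated alongside the functions above.)
 */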
#endif /* RB_H_ */
#ifndef JEMALLOC_INTERNAL_RTREE_H
#define JEMALLOC_INTERNAL_RTREE_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/tsd.h"
/*
* This radix tree implementation is tailored to the singular purpose of
* associating metadata with extents that are currently owned by jemalloc.
*
*******************************************************************************
*/
/* Number of high insignificant bits. */
#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
/* Number of low insignificant bits. */
#define RTREE_NLIB LG_PAGE
/* Number of significant bits. */
#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
/* Number of levels in radix tree. */
#if RTREE_NSB <= 10
# define RTREE_HEIGHT 1
#elif RTREE_NSB <= 36
# define RTREE_HEIGHT 2
#elif RTREE_NSB <= 52
# define RTREE_HEIGHT 3
#else
# error Unsupported number of significant virtual address bits
#endif
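/*
 * Worked example (illustrative only): with LG_SIZEOF_PTR == 3, LG_VADDR == 48,
 * and LG_PAGE == 12, RTREE_NHIB == 64 - 48 == 16, RTREE_NLIB == 12,
 * RTREE_NSB == 48 - 12 == 36, and therefore RTREE_HEIGHT == 2.
 */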
/* Use compact leaf representation if virtual address encoding allows. */
#if RTREE_NHIB >= LG_CEIL_NSIZES
# define RTREE_LEAF_COMPACT
#endif
/* Needed for initialization only. */
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
typedef struct rtree_node_elm_s rtree_node_elm_t;
struct rtree_node_elm_s {
atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
};
struct rtree_leaf_elm_s {
#ifdef RTREE_LEAF_COMPACT
/*
* Single pointer-width field containing all three leaf element fields.
* For example, on a 64-bit x64 system with 48 significant virtual
	 * memory address bits, the index, extent, and slab fields are packed as
	 * follows:
*
* x: index
* e: extent
* b: slab
*
* 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
*/
atomic_p_t le_bits;
#else
atomic_p_t le_extent; /* (extent_t *) */
atomic_u_t le_szind; /* (szind_t) */
atomic_b_t le_slab; /* (bool) */
#endif
};
typedef struct rtree_level_s rtree_level_t;
struct rtree_level_s {
/* Number of key bits distinguished by this level. */
unsigned bits;
/*
* Cumulative number of key bits distinguished by traversing to
* corresponding tree level.
*/
unsigned cumbits;
};
typedef struct rtree_s rtree_t;
struct rtree_s {
malloc_mutex_t init_lock;
/* Number of elements based on rtree_levels[0].bits. */
#if RTREE_HEIGHT > 1
rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
#else
rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
#endif
};
/*
* Split the bits into one to three partitions depending on number of
 * significant bits. If the number of bits does not divide evenly into the
* number of levels, place one remainder bit per level starting at the leaf
* level.
*/
static const rtree_level_t rtree_levels[] = {
#if RTREE_HEIGHT == 1
{RTREE_NSB, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 2
{RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
{RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 3
{RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
{RTREE_NSB/3 + RTREE_NSB%3/2,
RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
{RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
#else
# error Unsupported rtree height
#endif
};
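/*
 * Worked example of the partitioning above (continuing the LG_VADDR == 48 /
 * LG_PAGE == 12 case, i.e. RTREE_HEIGHT == 2 and RTREE_NSB == 36): each level
 * distinguishes 36/2 == 18 key bits, with cumulative bit counts of
 * 16 + 18 == 34 and 16 + 36 == 52, respectively.
 */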
bool rtree_new(rtree_t *rtree, bool zeroed);
typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;
typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;
typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
#ifdef JEMALLOC_JET
void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
#endif
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leafkey(uintptr_t key) {
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
rtree_levels[RTREE_HEIGHT-1].bits);
unsigned maskbits = ptrbits - cumbits;
uintptr_t mask = ~((ZU(1) << maskbits) - 1);
return (key & mask);
}
JEMALLOC_ALWAYS_INLINE size_t
rtree_cache_direct_map(uintptr_t key) {
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
rtree_levels[RTREE_HEIGHT-1].bits);
unsigned maskbits = ptrbits - cumbits;
return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
}
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(uintptr_t key, unsigned level) {
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
unsigned cumbits = rtree_levels[level].cumbits;
unsigned shiftbits = ptrbits - cumbits;
unsigned maskbits = rtree_levels[level].bits;
uintptr_t mask = (ZU(1) << maskbits) - 1;
return ((key >> shiftbits) & mask);
}
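/*
 * Example (same 2-level LG_VADDR == 48 / LG_PAGE == 12 configuration as
 * above): the cumulative prefix preceding the leaf level is 52 - 18 == 34
 * bits, so rtree_leafkey() clears the low 64 - 34 == 30 key bits,
 * rtree_cache_direct_map() evaluates to (key >> 30) & (RTREE_CTX_NCACHE - 1),
 * and rtree_subkey(key, RTREE_HEIGHT-1) evaluates to
 * (key >> 12) & ((1 << 18) - 1).
 */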
/*
* Atomic getters.
*
* dependent: Reading a value on behalf of a pointer to a valid allocation
* is guaranteed to be a clean read even without synchronization,
* because the rtree update became visible in memory before the
* pointer came into existence.
* !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
* dependent on a previous rtree write, which means a stale read
* could result if synchronization were omitted here.
*/
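/*
 * Concretely, in the getters below a dependent read uses an ATOMIC_RELAXED
 * load, while a !dependent read pays for an ATOMIC_ACQUIRE load.
 */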
# ifdef RTREE_LEAF_COMPACT
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
bool dependent) {
return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
# ifdef __aarch64__
/*
	 * aarch64 doesn't sign-extend the highest virtual address bit into the
	 * higher bits; instead, the high bits get zeroed.
*/
uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
/* Mask off the slab bit. */
uintptr_t low_bit_mask = ~(uintptr_t)1;
uintptr_t mask = high_bit_mask & low_bit_mask;
return (extent_t *)(bits & mask);
# else
/* Restore sign-extended high bits, mask slab bit. */
return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
RTREE_NHIB) & ~((uintptr_t)0x1));
# endif
}
JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
return (szind_t)(bits >> LG_VADDR);
}
JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
return (bool)(bits & (uintptr_t)0x1);
}
# endif
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_extent_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
rtree_leaf_elm_t *elm, bool dependent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
return rtree_leaf_elm_bits_extent_get(bits);
#else
extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
return extent;
#endif
}
JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_szind_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
rtree_leaf_elm_t *elm, bool dependent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
return rtree_leaf_elm_bits_szind_get(bits);
#else
return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
: ATOMIC_ACQUIRE);
#endif
}
JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_slab_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
rtree_leaf_elm_t *elm, bool dependent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
return rtree_leaf_elm_bits_slab_get(bits);
#else
return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
ATOMIC_ACQUIRE);
#endif
}
static inline void
rtree_leaf_elm_extent_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
rtree_leaf_elm_t *elm, extent_t *extent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
| ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
#endif
}
static inline void
rtree_leaf_elm_szind_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
rtree_leaf_elm_t *elm, szind_t szind) {
assert(szind <= NSIZES);
#ifdef RTREE_LEAF_COMPACT
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
true);
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
(((uintptr_t)0x1 << LG_VADDR) - 1)) |
((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
#endif
}
static inline void
rtree_leaf_elm_slab_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
rtree_leaf_elm_t *elm, bool slab) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
true);
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
(((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
#endif
}
static inline void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
extent_t *extent, szind_t szind, bool slab) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
((uintptr_t)slab);
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
/*
* Write extent last, since the element is atomically considered valid
* as soon as the extent field is non-NULL.
*/
rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
#endif
}
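/*
 * Compact-encoding example (illustrative values, assuming LG_VADDR == 48): for
 * szind == 1, extent == (extent_t *)0x00007f0000040000, and slab == true,
 * rtree_leaf_elm_write() stores
 *   bits == (1 << 48) | 0x00007f0000040000 | 1 == 0x00017f0000040001,
 * from which rtree_leaf_elm_bits_szind_get() recovers 1 (bits >> 48),
 * rtree_leaf_elm_bits_slab_get() recovers true (bits & 1), and
 * rtree_leaf_elm_bits_extent_get() recovers the extent pointer by restoring
 * the high bits and masking off the slab bit.
 */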
static inline void
rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
assert(!slab || szind < NBINS);
/*
* The caller implicitly assures that it is the only writer to the szind
* and slab fields, and that the extent field cannot currently change.
*/
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
}
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent, bool init_missing) {
assert(key != 0);
assert(!dependent || !init_missing);
size_t slot = rtree_cache_direct_map(key);
uintptr_t leafkey = rtree_leafkey(key);
assert(leafkey != RTREE_LEAFKEY_INVALID);
/* Fast path: L1 direct mapped cache. */
if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
assert(leaf != NULL);
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
return &leaf[subkey];
}
/*
* Search the L2 LRU cache. On hit, swap the matching element into the
* slot in L1 cache, and move the position in L2 up by 1.
*/
#define RTREE_CACHE_CHECK_L2(i) do { \
if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \
rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \
assert(leaf != NULL); \
if (i > 0) { \
/* Bubble up by one. */ \
rtree_ctx->l2_cache[i].leafkey = \
rtree_ctx->l2_cache[i - 1].leafkey; \
rtree_ctx->l2_cache[i].leaf = \
rtree_ctx->l2_cache[i - 1].leaf; \
rtree_ctx->l2_cache[i - 1].leafkey = \
rtree_ctx->cache[slot].leafkey; \
rtree_ctx->l2_cache[i - 1].leaf = \
rtree_ctx->cache[slot].leaf; \
} else { \
rtree_ctx->l2_cache[0].leafkey = \
rtree_ctx->cache[slot].leafkey; \
rtree_ctx->l2_cache[0].leaf = \
rtree_ctx->cache[slot].leaf; \
} \
rtree_ctx->cache[slot].leafkey = leafkey; \
rtree_ctx->cache[slot].leaf = leaf; \
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \
return &leaf[subkey]; \
} \
} while (0)
/* Check the first cache entry. */
RTREE_CACHE_CHECK_L2(0);
/* Search the remaining cache elements. */
for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
RTREE_CACHE_CHECK_L2(i);
}
#undef RTREE_CACHE_CHECK_L2
return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
dependent, init_missing);
}
static inline bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
extent_t *extent, szind_t szind, bool slab) {
/* Use rtree_clear() to set the extent to NULL. */
assert(extent != NULL);
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, false, true);
if (elm == NULL) {
return true;
}
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);
return false;
}
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
bool dependent) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, dependent, false);
if (!dependent && elm == NULL) {
return NULL;
}
assert(elm != NULL);
return elm;
}
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return NULL;
}
return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
}
JEMALLOC_ALWAYS_INLINE szind_t
rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return NSIZES;
}
return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
}
/*
* rtree_slab_read() is intentionally omitted because slab is always read in
* conjunction with szind, which makes rtree_szind_slab_read() a better choice.
*/
JEMALLOC_ALWAYS_INLINE bool
rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return true;
}
*r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
return false;
}
JEMALLOC_ALWAYS_INLINE bool
rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return true;
}
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
*r_szind = rtree_leaf_elm_bits_szind_get(bits);
*r_slab = rtree_leaf_elm_bits_slab_get(bits);
#else
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
*r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
#endif
return false;
}
static inline void
rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, szind_t szind, bool slab) {
assert(!slab || szind < NBINS);
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
}
static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
NULL);
rtree_leaf_elm_write(tsdn, rtree, elm, NULL, NSIZES, false);
}
#endif /* JEMALLOC_INTERNAL_RTREE_H */
#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H
#define JEMALLOC_INTERNAL_RTREE_CTX_H
/*
 * Number of leafkey/leaf pairs to cache in the L1 and L2 levels, respectively. Each
* entry supports an entire leaf, so the cache hit rate is typically high even
* with a small number of entries. In rare cases extent activity will straddle
* the boundary between two leaf nodes. Furthermore, an arena may use a
* combination of dss and mmap. Note that as memory usage grows past the amount
* that this cache can directly cover, the cache will become less effective if
* locality of reference is low, but the consequence is merely cache misses
* while traversing the tree nodes.
*
 * The L1 direct-mapped cache offers consistently low cost on cache hits.
 * However, collisions can reduce its hit rate. This is addressed by combining
 * it with an L2 LRU cache, which requires a linear search and re-ordering on
 * access but suffers no collisions. Note that the caches will themselves incur
 * CPU cache misses if made overly large, in addition to the cost of the linear
 * search in the LRU cache.
*/
#define RTREE_CTX_LG_NCACHE 4
#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
#define RTREE_CTX_NCACHE_L2 8
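/*
 * Illustrative sizing (assuming the 2-level rtree configuration with 18 key
 * bits per leaf and LG_PAGE == 12): one leaf spans 2^18 pages == 1 GiB of
 * address space, so the 16-entry L1 cache can directly map up to 16 GiB, and
 * the 8-entry L2 LRU cache can hold 8 additional leaves.
 */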
/*
* Zero initializer required for tsd initialization only. Proper initialization
* done via rtree_ctx_data_init().
*/
#define RTREE_CTX_ZERO_INITIALIZER {{{0}}, {{0}}}
typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;
typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t;
struct rtree_ctx_cache_elm_s {
uintptr_t leafkey;
rtree_leaf_elm_t *leaf;
};
typedef struct rtree_ctx_s rtree_ctx_t;
struct rtree_ctx_s {
/* Direct mapped cache. */
rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE];
/* L2 LRU cache. */
rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2];
};
void rtree_ctx_data_init(rtree_ctx_t *ctx);
#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */
#!/bin/sh
#
# Usage: size_classes.sh <lg_qarr> <lg_tmin> <lg_parr> <lg_g>
# The following limits are chosen such that they cover all supported platforms.
# Pointer sizes.
lg_zarr="2 3"
# Quanta.
lg_qarr=$1
# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)].
lg_tmin=$2
# Maximum lookup size.
lg_kmax=12
# Page sizes.
lg_parr=`echo $3 | tr ',' ' '`
# Size class group size (number of size classes for each size doubling).
lg_g=$4
pow2() {
e=$1
pow2_result=1
while [ ${e} -gt 0 ] ; do
pow2_result=$((${pow2_result} + ${pow2_result}))
e=$((${e} - 1))
done
}
lg() {
x=$1
lg_result=0
while [ ${x} -gt 1 ] ; do
lg_result=$((${lg_result} + 1))
x=$((${x} / 2))
done
}
lg_ceil() {
y=$1
lg ${y}; lg_floor=${lg_result}
pow2 ${lg_floor}; pow2_floor=${pow2_result}
if [ ${pow2_floor} -lt ${y} ] ; then
lg_ceil_result=$((${lg_floor} + 1))
else
lg_ceil_result=${lg_floor}
fi
}
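# Example: "lg_ceil 20" computes lg_floor=4 (2^4=16); since 16 < 20, the
# result is lg_ceil_result=5.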
reg_size_compute() {
lg_grp=$1
lg_delta=$2
ndelta=$3
pow2 ${lg_grp}; grp=${pow2_result}
pow2 ${lg_delta}; delta=${pow2_result}
reg_size=$((${grp} + ${delta}*${ndelta}))
}
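# Example: "reg_size_compute 6 4 1" yields reg_size=$((64 + 16*1))=80.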
slab_size() {
lg_p=$1
lg_grp=$2
lg_delta=$3
ndelta=$4
pow2 ${lg_p}; p=${pow2_result}
reg_size_compute ${lg_grp} ${lg_delta} ${ndelta}
# Compute smallest slab size that is an integer multiple of reg_size.
try_slab_size=${p}
try_nregs=$((${try_slab_size} / ${reg_size}))
perfect=0
while [ ${perfect} -eq 0 ] ; do
perfect_slab_size=${try_slab_size}
perfect_nregs=${try_nregs}
try_slab_size=$((${try_slab_size} + ${p}))
try_nregs=$((${try_slab_size} / ${reg_size}))
if [ ${perfect_slab_size} -eq $((${perfect_nregs} * ${reg_size})) ] ; then
perfect=1
fi
done
slab_size_pgs=$((${perfect_slab_size} / ${p}))
}
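# Example (continuing reg_size=80 with lg_p=12, i.e. 4 KiB pages): the loop
# settles on perfect_slab_size=20480 (perfect_nregs=256), so slab_size_pgs=5.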
size_class() {
index=$1
lg_grp=$2
lg_delta=$3
ndelta=$4
lg_p=$5
lg_kmax=$6
if [ ${lg_delta} -ge ${lg_p} ] ; then
psz="yes"
else
pow2 ${lg_p}; p=${pow2_result}
pow2 ${lg_grp}; grp=${pow2_result}
pow2 ${lg_delta}; delta=${pow2_result}
sz=$((${grp} + ${delta} * ${ndelta}))
npgs=$((${sz} / ${p}))
if [ ${sz} -eq $((${npgs} * ${p})) ] ; then
psz="yes"
else
psz="no"
fi
fi
lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta}
if [ ${pow2_result} -lt ${ndelta} ] ; then
rem="yes"
else
rem="no"
fi
lg_size=${lg_grp}
if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then
lg_size=$((${lg_grp} + 1))
else
lg_size=${lg_grp}
rem="yes"
fi
if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then
bin="yes"
slab_size ${lg_p} ${lg_grp} ${lg_delta} ${ndelta}; pgs=${slab_size_pgs}
else
bin="no"
pgs=0
fi
if [ ${lg_size} -lt ${lg_kmax} \
-o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then
lg_delta_lookup=${lg_delta}
else
lg_delta_lookup="no"
fi
printf ' SC(%3d, %6d, %8d, %6d, %3s, %3s, %3d, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${pgs} ${lg_delta_lookup}
# Defined upon return:
# - psz ("yes" or "no")
# - bin ("yes" or "no")
# - pgs
# - lg_delta_lookup (${lg_delta} or "no")
}
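# Hypothetical example (assuming lg_g=2 and lg_kmax=12): "size_class 5 6 4 1 12 12"
# describes the 80-byte class (2^6 + 1*2^4) and emits an SC(...) row with
# psz="no" (80 is not a page multiple), bin="yes", pgs=5 (per slab_size above),
# and lg_delta_lookup=4 (since lg_size=6 < lg_kmax).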
sep_line() {
echo " \\"
}
size_classes() {
lg_z=$1
lg_q=$2
lg_t=$3
lg_p=$4
lg_g=$5
pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result}
pow2 ${lg_g}; g=${pow2_result}
echo "#define SIZE_CLASSES \\"
echo " /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \\"
ntbins=0
nlbins=0
lg_tiny_maxclass='"NA"'
nbins=0
npsizes=0
# Tiny size classes.
ndelta=0
index=0
lg_grp=${lg_t}
lg_delta=${lg_grp}
while [ ${lg_grp} -lt ${lg_q} ] ; do
size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
if [ ${lg_delta_lookup} != "no" ] ; then
nlbins=$((${index} + 1))
fi
if [ ${psz} = "yes" ] ; then
npsizes=$((${npsizes} + 1))
fi
if [ ${bin} != "no" ] ; then
nbins=$((${index} + 1))
fi
ntbins=$((${ntbins} + 1))
lg_tiny_maxclass=${lg_grp} # Final written value is correct.
index=$((${index} + 1))
lg_delta=${lg_grp}
lg_grp=$((${lg_grp} + 1))
done
# First non-tiny group.
if [ ${ntbins} -gt 0 ] ; then
sep_line
# The first size class has an unusual encoding, because the size has to be
# split between grp and delta*ndelta.
lg_grp=$((${lg_grp} - 1))
ndelta=1
size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
index=$((${index} + 1))
lg_grp=$((${lg_grp} + 1))
lg_delta=$((${lg_delta} + 1))
if [ ${psz} = "yes" ] ; then
npsizes=$((${npsizes} + 1))
fi
fi
while [ ${ndelta} -lt ${g} ] ; do
size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
index=$((${index} + 1))
ndelta=$((${ndelta} + 1))
if [ ${psz} = "yes" ] ; then
npsizes=$((${npsizes} + 1))
fi
done
# All remaining groups.
lg_grp=$((${lg_grp} + ${lg_g}))
while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do
sep_line
ndelta=1
if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then
ndelta_limit=$((${g} - 1))
else
ndelta_limit=${g}
fi
while [ ${ndelta} -le ${ndelta_limit} ] ; do
size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
if [ ${lg_delta_lookup} != "no" ] ; then
nlbins=$((${index} + 1))
# Final written value is correct:
lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
fi
if [ ${psz} = "yes" ] ; then
npsizes=$((${npsizes} + 1))
fi
if [ ${bin} != "no" ] ; then
nbins=$((${index} + 1))
# Final written value is correct:
small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
if [ ${lg_g} -gt 0 ] ; then
lg_large_minclass=$((${lg_grp} + 1))
else
lg_large_minclass=$((${lg_grp} + 2))
fi
fi
# Final written value is correct:
large_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
index=$((${index} + 1))
ndelta=$((${ndelta} + 1))
done
lg_grp=$((${lg_grp} + 1))
lg_delta=$((${lg_delta} + 1))
done
echo
nsizes=${index}
lg_ceil ${nsizes}; lg_ceil_nsizes=${lg_ceil_result}
# Defined upon completion:
# - ntbins
# - nlbins
# - nbins
# - nsizes
# - lg_ceil_nsizes
# - npsizes
# - lg_tiny_maxclass
# - lookup_maxclass
# - small_maxclass
# - lg_large_minclass
# - large_maxclass
}
cat <<EOF
#ifndef JEMALLOC_INTERNAL_SIZE_CLASSES_H
#define JEMALLOC_INTERNAL_SIZE_CLASSES_H
/* This file was automatically generated by size_classes.sh. */
#include "jemalloc/internal/jemalloc_internal_types.h"
/*
* This header file defines:
*
* LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
* LG_TINY_MIN: Lg of minimum size class to support.
* SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz,
* bin, pgs, lg_delta_lookup) tuples.
* index: Size class index.
* lg_grp: Lg group base size (no deltas added).
* lg_delta: Lg delta to previous size class.
* ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
* psz: 'yes' if a multiple of the page size, 'no' otherwise.
* bin: 'yes' if a small bin size class, 'no' otherwise.
* pgs: Slab page count if a small bin size class, 0 otherwise.
* lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
* otherwise.
* NTBINS: Number of tiny bins.
* NLBINS: Number of bins supported by the lookup table.
* NBINS: Number of small size class bins.
* NSIZES: Number of size classes.
* LG_CEIL_NSIZES: Number of bits required to store NSIZES.
* NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE).
* LG_TINY_MAXCLASS: Lg of maximum tiny size class.
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
* LG_LARGE_MINCLASS: Lg of minimum large size class.
* LARGE_MAXCLASS: Maximum (large) size class.
*/
#define LG_SIZE_CLASS_GROUP ${lg_g}
#define LG_TINY_MIN ${lg_tmin}
EOF
for lg_z in ${lg_zarr} ; do
for lg_q in ${lg_qarr} ; do
lg_t=${lg_tmin}
while [ ${lg_t} -le ${lg_q} ] ; do
# Iterate through page sizes and compute how many bins there are.
for lg_p in ${lg_parr} ; do
echo "#if (LG_SIZEOF_PTR == ${lg_z} && LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})"
size_classes ${lg_z} ${lg_q} ${lg_t} ${lg_p} ${lg_g}
echo "#define SIZE_CLASSES_DEFINED"
echo "#define NTBINS ${ntbins}"
echo "#define NLBINS ${nlbins}"
echo "#define NBINS ${nbins}"
echo "#define NSIZES ${nsizes}"
echo "#define LG_CEIL_NSIZES ${lg_ceil_nsizes}"
echo "#define NPSIZES ${npsizes}"
echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
echo "#define SMALL_MAXCLASS ${small_maxclass}"
echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
echo "#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)"
echo "#define LARGE_MAXCLASS ${large_maxclass}"
echo "#endif"
echo
done
lg_t=$((${lg_t} + 1))
done
done
done
cat <<EOF
#ifndef SIZE_CLASSES_DEFINED
# error "No size class definitions match configuration"
#endif
#undef SIZE_CLASSES_DEFINED
/*
* The size2index_tab lookup table uses uint8_t to encode each bin index, so we
* cannot support more than 256 small size classes.
*/
#if (NBINS > 256)
# error "Too many small size classes"
#endif
#endif /* JEMALLOC_INTERNAL_SIZE_CLASSES_H */
EOF
#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
/*
* This file was generated by the following command:
* sh smoothstep.sh smoother 200 24 3 15
*/
/******************************************************************************/
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
*
* 3 2
* smoothstep(x) = -2x + 3x
*
* 5 4 3
* smootherstep(x) = 6x - 15x + 10x
*
* 7 6 5 4
* smootheststep(x) = -20x + 70x - 84x + 35x
*/
#define SMOOTHSTEP_VARIANT "smoother"
#define SMOOTHSTEP_NSTEPS 200
#define SMOOTHSTEP_BFP 24
#define SMOOTHSTEP \
/* STEP(step, h, x, y) */ \
STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
#!/bin/sh
#
# Generate a discrete lookup table for a sigmoid function in the smoothstep
# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode
# the entries using a binary fixed point representation.
#
# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
#
# <variant> is in {smooth, smoother, smoothest}.
# <nsteps> must be greater than zero.
# <bfp> must be in [0..62]; reasonable values are roughly [10..30].
# <xprec> is x decimal precision.
# <yprec> is y decimal precision.
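# For example, the checked-in smoothstep.h above was generated with:
#   sh smoothstep.sh smoother 200 24 3 15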
#set -x
cmd="sh smoothstep.sh $*"
variant=$1
nsteps=$2
bfp=$3
xprec=$4
yprec=$5
case "${variant}" in
smooth)
;;
smoother)
;;
smoothest)
;;
*)
echo "Unsupported variant"
exit 1
;;
esac
smooth() {
step=$1
y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
}
smoother() {
step=$1
y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
}
smoothest() {
step=$1
y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
}
cat <<EOF
#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
/*
* This file was generated by the following command:
* $cmd
*/
/******************************************************************************/
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
*
* 3 2
* smoothstep(x) = -2x + 3x
*
* 5 4 3
* smootherstep(x) = 6x - 15x + 10x
*
* 7 6 5 4
* smootheststep(x) = -20x + 70x - 84x + 35x
*/
#define SMOOTHSTEP_VARIANT "${variant}"
#define SMOOTHSTEP_NSTEPS ${nsteps}
#define SMOOTHSTEP_BFP ${bfp}
#define SMOOTHSTEP \\
/* STEP(step, h, x, y) */ \\
EOF
s=1
while [ $s -le $nsteps ] ; do
$variant ${s}
x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
printf ' STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y}
s=$((s+1))
done
echo
cat <<EOF
#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
EOF