ruanhaishen / redis · Commits

Commit fb1f4f4e (unverified), authored Oct 25, 2019 by Wander Hillen, committed by GitHub on Oct 25, 2019:

    Merge branch 'unstable' into minor-typos

Parents: dda8cc18, 6e98214f
Changes: showing 203 of 203+ changed files.
deps/jemalloc/include/jemalloc/internal/ql.h
#ifndef JEMALLOC_INTERNAL_QL_H
#define JEMALLOC_INTERNAL_QL_H

#include "jemalloc/internal/qr.h"

/* List definitions. */
#define ql_head(a_type) \
struct { \
    a_type *qlh_first; \
}

#define ql_head_initializer(a_head) {NULL}

#define ql_elm(a_type) qr(a_type)

/* List functions. */
#define ql_new(a_head) do { \
    (a_head)->qlh_first = NULL; \
} while (0)

#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)

#define ql_first(a_head) ((a_head)->qlh_first)

#define ql_last(a_head, a_field) \
    ((ql_first(a_head) != NULL) \
        ? qr_prev(ql_first(a_head), a_field) : NULL)

#define ql_next(a_head, a_elm, a_field) \
    ((ql_last(a_head, a_field) != (a_elm)) \
        ? qr_next((a_elm), a_field) : NULL)

#define ql_prev(a_head, a_elm, a_field) \
    ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
        : NULL)

#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
    qr_before_insert((a_qlelm), (a_elm), a_field); \
    if (ql_first(a_head) == (a_qlelm)) { \
        ql_first(a_head) = (a_elm); \
    } \
} while (0)

#define ql_after_insert(a_qlelm, a_elm, a_field) \
    qr_after_insert((a_qlelm), (a_elm), a_field)

#define ql_head_insert(a_head, a_elm, a_field) do { \
    if (ql_first(a_head) != NULL) { \
        qr_before_insert(ql_first(a_head), (a_elm), a_field); \
    } \
    ql_first(a_head) = (a_elm); \
} while (0)

#define ql_tail_insert(a_head, a_elm, a_field) do { \
    if (ql_first(a_head) != NULL) { \
        qr_before_insert(ql_first(a_head), (a_elm), a_field); \
    } \
    ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)

#define ql_remove(a_head, a_elm, a_field) do { \
    if (ql_first(a_head) == (a_elm)) { \
        ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
    } \
...
@@ -64,18 +69,20 @@ struct { \
    } \
} while (0)

#define ql_head_remove(a_head, a_type, a_field) do { \
    a_type *t = ql_first(a_head); \
    ql_remove((a_head), t, a_field); \
} while (0)

#define ql_tail_remove(a_head, a_type, a_field) do { \
    a_type *t = ql_last(a_head, a_field); \
    ql_remove((a_head), t, a_field); \
} while (0)

#define ql_foreach(a_var, a_head, a_field) \
    qr_foreach((a_var), ql_first(a_head), a_field)

#define ql_reverse_foreach(a_var, a_head, a_field) \
    qr_reverse_foreach((a_var), ql_first(a_head), a_field)

#endif /* JEMALLOC_INTERNAL_QL_H */
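The ql_* macros above implement an intrusive doubly linked list on top of the qr_* ring macros: the head holds only a pointer to the first element, and each element embeds its own prev/next links via ql_elm(). A minimal usage sketch follows; the widget_t type, its fields, and the surrounding main() are illustrative assumptions, not part of jemalloc.

#include <stdio.h>
#include "jemalloc/internal/ql.h"

/* Hypothetical element type; the ql_elm() field supplies the links. */
typedef struct widget_s widget_t;
struct widget_s {
    int id;
    ql_elm(widget_t) link;  /* intrusive list linkage */
};

typedef ql_head(widget_t) widget_list_t;

int
main(void) {
    widget_list_t list = ql_head_initializer(list);
    widget_t a = {1}, b = {2}, *w;

    /* Elements must have their ring links initialized before insertion. */
    ql_elm_new(&a, link);
    ql_elm_new(&b, link);
    ql_tail_insert(&list, &a, link);
    ql_tail_insert(&list, &b, link);

    ql_foreach(w, &list, link) {
        printf("%d\n", w->id);  /* prints 1 then 2 */
    }
    ql_remove(&list, &a, link);
    return 0;
}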
deps/jemalloc/include/jemalloc/internal/qr.h
#ifndef JEMALLOC_INTERNAL_QR_H
#define JEMALLOC_INTERNAL_QR_H

/* Ring definitions. */
#define qr(a_type) \
struct { \
    a_type *qre_next; \
    a_type *qre_prev; \
}

/* Ring functions. */
#define qr_new(a_qr, a_field) do { \
    (a_qr)->a_field.qre_next = (a_qr); \
    (a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)

#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)

#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
    (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
    (a_qr)->a_field.qre_next = (a_qrelm); \
    (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
    (a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
    (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
    (a_qr)->a_field.qre_prev = (a_qrelm); \
    (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
    (a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)

-#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
-    void *t; \
+#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
+    a_type *t; \
    (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
    (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
    t = (a_qr_a)->a_field.qre_prev; \
...
@@ -44,10 +45,10 @@ struct { \
 * qr_meld() and qr_split() are functionally equivalent, so there's no need to
 * have two copies of the code.
 */
-#define qr_split(a_qr_a, a_qr_b, a_field) \
-    qr_meld((a_qr_a), (a_qr_b), a_field)
+#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
+    qr_meld((a_qr_a), (a_qr_b), a_type, a_field)

#define qr_remove(a_qr, a_field) do { \
    (a_qr)->a_field.qre_prev->a_field.qre_next \
        = (a_qr)->a_field.qre_next; \
    (a_qr)->a_field.qre_next->a_field.qre_prev \
...
@@ -56,14 +57,16 @@ struct { \
    (a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_foreach(var, a_qr, a_field) \
    for ((var) = (a_qr); \
        (var) != NULL; \
        (var) = (((var)->a_field.qre_next != (a_qr)) \
        ? (var)->a_field.qre_next : NULL))

#define qr_reverse_foreach(var, a_qr, a_field) \
    for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
        (var) != NULL; \
        (var) = (((var) != (a_qr)) \
        ? (var)->a_field.qre_prev : NULL))

#endif /* JEMALLOC_INTERNAL_QR_H */
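The change to qr_meld() and qr_split() adds an explicit a_type parameter so the temporary inside the macro can be declared with the element type instead of void *. A small sketch of the updated call sites, using a hypothetical node_t type:

#include "jemalloc/internal/qr.h"

typedef struct node_s node_t;
struct node_s {
    int val;
    qr(node_t) link;
};

/* Meld two independently built rings into one circular ring. */
static void
meld_example(node_t *ring_a, node_t *ring_b) {
    /* After the merge, iterating from ring_a visits both rings' nodes. */
    qr_meld(ring_a, ring_b, node_t, link);
}

/* Splitting undoes the meld; the two operations are symmetric. */
static void
split_example(node_t *ring_a, node_t *ring_b) {
    qr_split(ring_a, ring_b, node_t, link);
}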
deps/jemalloc/include/jemalloc/internal/quarantine.h
deleted 100644 → 0

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct quarantine_obj_s quarantine_obj_t;
typedef struct quarantine_s quarantine_t;

/* Default per thread quarantine size if valgrind is enabled. */
#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct quarantine_obj_s {
    void   *ptr;
    size_t usize;
};

struct quarantine_s {
    size_t curbytes;
    size_t curobjs;
    size_t first;
#define LG_MAXOBJS_INIT 10
    size_t lg_maxobjs;
    quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void quarantine_alloc_hook_work(tsd_t *tsd);
void quarantine(tsd_t *tsd, void *ptr);
void quarantine_cleanup(tsd_t *tsd);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void quarantine_alloc_hook(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
    tsd_t *tsd;

    assert(config_fill && opt_quarantine);

    tsd = tsd_fetch();
    if (tsd_quarantine_get(tsd) == NULL)
        quarantine_alloc_hook_work(tsd);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
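The deleted quarantine_s struct ends in a one-element array (objs[1]) used as a dynamically sized ring buffer: the struct is over-allocated so that 2^lg_maxobjs slots follow the header. A standalone sketch of that allocation pattern, with simplified stand-in types rather than jemalloc's real ones:

#include <stddef.h>
#include <stdlib.h>

/* Simplified stand-ins for the deleted jemalloc types (illustrative only). */
typedef struct {
    void   *ptr;
    size_t  usize;
} quarantine_obj_t;

typedef struct {
    size_t curbytes;
    size_t curobjs;
    size_t first;
    size_t lg_maxobjs;
    quarantine_obj_t objs[1];  /* Dynamically sized ring buffer. */
} quarantine_t;

/* Allocate a quarantine with 2^lg_maxobjs ring-buffer slots. */
static quarantine_t *
quarantine_create(size_t lg_maxobjs) {
    size_t nobjs = (size_t)1 << lg_maxobjs;
    quarantine_t *q = calloc(1, offsetof(quarantine_t, objs) +
        nobjs * sizeof(quarantine_obj_t));
    if (q != NULL) {
        q->lg_maxobjs = lg_maxobjs;
    }
    return q;
}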
deps/jemalloc/include/jemalloc/internal/rb.h
...
@@ -20,17 +20,21 @@
 */

#ifndef RB_H_
#define RB_H_

+#ifndef __PGI
+#define RB_COMPACT
+#endif

#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \
struct { \
    a_type *rbn_left; \
    a_type *rbn_right_red; \
}
#else
#define rb_node(a_type) \
struct { \
    a_type *rbn_left; \
    a_type *rbn_right; \
...
@@ -39,111 +43,116 @@ struct { \
#endif

/* Root structure. */
#define rb_tree(a_type) \
struct { \
    a_type *rbt_root; \
-    a_type rbt_nil; \
}

/* Left accessors. */
#define rbtn_left_get(a_type, a_field, a_node) \
    ((a_node)->a_field.rbn_left)
#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
    (a_node)->a_field.rbn_left = a_left; \
} while (0)

#ifdef RB_COMPACT
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
    ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
      & ((ssize_t)-2)))
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
      | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
} while (0)

/* Color accessors. */
#define rbtn_red_get(a_type, a_field, a_node) \
    ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
      & ((size_t)1)))
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
    (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
      | ((ssize_t)a_red)); \
} while (0)
#define rbtn_red_set(a_type, a_field, a_node) do { \
    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
      (a_node)->a_field.rbn_right_red) | ((size_t)1)); \
} while (0)
#define rbtn_black_set(a_type, a_field, a_node) do { \
    (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)

+/* Node initializer. */
+#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
+    /* Bookkeeping bit cannot be used by node pointer. */ \
+    assert(((uintptr_t)(a_node) & 0x1) == 0); \
+    rbtn_left_set(a_type, a_field, (a_node), NULL); \
+    rbtn_right_set(a_type, a_field, (a_node), NULL); \
+    rbtn_red_set(a_type, a_field, (a_node)); \
+} while (0)
#else
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
    ((a_node)->a_field.rbn_right)
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
    (a_node)->a_field.rbn_right = a_right; \
} while (0)

/* Color accessors. */
#define rbtn_red_get(a_type, a_field, a_node) \
    ((a_node)->a_field.rbn_red)
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
    (a_node)->a_field.rbn_red = (a_red); \
} while (0)
#define rbtn_red_set(a_type, a_field, a_node) do { \
    (a_node)->a_field.rbn_red = true; \
} while (0)
#define rbtn_black_set(a_type, a_field, a_node) do { \
    (a_node)->a_field.rbn_red = false; \
} while (0)
-#endif

/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
-    rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
-    rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
+    rbtn_left_set(a_type, a_field, (a_node), NULL); \
+    rbtn_right_set(a_type, a_field, (a_node), NULL); \
    rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
+#endif

/* Tree initializer. */
#define rb_new(a_type, a_field, a_rbt) do { \
-    (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \
-    rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \
-    rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \
+    (a_rbt)->rbt_root = NULL; \
} while (0)

/* Internal utility macros. */
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
    (r_node) = (a_root); \
-    if ((r_node) != &(a_rbt)->rbt_nil) { \
+    if ((r_node) != NULL) { \
        for (; \
-          rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil; \
+          rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
          (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
        } \
    } \
} while (0)

#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
    (r_node) = (a_root); \
-    if ((r_node) != &(a_rbt)->rbt_nil) { \
-        for (; rbtn_right_get(a_type, a_field, (r_node)) != \
-          &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \
-          (r_node))) { \
+    if ((r_node) != NULL) { \
+        for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
+          (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
        } \
    } \
} while (0)

#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
    (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \
    rbtn_right_set(a_type, a_field, (a_node), \
      rbtn_left_get(a_type, a_field, (r_node))); \
    rbtn_left_set(a_type, a_field, (r_node), (a_node)); \
} while (0)

#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
    (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \
    rbtn_left_set(a_type, a_field, (a_node), \
      rbtn_right_get(a_type, a_field, (r_node))); \
...
@@ -155,7 +164,7 @@ struct { \
 * functions generated by an equivalently parameterized call to rb_gen().
 */
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
a_attr bool \
...
@@ -169,11 +178,11 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
-a_prefix##search(a_rbt_type *rbtree, a_type *key); \
+a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
a_attr a_type * \
-a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \
+a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
a_attr a_type * \
-a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \
+a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
a_attr void \
...
@@ -183,7 +192,10 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
    a_rbt_type *, a_type *, void *), void *arg); \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
-    a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);
+    a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
+a_attr void \
+a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
+    void *arg);

/*
 * The rb_gen() macro generates a type-specific red-black tree implementation,
...
@@ -254,7 +266,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 *   last/first.
 *
 * static ex_node_t *
- * ex_search(ex_t *tree, ex_node_t *key);
+ * ex_search(ex_t *tree, const ex_node_t *key);
 *   Description: Search for node that matches key.
 *   Args:
 *     tree: Pointer to an initialized red-black tree object.
...
@@ -262,9 +274,9 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 *   Ret: Node in tree that matches key, or NULL if no match.
 *
 * static ex_node_t *
- * ex_nsearch(ex_t *tree, ex_node_t *key);
+ * ex_nsearch(ex_t *tree, const ex_node_t *key);
 * static ex_node_t *
- * ex_psearch(ex_t *tree, ex_node_t *key);
+ * ex_psearch(ex_t *tree, const ex_node_t *key);
 *   Description: Search for node that matches key.  If no match is found,
 *     return what would be key's successor/predecessor, were
 *     key in tree.
...
@@ -312,44 +324,52 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 *     arg : Opaque pointer passed to cb().
 *   Ret: NULL if iteration completed, or the non-NULL callback return value
 *     that caused termination of the iteration.
+ *
+ * static void
+ * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
+ *   Description: Iterate over the tree with post-order traversal, remove
+ *     each node, and run the callback if non-null.  This is
+ *     used for destroying a tree without paying the cost to
+ *     rebalance it.  The tree must not be otherwise altered
+ *     during traversal.
+ *   Args:
+ *     tree: Pointer to an initialized red-black tree object.
+ *     cb  : Callback function, which, if non-null, is called for each node
+ *           during iteration.  There is no way to stop iteration once it
+ *           has begun.
+ *     arg : Opaque pointer passed to cb().
 */
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
    rb_new(a_type, a_field, rbtree); \
} \
a_attr bool \
a_prefix##empty(a_rbt_type *rbtree) { \
-    return (rbtree->rbt_root == &rbtree->rbt_nil); \
+    return (rbtree->rbt_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
    a_type *ret; \
    rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
-    if (ret == &rbtree->rbt_nil) { \
-        ret = NULL; \
-    } \
-    return (ret); \
+    return ret; \
} \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree) { \
    a_type *ret; \
    rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
-    if (ret == &rbtree->rbt_nil) { \
-        ret = NULL; \
-    } \
-    return (ret); \
+    return ret; \
} \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
    a_type *ret; \
-    if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
+    if (rbtn_right_get(a_type, a_field, node) != NULL) { \
        rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
          a_field, node), ret); \
    } else { \
        a_type *tnode = rbtree->rbt_root; \
-        assert(tnode != &rbtree->rbt_nil); \
-        ret = &rbtree->rbt_nil; \
+        assert(tnode != NULL); \
+        ret = NULL; \
        while (true) { \
            int cmp = (a_cmp)(node, tnode); \
            if (cmp < 0) { \
...
@@ -409,17 +423,14 @@ a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
            ret = rbtn_right_get(a_type, a_field, ret); \
        } \
    } \
-    if (ret == &rbtree->rbt_nil) { \
-        ret = (NULL); \
-    } \
-    return (ret); \
+    return ret; \
} \
a_attr a_type * \
-a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
+a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
    a_type *ret; \
    a_type *tnode = rbtree->rbt_root; \
-    ret = &rbtree->rbt_nil; \
-    while (tnode != &rbtree->rbt_nil) { \
+    ret = NULL; \
+    while (tnode != NULL) { \
        int cmp = (a_cmp)(key, tnode); \
        if (cmp < 0) { \
            ret = tnode; \
...
@@ -487,7 +492,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
        rbtn_left_set(a_type, a_field, cnode, left); \
        if (rbtn_red_get(a_type, a_field, left)) { \
            a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
-            if (rbtn_red_get(a_type, a_field, leftleft)) { \
+            if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
+              leftleft)) { \
                /* Fix up 4-node. */ \
                a_type *tnode; \
                rbtn_black_set(a_type, a_field, leftleft); \
...
@@ -610,33 +616,32 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
            return; \
        } else if (pathp == path) { \
            /* The tree only contained one node. */ \
-            rbtree->rbt_root = &rbtree->rbt_nil; \
+            rbtree->rbt_root = NULL; \
            return; \
        } \
    } \
    if (rbtn_red_get(a_type, a_field, pathp->node)) { \
        /* Prune red node, which requires no fixup. */ \
        assert(pathp[-1].cmp < 0); \
-        rbtn_left_set(a_type, a_field, pathp[-1].node, \
-          &rbtree->rbt_nil); \
+        rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
        return; \
    } \
    /* The node to be pruned is black, so unwind until balance is */ \
    /* restored. */ \
-    pathp->node = &rbtree->rbt_nil; \
+    pathp->node = NULL; \
    for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
        assert(pathp->cmp != 0); \
        if (pathp->cmp < 0) { \
            rbtn_left_set(a_type, a_field, pathp->node, \
              pathp[1].node); \
            assert(!rbtn_red_get(a_type, a_field, pathp[1].node)); \
            if (rbtn_red_get(a_type, a_field, pathp->node)) { \
                a_type *right = rbtn_right_get(a_type, a_field, \
                  pathp->node); \
                a_type *rightleft = rbtn_left_get(a_type, a_field, \
                  right); \
                a_type *tnode; \
-                if (rbtn_red_get(a_type, a_field, rightleft)) { \
+                if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
+                  rightleft)) { \
                    /* In the following diagrams, ||, //, and \\ */ \
                    /* indicate the path to the removed node. */ \
                    /* */ \
...
@@ -972,10 +978,29 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
        ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
          cb, arg); \
    } \
-    if (ret == &rbtree->rbt_nil) { \
-        ret = NULL; \
-    } \
-    return (ret); \
+    return ret; \
+} \
+a_attr void \
+a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
+    a_type *, void *), void *arg) { \
+    if (node == NULL) { \
+        return; \
+    } \
+    a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
+      node), cb, arg); \
+    rbtn_left_set(a_type, a_field, (node), NULL); \
+    a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
+      node), cb, arg); \
+    rbtn_right_set(a_type, a_field, (node), NULL); \
+    if (cb) { \
+        cb(node, arg); \
+    } \
+} \
+a_attr void \
+a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
+    void *arg) { \
+    a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
+    rbtree->rbt_root = NULL; \
}

#endif /* RB_H_ */
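rb_gen() expands to a full set of static functions for a caller-defined node type, and this revision adds the newly documented destroy() teardown entry point. A hedged usage sketch follows, reusing the ex_* naming from the header's own comments; the key field, comparator, and free callback are assumptions rather than jemalloc code.

#include <stdlib.h>
#include "jemalloc/internal/rb.h"

/* Example types following the naming used in the rb.h comments. */
typedef struct ex_node_s ex_node_t;
struct ex_node_s {
    int key;
    rb_node(ex_node_t) link;
};
typedef rb_tree(ex_node_t) ex_t;

static int
ex_cmp(const ex_node_t *a, const ex_node_t *b) {
    return (a->key > b->key) - (a->key < b->key);
}

/* Generates static ex_new(), ex_insert(), ex_search(), ex_destroy(), ... */
rb_gen(static, ex_, ex_t, ex_node_t, link, ex_cmp)

static void
ex_free_cb(ex_node_t *node, void *arg) {
    (void)arg;
    free(node);  /* assumes nodes were heap-allocated by the caller */
}

static void
drop_tree(ex_t *tree) {
    /* Post-order teardown without rebalancing, per the destroy docs. */
    ex_destroy(tree, ex_free_cb, NULL);
}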
deps/jemalloc/include/jemalloc/internal/rtree.h
#ifndef JEMALLOC_INTERNAL_RTREE_H
#define JEMALLOC_INTERNAL_RTREE_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/tsd.h"

/*
 * This radix tree implementation is tailored to the singular purpose of
- * associating metadata with chunks that are currently owned by jemalloc.
+ * associating metadata with extents that are currently owned by jemalloc.
 *
 *******************************************************************************
 */

/* Number of high insignificant bits. */
#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
/* Number of low insigificant bits. */
#define RTREE_NLIB LG_PAGE
/* Number of significant bits. */
#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
/* Number of levels in radix tree. */
#if RTREE_NSB <= 10
#  define RTREE_HEIGHT 1
#elif RTREE_NSB <= 36
#  define RTREE_HEIGHT 2
#elif RTREE_NSB <= 52
#  define RTREE_HEIGHT 3
#else
#  error Unsupported number of significant virtual address bits
#endif
/* Use compact leaf representation if virtual address encoding allows. */
#if RTREE_NHIB >= LG_CEIL_NSIZES
#  define RTREE_LEAF_COMPACT
#endif

/* Needed for initialization only. */
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)

typedef struct rtree_node_elm_s rtree_node_elm_t;
struct rtree_node_elm_s {
    atomic_p_t child;   /* (rtree_{node,leaf}_elm_t *) */
};

struct rtree_leaf_elm_s {
#ifdef RTREE_LEAF_COMPACT
    /*
     * Single pointer-width field containing all three leaf element fields.
     * For example, on a 64-bit x64 system with 48 significant virtual
     * memory address bits, the index, extent, and slab fields are packed as
     * such:
     *
     * x: index
     * e: extent
     * b: slab
     *
     *   00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
     */
    atomic_p_t le_bits;
#else
    atomic_p_t le_extent;   /* (extent_t *) */
    atomic_u_t le_szind;    /* (szind_t) */
    atomic_b_t le_slab;     /* (bool) */
#endif
};

typedef struct rtree_level_s rtree_level_t;
struct rtree_level_s {
    /* Number of key bits distinguished by this level. */
    unsigned bits;
    /*
...
@@ -78,217 +76,417 @@ struct rtree_level_s {
    unsigned cumbits;
};

typedef struct rtree_s rtree_t;
struct rtree_s {
    malloc_mutex_t init_lock;
    /* Number of elements based on rtree_levels[0].bits. */
#if RTREE_HEIGHT > 1
    rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
#else
    rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
#endif
};

/*
 * Split the bits into one to three partitions depending on number of
 * significant bits.  If the number of bits does not divide evenly into the
 * number of levels, place one remainder bit per level starting at the leaf
 * level.
 */
static const rtree_level_t rtree_levels[] = {
#if RTREE_HEIGHT == 1
    {RTREE_NSB, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 2
    {RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
    {RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 3
    {RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
    {RTREE_NSB/3 + RTREE_NSB%3/2,
        RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
    {RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
#else
#  error Unsupported rtree height
#endif
};

bool rtree_new(rtree_t *rtree, bool zeroed);

typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;

typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;

typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;

typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
#ifdef JEMALLOC_JET
void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
#endif
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leafkey(uintptr_t key) {
    unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
    unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
        rtree_levels[RTREE_HEIGHT-1].bits);
    unsigned maskbits = ptrbits - cumbits;
    uintptr_t mask = ~((ZU(1) << maskbits) - 1);
    return (key & mask);
}

JEMALLOC_ALWAYS_INLINE size_t
rtree_cache_direct_map(uintptr_t key) {
    unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
    unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
        rtree_levels[RTREE_HEIGHT-1].bits);
    unsigned maskbits = ptrbits - cumbits;
    return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
}

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(uintptr_t key, unsigned level) {
    unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
    unsigned cumbits = rtree_levels[level].cumbits;
    unsigned shiftbits = ptrbits - cumbits;
    unsigned maskbits = rtree_levels[level].bits;
    uintptr_t mask = (ZU(1) << maskbits) - 1;
    return ((key >> shiftbits) & mask);
}

/*
 * Atomic getters.
 *
 * dependent: Reading a value on behalf of a pointer to a valid allocation
 *            is guaranteed to be a clean read even without synchronization,
 *            because the rtree update became visible in memory before the
 *            pointer came into existence.
 * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
 *             dependent on a previous rtree write, which means a stale read
 *             could result if synchronization were omitted here.
 */
#ifdef RTREE_LEAF_COMPACT
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
    return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
        ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
#  ifdef __aarch64__
    /*
     * aarch64 doesn't sign extend the highest virtual address bit to set
     * the higher ones.  Instead, the high bits gets zeroed.
     */
    uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
    /* Mask off the slab bit. */
    uintptr_t low_bit_mask = ~(uintptr_t)1;
    uintptr_t mask = high_bit_mask & low_bit_mask;
    return (extent_t *)(bits & mask);
#  else
    /* Restore sign-extended high bits, mask slab bit. */
    return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
        RTREE_NHIB) & ~((uintptr_t)0x1));
#  endif
}

JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
    return (szind_t)(bits >> LG_VADDR);
}

JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
    return (bool)(bits & (uintptr_t)0x1);
}
#endif

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_extent_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
    rtree_leaf_elm_t *elm, bool dependent) {
#ifdef RTREE_LEAF_COMPACT
    uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
    return rtree_leaf_elm_bits_extent_get(bits);
#else
    extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
        ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
    return extent;
#endif
}
...
static inline void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    extent_t *extent, szind_t szind, bool slab) {
#ifdef RTREE_LEAF_COMPACT
    uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
        ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
        ((uintptr_t)slab);
    atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
    rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
    rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
    /*
     * Write extent last, since the element is atomically considered valid
     * as soon as the extent field is non-NULL.
     */
    rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
#endif
}

JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, bool init_missing) {
    assert(key != 0);
    assert(!dependent || !init_missing);

    size_t slot = rtree_cache_direct_map(key);
    uintptr_t leafkey = rtree_leafkey(key);
    assert(leafkey != RTREE_LEAFKEY_INVALID);

    /* Fast path: L1 direct mapped cache. */
    if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
        rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
        assert(leaf != NULL);
        uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
        return &leaf[subkey];
    }
    /*
     * Search the L2 LRU cache.  On hit, swap the matching element into the
     * slot in L1 cache, and move the position in L2 up by 1.
     */
#define RTREE_CACHE_CHECK_L2(i) do { \
    if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \
        rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \
        assert(leaf != NULL); \
        if (i > 0) { \
            /* Bubble up by one. */ \
            rtree_ctx->l2_cache[i].leafkey = \
                rtree_ctx->l2_cache[i - 1].leafkey; \
            rtree_ctx->l2_cache[i].leaf = \
                rtree_ctx->l2_cache[i - 1].leaf; \
            rtree_ctx->l2_cache[i - 1].leafkey = \
                rtree_ctx->cache[slot].leafkey; \
            rtree_ctx->l2_cache[i - 1].leaf = \
                rtree_ctx->cache[slot].leaf; \
        } else { \
            rtree_ctx->l2_cache[0].leafkey = \
                rtree_ctx->cache[slot].leafkey; \
            rtree_ctx->l2_cache[0].leaf = \
                rtree_ctx->cache[slot].leaf; \
        } \
        rtree_ctx->cache[slot].leafkey = leafkey; \
        rtree_ctx->cache[slot].leaf = leaf; \
        uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \
        return &leaf[subkey]; \
    } \
} while (0)
    /* Check the first cache entry. */
    RTREE_CACHE_CHECK_L2(0);
    /* Search the remaining cache elements. */
    for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
        RTREE_CACHE_CHECK_L2(i);
    }
#undef RTREE_CACHE_CHECK_L2

    return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
        dependent, init_missing);
}

static inline bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    extent_t *extent, szind_t szind, bool slab) {
    /* Use rtree_clear() to set the extent to NULL. */
    assert(extent != NULL);

    rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
        key, false, true);
    if (elm == NULL) {
        return true;
    }

    assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
    rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);

    return false;
}

JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    bool dependent) {
    rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
        key, dependent, false);
    if (!dependent && elm == NULL) {
        return NULL;
    }
    assert(elm != NULL);
    return elm;
}

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
        dependent);
    if (!dependent && elm == NULL) {
        return NULL;
    }
    return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
}

JEMALLOC_ALWAYS_INLINE szind_t
rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
        dependent);
    if (!dependent && elm == NULL) {
        return NSIZES;
    }
    return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
}

/*
 * rtree_slab_read() is intentionally omitted because slab is always read in
 * conjunction with szind, which makes rtree_szind_slab_read() a better choice.
 */

JEMALLOC_ALWAYS_INLINE bool
rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
        dependent);
    if (!dependent && elm == NULL) {
        return true;
    }
    *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
    *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
    return false;
}

JEMALLOC_ALWAYS_INLINE bool
rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
        dependent);
    if (!dependent && elm == NULL) {
        return true;
    }
#ifdef RTREE_LEAF_COMPACT
    uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
    *r_szind = rtree_leaf_elm_bits_szind_get(bits);
    *r_slab = rtree_leaf_elm_bits_slab_get(bits);
#else
    *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
    *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
#endif
    return false;
}

static inline void
rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, szind_t szind, bool slab) {
    assert(!slab || szind < NBINS);

    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
    rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
}

static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
    assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) != NULL);
    rtree_leaf_elm_write(tsdn, rtree, elm, NULL, NSIZES, false);
}

#endif /* JEMALLOC_INTERNAL_RTREE_H */
deps/jemalloc/include/jemalloc/internal/rtree_tsd.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H
#define JEMALLOC_INTERNAL_RTREE_CTX_H
/*
* Number of leafkey/leaf pairs to cache in L1 and L2 level respectively. Each
* entry supports an entire leaf, so the cache hit rate is typically high even
* with a small number of entries. In rare cases extent activity will straddle
* the boundary between two leaf nodes. Furthermore, an arena may use a
* combination of dss and mmap. Note that as memory usage grows past the amount
* that this cache can directly cover, the cache will become less effective if
* locality of reference is low, but the consequence is merely cache misses
* while traversing the tree nodes.
*
* The L1 direct mapped cache offers consistent and low cost on cache hit.
* However collision could affect hit rate negatively. This is resolved by
* combining with a L2 LRU cache, which requires linear search and re-ordering
* on access but suffers no collision. Note that, the cache will itself suffer
* cache misses if made overly large, plus the cost of linear search in the LRU
* cache.
*/
#define RTREE_CTX_LG_NCACHE 4
#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
#define RTREE_CTX_NCACHE_L2 8
/*
* Zero initializer required for tsd initialization only. Proper initialization
* done via rtree_ctx_data_init().
*/
#define RTREE_CTX_ZERO_INITIALIZER {{{0}}, {{0}}}
typedef
struct
rtree_leaf_elm_s
rtree_leaf_elm_t
;
typedef
struct
rtree_ctx_cache_elm_s
rtree_ctx_cache_elm_t
;
struct
rtree_ctx_cache_elm_s
{
uintptr_t
leafkey
;
rtree_leaf_elm_t
*
leaf
;
};
typedef
struct
rtree_ctx_s
rtree_ctx_t
;
struct
rtree_ctx_s
{
/* Direct mapped cache. */
rtree_ctx_cache_elm_t
cache
[
RTREE_CTX_NCACHE
];
/* L2 LRU cache. */
rtree_ctx_cache_elm_t
l2_cache
[
RTREE_CTX_NCACHE_L2
];
};
void
rtree_ctx_data_init
(
rtree_ctx_t
*
ctx
);
#endif
/* JEMALLOC_INTERNAL_RTREE_CTX_H */
deps/jemalloc/include/jemalloc/internal/size_classes.sh
View file @
fb1f4f4e
...
...
@@ -40,6 +40,54 @@ lg() {
done
}
lg_ceil
()
{
y
=
$1
lg
${
y
}
;
lg_floor
=
${
lg_result
}
pow2
${
lg_floor
}
;
pow2_floor
=
${
pow2_result
}
if
[
${
pow2_floor
}
-lt
${
y
}
]
;
then
lg_ceil_result
=
$((${
lg_floor
}
+
1
))
else
lg_ceil_result
=
${
lg_floor
}
fi
}
reg_size_compute
()
{
lg_grp
=
$1
lg_delta
=
$2
ndelta
=
$3
pow2
${
lg_grp
}
;
grp
=
${
pow2_result
}
pow2
${
lg_delta
}
;
delta
=
${
pow2_result
}
reg_size
=
$((${
grp
}
+
${
delta
}
*
${
ndelta
}))
}
slab_size
()
{
lg_p
=
$1
lg_grp
=
$2
lg_delta
=
$3
ndelta
=
$4
pow2
${
lg_p
}
;
p
=
${
pow2_result
}
reg_size_compute
${
lg_grp
}
${
lg_delta
}
${
ndelta
}
# Compute smallest slab size that is an integer multiple of reg_size.
try_slab_size
=
${
p
}
try_nregs
=
$((${
try_slab_size
}
/
${
reg_size
}))
perfect
=
0
while
[
${
perfect
}
-eq
0
]
;
do
perfect_slab_size
=
${
try_slab_size
}
perfect_nregs
=
${
try_nregs
}
try_slab_size
=
$((${
try_slab_size
}
+
${
p
}))
try_nregs
=
$((${
try_slab_size
}
/
${
reg_size
}))
if
[
${
perfect_slab_size
}
-eq
$((${
perfect_nregs
}
*
${
reg_size
}))
]
;
then
perfect
=
1
fi
done
slab_size_pgs
=
$((${
perfect_slab_size
}
/
${
p
}))
}
size_class
()
{
index
=
$1
lg_grp
=
$2
...
...
@@ -48,6 +96,21 @@ size_class() {
lg_p
=
$5
lg_kmax
=
$6
if
[
${
lg_delta
}
-ge
${
lg_p
}
]
;
then
psz
=
"yes"
else
pow2
${
lg_p
}
;
p
=
${
pow2_result
}
pow2
${
lg_grp
}
;
grp
=
${
pow2_result
}
pow2
${
lg_delta
}
;
delta
=
${
pow2_result
}
sz
=
$((${
grp
}
+
${
delta
}
*
${
ndelta
}))
npgs
=
$((${
sz
}
/
${
p
}))
if
[
${
sz
}
-eq
$((${
npgs
}
*
${
p
}))
]
;
then
psz
=
"yes"
else
psz
=
"no"
fi
fi
lg
${
ndelta
}
;
lg_ndelta
=
${
lg_result
}
;
pow2
${
lg_ndelta
}
if
[
${
pow2_result
}
-lt
${
ndelta
}
]
;
then
rem
=
"yes"
...
...
@@ -65,8 +128,10 @@ size_class() {
if
[
${
lg_size
}
-lt
$((${
lg_p
}
+
${
lg_g
}))
]
;
then
bin
=
"yes"
slab_size
${
lg_p
}
${
lg_grp
}
${
lg_delta
}
${
ndelta
}
;
pgs
=
${
slab_size_pgs
}
else
bin
=
"no"
pgs
=
0
fi
if
[
${
lg_size
}
-lt
${
lg_kmax
}
\
-o
${
lg_size
}
-eq
${
lg_kmax
}
-a
${
rem
}
=
"no"
]
;
then
...
...
@@ -74,14 +139,16 @@ size_class() {
else
lg_delta_lookup
=
"no"
fi
printf
' SC(%3d, %6d, %8d, %6d, %3s, %2s) \\\n'
${
index
}
${
lg_grp
}
${
lg_delta
}
${
ndelta
}
${
bin
}
${
lg_delta_lookup
}
printf
' SC(%3d, %6d, %8d, %6d, %3s,
%3s, %3d,
%2s) \\\n'
${
index
}
${
lg_grp
}
${
lg_delta
}
${
ndelta
}
${
psz
}
${
bin
}
${
pgs
}
${
lg_delta_lookup
}
# Defined upon return:
# -
lg_delta_lookup (${lg_delta}
or "no")
# -
psz ("yes"
or "no")
# - bin ("yes" or "no")
# - pgs
# - lg_delta_lookup (${lg_delta} or "no")
}
sep_line
()
{
echo
"
\\
"
echo
"
\\
"
}
size_classes
()
{
...
...
@@ -94,13 +161,14 @@ size_classes() {
pow2
$((${
lg_z
}
+
3
))
;
ptr_bits
=
${
pow2_result
}
pow2
${
lg_g
}
;
g
=
${
pow2_result
}
echo
"#define
SIZE_CLASSES
\\
"
echo
" /* index, lg_grp, lg_delta, ndelta,
bin
, lg_delta_lookup */
\\
"
echo
"#define
SIZE_CLASSES
\\
"
echo
" /* index, lg_grp, lg_delta, ndelta,
psz, bin, pgs
, lg_delta_lookup */
\\
"
ntbins
=
0
nlbins
=
0
lg_tiny_maxclass
=
'"NA"'
nbins
=
0
npsizes
=
0
# Tiny size classes.
ndelta
=
0
...
...
@@ -112,6 +180,9 @@ size_classes() {
if
[
${
lg_delta_lookup
}
!=
"no"
]
;
then
nlbins
=
$((${
index
}
+
1
))
fi
if
[
${
psz
}
=
"yes"
]
;
then
npsizes
=
$((${
npsizes
}
+
1
))
fi
if
[
${
bin
}
!=
"no"
]
;
then
nbins
=
$((${
index
}
+
1
))
fi
...
...
@@ -133,19 +204,25 @@ size_classes() {
index
=
$((${
index
}
+
1
))
lg_grp
=
$((${
lg_grp
}
+
1
))
lg_delta
=
$((${
lg_delta
}
+
1
))
if
[
${
psz
}
=
"yes"
]
;
then
npsizes
=
$((${
npsizes
}
+
1
))
fi
fi
while
[
${
ndelta
}
-lt
${
g
}
]
;
do
size_class
${
index
}
${
lg_grp
}
${
lg_delta
}
${
ndelta
}
${
lg_p
}
${
lg_kmax
}
index
=
$((${
index
}
+
1
))
ndelta
=
$((${
ndelta
}
+
1
))
if
[
${
psz
}
=
"yes"
]
;
then
npsizes
=
$((${
npsizes
}
+
1
))
fi
done
# All remaining groups.
lg_grp
=
$((${
lg_grp
}
+
${
lg_g
}))
while
[
${
lg_grp
}
-lt
${
ptr_bits
}
]
;
do
while
[
${
lg_grp
}
-lt
$((
${
ptr_bits
}
-
1
))
]
;
do
sep_line
ndelta
=
1
if
[
${
lg_grp
}
-eq
$((${
ptr_bits
}
-
1
))
]
;
then
if
[
${
lg_grp
}
-eq
$((${
ptr_bits
}
-
2
))
]
;
then
ndelta_limit
=
$((${
g
}
-
1
))
else
ndelta_limit
=
${
g
}
...
...
@@ -157,6 +234,9 @@ size_classes() {
# Final written value is correct:
lookup_maxclass
=
"((((size_t)1) <<
${
lg_grp
}
) + (((size_t)
${
ndelta
}
) <<
${
lg_delta
}
))"
fi
if
[
${
psz
}
=
"yes"
]
;
then
npsizes
=
$((${
npsizes
}
+
1
))
fi
if
[
${
bin
}
!=
"no"
]
;
then
nbins
=
$((${
index
}
+
1
))
# Final written value is correct:
...
...
@@ -168,7 +248,7 @@ size_classes() {
fi
fi
# Final written value is correct:
hu
ge_maxclass
=
"((((size_t)1) <<
${
lg_grp
}
) + (((size_t)
${
ndelta
}
) <<
${
lg_delta
}
))"
lar
ge_maxclass
=
"((((size_t)1) <<
${
lg_grp
}
) + (((size_t)
${
ndelta
}
) <<
${
lg_delta
}
))"
index
=
$((${
index
}
+
1
))
ndelta
=
$((${
ndelta
}
+
1
))
done
...
...
@@ -177,51 +257,61 @@ size_classes() {
done
echo
nsizes
=
${
index
}
lg_ceil
${
nsizes
}
;
lg_ceil_nsizes
=
${
lg_ceil_result
}
# Defined upon completion:
# - ntbins
# - nlbins
# - nbins
# - nsizes
# - lg_ceil_nsizes
# - npsizes
# - lg_tiny_maxclass
# - lookup_maxclass
# - small_maxclass
# - lg_large_minclass
# -
hu
ge_maxclass
# -
lar
ge_maxclass
}
cat
<<
EOF
#ifndef JEMALLOC_INTERNAL_SIZE_CLASSES_H
#define JEMALLOC_INTERNAL_SIZE_CLASSES_H
/* This file was automatically generated by size_classes.sh. */
/******************************************************************************/
#i
fdef JEMALLOC_H_TYPES
#i
nclude "jemalloc/internal/jemalloc_internal_types.h"
/*
* This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to
* be defined prior to inclusion, and it in turn defines:
* This header file defines:
*
* LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
*
SIZE_CLASSES: Complete table of
*
SC(index, lg_grp, lg_delta, ndelta,
bin, lg_delta_lookup)
* tuples.
*
LG_TINY_MIN: Lg of minimum size class to support.
*
SIZE_CLASSES: Complete table of
SC(index, lg_grp, lg_delta, ndelta,
psz,
*
bin, pgs, lg_delta_lookup)
tuples.
* index: Size class index.
* lg_grp: Lg group base size (no deltas added).
* lg_delta: Lg delta to previous size class.
* ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
* psz: 'yes' if a multiple of the page size, 'no' otherwise.
* bin: 'yes' if a small bin size class, 'no' otherwise.
* pgs: Slab page count if a small bin size class, 0 otherwise.
* lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
* otherwise.
* NTBINS: Number of tiny bins.
* NLBINS: Number of bins supported by the lookup table.
* NBINS: Number of small size class bins.
* NSIZES: Number of size classes.
* LG_CEIL_NSIZES: Number of bits required to store NSIZES.
* NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE).
* LG_TINY_MAXCLASS: Lg of maximum tiny size class.
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
* LG_LARGE_MINCLASS: Lg of minimum large size class.
*
HU
GE_MAXCLASS: Maximum (
hu
ge) size class.
*
LAR
GE_MAXCLASS: Maximum (
lar
ge) size class.
*/
#define LG_SIZE_CLASS_GROUP
${
lg_g
}
#define LG_SIZE_CLASS_GROUP
${
lg_g
}
#define LG_TINY_MIN
${
lg_tmin
}
EOF
...
...
@@ -233,16 +323,19 @@ for lg_z in ${lg_zarr} ; do
for
lg_p
in
${
lg_parr
}
;
do
echo
"#if (LG_SIZEOF_PTR ==
${
lg_z
}
&& LG_TINY_MIN ==
${
lg_t
}
&& LG_QUANTUM ==
${
lg_q
}
&& LG_PAGE ==
${
lg_p
}
)"
size_classes
${
lg_z
}
${
lg_q
}
${
lg_t
}
${
lg_p
}
${
lg_g
}
echo
"#define SIZE_CLASSES_DEFINED"
echo
"#define NTBINS
${
ntbins
}
"
echo
"#define NLBINS
${
nlbins
}
"
echo
"#define NBINS
${
nbins
}
"
echo
"#define NSIZES
${
nsizes
}
"
echo
"#define LG_TINY_MAXCLASS
${
lg_tiny_maxclass
}
"
echo
"#define LOOKUP_MAXCLASS
${
lookup_maxclass
}
"
echo
"#define SMALL_MAXCLASS
${
small_maxclass
}
"
echo
"#define LG_LARGE_MINCLASS
${
lg_large_minclass
}
"
echo
"#define HUGE_MAXCLASS
${
huge_maxclass
}
"
echo
"#define SIZE_CLASSES_DEFINED"
echo
"#define NTBINS
${
ntbins
}
"
echo
"#define NLBINS
${
nlbins
}
"
echo
"#define NBINS
${
nbins
}
"
echo
"#define NSIZES
${
nsizes
}
"
echo
"#define LG_CEIL_NSIZES
${
lg_ceil_nsizes
}
"
echo
"#define NPSIZES
${
npsizes
}
"
echo
"#define LG_TINY_MAXCLASS
${
lg_tiny_maxclass
}
"
echo
"#define LOOKUP_MAXCLASS
${
lookup_maxclass
}
"
echo
"#define SMALL_MAXCLASS
${
small_maxclass
}
"
echo
"#define LG_LARGE_MINCLASS
${
lg_large_minclass
}
"
echo
"#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)"
echo
"#define LARGE_MAXCLASS
${
large_maxclass
}
"
echo
"#endif"
echo
done
...
...
@@ -258,29 +351,11 @@ cat <<EOF
#undef SIZE_CLASSES_DEFINED
/*
* The size2index_tab lookup table uses uint8_t to encode each bin index, so we
* cannot support more than 256 small size classes. Further constrain NBINS to
* 255 since all small size classes, plus a "not small" size class must be
* stored in 8 bits of arena_chunk_map_bits_t's bits field.
* cannot support more than 256 small size classes.
*/
#if (NBINS > 25
5
)
#if (NBINS > 25
6
)
# error "Too many small size classes"
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_SIZE_CLASSES_H */
EOF
deps/jemalloc/include/jemalloc/internal/smoothstep.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
/*
* This file was generated by the following command:
* sh smoothstep.sh smoother 200 24 3 15
*/
/******************************************************************************/
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
*
* 3 2
* smoothstep(x) = -2x + 3x
*
* 5 4 3
* smootherstep(x) = 6x - 15x + 10x
*
* 7 6 5 4
* smootheststep(x) = -20x + 70x - 84x + 35x
*/
#define SMOOTHSTEP_VARIANT "smoother"
#define SMOOTHSTEP_NSTEPS 200
#define SMOOTHSTEP_BFP 24
#define SMOOTHSTEP \
/* STEP(step, h, x, y) */
\
STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
#endif
/* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
deps/jemalloc/include/jemalloc/internal/smoothstep.sh
0 → 100755
View file @
fb1f4f4e
#!/bin/sh
#
# Generate a discrete lookup table for a sigmoid function in the smoothstep
# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode
# the entries using a binary fixed point representation.
#
# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
#
# <variant> is in {smooth, smoother, smoothest}.
# <nsteps> must be greater than zero.
# <bfp> must be in [0..62]; reasonable values are roughly [10..30].
# <xprec> is x decimal precision.
# <yprec> is y decimal precision.
#set -x
cmd
=
"sh smoothstep.sh
$*
"
variant
=
$1
nsteps
=
$2
bfp
=
$3
xprec
=
$4
yprec
=
$5
case
"
${
variant
}
"
in
smooth
)
;;
smoother
)
;;
smoothest
)
;;
*
)
echo
"Unsupported variant"
exit
1
;;
esac
smooth
()
{
step
=
$1
y
=
`
echo
${
yprec
}
k
${
step
}
${
nsteps
}
/ sx _2 lx 3 ^
'*'
3 lx 2 ^
'*'
+ p | dc |
tr
-d
'\\\\\n'
|
sed
-e
's#^\.#0.#g'
`
h
=
`
echo
${
yprec
}
k 2
${
bfp
}
^
${
y
}
'*'
p | dc |
tr
-d
'\\\\\n'
|
sed
-e
's#^\.#0.#g'
|
tr
'.'
' '
|
awk
'{print $1}'
`
}
smoother
()
{
step
=
$1
y
=
`
echo
${
yprec
}
k
${
step
}
${
nsteps
}
/ sx 6 lx 5 ^
'*'
_15 lx 4 ^
'*'
+ 10 lx 3 ^
'*'
+ p | dc |
tr
-d
'\\\\\n'
|
sed
-e
's#^\.#0.#g'
`
h
=
`
echo
${
yprec
}
k 2
${
bfp
}
^
${
y
}
'*'
p | dc |
tr
-d
'\\\\\n'
|
sed
-e
's#^\.#0.#g'
|
tr
'.'
' '
|
awk
'{print $1}'
`
}
smoothest
()
{
step
=
$1
y
=
`
echo
${
yprec
}
k
${
step
}
${
nsteps
}
/ sx _20 lx 7 ^
'*'
70 lx 6 ^
'*'
+ _84 lx 5 ^
'*'
+ 35 lx 4 ^
'*'
+ p | dc |
tr
-d
'\\\\\n'
|
sed
-e
's#^\.#0.#g'
`
h
=
`
echo
${
yprec
}
k 2
${
bfp
}
^
${
y
}
'*'
p | dc |
tr
-d
'\\\\\n'
|
sed
-e
's#^\.#0.#g'
|
tr
'.'
' '
|
awk
'{print $1}'
`
}
cat
<<
EOF
#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
/*
* This file was generated by the following command:
*
$cmd
*/
/******************************************************************************/
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
*
* 3 2
* smoothstep(x) = -2x + 3x
*
* 5 4 3
* smootherstep(x) = 6x - 15x + 10x
*
* 7 6 5 4
* smootheststep(x) = -20x + 70x - 84x + 35x
*/
#define SMOOTHSTEP_VARIANT "
${
variant
}
"
#define SMOOTHSTEP_NSTEPS
${
nsteps
}
#define SMOOTHSTEP_BFP
${
bfp
}
#define SMOOTHSTEP
\\
/* STEP(step, h, x, y) */
\\
EOF
s
=
1
while
[
$s
-le
$nsteps
]
;
do
$variant
${
s
}
x
=
`
echo
${
xprec
}
k
${
s
}
${
nsteps
}
/ p | dc |
tr
-d
'\\\\\n'
|
sed
-e
's#^\.#0.#g'
`
printf
' STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n'
${
s
}
${
h
}
${
x
}
${
y
}
s
=
$((
s+1
))
done
echo
cat
<<
EOF
#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
EOF
deps/jemalloc/include/jemalloc/internal/spin.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_SPIN_H
#define JEMALLOC_INTERNAL_SPIN_H
#define SPIN_INITIALIZER {0U}
typedef
struct
{
unsigned
iteration
;
}
spin_t
;
static
inline
void
spin_cpu_spinwait
()
{
# if HAVE_CPU_SPINWAIT
CPU_SPINWAIT
;
# else
volatile
int
x
=
0
;
x
=
x
;
# endif
}
static
inline
void
spin_adaptive
(
spin_t
*
spin
)
{
volatile
uint32_t
i
;
if
(
spin
->
iteration
<
5
)
{
for
(
i
=
0
;
i
<
(
1U
<<
spin
->
iteration
);
i
++
)
{
spin_cpu_spinwait
();
}
spin
->
iteration
++
;
}
else
{
#ifdef _WIN32
SwitchToThread
();
#else
sched_yield
();
#endif
}
}
#undef SPIN_INLINE
#endif
/* JEMALLOC_INTERNAL_SPIN_H */
deps/jemalloc/include/jemalloc/internal/stats.h
View file @
fb1f4f4e
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef
struct
tcache_bin_stats_s
tcache_bin_stats_t
;
typedef
struct
malloc_bin_stats_s
malloc_bin_stats_t
;
typedef
struct
malloc_large_stats_s
malloc_large_stats_t
;
typedef
struct
malloc_huge_stats_s
malloc_huge_stats_t
;
typedef
struct
arena_stats_s
arena_stats_t
;
typedef
struct
chunk_stats_s
chunk_stats_t
;
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct
tcache_bin_stats_s
{
/*
* Number of allocation requests that corresponded to the size of this
* bin.
*/
uint64_t
nrequests
;
};
struct
malloc_bin_stats_s
{
/*
* Total number of allocation/deallocation requests served directly by
* the bin. Note that tcache may allocate an object, then recycle it
* many times, resulting many increments to nrequests, but only one
* each to nmalloc and ndalloc.
*/
uint64_t
nmalloc
;
uint64_t
ndalloc
;
/*
* Number of allocation requests that correspond to the size of this
* bin. This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
uint64_t
nrequests
;
/*
* Current number of regions of this size class, including regions
* currently cached by tcache.
*/
size_t
curregs
;
/* Number of tcache fills from this bin. */
uint64_t
nfills
;
/* Number of tcache flushes to this bin. */
uint64_t
nflushes
;
/* Total number of runs created for this bin's size class. */
uint64_t
nruns
;
/*
* Total number of runs reused by extracting them from the runs tree for
* this bin's size class.
*/
uint64_t
reruns
;
/* Current number of runs in this bin. */
size_t
curruns
;
};
struct
malloc_large_stats_s
{
/*
* Total number of allocation/deallocation requests served directly by
* the arena. Note that tcache may allocate an object, then recycle it
* many times, resulting many increments to nrequests, but only one
* each to nmalloc and ndalloc.
*/
uint64_t
nmalloc
;
uint64_t
ndalloc
;
/*
* Number of allocation requests that correspond to this size class.
* This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
uint64_t
nrequests
;
/*
* Current number of runs of this size class, including runs currently
* cached by tcache.
*/
size_t
curruns
;
};
struct
malloc_huge_stats_s
{
/*
* Total number of allocation/deallocation requests served directly by
* the arena.
*/
uint64_t
nmalloc
;
uint64_t
ndalloc
;
/* Current number of (multi-)chunk allocations of this size class. */
size_t
curhchunks
;
#ifndef JEMALLOC_INTERNAL_STATS_H
#define JEMALLOC_INTERNAL_STATS_H
/* OPTION(opt, var_name, default, set_value_to) */
#define STATS_PRINT_OPTIONS \
OPTION('J', json, false, true) \
OPTION('g', general, true, false) \
OPTION('m', merged, config_stats, false) \
OPTION('d', destroyed, config_stats, false) \
OPTION('a', unmerged, config_stats, false) \
OPTION('b', bins, true, false) \
OPTION('l', large, true, false) \
OPTION('x', mutex, true, false)
enum
{
#define OPTION(o, v, d, s) stats_print_option_num_##v,
STATS_PRINT_OPTIONS
#undef OPTION
stats_print_tot_num_options
};
struct
arena_stats_s
{
/* Number of bytes currently mapped. */
size_t
mapped
;
/* Options for stats_print. */
extern
bool
opt_stats_print
;
extern
char
opt_stats_print_opts
[
stats_print_tot_num_options
+
1
]
;
/*
* Total number of purge sweeps, total number of madvise calls made,
* and total pages purged in order to keep dirty unused memory under
* control.
*/
uint64_t
npurge
;
uint64_t
nmadvise
;
uint64_t
purged
;
/*
* Number of bytes currently mapped purely for metadata purposes, and
* number of bytes currently allocated for internal metadata.
*/
size_t
metadata_mapped
;
size_t
metadata_allocated
;
/* Protected via atomic_*_z(). */
/* Per-size-category statistics. */
size_t
allocated_large
;
uint64_t
nmalloc_large
;
uint64_t
ndalloc_large
;
uint64_t
nrequests_large
;
size_t
allocated_huge
;
uint64_t
nmalloc_huge
;
uint64_t
ndalloc_huge
;
/* One element for each large size class. */
malloc_large_stats_t
*
lstats
;
/* One element for each huge size class. */
malloc_huge_stats_t
*
hstats
;
};
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern
bool
opt_stats_print
;
extern
size_t
stats_cactive
;
void
stats_print
(
void
(
*
write
)(
void
*
,
const
char
*
),
void
*
cbopaque
,
/* Implements je_malloc_stats_print. */
void
stats_print
(
void
(
*
write_cb
)(
void
*
,
const
char
*
),
void
*
cbopaque
,
const
char
*
opts
);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
size_t
stats_cactive_get
(
void
);
void
stats_cactive_add
(
size_t
size
);
void
stats_cactive_sub
(
size_t
size
);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
JEMALLOC_INLINE
size_t
stats_cactive_get
(
void
)
{
return
(
atomic_read_z
(
&
stats_cactive
));
}
JEMALLOC_INLINE
void
stats_cactive_add
(
size_t
size
)
{
atomic_add_z
(
&
stats_cactive
,
size
);
}
JEMALLOC_INLINE
void
stats_cactive_sub
(
size_t
size
)
{
atomic_sub_z
(
&
stats_cactive
,
size
);
}
#endif
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif
/* JEMALLOC_INTERNAL_STATS_H */
deps/jemalloc/include/jemalloc/internal/sz.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_SIZE_H
#define JEMALLOC_INTERNAL_SIZE_H
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"
/*
* sz module: Size computations.
*
* Some abbreviations used here:
* p: Page
* ind: Index
* s, sz: Size
* u: Usable size
* a: Aligned
*
* These are not always used completely consistently, but should be enough to
* interpret function names. E.g. sz_psz2ind converts page size to page size
* index; sz_sa2u converts a (size, alignment) allocation request to the usable
* size that would result from such an allocation.
*/
/*
* sz_pind2sz_tab encodes the same information as could be computed by
* sz_pind2sz_compute().
*/
extern
size_t
const
sz_pind2sz_tab
[
NPSIZES
+
1
];
/*
* sz_index2size_tab encodes the same information as could be computed (at
* unacceptable cost in some code paths) by sz_index2size_compute().
*/
extern
size_t
const
sz_index2size_tab
[
NSIZES
];
/*
* sz_size2index_tab is a compact lookup table that rounds request sizes up to
* size classes. In order to reduce cache footprint, the table is compressed,
* and all accesses are via sz_size2index().
*/
extern
uint8_t
const
sz_size2index_tab
[];
static
const
size_t
sz_large_pad
=
#ifdef JEMALLOC_CACHE_OBLIVIOUS
PAGE
#else
0
#endif
;
JEMALLOC_ALWAYS_INLINE
pszind_t
sz_psz2ind
(
size_t
psz
)
{
if
(
unlikely
(
psz
>
LARGE_MAXCLASS
))
{
return
NPSIZES
;
}
{
pszind_t
x
=
lg_floor
((
psz
<<
1
)
-
1
);
pszind_t
shift
=
(
x
<
LG_SIZE_CLASS_GROUP
+
LG_PAGE
)
?
0
:
x
-
(
LG_SIZE_CLASS_GROUP
+
LG_PAGE
);
pszind_t
grp
=
shift
<<
LG_SIZE_CLASS_GROUP
;
pszind_t
lg_delta
=
(
x
<
LG_SIZE_CLASS_GROUP
+
LG_PAGE
+
1
)
?
LG_PAGE
:
x
-
LG_SIZE_CLASS_GROUP
-
1
;
size_t
delta_inverse_mask
=
ZU
(
-
1
)
<<
lg_delta
;
pszind_t
mod
=
((((
psz
-
1
)
&
delta_inverse_mask
)
>>
lg_delta
))
&
((
ZU
(
1
)
<<
LG_SIZE_CLASS_GROUP
)
-
1
);
pszind_t
ind
=
grp
+
mod
;
return
ind
;
}
}
static
inline
size_t
sz_pind2sz_compute
(
pszind_t
pind
)
{
if
(
unlikely
(
pind
==
NPSIZES
))
{
return
LARGE_MAXCLASS
+
PAGE
;
}
{
size_t
grp
=
pind
>>
LG_SIZE_CLASS_GROUP
;
size_t
mod
=
pind
&
((
ZU
(
1
)
<<
LG_SIZE_CLASS_GROUP
)
-
1
);
size_t
grp_size_mask
=
~
((
!!
grp
)
-
1
);
size_t
grp_size
=
((
ZU
(
1
)
<<
(
LG_PAGE
+
(
LG_SIZE_CLASS_GROUP
-
1
)))
<<
grp
)
&
grp_size_mask
;
size_t
shift
=
(
grp
==
0
)
?
1
:
grp
;
size_t
lg_delta
=
shift
+
(
LG_PAGE
-
1
);
size_t
mod_size
=
(
mod
+
1
)
<<
lg_delta
;
size_t
sz
=
grp_size
+
mod_size
;
return
sz
;
}
}
static
inline
size_t
sz_pind2sz_lookup
(
pszind_t
pind
)
{
size_t
ret
=
(
size_t
)
sz_pind2sz_tab
[
pind
];
assert
(
ret
==
sz_pind2sz_compute
(
pind
));
return
ret
;
}
static
inline
size_t
sz_pind2sz
(
pszind_t
pind
)
{
assert
(
pind
<
NPSIZES
+
1
);
return
sz_pind2sz_lookup
(
pind
);
}
static
inline
size_t
sz_psz2u
(
size_t
psz
)
{
if
(
unlikely
(
psz
>
LARGE_MAXCLASS
))
{
return
LARGE_MAXCLASS
+
PAGE
;
}
{
size_t
x
=
lg_floor
((
psz
<<
1
)
-
1
);
size_t
lg_delta
=
(
x
<
LG_SIZE_CLASS_GROUP
+
LG_PAGE
+
1
)
?
LG_PAGE
:
x
-
LG_SIZE_CLASS_GROUP
-
1
;
size_t
delta
=
ZU
(
1
)
<<
lg_delta
;
size_t
delta_mask
=
delta
-
1
;
size_t
usize
=
(
psz
+
delta_mask
)
&
~
delta_mask
;
return
usize
;
}
}
static
inline
szind_t
sz_size2index_compute
(
size_t
size
)
{
if
(
unlikely
(
size
>
LARGE_MAXCLASS
))
{
return
NSIZES
;
}
#if (NTBINS != 0)
if
(
size
<=
(
ZU
(
1
)
<<
LG_TINY_MAXCLASS
))
{
szind_t
lg_tmin
=
LG_TINY_MAXCLASS
-
NTBINS
+
1
;
szind_t
lg_ceil
=
lg_floor
(
pow2_ceil_zu
(
size
));
return
(
lg_ceil
<
lg_tmin
?
0
:
lg_ceil
-
lg_tmin
);
}
#endif
{
szind_t
x
=
lg_floor
((
size
<<
1
)
-
1
);
szind_t
shift
=
(
x
<
LG_SIZE_CLASS_GROUP
+
LG_QUANTUM
)
?
0
:
x
-
(
LG_SIZE_CLASS_GROUP
+
LG_QUANTUM
);
szind_t
grp
=
shift
<<
LG_SIZE_CLASS_GROUP
;
szind_t
lg_delta
=
(
x
<
LG_SIZE_CLASS_GROUP
+
LG_QUANTUM
+
1
)
?
LG_QUANTUM
:
x
-
LG_SIZE_CLASS_GROUP
-
1
;
size_t
delta_inverse_mask
=
ZU
(
-
1
)
<<
lg_delta
;
szind_t
mod
=
((((
size
-
1
)
&
delta_inverse_mask
)
>>
lg_delta
))
&
((
ZU
(
1
)
<<
LG_SIZE_CLASS_GROUP
)
-
1
);
szind_t
index
=
NTBINS
+
grp
+
mod
;
return
index
;
}
}
JEMALLOC_ALWAYS_INLINE
szind_t
sz_size2index_lookup
(
size_t
size
)
{
assert
(
size
<=
LOOKUP_MAXCLASS
);
{
szind_t
ret
=
(
sz_size2index_tab
[(
size
-
1
)
>>
LG_TINY_MIN
]);
assert
(
ret
==
sz_size2index_compute
(
size
));
return
ret
;
}
}
JEMALLOC_ALWAYS_INLINE
szind_t
sz_size2index
(
size_t
size
)
{
assert
(
size
>
0
);
if
(
likely
(
size
<=
LOOKUP_MAXCLASS
))
{
return
sz_size2index_lookup
(
size
);
}
return
sz_size2index_compute
(
size
);
}
static
inline
size_t
sz_index2size_compute
(
szind_t
index
)
{
#if (NTBINS > 0)
if
(
index
<
NTBINS
)
{
return
(
ZU
(
1
)
<<
(
LG_TINY_MAXCLASS
-
NTBINS
+
1
+
index
));
}
#endif
{
size_t
reduced_index
=
index
-
NTBINS
;
size_t
grp
=
reduced_index
>>
LG_SIZE_CLASS_GROUP
;
size_t
mod
=
reduced_index
&
((
ZU
(
1
)
<<
LG_SIZE_CLASS_GROUP
)
-
1
);
size_t
grp_size_mask
=
~
((
!!
grp
)
-
1
);
size_t
grp_size
=
((
ZU
(
1
)
<<
(
LG_QUANTUM
+
(
LG_SIZE_CLASS_GROUP
-
1
)))
<<
grp
)
&
grp_size_mask
;
size_t
shift
=
(
grp
==
0
)
?
1
:
grp
;
size_t
lg_delta
=
shift
+
(
LG_QUANTUM
-
1
);
size_t
mod_size
=
(
mod
+
1
)
<<
lg_delta
;
size_t
usize
=
grp_size
+
mod_size
;
return
usize
;
}
}
JEMALLOC_ALWAYS_INLINE
size_t
sz_index2size_lookup
(
szind_t
index
)
{
size_t
ret
=
(
size_t
)
sz_index2size_tab
[
index
];
assert
(
ret
==
sz_index2size_compute
(
index
));
return
ret
;
}
JEMALLOC_ALWAYS_INLINE
size_t
sz_index2size
(
szind_t
index
)
{
assert
(
index
<
NSIZES
);
return
sz_index2size_lookup
(
index
);
}
JEMALLOC_ALWAYS_INLINE
size_t
sz_s2u_compute
(
size_t
size
)
{
if
(
unlikely
(
size
>
LARGE_MAXCLASS
))
{
return
0
;
}
#if (NTBINS > 0)
if
(
size
<=
(
ZU
(
1
)
<<
LG_TINY_MAXCLASS
))
{
size_t
lg_tmin
=
LG_TINY_MAXCLASS
-
NTBINS
+
1
;
size_t
lg_ceil
=
lg_floor
(
pow2_ceil_zu
(
size
));
return
(
lg_ceil
<
lg_tmin
?
(
ZU
(
1
)
<<
lg_tmin
)
:
(
ZU
(
1
)
<<
lg_ceil
));
}
#endif
{
size_t
x
=
lg_floor
((
size
<<
1
)
-
1
);
size_t
lg_delta
=
(
x
<
LG_SIZE_CLASS_GROUP
+
LG_QUANTUM
+
1
)
?
LG_QUANTUM
:
x
-
LG_SIZE_CLASS_GROUP
-
1
;
size_t
delta
=
ZU
(
1
)
<<
lg_delta
;
size_t
delta_mask
=
delta
-
1
;
size_t
usize
=
(
size
+
delta_mask
)
&
~
delta_mask
;
return
usize
;
}
}
JEMALLOC_ALWAYS_INLINE
size_t
sz_s2u_lookup
(
size_t
size
)
{
size_t
ret
=
sz_index2size_lookup
(
sz_size2index_lookup
(
size
));
assert
(
ret
==
sz_s2u_compute
(
size
));
return
ret
;
}
/*
* Compute usable size that would result from allocating an object with the
* specified size.
*/
JEMALLOC_ALWAYS_INLINE
size_t
sz_s2u
(
size_t
size
)
{
assert
(
size
>
0
);
if
(
likely
(
size
<=
LOOKUP_MAXCLASS
))
{
return
sz_s2u_lookup
(
size
);
}
return
sz_s2u_compute
(
size
);
}
/*
* Compute usable size that would result from allocating an object with the
* specified size and alignment.
*/
JEMALLOC_ALWAYS_INLINE
size_t
sz_sa2u
(
size_t
size
,
size_t
alignment
)
{
size_t
usize
;
assert
(
alignment
!=
0
&&
((
alignment
-
1
)
&
alignment
)
==
0
);
/* Try for a small size class. */
if
(
size
<=
SMALL_MAXCLASS
&&
alignment
<
PAGE
)
{
/*
* Round size up to the nearest multiple of alignment.
*
* This done, we can take advantage of the fact that for each
* small size class, every object is aligned at the smallest
* power of two that is non-zero in the base two representation
* of the size. For example:
*
* Size | Base 2 | Minimum alignment
* -----+----------+------------------
* 96 | 1100000 | 32
* 144 | 10100000 | 32
* 192 | 11000000 | 64
*/
usize
=
sz_s2u
(
ALIGNMENT_CEILING
(
size
,
alignment
));
if
(
usize
<
LARGE_MINCLASS
)
{
return
usize
;
}
}
/* Large size class. Beware of overflow. */
if
(
unlikely
(
alignment
>
LARGE_MAXCLASS
))
{
return
0
;
}
/* Make sure result is a large size class. */
if
(
size
<=
LARGE_MINCLASS
)
{
usize
=
LARGE_MINCLASS
;
}
else
{
usize
=
sz_s2u
(
size
);
if
(
usize
<
size
)
{
/* size_t overflow. */
return
0
;
}
}
/*
* Calculate the multi-page mapping that large_palloc() would need in
* order to guarantee the alignment.
*/
if
(
usize
+
sz_large_pad
+
PAGE_CEILING
(
alignment
)
-
PAGE
<
usize
)
{
/* size_t overflow. */
return
0
;
}
return
usize
;
}
#endif
/* JEMALLOC_INTERNAL_SIZE_H */
deps/jemalloc/include/jemalloc/internal/tcache.h
deleted
100644 → 0
View file @
dda8cc18
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef
struct
tcache_bin_info_s
tcache_bin_info_t
;
typedef
struct
tcache_bin_s
tcache_bin_t
;
typedef
struct
tcache_s
tcache_t
;
typedef
struct
tcaches_s
tcaches_t
;
/*
* tcache pointers close to NULL are used to encode state information that is
* used for two purposes: preventing thread caching on a per thread basis and
* cleaning up during thread shutdown.
*/
#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
/*
* Absolute minimum number of cache slots for each small bin.
*/
#define TCACHE_NSLOTS_SMALL_MIN 20
/*
* Absolute maximum number of cache slots for each small bin in the thread
* cache. This is an additional constraint beyond that imposed as: twice the
* number of regions per run for this size class.
*
* This constant must be an even number.
*/
#define TCACHE_NSLOTS_SMALL_MAX 200
/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
/*
* TCACHE_GC_SWEEP is the approximate number of allocation events between
* full GC sweeps. Integer rounding may cause the actual number to be
* slightly higher, since GC is performed incrementally.
*/
#define TCACHE_GC_SWEEP 8192
/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
typedef
enum
{
tcache_enabled_false
=
0
,
/* Enable cast to/from bool. */
tcache_enabled_true
=
1
,
tcache_enabled_default
=
2
}
tcache_enabled_t
;
/*
* Read-only information associated with each element of tcache_t's tbins array
* is stored separately, mainly to reduce memory usage.
*/
struct
tcache_bin_info_s
{
unsigned
ncached_max
;
/* Upper limit on ncached. */
};
struct
tcache_bin_s
{
tcache_bin_stats_t
tstats
;
int
low_water
;
/* Min # cached since last GC. */
unsigned
lg_fill_div
;
/* Fill (ncached_max >> lg_fill_div). */
unsigned
ncached
;
/* # of cached objects. */
void
**
avail
;
/* Stack of available objects. */
};
struct
tcache_s
{
ql_elm
(
tcache_t
)
link
;
/* Used for aggregating stats. */
uint64_t
prof_accumbytes
;
/* Cleared after arena_prof_accum(). */
unsigned
ev_cnt
;
/* Event count since incremental GC. */
szind_t
next_gc_bin
;
/* Next bin to GC. */
tcache_bin_t
tbins
[
1
];
/* Dynamically sized. */
/*
* The pointer stacks associated with tbins follow as a contiguous
* array. During tcache initialization, the avail pointer in each
* element of tbins is initialized to point to the proper offset within
* this array.
*/
};
/* Linkage for list of available (previously used) explicit tcache IDs. */
struct
tcaches_s
{
union
{
tcache_t
*
tcache
;
tcaches_t
*
next
;
};
};
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern
bool
opt_tcache
;
extern
ssize_t
opt_lg_tcache_max
;
extern
tcache_bin_info_t
*
tcache_bin_info
;
/*
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
* large-object bins.
*/
extern
size_t
nhbins
;
/* Maximum cached size class. */
extern
size_t
tcache_maxclass
;
/*
* Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
* usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
* completely disjoint from this data structure. tcaches starts off as a sparse
* array, so it has no physical memory footprint until individual pages are
* touched. This allows the entire array to be allocated the first time an
* explicit tcache is created without a disproportionate impact on memory usage.
*/
extern
tcaches_t
*
tcaches
;
size_t
tcache_salloc
(
const
void
*
ptr
);
void
tcache_event_hard
(
tsd_t
*
tsd
,
tcache_t
*
tcache
);
void
*
tcache_alloc_small_hard
(
tsd_t
*
tsd
,
arena_t
*
arena
,
tcache_t
*
tcache
,
tcache_bin_t
*
tbin
,
szind_t
binind
);
void
tcache_bin_flush_small
(
tsd_t
*
tsd
,
tcache_t
*
tcache
,
tcache_bin_t
*
tbin
,
szind_t
binind
,
unsigned
rem
);
void
tcache_bin_flush_large
(
tsd_t
*
tsd
,
tcache_bin_t
*
tbin
,
szind_t
binind
,
unsigned
rem
,
tcache_t
*
tcache
);
void
tcache_arena_associate
(
tcache_t
*
tcache
,
arena_t
*
arena
);
void
tcache_arena_reassociate
(
tcache_t
*
tcache
,
arena_t
*
oldarena
,
arena_t
*
newarena
);
void
tcache_arena_dissociate
(
tcache_t
*
tcache
,
arena_t
*
arena
);
tcache_t
*
tcache_get_hard
(
tsd_t
*
tsd
);
tcache_t
*
tcache_create
(
tsd_t
*
tsd
,
arena_t
*
arena
);
void
tcache_cleanup
(
tsd_t
*
tsd
);
void
tcache_enabled_cleanup
(
tsd_t
*
tsd
);
void
tcache_stats_merge
(
tcache_t
*
tcache
,
arena_t
*
arena
);
bool
tcaches_create
(
tsd_t
*
tsd
,
unsigned
*
r_ind
);
void
tcaches_flush
(
tsd_t
*
tsd
,
unsigned
ind
);
void
tcaches_destroy
(
tsd_t
*
tsd
,
unsigned
ind
);
bool
tcache_boot
(
void
);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void
tcache_event
(
tsd_t
*
tsd
,
tcache_t
*
tcache
);
void
tcache_flush
(
void
);
bool
tcache_enabled_get
(
void
);
tcache_t
*
tcache_get
(
tsd_t
*
tsd
,
bool
create
);
void
tcache_enabled_set
(
bool
enabled
);
void
*
tcache_alloc_easy
(
tcache_bin_t
*
tbin
);
void
*
tcache_alloc_small
(
tsd_t
*
tsd
,
arena_t
*
arena
,
tcache_t
*
tcache
,
size_t
size
,
bool
zero
);
void
*
tcache_alloc_large
(
tsd_t
*
tsd
,
arena_t
*
arena
,
tcache_t
*
tcache
,
size_t
size
,
bool
zero
);
void
tcache_dalloc_small
(
tsd_t
*
tsd
,
tcache_t
*
tcache
,
void
*
ptr
,
szind_t
binind
);
void
tcache_dalloc_large
(
tsd_t
*
tsd
,
tcache_t
*
tcache
,
void
*
ptr
,
size_t
size
);
tcache_t
*
tcaches_get
(
tsd_t
*
tsd
,
unsigned
ind
);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
JEMALLOC_INLINE
void
tcache_flush
(
void
)
{
tsd_t
*
tsd
;
cassert
(
config_tcache
);
tsd
=
tsd_fetch
();
tcache_cleanup
(
tsd
);
}
JEMALLOC_INLINE
bool
tcache_enabled_get
(
void
)
{
tsd_t
*
tsd
;
tcache_enabled_t
tcache_enabled
;
cassert
(
config_tcache
);
tsd
=
tsd_fetch
();
tcache_enabled
=
tsd_tcache_enabled_get
(
tsd
);
if
(
tcache_enabled
==
tcache_enabled_default
)
{
tcache_enabled
=
(
tcache_enabled_t
)
opt_tcache
;
tsd_tcache_enabled_set
(
tsd
,
tcache_enabled
);
}
return
((
bool
)
tcache_enabled
);
}
JEMALLOC_INLINE
void
tcache_enabled_set
(
bool
enabled
)
{
tsd_t
*
tsd
;
tcache_enabled_t
tcache_enabled
;
cassert
(
config_tcache
);
tsd
=
tsd_fetch
();
tcache_enabled
=
(
tcache_enabled_t
)
enabled
;
tsd_tcache_enabled_set
(
tsd
,
tcache_enabled
);
if
(
!
enabled
)
tcache_cleanup
(
tsd
);
}
JEMALLOC_ALWAYS_INLINE
tcache_t
*
tcache_get
(
tsd_t
*
tsd
,
bool
create
)
{
tcache_t
*
tcache
;
if
(
!
config_tcache
)
return
(
NULL
);
tcache
=
tsd_tcache_get
(
tsd
);
if
(
!
create
)
return
(
tcache
);
if
(
unlikely
(
tcache
==
NULL
)
&&
tsd_nominal
(
tsd
))
{
tcache
=
tcache_get_hard
(
tsd
);
tsd_tcache_set
(
tsd
,
tcache
);
}
return
(
tcache
);
}
JEMALLOC_ALWAYS_INLINE
void
tcache_event
(
tsd_t
*
tsd
,
tcache_t
*
tcache
)
{
if
(
TCACHE_GC_INCR
==
0
)
return
;
tcache
->
ev_cnt
++
;
assert
(
tcache
->
ev_cnt
<=
TCACHE_GC_INCR
);
if
(
unlikely
(
tcache
->
ev_cnt
==
TCACHE_GC_INCR
))
tcache_event_hard
(
tsd
,
tcache
);
}
JEMALLOC_ALWAYS_INLINE
void
*
tcache_alloc_easy
(
tcache_bin_t
*
tbin
)
{
void
*
ret
;
if
(
unlikely
(
tbin
->
ncached
==
0
))
{
tbin
->
low_water
=
-
1
;
return
(
NULL
);
}
tbin
->
ncached
--
;
if
(
unlikely
((
int
)
tbin
->
ncached
<
tbin
->
low_water
))
tbin
->
low_water
=
tbin
->
ncached
;
ret
=
tbin
->
avail
[
tbin
->
ncached
];
return
(
ret
);
}
JEMALLOC_ALWAYS_INLINE
void
*
tcache_alloc_small
(
tsd_t
*
tsd
,
arena_t
*
arena
,
tcache_t
*
tcache
,
size_t
size
,
bool
zero
)
{
void
*
ret
;
szind_t
binind
;
size_t
usize
;
tcache_bin_t
*
tbin
;
binind
=
size2index
(
size
);
assert
(
binind
<
NBINS
);
tbin
=
&
tcache
->
tbins
[
binind
];
usize
=
index2size
(
binind
);
ret
=
tcache_alloc_easy
(
tbin
);
if
(
unlikely
(
ret
==
NULL
))
{
ret
=
tcache_alloc_small_hard
(
tsd
,
arena
,
tcache
,
tbin
,
binind
);
if
(
ret
==
NULL
)
return
(
NULL
);
}
assert
(
tcache_salloc
(
ret
)
==
usize
);
if
(
likely
(
!
zero
))
{
if
(
config_fill
)
{
if
(
unlikely
(
opt_junk_alloc
))
{
arena_alloc_junk_small
(
ret
,
&
arena_bin_info
[
binind
],
false
);
}
else
if
(
unlikely
(
opt_zero
))
memset
(
ret
,
0
,
usize
);
}
}
else
{
if
(
config_fill
&&
unlikely
(
opt_junk_alloc
))
{
arena_alloc_junk_small
(
ret
,
&
arena_bin_info
[
binind
],
true
);
}
memset
(
ret
,
0
,
usize
);
}
if
(
config_stats
)
tbin
->
tstats
.
nrequests
++
;
if
(
config_prof
)
tcache
->
prof_accumbytes
+=
usize
;
tcache_event
(
tsd
,
tcache
);
return
(
ret
);
}
JEMALLOC_ALWAYS_INLINE
void
*
tcache_alloc_large
(
tsd_t
*
tsd
,
arena_t
*
arena
,
tcache_t
*
tcache
,
size_t
size
,
bool
zero
)
{
void
*
ret
;
szind_t
binind
;
size_t
usize
;
tcache_bin_t
*
tbin
;
binind
=
size2index
(
size
);
usize
=
index2size
(
binind
);
assert
(
usize
<=
tcache_maxclass
);
assert
(
binind
<
nhbins
);
tbin
=
&
tcache
->
tbins
[
binind
];
ret
=
tcache_alloc_easy
(
tbin
);
if
(
unlikely
(
ret
==
NULL
))
{
/*
* Only allocate one large object at a time, because it's quite
* expensive to create one and not use it.
*/
ret
=
arena_malloc_large
(
arena
,
usize
,
zero
);
if
(
ret
==
NULL
)
return
(
NULL
);
}
else
{
if
(
config_prof
&&
usize
==
LARGE_MINCLASS
)
{
arena_chunk_t
*
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
ret
);
size_t
pageind
=
(((
uintptr_t
)
ret
-
(
uintptr_t
)
chunk
)
>>
LG_PAGE
);
arena_mapbits_large_binind_set
(
chunk
,
pageind
,
BININD_INVALID
);
}
if
(
likely
(
!
zero
))
{
if
(
config_fill
)
{
if
(
unlikely
(
opt_junk_alloc
))
memset
(
ret
,
0xa5
,
usize
);
else
if
(
unlikely
(
opt_zero
))
memset
(
ret
,
0
,
usize
);
}
}
else
memset
(
ret
,
0
,
usize
);
if
(
config_stats
)
tbin
->
tstats
.
nrequests
++
;
if
(
config_prof
)
tcache
->
prof_accumbytes
+=
usize
;
}
tcache_event
(
tsd
,
tcache
);
return
(
ret
);
}
JEMALLOC_ALWAYS_INLINE
void
tcache_dalloc_small
(
tsd_t
*
tsd
,
tcache_t
*
tcache
,
void
*
ptr
,
szind_t
binind
)
{
tcache_bin_t
*
tbin
;
tcache_bin_info_t
*
tbin_info
;
assert
(
tcache_salloc
(
ptr
)
<=
SMALL_MAXCLASS
);
if
(
config_fill
&&
unlikely
(
opt_junk_free
))
arena_dalloc_junk_small
(
ptr
,
&
arena_bin_info
[
binind
]);
tbin
=
&
tcache
->
tbins
[
binind
];
tbin_info
=
&
tcache_bin_info
[
binind
];
if
(
unlikely
(
tbin
->
ncached
==
tbin_info
->
ncached_max
))
{
tcache_bin_flush_small
(
tsd
,
tcache
,
tbin
,
binind
,
(
tbin_info
->
ncached_max
>>
1
));
}
assert
(
tbin
->
ncached
<
tbin_info
->
ncached_max
);
tbin
->
avail
[
tbin
->
ncached
]
=
ptr
;
tbin
->
ncached
++
;
tcache_event
(
tsd
,
tcache
);
}
JEMALLOC_ALWAYS_INLINE
void
tcache_dalloc_large
(
tsd_t
*
tsd
,
tcache_t
*
tcache
,
void
*
ptr
,
size_t
size
)
{
szind_t
binind
;
tcache_bin_t
*
tbin
;
tcache_bin_info_t
*
tbin_info
;
assert
((
size
&
PAGE_MASK
)
==
0
);
assert
(
tcache_salloc
(
ptr
)
>
SMALL_MAXCLASS
);
assert
(
tcache_salloc
(
ptr
)
<=
tcache_maxclass
);
binind
=
size2index
(
size
);
if
(
config_fill
&&
unlikely
(
opt_junk_free
))
arena_dalloc_junk_large
(
ptr
,
size
);
tbin
=
&
tcache
->
tbins
[
binind
];
tbin_info
=
&
tcache_bin_info
[
binind
];
if
(
unlikely
(
tbin
->
ncached
==
tbin_info
->
ncached_max
))
{
tcache_bin_flush_large
(
tsd
,
tbin
,
binind
,
(
tbin_info
->
ncached_max
>>
1
),
tcache
);
}
assert
(
tbin
->
ncached
<
tbin_info
->
ncached_max
);
tbin
->
avail
[
tbin
->
ncached
]
=
ptr
;
tbin
->
ncached
++
;
tcache_event
(
tsd
,
tcache
);
}
JEMALLOC_ALWAYS_INLINE
tcache_t
*
tcaches_get
(
tsd_t
*
tsd
,
unsigned
ind
)
{
tcaches_t
*
elm
=
&
tcaches
[
ind
];
if
(
unlikely
(
elm
->
tcache
==
NULL
))
elm
->
tcache
=
tcache_create
(
tsd
,
arena_choose
(
tsd
,
NULL
));
return
(
elm
->
tcache
);
}
#endif
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/tcache_externs.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H

#include "jemalloc/internal/size_classes.h"

extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;

extern cache_bin_info_t *tcache_bin_info;

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern unsigned nhbins;

/* Maximum cached size class. */
extern size_t tcache_maxclass;

/*
 * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
 * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are
 * completely disjoint from this data structure.  tcaches starts off as a sparse
 * array, so it has no physical memory footprint until individual pages are
 * touched.  This allows the entire array to be allocated the first time an
 * explicit tcache is created without a disproportionate impact on memory usage.
 */
extern tcaches_t *tcaches;

size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
    szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);
void tcache_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn);
void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);
void tcache_flush(tsd_t *tsd);
bool tsd_tcache_data_init(tsd_t *tsd);
bool tsd_tcache_enabled_data_init(tsd_t *tsd);

#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
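From application code, the explicit-tcache machinery described in the comment above is reached through jemalloc's public interface rather than these internal externs. A minimal sketch of that usage, assuming a program linked against jemalloc and its standard public API (mallctl, mallocx, dallocx, MALLOCX_TCACHE from <jemalloc/jemalloc.h>); the variable names are illustrative only:

/* Illustrative sketch: create an explicit tcache, allocate through it, destroy it. */
#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void) {
	unsigned tcache_ind;
	size_t sz = sizeof(tcache_ind);

	/* Ask jemalloc for a new explicit tcache ID (the "tcache.create" mallctl). */
	if (mallctl("tcache.create", &tcache_ind, &sz, NULL, 0) != 0) {
		fprintf(stderr, "tcache.create failed\n");
		return 1;
	}

	/* Route an allocation and its free through the explicit tcache. */
	void *p = mallocx(128, MALLOCX_TCACHE(tcache_ind));
	if (p != NULL) {
		dallocx(p, MALLOCX_TCACHE(tcache_ind));
	}

	/* Release the explicit tcache when done. */
	mallctl("tcache.destroy", NULL, NULL, &tcache_ind, sizeof(tcache_ind));
	return 0;
}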
deps/jemalloc/include/jemalloc/internal/tcache_inlines.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H

#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"

static inline bool
tcache_enabled_get(tsd_t *tsd) {
	return tsd_tcache_enabled_get(tsd);
}

static inline void
tcache_enabled_set(tsd_t *tsd, bool enabled) {
	bool was_enabled = tsd_tcache_enabled_get(tsd);

	if (!was_enabled && enabled) {
		tsd_tcache_data_init(tsd);
	} else if (was_enabled && !enabled) {
		tcache_cleanup(tsd);
	}
	/* Commit the state last.  Above calls check current state. */
	tsd_tcache_enabled_set(tsd, enabled);
	tsd_slow_update(tsd);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache) {
	if (TCACHE_GC_INCR == 0) {
		return;
	}

	if (unlikely(ticker_tick(&tcache->gc_ticker))) {
		tcache_event_hard(tsd, tcache);
	}
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    UNUSED size_t size, szind_t binind, bool zero, bool slow_path) {
	void *ret;
	cache_bin_t *bin;
	bool tcache_success;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	assert(binind < NBINS);
	bin = tcache_small_bin_get(tcache, binind);
	ret = cache_bin_alloc_easy(bin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		bool tcache_hard_success;
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
		    bin, binind, &tcache_hard_success);
		if (tcache_hard_success == false) {
			return NULL;
		}
	}

	assert(ret);
	/*
	 * Only compute usize if required.  The checks in the following if
	 * statement are all static.
	 */
	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
		usize = sz_index2size(binind);
		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
	}

	if (likely(!zero)) {
		if (slow_path && config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret, &bin_infos[binind],
				    false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &bin_infos[binind], true);
		}
		memset(ret, 0, usize);
	}

	if (config_stats) {
		bin->tstats.nrequests++;
	}
	if (config_prof) {
		tcache->prof_accumbytes += usize;
	}
	tcache_event(tsd, tcache);
	return ret;
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
	void *ret;
	cache_bin_t *bin;
	bool tcache_success;

	assert(binind >= NBINS && binind < nhbins);
	bin = tcache_large_bin_get(tcache, binind);
	ret = cache_bin_alloc_easy(bin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
		if (ret == NULL) {
			return NULL;
		}
	} else {
		size_t usize JEMALLOC_CC_SILENCE_INIT(0);

		/* Only compute usize on demand */
		if (config_prof || (slow_path && config_fill) ||
		    unlikely(zero)) {
			usize = sz_index2size(binind);
			assert(usize <= tcache_maxclass);
		}

		if (likely(!zero)) {
			if (slow_path && config_fill) {
				if (unlikely(opt_junk_alloc)) {
					memset(ret, JEMALLOC_ALLOC_JUNK,
					    usize);
				} else if (unlikely(opt_zero)) {
					memset(ret, 0, usize);
				}
			}
		} else {
			memset(ret, 0, usize);
		}

		if (config_stats) {
			bin->tstats.nrequests++;
		}
		if (config_prof) {
			tcache->prof_accumbytes += usize;
		}
	}

	tcache_event(tsd, tcache);
	return ret;
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	cache_bin_t *bin;
	cache_bin_info_t *bin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);

	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, &bin_infos[binind]);
	}

	bin = tcache_small_bin_get(tcache, binind);
	bin_info = &tcache_bin_info[binind];
	if (unlikely(bin->ncached == bin_info->ncached_max)) {
		tcache_bin_flush_small(tsd, tcache, bin, binind,
		    (bin_info->ncached_max >> 1));
	}
	assert(bin->ncached < bin_info->ncached_max);
	bin->ncached++;
	*(bin->avail - bin->ncached) = ptr;

	tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	cache_bin_t *bin;
	cache_bin_info_t *bin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		large_dalloc_junk(ptr, sz_index2size(binind));
	}

	bin = tcache_large_bin_get(tcache, binind);
	bin_info = &tcache_bin_info[binind];
	if (unlikely(bin->ncached == bin_info->ncached_max)) {
		tcache_bin_flush_large(tsd, bin, binind,
		    (bin_info->ncached_max >> 1), tcache);
	}
	assert(bin->ncached < bin_info->ncached_max);
	bin->ncached++;
	*(bin->avail - bin->ncached) = ptr;

	tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL)) {
		elm->tcache = tcache_create_explicit(tsd);
	}
	return elm->tcache;
}

#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */
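The deallocation paths above follow a simple policy: push the freed object onto a bounded per-thread stack, and when the bin is full, flush half of it back to the arena so room remains for bursts in either direction. The following is a standalone sketch of that flush-half-when-full idea only, not jemalloc's actual cache_bin_t; all names (toy_bin_t, toy_flush, toy_dalloc) are hypothetical:

/* Simplified illustration of a bounded per-thread free cache. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	void *slots[8];		/* cached objects */
	size_t ncached;		/* current fill */
	size_t ncached_max;	/* capacity */
} toy_bin_t;

/* Stand-in for handing cached objects back to the central allocator. */
static void
toy_flush(toy_bin_t *bin, size_t keep) {
	for (size_t i = keep; i < bin->ncached; i++) {
		free(bin->slots[i]);
	}
	bin->ncached = keep;
}

/* Cache a freed object; flush half the bin when it is full. */
static void
toy_dalloc(toy_bin_t *bin, void *ptr) {
	if (bin->ncached == bin->ncached_max) {
		toy_flush(bin, bin->ncached_max >> 1);
	}
	bin->slots[bin->ncached++] = ptr;
}

int
main(void) {
	toy_bin_t bin = { .ncached = 0, .ncached_max = 8 };
	for (int i = 0; i < 20; i++) {
		toy_dalloc(&bin, malloc(16));
	}
	printf("cached after 20 frees: %zu\n", bin.ncached);
	toy_flush(&bin, 0);
	return 0;
}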
deps/jemalloc/include/jemalloc/internal/tcache_structs.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H

#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/cache_bin.h"
#include "jemalloc/internal/ticker.h"

struct tcache_s {
	/*
	 * To minimize our cache-footprint, we put the frequently accessed data
	 * together at the start of this struct.
	 */

	/* Cleared after arena_prof_accum(). */
	uint64_t prof_accumbytes;
	/* Drives incremental GC. */
	ticker_t gc_ticker;
	/*
	 * The pointer stacks associated with bins follow as a contiguous array.
	 * During tcache initialization, the avail pointer in each element of
	 * tbins is initialized to point to the proper offset within this array.
	 */
	cache_bin_t bins_small[NBINS];

	/*
	 * This data is less hot; we can be a little less careful with our
	 * footprint here.
	 */

	/* Lets us track all the tcaches in an arena. */
	ql_elm(tcache_t) link;
	/*
	 * The descriptor lets the arena find our cache bins without seeing the
	 * tcache definition.  This enables arenas to aggregate stats across
	 * tcaches without having a tcache dependency.
	 */
	cache_bin_array_descriptor_t cache_bin_array_descriptor;

	/* The arena this tcache is associated with. */
	arena_t *arena;
	/* Next bin to GC. */
	szind_t next_gc_bin;
	/* For small bins, fill (ncached_max >> lg_fill_div). */
	uint8_t lg_fill_div[NBINS];
	/*
	 * We put the cache bins for large size classes at the end of the
	 * struct, since some of them might not get used.  This might end up
	 * letting us avoid touching an extra page if we don't have to.
	 */
	cache_bin_t bins_large[NSIZES - NBINS];
};

/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
	union {
		tcache_t *tcache;
		tcaches_t *next;
	};
};

#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
deps/jemalloc/include/jemalloc/internal/tcache_types.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
#define JEMALLOC_INTERNAL_TCACHE_TYPES_H
#include "jemalloc/internal/size_classes.h"
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;
/*
* tcache pointers close to NULL are used to encode state information that is
* used for two purposes: preventing thread caching on a per thread basis and
* cleaning up during thread shutdown.
*/
#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
/*
* Absolute minimum number of cache slots for each small bin.
*/
#define TCACHE_NSLOTS_SMALL_MIN 20
/*
* Absolute maximum number of cache slots for each small bin in the thread
* cache. This is an additional constraint beyond that imposed as: twice the
* number of regions per slab for this size class.
*
* This constant must be an even number.
*/
#define TCACHE_NSLOTS_SMALL_MAX 200
/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
/*
* TCACHE_GC_SWEEP is the approximate number of allocation events between
* full GC sweeps. Integer rounding may cause the actual number to be
* slightly higher, since GC is performed incrementally.
*/
#define TCACHE_GC_SWEEP 8192
/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
/* Used in TSD static initializer only. Real init in tcache_data_init(). */
#define TCACHE_ZERO_INITIALIZER {0}
/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
#define TCACHE_ENABLED_ZERO_INITIALIZER false
#endif
/* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
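TCACHE_GC_INCR above spreads the sweep interval evenly across the small bins, rounding up so the increment is never zero. A short worked example, assuming a hypothetical configuration with NBINS = 36 (the real value comes from size_classes.h and varies by build):

/* Hypothetical NBINS = 36, for illustration only.                     */
/* TCACHE_GC_SWEEP / NBINS      = 8192 / 36 = 227 (integer division)   */
/* TCACHE_GC_SWEEP / NBINS == 0 -> false, so the macro adds 1          */
/* TCACHE_GC_INCR               = 227 + 1   = 228 events per GC step   */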
deps/jemalloc/include/jemalloc/internal/ticker.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_TICKER_H
#define JEMALLOC_INTERNAL_TICKER_H
#include "jemalloc/internal/util.h"
/**
* A ticker makes it easy to count-down events until some limit. You
* ticker_init the ticker to trigger every nticks events. You then notify it
* that an event has occurred with calls to ticker_tick (or that nticks events
* have occurred with a call to ticker_ticks), which will return true (and reset
* the counter) if the countdown hit zero.
*/
typedef struct {
	int32_t tick;
	int32_t nticks;
} ticker_t;

static inline void
ticker_init(ticker_t *ticker, int32_t nticks) {
	ticker->tick = nticks;
	ticker->nticks = nticks;
}

static inline void
ticker_copy(ticker_t *ticker, const ticker_t *other) {
	*ticker = *other;
}

static inline int32_t
ticker_read(const ticker_t *ticker) {
	return ticker->tick;
}
/*
* Not intended to be a public API. Unfortunately, on x86, neither gcc nor
* clang seems smart enough to turn
* ticker->tick -= nticks;
* if (unlikely(ticker->tick < 0)) {
* fixup ticker
* return true;
* }
* return false;
* into
* subq %nticks_reg, (%ticker_reg)
* js fixup ticker
*
* unless we force "fixup ticker" out of line. In that case, gcc gets it right,
* but clang now does worse than before. So, on x86 with gcc, we force it out
* of line, but otherwise let the inlining occur. Ordinarily this wouldn't be
* worth the hassle, but this is on the fast path of both malloc and free (via
* tcache_event).
*/
#if defined(__GNUC__) && !defined(__clang__) \
&& (defined(__x86_64__) || defined(__i386__))
JEMALLOC_NOINLINE
#endif
static bool
ticker_fixup(ticker_t *ticker) {
	ticker->tick = ticker->nticks;
	return true;
}

static inline bool
ticker_ticks(ticker_t *ticker, int32_t nticks) {
	ticker->tick -= nticks;
	if (unlikely(ticker->tick < 0)) {
		return ticker_fixup(ticker);
	}
	return false;
}

static inline bool
ticker_tick(ticker_t *ticker) {
	return ticker_ticks(ticker, 1);
}

#endif /* JEMALLOC_INTERNAL_TICKER_H */
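Inside jemalloc this ticker drives tcache_event(): every allocation or deallocation ticks the counter, and tcache_event_hard() runs when the countdown expires. A minimal usage sketch of the API described in the header comment (ticker_init / ticker_tick); it assumes the definitions above and their jemalloc dependencies are available to the including translation unit, and the period of 3 is arbitrary:

/* Illustrative sketch: count events and report each time the countdown expires. */
#include <stdio.h>

int
main(void) {
	ticker_t t;
	ticker_init(&t, 3);	/* nticks = 3 */

	for (int i = 1; i <= 10; i++) {
		/* ticker_tick() returns true when the countdown runs out and resets it. */
		if (ticker_tick(&t)) {
			printf("event %d: countdown elapsed\n", i);
		}
	}
	return 0;
}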
deps/jemalloc/include/jemalloc/internal/tsd.h
View file @
fb1f4f4e
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_TSD_H
#define JEMALLOC_INTERNAL_TSD_H
/* Maximum number of malloc_tsd users with cleanup functions. */
#define MALLOC_TSD_CLEANUPS_MAX 2
typedef bool (*malloc_tsd_cleanup_t)(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
typedef struct tsd_init_block_s tsd_init_block_t;
typedef struct tsd_init_head_s tsd_init_head_t;
#endif
typedef struct tsd_s tsd_t;

typedef enum {
	tsd_state_uninitialized,
	tsd_state_nominal,
	tsd_state_purgatory,
	tsd_state_reincarnated
} tsd_state_t;
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/prof_types.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/witness.h"
/*
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
* are five macros that support (at least) three use cases: file-private,
* library-private, and library-private inlined. Following is an example
* library-private tsd variable:
*
* In example.h:
* typedef struct {
* int x;
* int y;
* } example_t;
* #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
* malloc_tsd_types(example_, example_t)
* malloc_tsd_protos(, example_, example_t)
* malloc_tsd_externs(example_, example_t)
* In example.c:
* malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
* malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
* example_tsd_cleanup)
*
* The result is a set of generated functions, e.g.:
*
* bool example_tsd_boot(void) {...}
* example_t *example_tsd_get() {...}
* void example_tsd_set(example_t *val) {...}
*
* Note that all of the functions deal in terms of (a_type *) rather than
* (a_type) so that it is possible to support non-pointer types (unlike
* pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is
* cast to (void *). This means that the cleanup function needs to cast the
* function argument to (a_type *), then dereference the resulting pointer to
* access fields, e.g.
* Thread-Specific-Data layout
* --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
* s: state
* e: tcache_enabled
* m: thread_allocated (config_stats)
* f: thread_deallocated (config_stats)
* p: prof_tdata (config_prof)
* c: rtree_ctx (rtree cache accessed on deallocation)
* t: tcache
* --- data not accessed on tcache fast path: arena-related fields ---
* d: arenas_tdata_bypass
* r: reentrancy_level
* x: narenas_tdata
* i: iarena
* a: arena
* o: arenas_tdata
* Loading TSD data is on the critical path of basically all malloc operations.
* In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
* Use a compact layout to reduce cache footprint.
* +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+
* |---------------------------- 1st cacheline ----------------------------|
* | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] |
* |---------------------------- 2nd cacheline ----------------------------|
* | [c * 64 ........ ........ ........ ........ ........ ........ .......] |
* |---------------------------- 3nd cacheline ----------------------------|
* | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
* +-------------------------------------------------------------------------+
* Note: the entire tcache is embedded into TSD and spans multiple cachelines.
*
* void
* example_tsd_cleanup(void *arg)
* {
* example_t *example = (example_t *)arg;
*
* example->x = 42;
* [...]
* if ([want the cleanup function to be called again])
* example_tsd_set(example);
* }
*
* If example_tsd_set() is called within example_tsd_cleanup(), it will be
* called again. This is similar to how pthreads TSD destruction works, except
* that pthreads only calls the cleanup function again if the value was set to
* non-NULL.
* The last 3 members (i, a and o) before tcache isn't really needed on tcache
* fast path. However we have a number of unused tcache bins and witnesses
* (never touched unless config_debug) at the end of tcache, so we place them
* there to avoid breaking the cachelines and possibly paging in an extra page.
*/
/* malloc_tsd_types(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_types(a_name, a_type)
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_types(a_name, a_type)
#elif (defined(_WIN32))
#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
bool initialized; \
a_type val; \
} a_name##tsd_wrapper_t;
#ifdef JEMALLOC_JET
typedef void (*test_callback_t)(int *);
# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
# define MALLOC_TEST_TSD \
O(test_data, int, int) \
O(test_callback, test_callback_t, int)
# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL
#else
#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
bool initialized; \
a_type val; \
} a_name##tsd_wrapper_t;
# define MALLOC_TEST_TSD
# define MALLOC_TEST_TSD_INITIALIZER
#endif
/* malloc_tsd_protos(). */
#define malloc_tsd_protos(a_attr, a_name, a_type) \
a_attr bool \
a_name##tsd_boot0(void); \
a_attr void \
a_name##tsd_boot1(void); \
a_attr bool \
a_name##tsd_boot(void); \
a_attr a_type * \
a_name##tsd_get(void); \
a_attr void \
a_name##tsd_set(a_type *val);
/* malloc_tsd_externs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##tsd_tls; \
extern __thread bool a_name##tsd_initialized; \
extern bool a_name##tsd_booted;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##tsd_tls; \
extern pthread_key_t a_name##tsd_tsd; \
extern bool a_name##tsd_booted;
#elif (defined(_WIN32))
#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##tsd_tsd; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#else
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##tsd_tsd; \
extern tsd_init_head_t a_name##tsd_init_head; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#endif
/* O(name, type, nullable type) */
#define MALLOC_TSD \
O(tcache_enabled, bool, bool) \
O(arenas_tdata_bypass, bool, bool) \
O(reentrancy_level, int8_t, int8_t) \
O(narenas_tdata, uint32_t, uint32_t) \
O(offset_state, uint64_t, uint64_t) \
O(thread_allocated, uint64_t, uint64_t) \
O(thread_deallocated, uint64_t, uint64_t) \
O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
O(iarena, arena_t *, arena_t *) \
O(arena, arena_t *, arena_t *) \
O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\
O(tcache, tcache_t, tcache_t) \
O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
MALLOC_TEST_TSD
/* malloc_tsd_data(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##tsd_tls = a_initializer; \
a_attr __thread bool JEMALLOC_TLS_MODEL \
a_name##tsd_initialized = false; \
a_attr bool a_name##tsd_booted = false;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##tsd_tls = a_initializer; \
a_attr pthread_key_t a_name##tsd_tsd; \
a_attr bool a_name##tsd_booted = false;
#elif (defined(_WIN32))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr DWORD a_name##tsd_tsd; \
a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
false, \
a_initializer \
}; \
a_attr bool a_name##tsd_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##tsd_tsd; \
a_attr tsd_init_head_t a_name##tsd_init_head = { \
ql_head_initializer(blocks), \
MALLOC_MUTEX_INITIALIZER \
}; \
a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
false, \
a_initializer \
}; \
a_attr bool a_name##tsd_booted = false;
#endif
/* malloc_tsd_funcs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */
\
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
\
if (a_name##tsd_initialized) { \
a_name##tsd_initialized = false; \
a_cleanup(&a_name##tsd_tls); \
} \
return (a_name##tsd_initialized); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
&a_name##tsd_cleanup_wrapper); \
} \
a_name##tsd_booted = true; \
return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
\
/* Do nothing. */
\
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
return (a_name##tsd_boot0()); \
} \
/* Get/set. */
\
a_attr a_type * \
a_name##tsd_get(void) \
{ \
\
assert(a_name##tsd_booted); \
return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
\
assert(a_name##tsd_booted); \
a_name##tsd_tls = (*val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
a_name##tsd_initialized = true; \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */
\
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
if (a_cleanup != malloc_tsd_no_cleanup) { \
if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \
0) \
return (true); \
} \
a_name##tsd_booted = true; \
return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
\
/* Do nothing. */
\
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
return (a_name##tsd_boot0()); \
} \
/* Get/set. */
\
a_attr a_type * \
a_name##tsd_get(void) \
{ \
\
assert(a_name##tsd_booted); \
return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
\
assert(a_name##tsd_booted); \
a_name##tsd_tls = (*val); \
if (a_cleanup != malloc_tsd_no_cleanup) { \
if (pthread_setspecific(a_name##tsd_tsd, \
(void *)(&a_name##tsd_tls))) { \
malloc_write("<jemalloc>: Error" \
" setting TSD for "#a_name"\n"); \
if (opt_abort) \
abort(); \
} \
} \
}
#elif (defined(_WIN32))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */
\
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
DWORD error = GetLastError(); \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
TlsGetValue(a_name##tsd_tsd); \
SetLastError(error); \
\
if (wrapper == NULL) \
return (false); \
if (a_cleanup != malloc_tsd_no_cleanup && \
wrapper->initialized) { \
wrapper->initialized = false; \
a_cleanup(&wrapper->val); \
if (wrapper->initialized) { \
/* Trigger another cleanup round. */
\
return (true); \
} \
} \
malloc_tsd_dalloc(wrapper); \
return (false); \
} \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
" TSD for "#a_name"\n"); \
abort(); \
} \
} \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(void) \
{ \
DWORD error = GetLastError(); \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
TlsGetValue(a_name##tsd_tsd); \
SetLastError(error); \
\
if (unlikely(wrapper == NULL)) { \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} else { \
wrapper->initialized = false; \
wrapper->val = a_initializer; \
} \
a_name##tsd_wrapper_set(wrapper); \
} \
return (wrapper); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
a_name##tsd_tsd = TlsAlloc(); \
if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
return (true); \
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
&a_name##tsd_cleanup_wrapper); \
} \
a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
a_name##tsd_booted = true; \
return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
a_name##tsd_wrapper_t *wrapper; \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} \
memcpy(wrapper, &a_name##tsd_boot_wrapper, \
sizeof(a_name##tsd_wrapper_t)); \
a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
if (a_name##tsd_boot0()) \
return (true); \
a_name##tsd_boot1(); \
return (false); \
} \
/* Get/set. */
\
a_attr a_type * \
a_name##tsd_get(void) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(); \
return (&wrapper->val); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
}
#else
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */
\
a_attr void \
a_name##tsd_cleanup_wrapper(void *arg) \
{ \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
\
if (a_cleanup != malloc_tsd_no_cleanup && \
wrapper->initialized) { \
wrapper->initialized = false; \
a_cleanup(&wrapper->val); \
if (wrapper->initialized) { \
/* Trigger another cleanup round. */
\
if (pthread_setspecific(a_name##tsd_tsd, \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error" \
" setting TSD for "#a_name"\n"); \
if (opt_abort) \
abort(); \
} \
return; \
} \
} \
malloc_tsd_dalloc(wrapper); \
} \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
if (pthread_setspecific(a_name##tsd_tsd, \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
" TSD for "#a_name"\n"); \
abort(); \
} \
} \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(void) \
{ \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
pthread_getspecific(a_name##tsd_tsd); \
\
if (unlikely(wrapper == NULL)) { \
tsd_init_block_t block; \
wrapper = tsd_init_check_recursion( \
&a_name##tsd_init_head, &block); \
if (wrapper) \
return (wrapper); \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
block.data = wrapper; \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} else { \
wrapper->initialized = false; \
wrapper->val = a_initializer; \
} \
a_name##tsd_wrapper_set(wrapper); \
tsd_init_finish(&a_name##tsd_init_head, &block); \
} \
return (wrapper); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
if (pthread_key_create(&a_name##tsd_tsd, \
a_name##tsd_cleanup_wrapper) != 0) \
return (true); \
a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
a_name##tsd_booted = true; \
return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
a_name##tsd_wrapper_t *wrapper; \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} \
memcpy(wrapper, &a_name##tsd_boot_wrapper, \
sizeof(a_name##tsd_wrapper_t)); \
a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
if (a_name##tsd_boot0()) \
return (true); \
a_name##tsd_boot1(); \
return (false); \
} \
/* Get/set. */
\
a_attr a_type * \
a_name##tsd_get(void) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(); \
return (&wrapper->val); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
}
#endif
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
struct tsd_init_block_s {
	ql_elm(tsd_init_block_t) link;
	pthread_t thread;
	void *data;
};

struct tsd_init_head_s {
	ql_head(tsd_init_block_t) blocks;
	malloc_mutex_t lock;
};
#endif
#define MALLOC_TSD \
/* O(name, type) */
\
O(tcache, tcache_t *) \
O(thread_allocated, uint64_t) \
O(thread_deallocated, uint64_t) \
O(prof_tdata, prof_tdata_t *) \
O(arena, arena_t *) \
O(arenas_cache, arena_t **) \
O(narenas_cache, unsigned) \
O(arenas_cache_bypass, bool) \
O(tcache_enabled, tcache_enabled_t) \
O(quarantine, quarantine_t *) \
#define TSD_INITIALIZER { \
	tsd_state_uninitialized, \
	NULL, \
	TCACHE_ENABLED_ZERO_INITIALIZER, \
	false, \
	0, \
	0, \
	0, \
	0, \
	0, \
	NULL, \
	RTREE_CTX_ZERO_INITIALIZER, \
	NULL, \
	NULL, \
	0, \
	false, \
	tcache_enabled_default, \
	NULL \
	NULL, \
	TCACHE_ZERO_INITIALIZER, \
	WITNESS_TSD_INITIALIZER \
	MALLOC_TEST_TSD_INITIALIZER \
}
enum {
	tsd_state_nominal = 0,		/* Common case --> jnz. */
	tsd_state_nominal_slow = 1,	/* Initialized but on slow path. */
	/* the above 2 nominal states should be lower values. */
	tsd_state_nominal_max = 1,	/* used for comparison only. */
	tsd_state_minimal_initialized = 2,
	tsd_state_purgatory = 3,
	tsd_state_reincarnated = 4,
	tsd_state_uninitialized = 5
};

/* Manually limit tsd_state_t to a single byte. */
typedef uint8_t tsd_state_t;
/* The actual tsd. */
struct tsd_s {
	/*
	 * The contents should be treated as totally opaque outside the tsd
	 * module.  Access any thread-local state through the getters and
	 * setters below.
	 */
	tsd_state_t state;
#define O(n, t) \
	t n;
#define O(n, t, nt) \
	t use_a_getter_or_setter_instead_##n;
MALLOC_TSD
#undef O
};

static const tsd_t tsd_initializer = TSD_INITIALIZER;

malloc_tsd_types(, tsd_t)
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *arg);
void malloc_tsd_cleanup_register(bool (*f)(void));
bool malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
void *tsd_init_check_recursion(tsd_init_head_t *head,
    tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
/*
* Wrapper around tsd_t that makes it possible to avoid implicit conversion
* between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
* explicitly converted to tsd_t, which is non-nullable.
*/
struct tsdn_s {
	tsd_t tsd;
};
#define TSDN_NULL ((tsdn_t *)0)
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd) {
	return (tsdn_t *)tsd;
}

JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn) {
	return tsdn == NULL;
}

JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn) {
	assert(!tsdn_null(tsdn));

	return &tsdn->tsd;
}
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_cleanup_register(bool (*f)(void));
tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
void tsd_cleanup(void *arg);
tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
void tsd_slow_update(tsd_t *tsd);
/*
* We put the platform-specific data declarations and inlines into their own
* header files to avoid cluttering this file. They define tsd_boot0,
* tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set.
*/
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#include "jemalloc/internal/tsd_malloc_thread_cleanup.h"
#elif (defined(JEMALLOC_TLS))
#include "jemalloc/internal/tsd_tls.h"
#elif (defined(_WIN32))
#include "jemalloc/internal/tsd_win.h"
#else
#include "jemalloc/internal/tsd_generic.h"
#endif
void tsd_cleanup(void *arg);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
/*
* tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of
* foo. This omits some safety checks, and so can be used during tsd
* initialization and cleanup.
*/
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get_unsafe(tsd_t *tsd) { \
return &tsd->use_a_getter_or_setter_instead_##n; \
}
MALLOC_TSD
#undef O
/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) { \
assert(tsd->state == tsd_state_nominal || \
tsd->state == tsd_state_nominal_slow || \
tsd->state == tsd_state_reincarnated || \
tsd->state == tsd_state_minimal_initialized); \
return tsd_##n##p_get_unsafe(tsd); \
}
MALLOC_TSD
#undef O
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
/*
* tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn
* isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type.
*/
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE nt * \
tsdn_##n##p_get(tsdn_t *tsdn) { \
if (tsdn_null(tsdn)) { \
return NULL; \
} \
tsd_t *tsd = tsdn_tsd(tsdn); \
return (nt *)tsd_##n##p_get(tsd); \
}
MALLOC_TSD
#undef O
tsd_t *tsd_fetch(void);
bool tsd_nominal(tsd_t *tsd);
#define O(n, t) \
t *tsd_##n##p_get(tsd_t *tsd); \
t tsd_##n##_get(tsd_t *tsd); \
void tsd_##n##_set(tsd_t *tsd, t n);
/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) { \
	return *tsd_##n##p_get(tsd); \
}
MALLOC_TSD
#undef O
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
malloc_tsd_externs(, tsd_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer,
    tsd_cleanup)
/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE void \
tsd_##n##_set(tsd_t *tsd, t val) { \
assert(tsd->state != tsd_state_reincarnated && \
tsd->state != tsd_state_minimal_initialized); \
*tsd_##n##p_get(tsd) = val; \
}
MALLOC_TSD
#undef O
JEMALLOC_ALWAYS_INLINE void
tsd_assert_fast(tsd_t *tsd) {
	assert(!malloc_slow && tsd_tcache_enabled_get(tsd) &&
	    tsd_reentrancy_level_get(tsd) == 0);
}

JEMALLOC_ALWAYS_INLINE bool
tsd_fast(tsd_t *tsd) {
	bool fast = (tsd->state == tsd_state_nominal);
	if (fast) {
		tsd_assert_fast(tsd);
	}
	return fast;
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void) {
	tsd_t *tsd = tsd_get();
tsd_fetch_impl(bool init, bool minimal) {
	tsd_t *tsd = tsd_get(init);

	if (!init && tsd_get_allocates() && tsd == NULL) {
		return NULL;
	}
	assert(tsd != NULL);

	if (unlikely(tsd->state != tsd_state_nominal)) {
		if (tsd->state == tsd_state_uninitialized) {
			tsd->state = tsd_state_nominal;
			/* Trigger cleanup handler registration. */
			tsd_set(tsd);
		} else if (tsd->state == tsd_state_purgatory) {
			tsd->state = tsd_state_reincarnated;
			tsd_set(tsd);
		} else
			assert(tsd->state == tsd_state_reincarnated);
		return tsd_fetch_slow(tsd, minimal);
	}
	assert(tsd_fast(tsd));
	tsd_assert_fast(tsd);

	return tsd;
}
	return (tsd);
/* Get a minimal TSD that requires no cleanup. See comments in free(). */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_min(void) {
	return tsd_fetch_impl(true, true);
}

JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd) {
/* For internal background threads use only. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_internal_fetch(void) {
	tsd_t *tsd = tsd_fetch_min();
	/* Use reincarnated state to prevent full initialization. */
	tsd->state = tsd_state_reincarnated;

	return (tsd->state == tsd_state_nominal);
	return tsd;
}
#define O(n, t) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) \
{ \
\
return (&tsd->n); \
} \
\
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) \
{ \
\
return (*tsd_##n##p_get(tsd)); \
} \
\
JEMALLOC_ALWAYS_INLINE void \
tsd_##n##_set(tsd_t *tsd, t n) \
{ \
\
assert(tsd->state == tsd_state_nominal); \
tsd->n = n; \
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void) {
	return tsd_fetch_impl(true, false);
}

static inline bool
tsd_nominal(tsd_t *tsd) {
	return (tsd->state <= tsd_state_nominal_max);
}
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void) {
	if (!tsd_booted_get()) {
		return NULL;
	}

	return tsd_tsdn(tsd_fetch_impl(false, false));
}

JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsd_rtree_ctx(tsd_t *tsd) {
	return tsd_rtree_ctxp_get(tsd);
}

JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
	/*
	 * If tsd cannot be accessed, initialize the fallback rtree_ctx and
	 * return a pointer to it.
	 */
	if (unlikely(tsdn_null(tsdn))) {
		rtree_ctx_data_init(fallback);
		return fallback;
	}
	return tsd_rtree_ctx(tsdn_tsd(tsdn));
}
MALLOC_TSD
#undef O
#endif
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif
/* JEMALLOC_INTERNAL_TSD_H */
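The O()-generated accessors declared above are how the rest of jemalloc reads and writes thread-local state: each field listed in MALLOC_TSD gets tsd_<field>_get, tsd_<field>_set, and tsd_<field>p_get wrappers. A hedged sketch of how an internal call site uses that pattern, assuming the tsd headers are already included; thread_allocated is one of the fields in the MALLOC_TSD list above, while example_account_alloc is a hypothetical function name:

/* Illustrative sketch of the generated getter/setter pattern. */
static void
example_account_alloc(size_t usize) {
	tsd_t *tsd = tsd_fetch();	/* this thread's TSD */

	/* Read, update, and write back a per-thread counter. */
	uint64_t allocated = tsd_thread_allocated_get(tsd);
	tsd_thread_allocated_set(tsd, allocated + usize);
}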
deps/jemalloc/include/jemalloc/internal/tsd_generic.h
0 → 100644
View file @
fb1f4f4e
#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H
#error This file should be included only once, by tsd.h.
#endif
#define JEMALLOC_INTERNAL_TSD_GENERIC_H
typedef struct tsd_init_block_s tsd_init_block_t;
struct tsd_init_block_s {
	ql_elm(tsd_init_block_t) link;
	pthread_t thread;
	void *data;
};

/* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */
typedef struct tsd_init_head_s tsd_init_head_t;

typedef struct {
	bool initialized;
	tsd_t val;
} tsd_wrapper_t;

void *tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);

extern pthread_key_t tsd_tsd;
extern tsd_init_head_t tsd_init_head;
extern tsd_wrapper_t tsd_boot_wrapper;
extern bool tsd_booted;

/* Initialization/cleanup. */
JEMALLOC_ALWAYS_INLINE void
tsd_cleanup_wrapper(void *arg) {
	tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg;

	if (wrapper->initialized) {
		wrapper->initialized = false;
		tsd_cleanup(&wrapper->val);
		if (wrapper->initialized) {
			/* Trigger another cleanup round. */
			if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
				malloc_write("<jemalloc>: Error setting TSD\n");
				if (opt_abort) {
					abort();
				}
			}
			return;
		}
	}
	malloc_tsd_dalloc(wrapper);
}

JEMALLOC_ALWAYS_INLINE void
tsd_wrapper_set(tsd_wrapper_t *wrapper) {
	if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
		malloc_write("<jemalloc>: Error setting TSD\n");
		abort();
	}
}

JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
tsd_wrapper_get(bool init) {
	tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);

	if (init && unlikely(wrapper == NULL)) {
		tsd_init_block_t block;
		wrapper = (tsd_wrapper_t *)
		    tsd_init_check_recursion(&tsd_init_head, &block);
		if (wrapper) {
			return wrapper;
		}
		wrapper = (tsd_wrapper_t *)
		    malloc_tsd_malloc(sizeof(tsd_wrapper_t));
		block.data = (void *)wrapper;
		if (wrapper == NULL) {
			malloc_write("<jemalloc>: Error allocating TSD\n");
			abort();
		} else {
			wrapper->initialized = false;
			tsd_t initializer = TSD_INITIALIZER;
			wrapper->val = initializer;
		}
		tsd_wrapper_set(wrapper);
		tsd_init_finish(&tsd_init_head, &block);
	}
	return wrapper;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
	if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) {
		return true;
	}
	tsd_wrapper_set(&tsd_boot_wrapper);
	tsd_booted = true;
	return false;
}

JEMALLOC_ALWAYS_INLINE void
tsd_boot1(void) {
	tsd_wrapper_t *wrapper;
	wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t));
	if (wrapper == NULL) {
		malloc_write("<jemalloc>: Error allocating TSD\n");
		abort();
	}
	tsd_boot_wrapper.initialized = false;
	tsd_cleanup(&tsd_boot_wrapper.val);
	wrapper->initialized = false;
	tsd_t initializer = TSD_INITIALIZER;
	wrapper->val = initializer;
	tsd_wrapper_set(wrapper);
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot(void) {
	if (tsd_boot0()) {
		return true;
	}
	tsd_boot1();
	return false;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_booted_get(void) {
	return tsd_booted;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_get_allocates(void) {
	return true;
}

/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(bool init) {
	tsd_wrapper_t *wrapper;

	assert(tsd_booted);
	wrapper = tsd_wrapper_get(init);
	if (tsd_get_allocates() && !init && wrapper == NULL) {
		return NULL;
	}
	return &wrapper->val;
}

JEMALLOC_ALWAYS_INLINE void
tsd_set(tsd_t *val) {
	tsd_wrapper_t *wrapper;

	assert(tsd_booted);
	wrapper = tsd_wrapper_get(true);
	if (likely(&wrapper->val != val)) {
		wrapper->val = *(val);
	}
	wrapper->initialized = true;
}