Commit 1f72ec7d authored by flowly, committed by GitHub

Merge pull request #1 from antirez/unstable

update to upstream
parents dfc98dcc f917e0da
#!/bin/sh
# For each symbol listed in the input file (the leading token of each line,
# up to the first ':'), emit an #undef for its je_-prefixed name.
for nm in `cat $1` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "#undef je_${n}"
done
/* List definitions. */
#define ql_head(a_type) \
struct { \
    a_type *qlh_first; \
@@ -40,8 +40,10 @@ struct { \
    (a_qr_b)->a_field.qre_prev = t; \
} while (0)

/*
 * qr_meld() and qr_split() are functionally equivalent, so there's no need to
 * have two copies of the code.
 */
#define qr_split(a_qr_a, a_qr_b, a_field) \
    qr_meld((a_qr_a), (a_qr_b), a_field)

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct quarantine_obj_s quarantine_obj_t;
typedef struct quarantine_s quarantine_t;
/* Default per thread quarantine size if valgrind is enabled. */
#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)

@@ -8,17 +11,50 @@
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct quarantine_obj_s {
    void *ptr;
    size_t usize;
};

struct quarantine_s {
    size_t curbytes;
    size_t curobjs;
    size_t first;
#define LG_MAXOBJS_INIT 10
    size_t lg_maxobjs;
    quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
};
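An illustrative sketch of the objs[1] dynamically sized ring buffer idiom above (editorial addition; the helper name and direct malloc() call are hypothetical, since jemalloc sizes and allocates this structure internally):

#include <stddef.h>    /* offsetof */
#include <stdlib.h>    /* malloc */

static quarantine_t *
quarantine_alloc_sketch(size_t lg_maxobjs)
{
    /* Header plus (1 << lg_maxobjs) trailing ring-buffer slots. */
    size_t size = offsetof(quarantine_t, objs) +
        (((size_t)1 << lg_maxobjs) * sizeof(quarantine_obj_t));

    return ((quarantine_t *)malloc(size));
}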
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void quarantine_alloc_hook_work(tsd_t *tsd);
void quarantine(tsd_t *tsd, void *ptr);
void quarantine_cleanup(tsd_t *tsd);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void quarantine_alloc_hook(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
    tsd_t *tsd;

    assert(config_fill && opt_quarantine);

    tsd = tsd_fetch();
    if (tsd_quarantine_get(tsd) == NULL)
        quarantine_alloc_hook_work(tsd);
}
#endif
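A hedged sketch of the intended call pattern (editorial addition; the caller function is hypothetical): allocation paths gate on the same config_fill && opt_quarantine condition asserted above, so a thread's quarantine is created lazily before it is first needed.

static void
alloc_path_sketch(void)
{

    if (config_fill && unlikely(opt_quarantine))
        quarantine_alloc_hook();
}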
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

@@ -22,10 +22,6 @@
#ifndef RB_H_
#define RB_H_
#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \

@@ -46,7 +42,6 @@ struct { \
#define rb_tree(a_type) \
struct { \
    a_type *rbt_root; \
}

/* Left accessors. */

@@ -83,6 +78,15 @@ struct { \
    (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)

/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
    /* Bookkeeping bit cannot be used by node pointer. */ \
    assert(((uintptr_t)(a_node) & 0x1) == 0); \
    rbtn_left_set(a_type, a_field, (a_node), NULL); \
    rbtn_right_set(a_type, a_field, (a_node), NULL); \
    rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
#else
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \

@@ -103,28 +107,26 @@ struct { \
#define rbtn_black_set(a_type, a_field, a_node) do { \
    (a_node)->a_field.rbn_red = false; \
} while (0)

/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
    rbtn_left_set(a_type, a_field, (a_node), NULL); \
    rbtn_right_set(a_type, a_field, (a_node), NULL); \
    rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
#endif
/* Tree initializer. */
#define rb_new(a_type, a_field, a_rbt) do { \
    (a_rbt)->rbt_root = NULL; \
} while (0)
/* Internal utility macros. */
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
    (r_node) = (a_root); \
    if ((r_node) != NULL) { \
        for (; \
          rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
          (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
        } \
    } \
@@ -132,10 +134,9 @@ struct { \
#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
    (r_node) = (a_root); \
    if ((r_node) != NULL) { \
        for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
          (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
        } \
    } \
} while (0)
@@ -162,6 +163,8 @@ struct { \
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
a_attr bool \
a_prefix##empty(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree); \
a_attr a_type * \

@@ -171,11 +174,11 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
a_attr void \

@@ -185,7 +188,10 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
    a_rbt_type *, a_type *, void *), void *arg); \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
    a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
a_attr void \
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
    void *arg);
/*
 * The rb_gen() macro generates a type-specific red-black tree implementation,

@@ -202,7 +208,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 *   int (a_cmp *)(a_type *a_node, a_type *a_other);
 *                         ^^^^^^
 *                         or a_key
 * Interpretation of comparison function return values:
 *   -1 : a_node <  a_other
 *    0 : a_node == a_other
 *    1 : a_node >  a_other

@@ -228,6 +234,13 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 *   Args:
 *     tree: Pointer to an uninitialized red-black tree object.
 *
 *   static bool
 *   ex_empty(ex_t *tree);
 *       Description: Determine whether tree is empty.
 *       Args:
 *         tree: Pointer to an initialized red-black tree object.
 *       Ret: True if tree is empty, false otherwise.
 *
 *   static ex_node_t *
 *   ex_first(ex_t *tree);
 *   static ex_node_t *

@@ -249,7 +262,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 *         last/first.
 *
 *   static ex_node_t *
 *   ex_search(ex_t *tree, const ex_node_t *key);
 *       Description: Search for node that matches key.
 *       Args:
 *         tree: Pointer to an initialized red-black tree object.

@@ -257,9 +270,9 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 *       Ret: Node in tree that matches key, or NULL if no match.
 *
 *   static ex_node_t *
 *   ex_nsearch(ex_t *tree, const ex_node_t *key);
 *   static ex_node_t *
 *   ex_psearch(ex_t *tree, const ex_node_t *key);
 *       Description: Search for node that matches key.  If no match is found,
 *         return what would be key's successor/predecessor, were
 *         key in tree.
@@ -307,40 +320,52 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 *       arg : Opaque pointer passed to cb().
 *       Ret: NULL if iteration completed, or the non-NULL callback return value
 *         that caused termination of the iteration.
 *
 *   static void
 *   ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
 *       Description: Iterate over the tree with post-order traversal, remove
 *         each node, and run the callback if non-null.  This is
 *         used for destroying a tree without paying the cost to
 *         rebalance it.  The tree must not be otherwise altered
 *         during traversal.
 *       Args:
 *         tree: Pointer to an initialized red-black tree object.
 *         cb : Callback function, which, if non-null, is called for each node
 *           during iteration.  There is no way to stop iteration once it
 *           has begun.
 *         arg : Opaque pointer passed to cb().
 */
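As an illustrative sketch (editorial addition, not part of rb.h): instantiating the generated API for the hypothetical ex_node_t type used in the comment above might look like the following, assuming an ordinary integer-key comparison.

/*
 * Illustrative sketch only: a hypothetical ex_node_t keyed by an int.
 */
typedef struct ex_node_s ex_node_t;
struct ex_node_s {
    int key;
    rb_node(ex_node_t) link;    /* Embedded left/right/color linkage. */
};
typedef rb_tree(ex_node_t) ex_t;

static int
ex_cmp(const ex_node_t *a, const ex_node_t *b)
{

    return ((a->key > b->key) - (a->key < b->key));
}

/* Generates static ex_new(), ex_empty(), ex_search(), ex_insert(), etc. */
rb_gen(static, ex_, ex_t, ex_node_t, link, ex_cmp)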
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
    rb_new(a_type, a_field, rbtree); \
} \
a_attr bool \
a_prefix##empty(a_rbt_type *rbtree) { \
    return (rbtree->rbt_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
    a_type *ret; \
    rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
    return (ret); \
} \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree) { \
    a_type *ret; \
    rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
    return (ret); \
} \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
    a_type *ret; \
    if (rbtn_right_get(a_type, a_field, node) != NULL) { \
        rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
          a_field, node), ret); \
    } else { \
        a_type *tnode = rbtree->rbt_root; \
        assert(tnode != NULL); \
        ret = NULL; \
        while (true) { \
            int cmp = (a_cmp)(node, tnode); \
            if (cmp < 0) { \

@@ -351,24 +376,21 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
            } else { \
                break; \
            } \
            assert(tnode != NULL); \
        } \
    } \
    return (ret); \
} \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
    a_type *ret; \
    if (rbtn_left_get(a_type, a_field, node) != NULL) { \
        rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
          a_field, node), ret); \
    } else { \
        a_type *tnode = rbtree->rbt_root; \
        assert(tnode != NULL); \
        ret = NULL; \
        while (true) { \
            int cmp = (a_cmp)(node, tnode); \
            if (cmp < 0) { \

@@ -379,20 +401,17 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
            } else { \
                break; \
            } \
            assert(tnode != NULL); \
        } \
    } \
    return (ret); \
} \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
    a_type *ret; \
    int cmp; \
    ret = rbtree->rbt_root; \
    while (ret != NULL \
      && (cmp = (a_cmp)(key, ret)) != 0) { \
        if (cmp < 0) { \
            ret = rbtn_left_get(a_type, a_field, ret); \

@@ -400,17 +419,14 @@ a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
            ret = rbtn_right_get(a_type, a_field, ret); \
        } \
    } \
    return (ret); \
} \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
    a_type *ret; \
    a_type *tnode = rbtree->rbt_root; \
    ret = NULL; \
    while (tnode != NULL) { \
        int cmp = (a_cmp)(key, tnode); \
        if (cmp < 0) { \
            ret = tnode; \

@@ -422,17 +438,14 @@ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
            break; \
        } \
    } \
    return (ret); \
} \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
    a_type *ret; \
    a_type *tnode = rbtree->rbt_root; \
    ret = NULL; \
    while (tnode != NULL) { \
        int cmp = (a_cmp)(key, tnode); \
        if (cmp < 0) { \
            tnode = rbtn_left_get(a_type, a_field, tnode); \

@@ -444,9 +457,6 @@ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
            break; \
        } \
    } \
    return (ret); \
} \
a_attr void \

@@ -458,7 +468,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
    rbt_node_new(a_type, a_field, rbtree, node); \
    /* Wind. */ \
    path->node = rbtree->rbt_root; \
    for (pathp = path; pathp->node != NULL; pathp++) { \
        int cmp = pathp->cmp = a_cmp(node, pathp->node); \
        assert(cmp != 0); \
        if (cmp < 0) { \

@@ -478,7 +488,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
            rbtn_left_set(a_type, a_field, cnode, left); \
            if (rbtn_red_get(a_type, a_field, left)) { \
                a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
                if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
                  leftleft)) { \
                    /* Fix up 4-node. */ \
                    a_type *tnode; \
                    rbtn_black_set(a_type, a_field, leftleft); \

@@ -493,7 +504,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
            rbtn_right_set(a_type, a_field, cnode, right); \
            if (rbtn_red_get(a_type, a_field, right)) { \
                a_type *left = rbtn_left_get(a_type, a_field, cnode); \
                if (left != NULL && rbtn_red_get(a_type, a_field, \
                  left)) { \
                    /* Split 4-node. */ \
                    rbtn_black_set(a_type, a_field, left); \
                    rbtn_black_set(a_type, a_field, right); \
@@ -526,7 +538,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
    /* Wind. */ \
    nodep = NULL; /* Silence compiler warning. */ \
    path->node = rbtree->rbt_root; \
    for (pathp = path; pathp->node != NULL; pathp++) { \
        int cmp = pathp->cmp = a_cmp(node, pathp->node); \
        if (cmp < 0) { \
            pathp[1].node = rbtn_left_get(a_type, a_field, \

@@ -538,7 +550,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
            /* Find node's successor, in preparation for swap. */ \
            pathp->cmp = 1; \
            nodep = pathp; \
            for (pathp++; pathp->node != NULL; \
              pathp++) { \
                pathp->cmp = -1; \
                pathp[1].node = rbtn_left_get(a_type, a_field, \

@@ -581,10 +593,10 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
        } \
    } else { \
        a_type *left = rbtn_left_get(a_type, a_field, node); \
        if (left != NULL) { \
            /* node has no successor, but it has a left child. */\
            /* Splice node out, without losing the left child. */\
            assert(!rbtn_red_get(a_type, a_field, node)); \
            assert(rbtn_red_get(a_type, a_field, left)); \
            rbtn_black_set(a_type, a_field, left); \
            if (pathp == path) { \
@@ -601,34 +613,32 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
            return; \
        } else if (pathp == path) { \
            /* The tree only contained one node. */ \
            rbtree->rbt_root = NULL; \
            return; \
        } \
    } \
    if (rbtn_red_get(a_type, a_field, pathp->node)) { \
        /* Prune red node, which requires no fixup. */ \
        assert(pathp[-1].cmp < 0); \
        rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
        return; \
    } \
    /* The node to be pruned is black, so unwind until balance is */\
    /* restored. */\
    pathp->node = NULL; \
    for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
        assert(pathp->cmp != 0); \
        if (pathp->cmp < 0) { \
            rbtn_left_set(a_type, a_field, pathp->node, \
              pathp[1].node); \
            if (rbtn_red_get(a_type, a_field, pathp->node)) { \
                a_type *right = rbtn_right_get(a_type, a_field, \
                  pathp->node); \
                a_type *rightleft = rbtn_left_get(a_type, a_field, \
                  right); \
                a_type *tnode; \
                if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
                  rightleft)) { \
                    /* In the following diagrams, ||, //, and \\ */\
                    /* indicate the path to the removed node. */\
                    /* */\
@@ -671,7 +681,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                  pathp->node); \
                a_type *rightleft = rbtn_left_get(a_type, a_field, \
                  right); \
                if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
                  rightleft)) { \
                    /*      ||            */\
                    /*    pathp(b)        */\
                    /*  //        \       */\

@@ -685,7 +696,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                    rbtn_rotate_left(a_type, a_field, pathp->node, \
                      tnode); \
                    /* Balance restored, but rotation modified */\
                    /* subtree root, which may actually be the tree */\
                    /* root. */\
                    if (pathp == path) { \
                        /* Set root. */ \
@@ -725,7 +736,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                  left); \
                a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
                  leftright); \
                if (leftrightleft != NULL && rbtn_red_get(a_type, \
                  a_field, leftrightleft)) { \
                    /*      ||            */\
                    /*    pathp(b)        */\
                    /*   /        \\      */\

@@ -751,7 +763,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                    /*   (b)              */\
                    /*   /                */\
                    /* (b)                */\
                    assert(leftright != NULL); \
                    rbtn_red_set(a_type, a_field, leftright); \
                    rbtn_rotate_right(a_type, a_field, pathp->node, \
                      tnode); \

@@ -774,7 +786,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                    return; \
                } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
                    a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
                    if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
                      leftleft)) { \
                        /*      ||            */\
                        /*    pathp(r)        */\
                        /*   /        \\      */\

@@ -812,7 +825,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                    } \
                } else { \
                    a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
                    if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
                      leftleft)) { \
                        /*      ||            */\
                        /*    pathp(b)        */\
                        /*   /        \\      */\

@@ -853,18 +867,18 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
    } \
    /* Set root. */ \
    rbtree->rbt_root = path->node; \
    assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
} \
a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
    if (node == NULL) { \
        return (NULL); \
    } else { \
        a_type *ret; \
        if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
          a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
          arg)) != NULL) { \
            return (ret); \
        } \
        return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \

@@ -878,8 +892,8 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
    if (cmp < 0) { \
        a_type *ret; \
        if ((ret = a_prefix##iter_start(rbtree, start, \
          rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
          (ret = cb(rbtree, node, arg)) != NULL) { \
            return (ret); \
        } \
        return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
@@ -906,21 +920,18 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
    } else { \
        ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
    } \
    return (ret); \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
    if (node == NULL) { \
        return (NULL); \
    } else { \
        a_type *ret; \
        if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
          rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
          (ret = cb(rbtree, node, arg)) != NULL) { \
            return (ret); \
        } \
        return (a_prefix##reverse_iter_recurse(rbtree, \

@@ -935,8 +946,8 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
    if (cmp > 0) { \
        a_type *ret; \
        if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
          rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
          (ret = cb(rbtree, node, arg)) != NULL) { \
            return (ret); \
        } \
        return (a_prefix##reverse_iter_recurse(rbtree, \
@@ -964,10 +975,29 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
        ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
          cb, arg); \
    } \
    return (ret); \
} \
a_attr void \
a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
  a_type *, void *), void *arg) { \
    if (node == NULL) { \
        return; \
    } \
    a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
      node), cb, arg); \
    rbtn_left_set(a_type, a_field, (node), NULL); \
    a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
      node), cb, arg); \
    rbtn_right_set(a_type, a_field, (node), NULL); \
    if (cb) { \
        cb(node, arg); \
    } \
} \
a_attr void \
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
  void *arg) { \
    a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
    rbtree->rbt_root = NULL; \
}
#endif /* RB_H_ */
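Continuing the hypothetical ex_ sketch above (editorial addition): a tree of heap-allocated nodes can be torn down in a single post-order pass with the generated destroy function.

#include <stdlib.h>    /* free */

static void
ex_free_cb(ex_node_t *node, void *arg)
{

    (void)arg;
    free(node);
}

/* Usage: ex_destroy(&tree, ex_free_cb, NULL); the tree is left empty. */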
/*
 * This radix tree implementation is tailored to the singular purpose of
 * associating metadata with chunks that are currently owned by jemalloc.
 *
 *******************************************************************************
 */
#ifdef JEMALLOC_H_TYPES

typedef struct rtree_node_elm_s rtree_node_elm_t;
typedef struct rtree_level_s rtree_level_t;
typedef struct rtree_s rtree_t;

/*
 * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
 * machine address width.
 */
#define LG_RTREE_BITS_PER_LEVEL 4
#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
/* Maximum rtree height. */
#define RTREE_HEIGHT_MAX \
    ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
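/*
 * Worked example (editorial note): on a 64-bit platform (LG_SIZEOF_PTR == 3),
 * each level consumes RTREE_BITS_PER_LEVEL == 16 key bits, so
 * RTREE_HEIGHT_MAX == 64 / 16 == 4.
 */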
/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
/*
* The node allocation callback function's argument is the number of contiguous
* rtree_node_elm_t structures to allocate, and the resulting memory must be
* zeroed.
*/
typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t);
typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct rtree_node_elm_s {
    union {
        void *pun;
        rtree_node_elm_t *child;
        extent_node_t *val;
    };
};

struct rtree_level_s {
    /*
     * A non-NULL subtree points to a subtree rooted along the hypothetical
     * path to the leaf node corresponding to key 0.  Depending on what keys
     * have been used to store to the tree, an arbitrary combination of
     * subtree pointers may remain NULL.
     *
     * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
     * This results in a 3-level tree, and the leftmost leaf can be directly
     * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding
     * 0x00000000) can be accessed via subtrees[1], and the remainder of the
     * tree can be accessed via subtrees[0].
     *
     *   levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
     *
     *   levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ]
     *
     *   levels[2] : [val(0x000000000000) | val(0x000000000001) | ...]
     *
     * This has practical implications on x64, which currently uses only the
     * lower 47 bits of virtual address space in userland, thus leaving
     * subtrees[0] unused and avoiding a level of tree traversal.
     */
    union {
        void *subtree_pun;
        rtree_node_elm_t *subtree;
    };
    /* Number of key bits distinguished by this level. */
    unsigned bits;
    /*
     * Cumulative number of key bits distinguished by traversing to
     * corresponding tree level.
     */
    unsigned cumbits;
};

struct rtree_s {
    rtree_node_alloc_t *alloc;
    rtree_node_dalloc_t *dalloc;
    unsigned height;
    /*
     * Precomputed table used to convert from the number of leading 0 key
     * bits to which subtree level to start at.
     */
    unsigned start_level[RTREE_HEIGHT_MAX];
    rtree_level_t levels[RTREE_HEIGHT_MAX];
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
    rtree_node_dalloc_t *dalloc);
void rtree_delete(rtree_t *rtree);
rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree,
    unsigned level);
rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree,
    rtree_node_elm_t *elm, unsigned level);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);

bool rtree_node_valid(rtree_node_elm_t *node);
rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
    bool dependent);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
    unsigned level, bool dependent);
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
    bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
    const extent_node_t *val);
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
    bool dependent);
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
    bool dependent);
extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
JEMALLOC_ALWAYS_INLINE unsigned
rtree_start_level(rtree_t *rtree, uintptr_t key)
{
    unsigned start_level;

    if (unlikely(key == 0))
        return (rtree->height - 1);

    start_level = rtree->start_level[lg_floor(key) >>
        LG_RTREE_BITS_PER_LEVEL];
    assert(start_level < rtree->height);
    return (start_level);
}

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{

    return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
        rtree->levels[level].cumbits)) & ((ZU(1) <<
        rtree->levels[level].bits) - 1));
}
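/*
 * Worked example (editorial note): with 64-bit pointers and 16 bits per
 * level, cumbits == 16 * (level + 1), so key 0x00007fff12345678 yields the
 * subkeys 0x0000, 0x7fff, 0x1234, and 0x5678 at levels 0 through 3.
 */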

JEMALLOC_ALWAYS_INLINE bool
rtree_node_valid(rtree_node_elm_t *node)
{

    return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
}

JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_child_tryread(rtree_node_elm_t *elm, bool dependent)
{
    rtree_node_elm_t *child;

    /* Double-checked read (first read may be stale). */
    child = elm->child;
    if (!dependent && !rtree_node_valid(child))
        child = atomic_read_p(&elm->pun);
    assert(!dependent || child != NULL);
    return (child);
}

JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
    bool dependent)
{
    rtree_node_elm_t *child;

    child = rtree_child_tryread(elm, dependent);
    if (!dependent && unlikely(!rtree_node_valid(child)))
        child = rtree_child_read_hard(rtree, elm, level);
    assert(!dependent || child != NULL);
    return (child);
}

JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{

    if (dependent) {
        /*
         * Reading a val on behalf of a pointer to a valid allocation is
         * guaranteed to be a clean read even without synchronization,
         * because the rtree update became visible in memory before the
         * pointer came into existence.
         */
        return (elm->val);
    } else {
        /*
         * An arbitrary read, e.g. on behalf of ivsalloc(), may not be
         * dependent on a previous rtree write, which means a stale read
         * could result if synchronization were omitted here.
         */
        return (atomic_read_p(&elm->pun));
    }
}

JEMALLOC_INLINE void
rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
{

    atomic_write_p(&elm->pun, val);
}

JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
{
    rtree_node_elm_t *subtree;

    /* Double-checked read (first read may be stale). */
    subtree = rtree->levels[level].subtree;
    if (!dependent && unlikely(!rtree_node_valid(subtree)))
        subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
    assert(!dependent || subtree != NULL);
    return (subtree);
}

JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
{
    rtree_node_elm_t *subtree;

    subtree = rtree_subtree_tryread(rtree, level, dependent);
    if (!dependent && unlikely(!rtree_node_valid(subtree)))
        subtree = rtree_subtree_read_hard(rtree, level);
    assert(!dependent || subtree != NULL);
    return (subtree);
}

JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
    uintptr_t subkey;
    unsigned start_level;
    rtree_node_elm_t *node;

    start_level = rtree_start_level(rtree, key);

    node = rtree_subtree_tryread(rtree, start_level, dependent);
#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
    switch (start_level + RTREE_GET_BIAS) {
#define RTREE_GET_SUBTREE(level) \
    case level: \
        assert(level < (RTREE_HEIGHT_MAX-1)); \
        if (!dependent && unlikely(!rtree_node_valid(node))) \
            return (NULL); \
        subkey = rtree_subkey(rtree, key, level - \
            RTREE_GET_BIAS); \
        node = rtree_child_tryread(&node[subkey], dependent); \
        /* Fall through. */
#define RTREE_GET_LEAF(level) \
    case level: \
        assert(level == (RTREE_HEIGHT_MAX-1)); \
        if (!dependent && unlikely(!rtree_node_valid(node))) \
            return (NULL); \
        subkey = rtree_subkey(rtree, key, level - \
            RTREE_GET_BIAS); \
        /* \
         * node is a leaf, so it contains values rather than \
         * child pointers. \
         */ \
        return (rtree_val_read(rtree, &node[subkey], \
            dependent));
#if RTREE_HEIGHT_MAX > 1
RTREE_GET_SUBTREE(0)
#endif
#if RTREE_HEIGHT_MAX > 2
RTREE_GET_SUBTREE(1)
#endif
#if RTREE_HEIGHT_MAX > 3
RTREE_GET_SUBTREE(2)
#endif
#if RTREE_HEIGHT_MAX > 4
RTREE_GET_SUBTREE(3)
#endif
#if RTREE_HEIGHT_MAX > 5
RTREE_GET_SUBTREE(4)
#endif
#if RTREE_HEIGHT_MAX > 6
RTREE_GET_SUBTREE(5)
#endif
#if RTREE_HEIGHT_MAX > 7
RTREE_GET_SUBTREE(6)
#endif
#if RTREE_HEIGHT_MAX > 8
RTREE_GET_SUBTREE(7)
#endif
#if RTREE_HEIGHT_MAX > 9
RTREE_GET_SUBTREE(8)
#endif
#if RTREE_HEIGHT_MAX > 10
RTREE_GET_SUBTREE(9)
#endif
#if RTREE_HEIGHT_MAX > 11
RTREE_GET_SUBTREE(10)
#endif
#if RTREE_HEIGHT_MAX > 12
RTREE_GET_SUBTREE(11)
#endif
#if RTREE_HEIGHT_MAX > 13
RTREE_GET_SUBTREE(12)
#endif
#if RTREE_HEIGHT_MAX > 14
RTREE_GET_SUBTREE(13)
#endif
#if RTREE_HEIGHT_MAX > 15
RTREE_GET_SUBTREE(14)
#endif
#if RTREE_HEIGHT_MAX > 16
# error Unsupported RTREE_HEIGHT_MAX
#endif
RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
#undef RTREE_GET_SUBTREE
#undef RTREE_GET_LEAF
    default: not_reached();
    }
#undef RTREE_GET_BIAS
    not_reached();
}
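The unrolled switch above exists so the compiler can specialize the traversal per level; behaviorally it is equivalent to the following simple loop (an editorial sketch with a hypothetical name, not part of rtree.h).

JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_get_loop_sketch(rtree_t *rtree, uintptr_t key, bool dependent)
{
    unsigned level = rtree_start_level(rtree, key);
    rtree_node_elm_t *node = rtree_subtree_tryread(rtree, level, dependent);

    for (;; level++) {
        uintptr_t subkey;

        if (!dependent && unlikely(!rtree_node_valid(node)))
            return (NULL);
        subkey = rtree_subkey(rtree, key, level);
        if (level == rtree->height - 1) {
            /* Leaf: elements hold values rather than child pointers. */
            return (rtree_val_read(rtree, &node[subkey], dependent));
        }
        node = rtree_child_tryread(&node[subkey], dependent);
    }
}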

JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
{
    uintptr_t subkey;
    unsigned i, start_level;
    rtree_node_elm_t *node, *child;

    start_level = rtree_start_level(rtree, key);

    node = rtree_subtree_read(rtree, start_level, false);
    if (node == NULL)
        return (true);
    for (i = start_level; /**/; i++, node = child) {
        subkey = rtree_subkey(rtree, key, i);
        if (i == rtree->height - 1) {
            /*
             * node is a leaf, so it contains values rather than
             * child pointers.
             */
            rtree_val_write(rtree, &node[subkey], val);
            return (false);
        }
        assert(i + 1 < rtree->height);
        child = rtree_child_read(rtree, &node[subkey], i, false);
        if (child == NULL)
            return (true);
    }
    not_reached();
}
#endif
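A hedged usage sketch of the API declared above (editorial addition; the callback and helper names are hypothetical, and 48 is just an example significant-bit count):

#include <stdlib.h>    /* calloc, free */

static rtree_node_elm_t *
node_alloc_sketch(size_t nelms)
{

    /* Must return zeroed memory, per the rtree_node_alloc_t contract. */
    return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t)));
}

static void
node_dalloc_sketch(rtree_node_elm_t *node)
{

    free(node);
}

static void
rtree_usage_sketch(rtree_t *rtree, uintptr_t chunk_addr, extent_node_t *node)
{

    if (rtree_new(rtree, 48, node_alloc_sketch, node_dalloc_sketch))
        return; /* Initialization failed. */
    rtree_set(rtree, chunk_addr, node);
    assert(rtree_get(rtree, chunk_addr, true) == node);
}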
#!/bin/sh
#
# Usage: size_classes.sh <lg_qarr> <lg_tmin> <lg_parr> <lg_g>

# The following limits are chosen such that they cover all supported platforms.

# Pointer sizes.
lg_zarr="2 3"

# Quanta.
lg_qarr=$1

# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)].
lg_tmin=$2

# Maximum lookup size.
lg_kmax=12

# Page sizes.
lg_parr=`echo $3 | tr ',' ' '`

# Size class group size (number of size classes for each size doubling).
lg_g=$4
pow2() {
  e=$1

@@ -22,68 +31,256 @@ pow2() {
  done
}
lg() {
  x=$1
  lg_result=0
  while [ ${x} -gt 1 ] ; do
    lg_result=$((${lg_result} + 1))
    x=$((${x} / 2))
  done
}

size_class() {
  index=$1
  lg_grp=$2
  lg_delta=$3
  ndelta=$4
  lg_p=$5
  lg_kmax=$6

  if [ ${lg_delta} -ge ${lg_p} ] ; then
    psz="yes"
  else
    pow2 ${lg_p}; p=${pow2_result}
    pow2 ${lg_grp}; grp=${pow2_result}
    pow2 ${lg_delta}; delta=${pow2_result}
    sz=$((${grp} + ${delta} * ${ndelta}))
    npgs=$((${sz} / ${p}))
    if [ ${sz} -eq $((${npgs} * ${p})) ] ; then
      psz="yes"
    else
      psz="no"
    fi
  fi

  lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta}
  if [ ${pow2_result} -lt ${ndelta} ] ; then
    rem="yes"
  else
    rem="no"
  fi

  lg_size=${lg_grp}
  if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then
    lg_size=$((${lg_grp} + 1))
  else
    lg_size=${lg_grp}
    rem="yes"
  fi

  if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then
    bin="yes"
  else
    bin="no"
  fi
  if [ ${lg_size} -lt ${lg_kmax} \
      -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then
    lg_delta_lookup=${lg_delta}
  else
    lg_delta_lookup="no"
  fi
  printf '    SC(%3d, %6d, %8d, %6d, %3s, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${lg_delta_lookup}
  # Defined upon return:
  # - psz ("yes" or "no")
  # - bin ("yes" or "no")
  # - lg_delta_lookup (${lg_delta} or "no")
}
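# Worked example (editorial note, assumed arguments): size_class with
# lg_grp=6, lg_delta=4, ndelta=3, lg_p=12, lg_kmax=12 (and lg_g=2) describes
# the 2^6 + 3*2^4 = 112 byte class: not a page multiple (psz=no), small
# enough to be a bin (bin=yes, since lg_size=6 < lg_p+lg_g), and covered by
# the lookup table (lg_delta_lookup=4, since lg_size < lg_kmax).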
sep_line() {
  echo " \\"
}

size_classes() {
  lg_z=$1
  lg_q=$2
  lg_t=$3
  lg_p=$4
  lg_g=$5

  pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result}
  pow2 ${lg_g}; g=${pow2_result}

  echo "#define SIZE_CLASSES \\"
  echo "  /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \\"

  ntbins=0
  nlbins=0
  lg_tiny_maxclass='"NA"'
  nbins=0
  npsizes=0

  # Tiny size classes.
  ndelta=0
  index=0
  lg_grp=${lg_t}
  lg_delta=${lg_grp}
  while [ ${lg_grp} -lt ${lg_q} ] ; do
    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
    if [ ${lg_delta_lookup} != "no" ] ; then
      nlbins=$((${index} + 1))
    fi
    if [ ${psz} = "yes" ] ; then
      npsizes=$((${npsizes} + 1))
    fi
    if [ ${bin} != "no" ] ; then
      nbins=$((${index} + 1))
    fi
    ntbins=$((${ntbins} + 1))
    lg_tiny_maxclass=${lg_grp} # Final written value is correct.
    index=$((${index} + 1))
    lg_delta=${lg_grp}
    lg_grp=$((${lg_grp} + 1))
  done

  # First non-tiny group.
  if [ ${ntbins} -gt 0 ] ; then
    sep_line
    # The first size class has an unusual encoding, because the size has to be
    # split between grp and delta*ndelta.
    lg_grp=$((${lg_grp} - 1))
    ndelta=1
    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
    index=$((${index} + 1))
    lg_grp=$((${lg_grp} + 1))
    lg_delta=$((${lg_delta} + 1))
    if [ ${psz} = "yes" ] ; then
      npsizes=$((${npsizes} + 1))
    fi
  fi
  while [ ${ndelta} -lt ${g} ] ; do
    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
    index=$((${index} + 1))
    ndelta=$((${ndelta} + 1))
    if [ ${psz} = "yes" ] ; then
      npsizes=$((${npsizes} + 1))
    fi
  done

  # All remaining groups.
  lg_grp=$((${lg_grp} + ${lg_g}))
  while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do
    sep_line
    ndelta=1
    if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then
      ndelta_limit=$((${g} - 1))
    else
      ndelta_limit=${g}
    fi
    while [ ${ndelta} -le ${ndelta_limit} ] ; do
      size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
      if [ ${lg_delta_lookup} != "no" ] ; then
        nlbins=$((${index} + 1))
        # Final written value is correct:
        lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
      fi
      if [ ${psz} = "yes" ] ; then
        npsizes=$((${npsizes} + 1))
      fi
      if [ ${bin} != "no" ] ; then
        nbins=$((${index} + 1))
        # Final written value is correct:
        small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
        if [ ${lg_g} -gt 0 ] ; then
          lg_large_minclass=$((${lg_grp} + 1))
        else
          lg_large_minclass=$((${lg_grp} + 2))
        fi
      fi
      # Final written value is correct:
      huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
      index=$((${index} + 1))
      ndelta=$((${ndelta} + 1))
    done
    lg_grp=$((${lg_grp} + 1))
    lg_delta=$((${lg_delta} + 1))
  done
  echo
  nsizes=${index}

  # Defined upon completion:
  # - ntbins
  # - nlbins
  # - nbins
  # - nsizes
  # - npsizes
  # - lg_tiny_maxclass
  # - lookup_maxclass
  # - small_maxclass
  # - lg_large_minclass
  # - huge_maxclass
}
cat <<EOF
/* This file was automatically generated by size_classes.sh. */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to
* be defined prior to inclusion, and it in turn defines:
*
* LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
* SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz,
* bin, lg_delta_lookup) tuples.
* index: Size class index.
* lg_grp: Lg group base size (no deltas added).
* lg_delta: Lg delta to previous size class.
* ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
* psz: 'yes' if a multiple of the page size, 'no' otherwise.
* bin: 'yes' if a small bin size class, 'no' otherwise.
* lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
* otherwise.
* NTBINS: Number of tiny bins.
* NLBINS: Number of bins supported by the lookup table.
* NBINS: Number of small size class bins.
* NSIZES: Number of size classes.
* NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE).
* LG_TINY_MAXCLASS: Lg of maximum tiny size class.
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
* LG_LARGE_MINCLASS: Lg of minimum large size class.
* HUGE_MAXCLASS: Maximum (huge) size class.
*/
#define LG_SIZE_CLASS_GROUP ${lg_g}
EOF
for lg_z in ${lg_zarr} ; do
  for lg_q in ${lg_qarr} ; do
    lg_t=${lg_tmin}
    while [ ${lg_t} -le ${lg_q} ] ; do
      # Iterate through page sizes and compute how many bins there are.
      for lg_p in ${lg_parr} ; do
        echo "#if (LG_SIZEOF_PTR == ${lg_z} && LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})"
        size_classes ${lg_z} ${lg_q} ${lg_t} ${lg_p} ${lg_g}
        echo "#define SIZE_CLASSES_DEFINED"
        echo "#define NTBINS ${ntbins}"
        echo "#define NLBINS ${nlbins}"
        echo "#define NBINS ${nbins}"
        echo "#define NSIZES ${nsizes}"
        echo "#define NPSIZES ${npsizes}"
        echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
        echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
        echo "#define SMALL_MAXCLASS ${small_maxclass}"
        echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
        echo "#define HUGE_MAXCLASS ${huge_maxclass}"
        echo "#endif"
        echo
      done
      lg_t=$((${lg_t} + 1))
    done
  done
done
cat <<EOF cat <<EOF
@@ -92,11 +289,10 @@ cat <<EOF
#endif
#undef SIZE_CLASSES_DEFINED
/*
 * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
 * cannot support more than 256 small size classes.  Further constrain NBINS to
 * 255 since all small size classes, plus a "not small" size class must be
 * stored in 8 bits of arena_chunk_map_bits_t's bits field.
 */
#if (NBINS > 255)
#  error "Too many small size classes"
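The 8-bit constraint exists because the generated table stores one bin index per LG_TINY_MIN-sized granule. A simplified sketch of the lookup (jemalloc's real code differs in detail, and LG_TINY_MIN == 3 is an assumed configuration here):

#include <stdint.h>
#include <stddef.h>

#define LG_TINY_MIN 3 /* assumed: smallest size class is 8 bytes */

extern const uint8_t size2index_tab[]; /* generated from SIZE_CLASSES */

static inline unsigned
small_size2index(size_t size)
{
    /* Round up to a granule boundary, then map granule -> bin index. */
    return ((unsigned)size2index_tab[(size - 1) >> LG_TINY_MIN]);
}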
/*
* This file was generated by the following command:
* sh smoothstep.sh smoother 200 24 3 15
*/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
*
* 3 2
* smoothstep(x) = -2x + 3x
*
* 5 4 3
* smootherstep(x) = 6x - 15x + 10x
*
* 7 6 5 4
* smootheststep(x) = -20x + 70x - 84x + 35x
*/
#define SMOOTHSTEP_VARIANT "smoother"
#define SMOOTHSTEP_NSTEPS 200
#define SMOOTHSTEP_BFP 24
#define SMOOTHSTEP \
/* STEP(step, h, x, y) */ \
STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
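A standalone cross-check of the encoding above, assuming only the polynomial given in the header comment: each h is smootherstep(x) scaled to SMOOTHSTEP_BFP-bit fixed point (truncated), so STEP 100 (x = 0.5) must encode 0x800000.

#include <assert.h>
#include <math.h>
#include <stdint.h>

static uint64_t
smootherstep_fixed(double x, unsigned bfp)
{
    double y = 6.0 * pow(x, 5) - 15.0 * pow(x, 4) + 10.0 * pow(x, 3);

    /* Scale to bfp fractional bits and truncate, as the generator does. */
    return ((uint64_t)(y * (double)((uint64_t)1 << bfp)));
}

int
main(void)
{
    /* smootherstep(0.5) == 0.5 exactly, so the entry is 2^24 / 2. */
    assert(smootherstep_fixed(0.5, 24) == UINT64_C(0x0000000000800000));
    return (0);
}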
#!/bin/sh
#
# Generate a discrete lookup table for a sigmoid function in the smoothstep
# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode
# the entries using a binary fixed point representation.
#
# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
#
# <variant> is in {smooth, smoother, smoothest}.
# <nsteps> must be greater than zero.
# <bfp> must be in [0..62]; reasonable values are roughly [10..30].
# <xprec> is x decimal precision.
# <yprec> is y decimal precision.
#set -x
cmd="sh smoothstep.sh $*"
variant=$1
nsteps=$2
bfp=$3
xprec=$4
yprec=$5
case "${variant}" in
smooth)
;;
smoother)
;;
smoothest)
;;
*)
echo "Unsupported variant"
exit 1
;;
esac
# Each variant computes y = f(step/nsteps) at yprec decimal digits via dc
# (RPN), then h = trunc(y * 2^bfp), the binary fixed point encoding of y.
# The tr/sed cleanup strips dc's backslash-newline wrapping and restores the
# leading "0." that dc omits; the final tr/awk pair truncates at the decimal
# point.
smooth() {
step=$1
y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
}
smoother() {
step=$1
y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
}
smoothest() {
step=$1
y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
}
cat <<EOF
/*
* This file was generated by the following command:
* $cmd
*/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
*
* 3 2
* smoothstep(x) = -2x + 3x
*
* 5 4 3
* smootherstep(x) = 6x - 15x + 10x
*
* 7 6 5 4
* smootheststep(x) = -20x + 70x - 84x + 35x
*/
#define SMOOTHSTEP_VARIANT "${variant}"
#define SMOOTHSTEP_NSTEPS ${nsteps}
#define SMOOTHSTEP_BFP ${bfp}
#define SMOOTHSTEP \\
/* STEP(step, h, x, y) */ \\
EOF
s=1
while [ $s -le $nsteps ] ; do
$variant ${s}
x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
printf ' STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y}
s=$((s+1))
done
echo
cat <<EOF
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
EOF
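For reference (a sketch, not part of the script): a consumer expands the generated table into an array by defining STEP before evaluating SMOOTHSTEP, roughly the way jemalloc's decay logic builds its step table.

#include <stddef.h>
#include <stdint.h>
/* Assumes the generated smoothstep.h (SMOOTHSTEP, SMOOTHSTEP_BFP) is in scope. */

#define STEP(step, h, x, y) h,
static const uint64_t h_steps[] = {
    SMOOTHSTEP
};
#undef STEP

/* Fixed-point fraction of the curve remaining after step i (0-based). */
static inline uint64_t
smoothstep_remaining(size_t i)
{
    return (((uint64_t)1 << SMOOTHSTEP_BFP) - h_steps[i]);
}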
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct spin_s spin_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct spin_s {
unsigned iteration;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void spin_init(spin_t *spin);
void spin_adaptive(spin_t *spin);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_))
JEMALLOC_INLINE void
spin_init(spin_t *spin)
{
spin->iteration = 0;
}
JEMALLOC_INLINE void
spin_adaptive(spin_t *spin)
{
volatile uint64_t i;
for (i = 0; i < (KQU(1) << spin->iteration); i++)
CPU_SPINWAIT;
if (spin->iteration < 63)
spin->iteration++;
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
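Not part of the header: a sketch of how an adaptive spinner is typically driven; my_lock_t and try_lock() are hypothetical stand-ins for any operation that can fail and be retried.

static void
lock_with_backoff(my_lock_t *lock) /* my_lock_t/try_lock are hypothetical */
{
    spin_t spinner;

    spin_init(&spinner);
    while (!try_lock(lock)) {
        /* Busy-waits ~2^iteration CPU_SPINWAITs, doubling each failure. */
        spin_adaptive(&spinner);
    }
}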
@@ -4,6 +4,7 @@
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct malloc_huge_stats_s malloc_huge_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;
@@ -20,12 +21,6 @@ struct tcache_bin_stats_s {
};

struct malloc_bin_stats_s {
    /*
     * Total number of allocation/deallocation requests served directly by
     * the bin.  Note that tcache may allocate an object, then recycle it
@@ -42,6 +37,12 @@ struct malloc_bin_stats_s {
     */
    uint64_t nrequests;

    /*
     * Current number of regions of this size class, including regions
     * currently cached by tcache.
     */
    size_t curregs;

    /* Number of tcache fills from this bin. */
    uint64_t nfills;
@@ -78,14 +79,37 @@ struct malloc_large_stats_s {
     */
    uint64_t nrequests;

    /*
     * Current number of runs of this size class, including runs currently
     * cached by tcache.
     */
    size_t curruns;
};

struct malloc_huge_stats_s {
    /*
     * Total number of allocation/deallocation requests served directly by
     * the arena.
     */
    uint64_t nmalloc;
    uint64_t ndalloc;

    /* Current number of (multi-)chunk allocations of this size class. */
    size_t curhchunks;
};

struct arena_stats_s {
    /* Number of bytes currently mapped. */
    size_t mapped;

    /*
     * Number of bytes currently retained as a side effect of munmap() being
     * disabled/bypassed.  Retained bytes are technically mapped (though
     * always decommitted or purged), but they are excluded from the mapped
     * statistic (above).
     */
    size_t retained;

    /*
     * Total number of purge sweeps, total number of madvise calls made,
     * and total pages purged in order to keep dirty unused memory under
@@ -95,34 +119,28 @@ struct arena_stats_s {
    uint64_t nmadvise;
    uint64_t purged;

    /*
     * Number of bytes currently mapped purely for metadata purposes, and
     * number of bytes currently allocated for internal metadata.
     */
    size_t metadata_mapped;
    size_t metadata_allocated; /* Protected via atomic_*_z(). */

    /* Per-size-category statistics. */
    size_t allocated_large;
    uint64_t nmalloc_large;
    uint64_t ndalloc_large;
    uint64_t nrequests_large;

    size_t allocated_huge;
    uint64_t nmalloc_huge;
    uint64_t ndalloc_huge;

    /* One element for each large size class. */
    malloc_large_stats_t *lstats;

    /* One element for each huge size class. */
    malloc_huge_stats_t *hstats;
};

#endif /* JEMALLOC_H_STRUCTS */
@@ -158,6 +176,9 @@ JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{
    assert(size > 0);
    assert((size & chunksize_mask) == 0);

    atomic_add_z(&stats_cactive, size);
}
@@ -165,6 +186,9 @@ JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{
    assert(size > 0);
    assert((size & chunksize_mask) == 0);

    atomic_sub_z(&stats_cactive, size);
}
#endif
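Not part of the header: counters like these surface read-only through jemalloc's mallctl namespace. A rough sketch of reading two of them ("stats.mapped" and "stats.retained" are documented mallctl keys; an unprefixed jemalloc build is assumed and error handling is elided):

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_mapping_stats(void)
{
    uint64_t epoch = 1;
    size_t mapped, retained, sz = sizeof(size_t);

    /* Advance the epoch so the stats snapshot is refreshed. */
    mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
    mallctl("stats.mapped", &mapped, &sz, NULL, 0);
    mallctl("stats.retained", &retained, &sz, NULL, 0);
    printf("mapped: %zu retained: %zu\n", mapped, retained);
}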
@@ -4,6 +4,7 @@
typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
@@ -15,6 +16,11 @@ typedef struct tcache_s tcache_t;
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY

/*
 * Absolute minimum number of cache slots for each small bin.
 */
#define TCACHE_NSLOTS_SMALL_MIN 20

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond that imposed as: twice the
@@ -64,15 +70,21 @@ struct tcache_bin_s {
    int low_water;          /* Min # cached since last GC. */
    unsigned lg_fill_div;   /* Fill (ncached_max >> lg_fill_div). */
    unsigned ncached;       /* # of cached objects. */
    /*
     * To make use of adjacent cacheline prefetch, the items in the avail
     * stack goes to higher address for newer allocations.  avail points
     * just above the available space, which means that
     * avail[-ncached, ... -1] are available items and the lowest item will
     * be allocated first.
     */
    void **avail;           /* Stack of available objects. */
};

struct tcache_s {
    ql_elm(tcache_t) link;      /* Used for aggregating stats. */
    uint64_t prof_accumbytes;   /* Cleared after arena_prof_accum(). */
    ticker_t gc_ticker;         /* Drives incremental GC. */
    szind_t next_gc_bin;        /* Next bin to GC. */
    tcache_bin_t tbins[1];      /* Dynamically sized. */
    /*
     * The pointer stacks associated with tbins follow as a contiguous
@@ -82,6 +94,14 @@ struct tcache_s {
     */
};

/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
    union {
        tcache_t *tcache;
        tcaches_t *next;
    };
};

#endif /* JEMALLOC_H_STRUCTS */
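The avail-stack layout described above, condensed into an illustrative pop/push pair (not part of the header; the real logic lives in tcache_alloc_easy() and tcache_dalloc_small() below):

/* With ncached == N, cached items occupy avail[-N .. -1]; the lowest
 * address (avail[-N]) is returned first to exploit cacheline prefetch. */
static inline void *
tbin_pop(tcache_bin_t *tbin)
{
    void *ret = *(tbin->avail - tbin->ncached);

    tbin->ncached--;
    return (ret);
}

static inline void
tbin_push(tcache_bin_t *tbin, void *ptr)
{
    tbin->ncached++;
    *(tbin->avail - tbin->ncached) = ptr;
}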
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

@@ -95,84 +115,88 @@ extern tcache_bin_info_t *tcache_bin_info;
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern unsigned nhbins;

/* Maximum cached size class. */
extern size_t tcache_maxclass;

/*
 * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
 * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are
 * completely disjoint from this data structure.  tcaches starts off as a sparse
 * array, so it has no physical memory footprint until individual pages are
 * touched.  This allows the entire array to be allocated the first time an
 * explicit tcache is created without a disproportionate impact on memory usage.
 */
extern tcaches_t *tcaches;

size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
    arena_t *oldarena, arena_t *newarena);
tcache_t *tcache_get_hard(tsd_t *tsd);
tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena);
void tcache_cleanup(tsd_t *tsd);
void tcache_enabled_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void tcache_event(tsd_t *tsd, tcache_t *tcache);
void tcache_flush(void);
bool tcache_enabled_get(void);
tcache_t *tcache_get(tsd_t *tsd, bool create);
void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t ind, bool zero, bool slow_path);
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t ind, bool zero, bool slow_path);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
    szind_t binind, bool slow_path);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
    size_t size, bool slow_path);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
JEMALLOC_INLINE void
tcache_flush(void)
{
    tsd_t *tsd;

    cassert(config_tcache);

    tsd = tsd_fetch();
    tcache_cleanup(tsd);
}

JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
    tsd_t *tsd;
    tcache_enabled_t tcache_enabled;

    cassert(config_tcache);

    tsd = tsd_fetch();
    tcache_enabled = tsd_tcache_enabled_get(tsd);
    if (tcache_enabled == tcache_enabled_default) {
        tcache_enabled = (tcache_enabled_t)opt_tcache;
        tsd_tcache_enabled_set(tsd, tcache_enabled);
    }

    return ((bool)tcache_enabled);
@@ -181,178 +205,171 @@ tcache_enabled_get(void)
JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
    tsd_t *tsd;
    tcache_enabled_t tcache_enabled;

    cassert(config_tcache);

    tsd = tsd_fetch();

    tcache_enabled = (tcache_enabled_t)enabled;
    tsd_tcache_enabled_set(tsd, tcache_enabled);

    if (!enabled)
        tcache_cleanup(tsd);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd, bool create)
{
    tcache_t *tcache;

    if (!config_tcache)
        return (NULL);

    tcache = tsd_tcache_get(tsd);
    if (!create)
        return (tcache);
    if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
        tcache = tcache_get_hard(tsd);
        tsd_tcache_set(tsd, tcache);
    }

    return (tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache)
{

    if (TCACHE_GC_INCR == 0)
        return;

    if (unlikely(ticker_tick(&tcache->gc_ticker)))
        tcache_event_hard(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
{
    void *ret;

    if (unlikely(tbin->ncached == 0)) {
        tbin->low_water = -1;
        *tcache_success = false;
        return (NULL);
    }
    /*
     * tcache_success (instead of ret) should be checked upon the return of
     * this function.  We avoid checking (ret == NULL) because there is
     * never a null stored on the avail stack (which is unknown to the
     * compiler), and eagerly checking ret would cause pipeline stall
     * (waiting for the cacheline).
     */
    *tcache_success = true;
    ret = *(tbin->avail - tbin->ncached);
    tbin->ncached--;

    if (unlikely((int)tbin->ncached < tbin->low_water))
        tbin->low_water = tbin->ncached;

    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path)
{
    void *ret;
    tcache_bin_t *tbin;
    bool tcache_success;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);

    assert(binind < NBINS);
    tbin = &tcache->tbins[binind];
    ret = tcache_alloc_easy(tbin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        bool tcache_hard_success;
        arena = arena_choose(tsd, arena);
        if (unlikely(arena == NULL))
            return (NULL);

        ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
            tbin, binind, &tcache_hard_success);
        if (tcache_hard_success == false)
            return (NULL);
    }

    assert(ret);
    /*
     * Only compute usize if required.  The checks in the following if
     * statement are all static.
     */
    if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
        usize = index2size(binind);
        assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
    }

    if (likely(!zero)) {
        if (slow_path && config_fill) {
            if (unlikely(opt_junk_alloc)) {
                arena_alloc_junk_small(ret,
                    &arena_bin_info[binind], false);
            } else if (unlikely(opt_zero))
                memset(ret, 0, usize);
        }
    } else {
        if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
            arena_alloc_junk_small(ret, &arena_bin_info[binind],
                true);
        }
        memset(ret, 0, usize);
    }

    if (config_stats)
        tbin->tstats.nrequests++;
    if (config_prof)
        tcache->prof_accumbytes += usize;
    tcache_event(tsd, tcache);
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path)
{
    void *ret;
    tcache_bin_t *tbin;
    bool tcache_success;

    assert(binind < nhbins);
    tbin = &tcache->tbins[binind];
    ret = tcache_alloc_easy(tbin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        /*
         * Only allocate one large object at a time, because it's quite
         * expensive to create one and not use it.
         */
        arena = arena_choose(tsd, arena);
        if (unlikely(arena == NULL))
            return (NULL);

        ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
        if (ret == NULL)
            return (NULL);
    } else {
        size_t usize JEMALLOC_CC_SILENCE_INIT(0);

        /* Only compute usize on demand. */
        if (config_prof || (slow_path && config_fill) ||
            unlikely(zero)) {
            usize = index2size(binind);
            assert(usize <= tcache_maxclass);
        }

        if (config_prof && usize == LARGE_MINCLASS) {
            arena_chunk_t *chunk =
                (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
            size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
@@ -360,79 +377,91 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
            arena_mapbits_large_binind_set(chunk, pageind,
                BININD_INVALID);
        }
        if (likely(!zero)) {
            if (slow_path && config_fill) {
                if (unlikely(opt_junk_alloc)) {
                    memset(ret, JEMALLOC_ALLOC_JUNK,
                        usize);
                } else if (unlikely(opt_zero))
                    memset(ret, 0, usize);
            }
        } else
            memset(ret, 0, usize);

        if (config_stats)
            tbin->tstats.nrequests++;
        if (config_prof)
            tcache->prof_accumbytes += usize;
    }

    tcache_event(tsd, tcache);
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path)
{
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;

    assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);

    if (slow_path && config_fill && unlikely(opt_junk_free))
        arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

    tbin = &tcache->tbins[binind];
    tbin_info = &tcache_bin_info[binind];
    if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
        tcache_bin_flush_small(tsd, tcache, tbin, binind,
            (tbin_info->ncached_max >> 1));
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->ncached++;
    *(tbin->avail - tbin->ncached) = ptr;

    tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
    bool slow_path)
{
    szind_t binind;
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;

    assert((size & PAGE_MASK) == 0);
    assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
    assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

    binind = size2index(size);

    if (slow_path && config_fill && unlikely(opt_junk_free))
        arena_dalloc_junk_large(ptr, size);

    tbin = &tcache->tbins[binind];
    tbin_info = &tcache_bin_info[binind];
    if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
        tcache_bin_flush_large(tsd, tbin, binind,
            (tbin_info->ncached_max >> 1), tcache);
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->ncached++;
    *(tbin->avail - tbin->ncached) = ptr;

    tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
    tcaches_t *elm = &tcaches[ind];

    if (unlikely(elm->tcache == NULL)) {
        elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
            NULL));
    }
    return (elm->tcache);
}
#endif
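Not from the source: a sketch of the explicit-tcache flow the tcaches comment above describes. "tcache.create" and MALLOCX_TCACHE() are the documented jemalloc interfaces; an unprefixed build is assumed.

#include <jemalloc/jemalloc.h>

static void *
alloc_from_private_tcache(size_t size)
{
    unsigned ind;
    size_t sz = sizeof(ind);

    /* Reserve an explicit tcache ID (backed by the sparse tcaches array). */
    if (mallctl("tcache.create", &ind, &sz, NULL, 0) != 0)
        return (NULL);
    /* Route this allocation through that tcache instead of the automatic one. */
    return (mallocx(size, MALLOCX_TCACHE(ind)));
}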
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ticker_s ticker_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct ticker_s {
int32_t tick;
int32_t nticks;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void ticker_init(ticker_t *ticker, int32_t nticks);
void ticker_copy(ticker_t *ticker, const ticker_t *other);
int32_t ticker_read(const ticker_t *ticker);
bool ticker_ticks(ticker_t *ticker, int32_t nticks);
bool ticker_tick(ticker_t *ticker);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_))
JEMALLOC_INLINE void
ticker_init(ticker_t *ticker, int32_t nticks)
{
ticker->tick = nticks;
ticker->nticks = nticks;
}
JEMALLOC_INLINE void
ticker_copy(ticker_t *ticker, const ticker_t *other)
{
*ticker = *other;
}
JEMALLOC_INLINE int32_t
ticker_read(const ticker_t *ticker)
{
return (ticker->tick);
}
JEMALLOC_INLINE bool
ticker_ticks(ticker_t *ticker, int32_t nticks)
{
if (unlikely(ticker->tick < nticks)) {
ticker->tick = ticker->nticks;
return (true);
}
ticker->tick -= nticks;
return (false);
}
JEMALLOC_INLINE bool
ticker_tick(ticker_t *ticker)
{
return (ticker_ticks(ticker, 1));
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
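Sketch of the usage pattern (tcache drives its incremental GC this way); do_periodic_work() is a hypothetical stand-in:

static ticker_t ticker;

static void
init(void)
{
    ticker_init(&ticker, 1000); /* fire once per 1000 ticks */
}

static void
on_event(void)
{
    /* ticker_tick() returns true every 1000th call, then resets itself. */
    if (ticker_tick(&ticker))
        do_periodic_work(); /* hypothetical */
}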
@@ -2,13 +2,31 @@
#ifdef JEMALLOC_H_TYPES

/* Maximum number of malloc_tsd users with cleanup functions. */
#define MALLOC_TSD_CLEANUPS_MAX 2

typedef bool (*malloc_tsd_cleanup_t)(void);

#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
    !defined(_WIN32))
typedef struct tsd_init_block_s tsd_init_block_t;
typedef struct tsd_init_head_s tsd_init_head_t;
#endif

typedef struct tsd_s tsd_t;
typedef struct tsdn_s tsdn_t;

#define TSDN_NULL ((tsdn_t *)0)

typedef enum {
    tsd_state_uninitialized,
    tsd_state_nominal,
    tsd_state_purgatory,
    tsd_state_reincarnated
} tsd_state_t;

/*
 * TLS/TSD-agnostic macro-based implementation of thread-specific data.  There
 * are five macros that support (at least) three use cases: file-private,
 * library-private, and library-private inlined.  Following is an example
 * library-private tsd variable:
 *
@@ -18,34 +36,37 @@ typedef bool (*malloc_tsd_cleanup_t)(void);
 *   int y;
 * } example_t;
 * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
 * malloc_tsd_types(example_, example_t)
 * malloc_tsd_protos(, example_, example_t)
 * malloc_tsd_externs(example_, example_t)
 * In example.c:
 * malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
 * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
 *     example_tsd_cleanup)
 *
 * The result is a set of generated functions, e.g.:
 *
 *   bool example_tsd_boot(void) {...}
 *   bool example_tsd_booted_get(void) {...}
 *   example_t *example_tsd_get(bool init) {...}
 *   void example_tsd_set(example_t *val) {...}
 *
 * Note that all of the functions deal in terms of (a_type *) rather than
 * (a_type) so that it is possible to support non-pointer types (unlike
 * pthreads TSD).  example_tsd_cleanup() is passed an (a_type *) pointer that is
 * cast to (void *).  This means that the cleanup function needs to cast the
 * function argument to (a_type *), then dereference the resulting pointer to
 * access fields, e.g.
 *
 *   void
 *   example_tsd_cleanup(void *arg)
 *   {
 *       example_t *example = (example_t *)arg;
 *
 *       example->x = 42;
 *       [...]
 *       if ([want the cleanup function to be called again])
 *           example_tsd_set(example);
 *   }
 *
 * If example_tsd_set() is called within example_tsd_cleanup(), it will be
@@ -54,58 +75,98 @@ typedef bool (*malloc_tsd_cleanup_t)(void);
 * non-NULL.
 */
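A sketch (not from the source) of driving the generated accessors for the example_ declarations above; in real use the boot call happens once during library startup, and error handling is elided here:

static bool
example_boot(void)
{
    /* Returns true on error; registers cleanup / creates the pthread key. */
    return (example_tsd_boot());
}

static void
example_bump_x(void)
{
    example_t *ex = example_tsd_get(true); /* init = true: set up on first use */

    ex->x++;
    example_tsd_set(ex);
}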
/* malloc_tsd_types(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_types(a_name, a_type)
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_types(a_name, a_type)
#elif (defined(_WIN32))
#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
bool initialized; \
a_type val; \
} a_name##tsd_wrapper_t;
#else
#define malloc_tsd_types(a_name, a_type) \
typedef struct { \
bool initialized; \
a_type val; \
} a_name##tsd_wrapper_t;
#endif
/* malloc_tsd_protos(). */ /* malloc_tsd_protos(). */
#define malloc_tsd_protos(a_attr, a_name, a_type) \ #define malloc_tsd_protos(a_attr, a_name, a_type) \
a_attr bool \ a_attr bool \
a_name##_tsd_boot(void); \ a_name##tsd_boot0(void); \
a_attr void \
a_name##tsd_boot1(void); \
a_attr bool \
a_name##tsd_boot(void); \
a_attr bool \
a_name##tsd_booted_get(void); \
a_attr a_type * \ a_attr a_type * \
a_name##_tsd_get(void); \ a_name##tsd_get(bool init); \
a_attr void \ a_attr void \
a_name##_tsd_set(a_type *val); a_name##tsd_set(a_type *val);
/* malloc_tsd_externs(). */ /* malloc_tsd_externs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_externs(a_name, a_type) \ #define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##_tls; \ extern __thread a_type a_name##tsd_tls; \
extern __thread bool a_name##_initialized; \ extern __thread bool a_name##tsd_initialized; \
extern bool a_name##_booted; extern bool a_name##tsd_booted;
#elif (defined(JEMALLOC_TLS)) #elif (defined(JEMALLOC_TLS))
#define malloc_tsd_externs(a_name, a_type) \ #define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##_tls; \ extern __thread a_type a_name##tsd_tls; \
extern pthread_key_t a_name##_tsd; \ extern pthread_key_t a_name##tsd_tsd; \
extern bool a_name##_booted; extern bool a_name##tsd_booted;
#elif (defined(_WIN32)) #elif (defined(_WIN32))
#define malloc_tsd_externs(a_name, a_type) \ #define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##_tsd; \ extern DWORD a_name##tsd_tsd; \
extern bool a_name##_booted; extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#else #else
#define malloc_tsd_externs(a_name, a_type) \ #define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##_tsd; \ extern pthread_key_t a_name##tsd_tsd; \
extern bool a_name##_booted; extern tsd_init_head_t a_name##tsd_init_head; \
extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
extern bool a_name##tsd_booted;
#endif #endif
/* malloc_tsd_data(). */ /* malloc_tsd_data(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \ a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##_tls = a_initializer; \ a_name##tsd_tls = a_initializer; \
a_attr __thread bool JEMALLOC_TLS_MODEL \ a_attr __thread bool JEMALLOC_TLS_MODEL \
a_name##_initialized = false; \ a_name##tsd_initialized = false; \
a_attr bool a_name##_booted = false; a_attr bool a_name##tsd_booted = false;
#elif (defined(JEMALLOC_TLS)) #elif (defined(JEMALLOC_TLS))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \ a_attr __thread a_type JEMALLOC_TLS_MODEL \
a_name##_tls = a_initializer; \ a_name##tsd_tls = a_initializer; \
a_attr pthread_key_t a_name##_tsd; \ a_attr pthread_key_t a_name##tsd_tsd; \
a_attr bool a_name##_booted = false; a_attr bool a_name##tsd_booted = false;
#elif (defined(_WIN32)) #elif (defined(_WIN32))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr DWORD a_name##_tsd; \ a_attr DWORD a_name##tsd_tsd; \
a_attr bool a_name##_booted = false; a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
false, \
a_initializer \
}; \
a_attr bool a_name##tsd_booted = false;
#else #else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##_tsd; \ a_attr pthread_key_t a_name##tsd_tsd; \
a_attr bool a_name##_booted = false; a_attr tsd_init_head_t a_name##tsd_init_head = { \
ql_head_initializer(blocks), \
MALLOC_MUTEX_INITIALIZER \
}; \
a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
false, \
a_initializer \
}; \
a_attr bool a_name##tsd_booted = false;
#endif #endif
/* malloc_tsd_funcs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
\
	if (a_name##tsd_initialized) { \
		a_name##tsd_initialized = false; \
		a_cleanup(&a_name##tsd_tls); \
	} \
	return (a_name##tsd_initialized); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		malloc_tsd_cleanup_register( \
		    &a_name##tsd_cleanup_wrapper); \
	} \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
\
	/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
	return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
\
	assert(a_name##tsd_booted); \
	return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
\
	assert(a_name##tsd_booted); \
	a_name##tsd_tls = (*val); \
	if (a_cleanup != malloc_tsd_no_cleanup) \
		a_name##tsd_initialized = true; \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \
		    0) \
			return (true); \
	} \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
\
	/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
	return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
\
	assert(a_name##tsd_booted); \
	return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
\
	assert(a_name##tsd_booted); \
	a_name##tsd_tls = (*val); \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		if (pthread_setspecific(a_name##tsd_tsd, \
		    (void *)(&a_name##tsd_tls))) { \
			malloc_write("<jemalloc>: Error" \
			    " setting TSD for "#a_name"\n"); \
			if (opt_abort) \
				abort(); \
		} \
	} \
}
#elif (defined(_WIN32))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
	DWORD error = GetLastError(); \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
	    TlsGetValue(a_name##tsd_tsd); \
	SetLastError(error); \
\
	if (wrapper == NULL) \
		return (false); \
	if (a_cleanup != malloc_tsd_no_cleanup && \
	    wrapper->initialized) { \
		wrapper->initialized = false; \
		a_cleanup(&wrapper->val); \
		if (wrapper->initialized) { \
			/* Trigger another cleanup round. */ \
			return (true); \
		} \
	} \
	malloc_tsd_dalloc(wrapper); \
	return (false); \
} \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
	if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
		malloc_write("<jemalloc>: Error setting" \
		    " TSD for "#a_name"\n"); \
		abort(); \
	} \
} \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(bool init) \
{ \
	DWORD error = GetLastError(); \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
	    TlsGetValue(a_name##tsd_tsd); \
	SetLastError(error); \
\
	if (init && unlikely(wrapper == NULL)) { \
		wrapper = (a_name##tsd_wrapper_t *) \
		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
		if (wrapper == NULL) { \
			malloc_write("<jemalloc>: Error allocating" \
			    " TSD for "#a_name"\n"); \
			abort(); \
		} else { \
			wrapper->initialized = false; \
			wrapper->val = a_initializer; \
		} \
		a_name##tsd_wrapper_set(wrapper); \
	} \
	return (wrapper); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
	a_name##tsd_tsd = TlsAlloc(); \
	if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
		return (true); \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		malloc_tsd_cleanup_register( \
		    &a_name##tsd_cleanup_wrapper); \
	} \
	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
	wrapper = (a_name##tsd_wrapper_t *) \
	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
	if (wrapper == NULL) { \
		malloc_write("<jemalloc>: Error allocating" \
		    " TSD for "#a_name"\n"); \
		abort(); \
	} \
	memcpy(wrapper, &a_name##tsd_boot_wrapper, \
	    sizeof(a_name##tsd_wrapper_t)); \
	a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	if (a_name##tsd_boot0()) \
		return (true); \
	a_name##tsd_boot1(); \
	return (false); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
	return (true); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
\
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(init); \
	if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
		return (NULL); \
	return (&wrapper->val); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
\
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(true); \
	wrapper->val = *(val); \
	if (a_cleanup != malloc_tsd_no_cleanup) \
		wrapper->initialized = true; \
}
#else
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr void \
a_name##tsd_cleanup_wrapper(void *arg) \
{ \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
\
	if (a_cleanup != malloc_tsd_no_cleanup && \
	    wrapper->initialized) { \
		wrapper->initialized = false; \
		a_cleanup(&wrapper->val); \
		if (wrapper->initialized) { \
			/* Trigger another cleanup round. */ \
			if (pthread_setspecific(a_name##tsd_tsd, \
			    (void *)wrapper)) { \
				malloc_write("<jemalloc>: Error" \
				    " setting TSD for "#a_name"\n"); \
				if (opt_abort) \
					abort(); \
			} \
			return; \
		} \
	} \
	malloc_tsd_dalloc(wrapper); \
} \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
	if (pthread_setspecific(a_name##tsd_tsd, \
	    (void *)wrapper)) { \
		malloc_write("<jemalloc>: Error setting" \
		    " TSD for "#a_name"\n"); \
		abort(); \
	} \
} \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(bool init) \
{ \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
	    pthread_getspecific(a_name##tsd_tsd); \
\
	if (init && unlikely(wrapper == NULL)) { \
		tsd_init_block_t block; \
		wrapper = tsd_init_check_recursion( \
		    &a_name##tsd_init_head, &block); \
		if (wrapper) \
			return (wrapper); \
		wrapper = (a_name##tsd_wrapper_t *) \
		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
		block.data = wrapper; \
		if (wrapper == NULL) { \
			malloc_write("<jemalloc>: Error allocating" \
			    " TSD for "#a_name"\n"); \
			abort(); \
		} else { \
			wrapper->initialized = false; \
			wrapper->val = a_initializer; \
		} \
		a_name##tsd_wrapper_set(wrapper); \
		tsd_init_finish(&a_name##tsd_init_head, &block); \
	} \
	return (wrapper); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
\
	if (pthread_key_create(&a_name##tsd_tsd, \
	    a_name##tsd_cleanup_wrapper) != 0) \
		return (true); \
	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
	wrapper = (a_name##tsd_wrapper_t *) \
	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
	if (wrapper == NULL) { \
		malloc_write("<jemalloc>: Error allocating" \
		    " TSD for "#a_name"\n"); \
		abort(); \
	} \
	memcpy(wrapper, &a_name##tsd_boot_wrapper, \
	    sizeof(a_name##tsd_wrapper_t)); \
	a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
\
	if (a_name##tsd_boot0()) \
		return (true); \
	a_name##tsd_boot1(); \
	return (false); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
\
	return (true); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
\
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(init); \
	if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
		return (NULL); \
	return (&wrapper->val); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
	a_name##tsd_wrapper_t *wrapper; \
\
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(true); \
	wrapper->val = *(val); \
	if (a_cleanup != malloc_tsd_no_cleanup) \
		wrapper->initialized = true; \
}
#endif
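/*
 * Usage sketch (editorial addition, not part of the upstream header): the
 * macro family above stamps out a typed TSD API.  Instantiating it for a
 * hypothetical example_t generates example_tsd_boot(), example_tsd_get(), and
 * example_tsd_set(); example_t, EX_INITIALIZER, and example_bump() are
 * invented for this illustration.
 */
#if 0
typedef struct {
	uint64_t count;
} example_t;
#define EX_INITIALIZER {0}
/* In a header: */
malloc_tsd_types(example_, example_t)
malloc_tsd_protos(, example_, example_t)
malloc_tsd_externs(example_, example_t)
/* In exactly one .c file: */
malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
    malloc_tsd_no_cleanup)

static void
example_bump(void)
{
	example_t *ex = example_tsd_get(true);	/* Generated accessor. */

	ex->count++;
	example_tsd_set(ex);
}
#endif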
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
struct tsd_init_block_s {
ql_elm(tsd_init_block_t) link;
pthread_t thread;
void *data;
};
struct tsd_init_head_s {
ql_head(tsd_init_block_t) blocks;
malloc_mutex_t lock;
};
#endif
#define MALLOC_TSD \
/* O(name, type) */ \
O(tcache, tcache_t *) \
O(thread_allocated, uint64_t) \
O(thread_deallocated, uint64_t) \
O(prof_tdata, prof_tdata_t *) \
O(iarena, arena_t *) \
O(arena, arena_t *) \
O(arenas_tdata, arena_tdata_t *) \
O(narenas_tdata, unsigned) \
O(arenas_tdata_bypass, bool) \
O(tcache_enabled, tcache_enabled_t) \
O(quarantine, quarantine_t *) \
O(witnesses, witness_list_t) \
O(witness_fork, bool) \
#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
NULL, \
0, \
0, \
NULL, \
NULL, \
NULL, \
NULL, \
0, \
false, \
tcache_enabled_default, \
NULL, \
ql_head_initializer(witnesses), \
false \
}
struct tsd_s {
tsd_state_t state;
#define O(n, t) \
t n;
MALLOC_TSD
#undef O
};
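/*
 * Editorial note: MALLOC_TSD is an X-macro; with O(n, t) defined as "t n;",
 * the expansion above is equivalent to spelling the fields out by hand,
 * abbreviated here to the first few entries:
 *
 *	struct tsd_s {
 *		tsd_state_t	state;
 *		tcache_t	*tcache;
 *		uint64_t	thread_allocated;
 *		uint64_t	thread_deallocated;
 *		...
 *	};
 */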
/*
* Wrapper around tsd_t that makes it possible to avoid implicit conversion
* between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
* explicitly converted to tsd_t, which is non-nullable.
*/
struct tsdn_s {
tsd_t tsd;
};
static const tsd_t tsd_initializer = TSD_INITIALIZER;
malloc_tsd_types(, tsd_t)
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *arg);
void malloc_tsd_cleanup_register(bool (*f)(void));
tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
void *tsd_init_check_recursion(tsd_init_head_t *head,
tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
#endif
void tsd_cleanup(void *arg);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
tsd_t *tsd_fetch_impl(bool init);
tsd_t *tsd_fetch(void);
tsdn_t *tsd_tsdn(tsd_t *tsd);
bool tsd_nominal(tsd_t *tsd);
#define O(n, t) \
t *tsd_##n##p_get(tsd_t *tsd); \
t tsd_##n##_get(tsd_t *tsd); \
void tsd_##n##_set(tsd_t *tsd, t n);
MALLOC_TSD
#undef O
tsdn_t *tsdn_fetch(void);
bool tsdn_null(const tsdn_t *tsdn);
tsd_t *tsdn_tsd(tsdn_t *tsdn);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
malloc_tsd_externs(, tsd_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_impl(bool init)
{
tsd_t *tsd = tsd_get(init);
if (!init && tsd_get_allocates() && tsd == NULL)
return (NULL);
assert(tsd != NULL);
if (unlikely(tsd->state != tsd_state_nominal)) {
if (tsd->state == tsd_state_uninitialized) {
tsd->state = tsd_state_nominal;
/* Trigger cleanup handler registration. */
tsd_set(tsd);
} else if (tsd->state == tsd_state_purgatory) {
tsd->state = tsd_state_reincarnated;
tsd_set(tsd);
} else
assert(tsd->state == tsd_state_reincarnated);
}
return (tsd);
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void)
{
return (tsd_fetch_impl(true));
}
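/*
 * Usage sketch (editorial addition): tsd_fetch() returns the calling thread's
 * tsd_t, and the O()-generated accessors read and write individual fields;
 * example_account() is invented for this illustration.
 */
#if 0
static void
example_account(uint64_t size)
{
	tsd_t *tsd = tsd_fetch();

	tsd_thread_allocated_set(tsd, tsd_thread_allocated_get(tsd) + size);
}
#endif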
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd)
{
return ((tsdn_t *)tsd);
}
JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd)
{
return (tsd->state == tsd_state_nominal);
}
#define O(n, t) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) \
{ \
\
return (&tsd->n); \
} \
\
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) \
{ \
\
return (*tsd_##n##p_get(tsd)); \
} \
\
JEMALLOC_ALWAYS_INLINE void \
tsd_##n##_set(tsd_t *tsd, t n) \
{ \
\
assert(tsd->state == tsd_state_nominal); \
tsd->n = n; \
}
MALLOC_TSD
#undef O
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void)
{
if (!tsd_booted_get())
return (NULL);
return (tsd_tsdn(tsd_fetch_impl(false)));
}
JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn)
{
return (tsdn == NULL);
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn)
{
assert(!tsdn_null(tsdn));
return (&tsdn->tsd);
}
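/*
 * Usage sketch (editorial addition): code that may run before TSD is booted
 * traffics in tsdn_t and checks tsdn_null() before converting with
 * tsdn_tsd(); example_nominal() is invented for this illustration.
 */
#if 0
static bool
example_nominal(void)
{
	tsdn_t *tsdn = tsdn_fetch();	/* May be NULL pre-boot. */

	if (tsdn_null(tsdn))
		return (false);
	return (tsd_nominal(tsdn_tsd(tsdn)));
}
#endif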
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifdef _WIN32
# ifdef _WIN64
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX "ll"
# else
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX ""
# endif
# define FMTd32 "d"
# define FMTu32 "u"
# define FMTx32 "x"
# define FMTd64 FMT64_PREFIX "d"
# define FMTu64 FMT64_PREFIX "u"
# define FMTx64 FMT64_PREFIX "x"
# define FMTdPTR FMTPTR_PREFIX "d"
# define FMTuPTR FMTPTR_PREFIX "u"
# define FMTxPTR FMTPTR_PREFIX "x"
#else
# include <inttypes.h>
# define FMTd32 PRId32
# define FMTu32 PRIu32
# define FMTx32 PRIx32
# define FMTd64 PRId64
# define FMTu64 PRIu64
# define FMTx64 PRIx64
# define FMTdPTR PRIdPTR
# define FMTuPTR PRIuPTR
# define FMTxPTR PRIxPTR
#endif
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64

/*
 * Size of stack-allocated buffer used by malloc_{,v,vc}printf().  This must be
 * large enough for all possible uses within jemalloc.
 */
#define MALLOC_PRINTF_BUFSIZE 4096
/* Junk fill patterns. */
#ifndef JEMALLOC_ALLOC_JUNK
# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
#endif
#ifndef JEMALLOC_FREE_JUNK
# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
#endif
/*
 * Wrap a cpp argument that contains commas such that it isn't broken up into
 * multiple arguments.
 */
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
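/*
 * Example (editorial addition): a brace initializer such as {0, 0} contains a
 * comma, so passing it directly as a macro argument would split it in two;
 * JEMALLOC_ARG_CONCAT({0, 0}) delivers it as one argument, e.g. as the
 * a_initializer argument of malloc_tsd_data().
 */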
/*
 * Silence compiler warnings due to uninitialized values.  This is used
 * wherever the compiler fails to recognize that the variable is never used
 * uninitialized.
 */
#ifdef JEMALLOC_CC_SILENCE
#	define JEMALLOC_CC_SILENCE_INIT(v) = v
#else
#	define JEMALLOC_CC_SILENCE_INIT(v)
#endif
#ifdef __GNUC__
#	define likely(x)   __builtin_expect(!!(x), 1)
#	define unlikely(x) __builtin_expect(!!(x), 0)
#else
#	define likely(x)   !!(x)
#	define unlikely(x) !!(x)
#endif
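/*
 * Usage sketch (editorial addition): likely()/unlikely() annotate branches
 * with a strongly biased outcome so the compiler can lay the hot path out as
 * the fall-through; example_check() is invented for this illustration.
 */
#if 0
static int
example_check(const void *ptr)
{

	if (unlikely(ptr == NULL))
		return (1);	/* Cold error path. */
	return (0);		/* Hot path. */
}
#endif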
#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
#  error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
#endif
#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()

#include "jemalloc/internal/assert.h"

/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
	if (unlikely(!(c))) \
		not_reached(); \
} while (0)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr,
    char **restrict endptr, int base);
void malloc_write(const char *s);

/*
 * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
 * point math.
 */
size_t malloc_vsnprintf(char *str, size_t size, const char *format,
    va_list ap);
size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
    JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap);
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
    const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
unsigned ffs_llu(unsigned long long bitmap);
unsigned ffs_lu(unsigned long bitmap);
unsigned ffs_u(unsigned bitmap);
unsigned ffs_zu(size_t bitmap);
unsigned ffs_u64(uint64_t bitmap);
unsigned ffs_u32(uint32_t bitmap);
uint64_t pow2_ceil_u64(uint64_t x);
uint32_t pow2_ceil_u32(uint32_t x);
size_t pow2_ceil_zu(size_t x);
unsigned lg_floor(size_t x);
void set_errno(int errnum);
int get_errno(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))

/* Sanity check. */
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
    || !defined(JEMALLOC_INTERNAL_FFS)
# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
#endif
JEMALLOC_ALWAYS_INLINE unsigned
ffs_llu(unsigned long long bitmap)
{
return (JEMALLOC_INTERNAL_FFSLL(bitmap));
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_lu(unsigned long bitmap)
{
return (JEMALLOC_INTERNAL_FFSL(bitmap));
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u(unsigned bitmap)
{
return (JEMALLOC_INTERNAL_FFS(bitmap));
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_zu(size_t bitmap)
{
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
return (ffs_u(bitmap));
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
return (ffs_lu(bitmap));
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
return (ffs_llu(bitmap));
#else
#error No implementation for size_t ffs()
#endif
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u64(uint64_t bitmap)
{
#if LG_SIZEOF_LONG == 3
return (ffs_lu(bitmap));
#elif LG_SIZEOF_LONG_LONG == 3
return (ffs_llu(bitmap));
#else
#error No implementation for 64-bit ffs()
#endif
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u32(uint32_t bitmap)
{
#if LG_SIZEOF_INT == 2
return (ffs_u(bitmap));
#else
#error No implementation for 32-bit ffs()
#endif
return (ffs_u(bitmap));
}
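/*
 * Editorial note: these wrappers follow ffs(3) semantics, returning the
 * 1-based index of the least significant set bit, or 0 when no bit is set;
 * e.g. ffs_u32(0x0) == 0, ffs_u32(0x1) == 1, and ffs_u32(0x8) == 4.
 */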
JEMALLOC_INLINE uint64_t
pow2_ceil_u64(uint64_t x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;
	x++;
	return (x);
}

JEMALLOC_INLINE uint32_t
pow2_ceil_u32(uint32_t x)
{
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
x++;
return (x);
}
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil_zu(size_t x)
{
#if (LG_SIZEOF_PTR == 3)
return (pow2_ceil_u64(x));
#else
return (pow2_ceil_u32(x));
#endif
}
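/*
 * Worked example (editorial addition): pow2_ceil_u32(37).  After x--, x is 36
 * (100100b); the shift/or cascade smears the highest set bit into every lower
 * position, giving 63 (111111b); x++ then yields 64, the smallest power of 2
 * >= 37.  Exact powers of 2 map to themselves thanks to the initial
 * decrement.
 */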
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
size_t ret;
assert(x != 0);
asm ("bsr %1, %0"
: "=r"(ret) // Outputs.
: "r"(x) // Inputs.
);
assert(ret < UINT_MAX);
return ((unsigned)ret);
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
unsigned long ret;
assert(x != 0);
#if (LG_SIZEOF_PTR == 3)
_BitScanReverse64(&ret, x);
#elif (LG_SIZEOF_PTR == 2)
_BitScanReverse(&ret, x);
#else
# error "Unsupported type size for lg_floor()"
#endif
assert(ret < UINT_MAX);
return ((unsigned)ret);
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
assert(x != 0);
#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x));
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
#else
# error "Unsupported type size for lg_floor()"
#endif
}
#else
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
assert(x != 0);
x |= (x >> 1);
x |= (x >> 2);
x |= (x >> 4);
x |= (x >> 8);
x |= (x >> 16);
#if (LG_SIZEOF_PTR == 3)
x |= (x >> 32);
#endif
if (x == SIZE_T_MAX)
return ((8 << LG_SIZEOF_PTR) - 1);
x++;
return (ffs_zu(x) - 2);
}
#endif
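/*
 * Editorial note: every lg_floor() variant returns the index of the most
 * significant set bit, e.g. lg_floor(1) == 0, lg_floor(1000) == 9, and
 * lg_floor(1024) == 10.  In the portable fallback, ffs_zu(x) - 2 undoes both
 * ffs()'s 1-based indexing and the final x++ having advanced the bit one
 * position past the target.
 */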
/* Set error code. */
JEMALLOC_INLINE void
set_errno(int errnum)
{

#ifdef _WIN32
	SetLastError(errnum);
#else
	errno = errnum;
#endif
}

/* Get last error code. */
JEMALLOC_INLINE int
get_errno(void)
{

#ifdef _WIN32
	return (GetLastError());
#else
	return (errno);
#endif
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
/*
* The size that is reported to Valgrind must be consistent through a chain of
* malloc..realloc..realloc calls. Request size isn't recorded anywhere in
* jemalloc, so it is critical that all callers of these macros provide usize
* rather than request size. As a result, buffer overflow detection is
* technically weakened for the standard API, though it is generally accepted
* practice to consider any extra bytes reported by malloc_usable_size() as
* usable space.
*/
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \
if (unlikely(in_valgrind)) \
valgrind_make_mem_noaccess(ptr, usize); \
} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \
if (unlikely(in_valgrind)) \
valgrind_make_mem_undefined(ptr, usize); \
} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \
if (unlikely(in_valgrind)) \
valgrind_make_mem_defined(ptr, usize); \
} while (0)
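/*
 * Illustrative call sequence (editorial addition, not from the upstream
 * sources): an allocation path would typically mark freshly returned memory
 * undefined, and a deallocation path would mark it inaccessible:
 *
 *	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
 *	...
 *	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize);
 */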
/*
* The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
* calls must be embedded in macros rather than in functions so that when
* Valgrind reports errors, there are no extra stack frames in the backtraces.
*/
#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \
if (unlikely(in_valgrind && cond)) { \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \
zero); \
} \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \
(false)
#define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \
((ptr) != (old_ptr))
#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \
(false)
#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \
(ptr == NULL)
#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \
(false)
#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \
(old_ptr == NULL)
#define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \
old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \
if (unlikely(in_valgrind)) { \
size_t rzsize = p2rz(tsdn, ptr); \
\
if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \
old_ptr)) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
usize, rzsize); \
if (zero && old_usize < usize) { \
valgrind_make_mem_defined( \
(void *)((uintptr_t)ptr + \
old_usize), usize - old_usize); \
} \
} else { \
if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \
old_ptr_null(old_ptr)) { \
valgrind_freelike_block(old_ptr, \
old_rzsize); \
} \
if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \
ptr_null(ptr)) { \
size_t copy_size = (old_usize < usize) \
? old_usize : usize; \
size_t tail_size = usize - copy_size; \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
rzsize, false); \
if (copy_size > 0) { \
valgrind_make_mem_defined(ptr, \
copy_size); \
} \
if (zero && tail_size > 0) { \
valgrind_make_mem_defined( \
(void *)((uintptr_t)ptr + \
copy_size), tail_size); \
} \
} \
} \
} \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
if (unlikely(in_valgrind)) \
valgrind_freelike_block(ptr, rzsize); \
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_VALGRIND
void valgrind_make_mem_noaccess(void *ptr, size_t usize);
void valgrind_make_mem_undefined(void *ptr, size_t usize);
void valgrind_make_mem_defined(void *ptr, size_t usize);
void valgrind_freelike_block(void *ptr, size_t usize);
#endif
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct witness_s witness_t;
typedef unsigned witness_rank_t;
typedef ql_head(witness_t) witness_list_t;
typedef int witness_comp_t (const witness_t *, const witness_t *);
/*
* Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
* the witness machinery.
*/
#define WITNESS_RANK_OMIT 0U
#define WITNESS_RANK_INIT 1U
#define WITNESS_RANK_CTL 1U
#define WITNESS_RANK_ARENAS 2U
#define WITNESS_RANK_PROF_DUMP 3U
#define WITNESS_RANK_PROF_BT2GCTX 4U
#define WITNESS_RANK_PROF_TDATAS 5U
#define WITNESS_RANK_PROF_TDATA 6U
#define WITNESS_RANK_PROF_GCTX 7U
#define WITNESS_RANK_ARENA 8U
#define WITNESS_RANK_ARENA_CHUNKS 9U
#define WITNESS_RANK_ARENA_NODE_CACHE 10U
#define WITNESS_RANK_BASE 11U
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF
#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}}
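/*
 * Usage sketch (editorial addition): each mutex carries a witness initialized
 * with its rank, and acquisitions must proceed in increasing rank order; the
 * function and witness names below are invented for this illustration.
 */
#if 0
static void
example_lock_order(tsdn_t *tsdn, witness_t *w_arena, witness_t *w_base)
{

	/* Assume witness_init() ran with WITNESS_RANK_ARENA (8U) and */
	/* WITNESS_RANK_BASE (11U) respectively. */
	witness_lock(tsdn, w_arena);
	witness_lock(tsdn, w_base);	/* OK: 11U > 8U. */
	witness_unlock(tsdn, w_base);
	witness_unlock(tsdn, w_arena);	/* Release in reverse order. */
}
#endif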
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct witness_s {
/* Name, used for printing lock order reversal messages. */
const char *name;
/*
* Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
* must be acquired in order of increasing rank.
*/
witness_rank_t rank;
/*
* If two witnesses are of equal rank and they have the same comp
* function pointer, it is called as a last attempt to differentiate
* between witnesses of equal rank.
*/
witness_comp_t *comp;
/* Linkage for thread's currently owned locks. */
ql_elm(witness_t) link;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
witness_comp_t *comp);
#ifdef JEMALLOC_JET
typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
extern witness_lock_error_t *witness_lock_error;
#else
void witness_lock_error(const witness_list_t *witnesses,
const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
typedef void (witness_owner_error_t)(const witness_t *);
extern witness_owner_error_t *witness_owner_error;
#else
void witness_owner_error(const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
typedef void (witness_not_owner_error_t)(const witness_t *);
extern witness_not_owner_error_t *witness_not_owner_error;
#else
void witness_not_owner_error(const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
typedef void (witness_lockless_error_t)(const witness_list_t *);
extern witness_lockless_error_t *witness_lockless_error;
#else
void witness_lockless_error(const witness_list_t *witnesses);
#endif
void witnesses_cleanup(tsd_t *tsd);
void witness_fork_cleanup(tsd_t *tsd);
void witness_prefork(tsd_t *tsd);
void witness_postfork_parent(tsd_t *tsd);
void witness_postfork_child(tsd_t *tsd);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool witness_owner(tsd_t *tsd, const witness_t *witness);
void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
void witness_assert_lockless(tsdn_t *tsdn);
void witness_lock(tsdn_t *tsdn, witness_t *witness);
void witness_unlock(tsdn_t *tsdn, witness_t *witness);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE bool
witness_owner(tsd_t *tsd, const witness_t *witness)
{
witness_list_t *witnesses;
witness_t *w;
witnesses = tsd_witnessesp_get(tsd);
ql_foreach(w, witnesses, link) {
if (w == witness)
return (true);
}
return (false);
}
JEMALLOC_INLINE void
witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
{
tsd_t *tsd;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
if (witness_owner(tsd, witness))
return;
witness_owner_error(witness);
}
JEMALLOC_INLINE void
witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
{
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
witnesses = tsd_witnessesp_get(tsd);
ql_foreach(w, witnesses, link) {
if (w == witness)
witness_not_owner_error(witness);
}
}
JEMALLOC_INLINE void
witness_assert_lockless(tsdn_t *tsdn)
{
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
if (w != NULL)
witness_lockless_error(witnesses);
}
JEMALLOC_INLINE void
witness_lock(tsdn_t *tsdn, witness_t *witness)
{
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
witness_assert_not_owner(tsdn, witness);
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
if (w == NULL) {
/* No other locks; do nothing. */
} else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) {
/* Forking, and relaxed ranking satisfied. */
} else if (w->rank > witness->rank) {
/* Not forking, rank order reversal. */
witness_lock_error(witnesses, witness);
} else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
witness->comp || w->comp(w, witness) > 0)) {
/*
* Missing/incompatible comparison function, or comparison
* function indicates rank order reversal.
*/
witness_lock_error(witnesses, witness);
}
ql_elm_new(witness, link);
ql_tail_insert(witnesses, witness, link);
}
JEMALLOC_INLINE void
witness_unlock(tsdn_t *tsdn, witness_t *witness)
{
tsd_t *tsd;
witness_list_t *witnesses;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
/*
* Check whether owner before removal, rather than relying on
* witness_assert_owner() to abort, so that unit tests can test this
* function's failure mode without causing undefined behavior.
*/
if (witness_owner(tsd, witness)) {
witnesses = tsd_witnessesp_get(tsd);
ql_remove(witnesses, witness, link);
} else
witness_assert_owner(tsdn, witness);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <limits.h>
#include <strings.h>
#define JEMALLOC_VERSION "@jemalloc_version@"
#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
#include "jemalloc_defs@install_suffix@.h"
#ifdef JEMALLOC_EXPERIMENTAL
#define ALLOCM_LG_ALIGN(la) (la)
#if LG_SIZEOF_PTR == 2
#define ALLOCM_ALIGN(a) (ffs(a)-1)
#else
#define ALLOCM_ALIGN(a) ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
#endif
#define ALLOCM_ZERO ((int)0x40)
#define ALLOCM_NO_MOVE ((int)0x80)
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
#define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
#define ALLOCM_SUCCESS 0
#define ALLOCM_ERR_OOM 1
#define ALLOCM_ERR_NOT_MOVED 2
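/*
 * Worked example (editorial addition): requesting 16-byte alignment, zeroed
 * memory, and arena 3 composes as
 *
 *	flags = ALLOCM_LG_ALIGN(4) | ALLOCM_ZERO | ALLOCM_ARENA(3)
 *	      = 0x4 | 0x40 | ((3+1) << 8) = 0x444
 *
 * and ALLOCM_ALIGN(4096) == ffs(4096)-1 == 12 recovers the lg of a given
 * alignment.
 */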
#endif
/*
* The je_ prefix on the following public symbol declarations is an artifact of
* namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see below).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
JEMALLOC_EXPORT size_t je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size,
int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size,
size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags)
JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
#endif
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
#ifndef JEMALLOC_NO_DEMANGLE
#define JEMALLOC_NO_DEMANGLE
#endif
#define malloc_conf je_malloc_conf
#define malloc_message je_malloc_message
#define malloc je_malloc
#define calloc je_calloc
#define posix_memalign je_posix_memalign
#define aligned_alloc je_aligned_alloc
#define realloc je_realloc
#define free je_free
#define malloc_usable_size je_malloc_usable_size
#define malloc_stats_print je_malloc_stats_print
#define mallctl je_mallctl
#define mallctlnametomib je_mallctlnametomib
#define mallctlbymib je_mallctlbymib
#define memalign je_memalign
#define valloc je_valloc
#ifdef JEMALLOC_EXPERIMENTAL
#define allocm je_allocm
#define rallocm je_rallocm
#define sallocm je_sallocm
#define dallocm je_dallocm
#define nallocm je_nallocm
#endif
#endif
/*
* The je_* macros can be used as stable alternative names for the public
* jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily meant
* for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_malloc_usable_size
#undef je_malloc_stats_print
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_memalign
#undef je_valloc
#ifdef JEMALLOC_EXPERIMENTAL
#undef je_allocm
#undef je_rallocm
#undef je_sallocm
#undef je_dallocm
#undef je_nallocm
#endif
#endif
#ifdef __cplusplus
};
#endif
#endif /* JEMALLOC_H_ */
#!/bin/sh
objroot=$1
cat <<EOF
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
EOF
for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do
cat "${objroot}include/jemalloc/${hdr}" \
| grep -v 'Generated from .* by configure\.' \
| sed -e 's/^#define /#define	/g' \
| sed -e 's/ $//g'
echo
done
cat <<EOF
#ifdef __cplusplus
}
#endif
#endif /* JEMALLOC_H_ */
EOF
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
#undef JEMALLOC_PREFIX
#undef JEMALLOC_CPREFIX
/*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_malloc_usable_size
#undef je_malloc_stats_print
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_memalign
#undef je_valloc
#undef je_allocm
#undef je_rallocm
#undef je_sallocm
#undef je_dallocm
#undef je_nallocm
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#undef JEMALLOC_PRIVATE_NAMESPACE
#undef JEMALLOC_N
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#undef CPU_SPINWAIT
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
#undef JEMALLOC_ATOMIC9
/*
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
* documented in the atomic(3) manual page.
*/
#undef JEMALLOC_OSATOMIC
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
#undef JEMALLOC_OSSPIN
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
#undef JEMALLOC_MALLOC_THREAD_CLEANUP
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
#undef JEMALLOC_THREADED_INIT
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
#undef JEMALLOC_MUTEX_INIT_CB
/* Defined if __attribute__((...)) syntax is supported. */
#undef JEMALLOC_HAVE_ATTR

/* Defined if alloc_size attribute is supported. */
#undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE

/* Defined if format(gnu_printf, ...) attribute is supported. */
#undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF

/* Defined if format(printf, ...) attribute is supported. */
#undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF

/*
 * Define overrides for non-standard allocator-related functions if they are
 * present on the system.
 */
#undef JEMALLOC_OVERRIDE_MEMALIGN
#undef JEMALLOC_OVERRIDE_VALLOC
#undef JEMALLOC_USABLE_SIZE_CONST

/*
 * If defined, specify throw() for the public function prototypes when
 * compiling with C++.  The only justification for this is to match the
 * prototypes that glibc defines.
 */
#undef JEMALLOC_USE_CXX_THROW

#ifdef _MSC_VER
#  ifdef _WIN64
#    define LG_SIZEOF_PTR_WIN 3
#  else
#    define LG_SIZEOF_PTR_WIN 2
#  endif
#endif

/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#undef LG_SIZEOF_PTR