ruanhaishen/redis: commit f63e81c2 (unverified)
Authored Aug 25, 2018 by Chris Lamb; committed by GitHub on Aug 25, 2018

    Merge branch 'unstable' into config-set-maxmemory-grammar

Parents: eaeba1b2, 39c70e72
Changes: 209 files (only 209 of 209+ changed files are displayed)
deps/jemalloc/include/jemalloc/internal/prof_structs.h (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
#define JEMALLOC_INTERNAL_PROF_STRUCTS_H

#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/rb.h"

struct prof_bt_s {
    /* Backtrace, stored as len program counters. */
    void **vec;
    unsigned len;
};

#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
    prof_bt_t *bt;
    unsigned max;
} prof_unwind_data_t;
#endif

struct prof_accum_s {
#ifndef JEMALLOC_ATOMIC_U64
    malloc_mutex_t mtx;
    uint64_t accumbytes;
#else
    atomic_u64_t accumbytes;
#endif
};

struct prof_cnt_s {
    /* Profiling counters. */
    uint64_t curobjs;
    uint64_t curbytes;
    uint64_t accumobjs;
    uint64_t accumbytes;
};

typedef enum {
    prof_tctx_state_initializing,
    prof_tctx_state_nominal,
    prof_tctx_state_dumping,
    prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;

struct prof_tctx_s {
    /* Thread data for thread that performed the allocation. */
    prof_tdata_t *tdata;

    /*
     * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
     * defunct during teardown.
     */
    uint64_t thr_uid;
    uint64_t thr_discrim;

    /* Profiling counters, protected by tdata->lock. */
    prof_cnt_t cnts;

    /* Associated global context. */
    prof_gctx_t *gctx;

    /*
     * UID that distinguishes multiple tctx's created by the same thread,
     * but coexisting in gctx->tctxs.  There are two ways that such
     * coexistence can occur:
     * - A dumper thread can cause a tctx to be retained in the purgatory
     *   state.
     * - Although a single "producer" thread must create all tctx's which
     *   share the same thr_uid, multiple "consumers" can each concurrently
     *   execute portions of prof_tctx_destroy().  prof_tctx_destroy() only
     *   gets called once each time cnts.cur{objs,bytes} drop to 0, but this
     *   threshold can be hit again before the first consumer finishes
     *   executing prof_tctx_destroy().
     */
    uint64_t tctx_uid;

    /* Linkage into gctx's tctxs. */
    rb_node(prof_tctx_t) tctx_link;

    /*
     * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
     * sample vs destroy race.
     */
    bool prepared;

    /* Current dump-related state, protected by gctx->lock. */
    prof_tctx_state_t state;

    /*
     * Copy of cnts snapshotted during early dump phase, protected by
     * dump_mtx.
     */
    prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;

struct prof_gctx_s {
    /* Protects nlimbo, cnt_summed, and tctxs. */
    malloc_mutex_t *lock;

    /*
     * Number of threads that currently cause this gctx to be in a state of
     * limbo due to one of:
     *   - Initializing this gctx.
     *   - Initializing per thread counters associated with this gctx.
     *   - Preparing to destroy this gctx.
     *   - Dumping a heap profile that includes this gctx.
     * nlimbo must be 1 (single destroyer) in order to safely destroy the
     * gctx.
     */
    unsigned nlimbo;

    /*
     * Tree of profile counters, one for each thread that has allocated in
     * this context.
     */
    prof_tctx_tree_t tctxs;

    /* Linkage for tree of contexts to be dumped. */
    rb_node(prof_gctx_t) dump_link;

    /* Temporary storage for summation during dump. */
    prof_cnt_t cnt_summed;

    /* Associated backtrace. */
    prof_bt_t bt;

    /* Backtrace vector, variable size, referred to by bt. */
    void *vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;

struct prof_tdata_s {
    malloc_mutex_t *lock;

    /* Monotonically increasing unique thread identifier. */
    uint64_t thr_uid;

    /*
     * Monotonically increasing discriminator among tdata structures
     * associated with the same thr_uid.
     */
    uint64_t thr_discrim;

    /* Included in heap profile dumps if non-NULL. */
    char *thread_name;

    bool attached;
    bool expired;

    rb_node(prof_tdata_t) tdata_link;

    /*
     * Counter used to initialize prof_tctx_t's tctx_uid.  No locking is
     * necessary when incrementing this field, because only one thread ever
     * does so.
     */
    uint64_t tctx_uid_next;

    /*
     * Hash of (prof_bt_t *)-->(prof_tctx_t *).  Each thread tracks
     * backtraces for which it has non-zero allocation/deallocation counters
     * associated with thread-specific prof_tctx_t objects.  Other threads
     * may write to prof_tctx_t contents when freeing associated objects.
     */
    ckh_t bt2tctx;

    /* Sampling state. */
    uint64_t prng_state;
    uint64_t bytes_until_sample;

    /* State used to avoid dumping while operating on prof internals. */
    bool enq;
    bool enq_idump;
    bool enq_gdump;

    /*
     * Set to true during an early dump phase for tdata's which are
     * currently being dumped.  New threads' tdata's have this initialized
     * to false so that they aren't accidentally included in later dump
     * phases.
     */
    bool dumping;

    /*
     * True if profiling is active for this tdata's thread
     * (thread.prof.active mallctl).
     */
    bool active;

    /* Temporary storage for summation during dump. */
    prof_cnt_t cnt_summed;

    /* Backtrace vector, used for calls to prof_backtrace(). */
    void *vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;

#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
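The prof_accum_s struct above switches between a lock-free counter and a mutex-protected one depending on JEMALLOC_ATOMIC_U64. A minimal stand-alone sketch of that fallback pattern, using C11 stdatomic and pthreads in place of jemalloc's atomic_u64_t/malloc_mutex_t wrappers (HAVE_ATOMIC_U64, accum_t and accum_add are illustrative names, not jemalloc API):

    #include <stdint.h>
    #ifdef HAVE_ATOMIC_U64              /* stand-in for JEMALLOC_ATOMIC_U64 */
    #include <stdatomic.h>
    typedef struct { _Atomic uint64_t accumbytes; } accum_t;
    /* Lock-free accumulate; relaxed ordering suffices for a statistics counter. */
    static inline void accum_add(accum_t *a, uint64_t n) {
        atomic_fetch_add_explicit(&a->accumbytes, n, memory_order_relaxed);
    }
    #else
    #include <pthread.h>
    typedef struct { pthread_mutex_t mtx; uint64_t accumbytes; } accum_t;
    /* Fallback: serialize updates with a mutex when 64-bit atomics are missing. */
    static inline void accum_add(accum_t *a, uint64_t n) {
        pthread_mutex_lock(&a->mtx);
        a->accumbytes += n;
        pthread_mutex_unlock(&a->mtx);
    }
    #endif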
deps/jemalloc/include/jemalloc/internal/prof_types.h (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
#define JEMALLOC_INTERNAL_PROF_TYPES_H

typedef struct prof_bt_s prof_bt_t;
typedef struct prof_accum_s prof_accum_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;

/* Option defaults. */
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1

/*
 * Hard limit on stack backtrace depth.  The version of prof_backtrace() that
 * is based on __builtin_return_address() necessarily has a hard-coded number
 * of backtrace frame handlers, and should be kept in sync with this setting.
 */
#define PROF_BT_MAX 128

/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64

/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536

/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128

/*
 * Number of mutexes shared among all gctx's.  No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
#define PROF_NCTX_LOCKS 1024

/*
 * Number of mutexes shared among all tdata's.  No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
#define PROF_NTDATA_LOCKS 256

/*
 * prof_tdata pointers close to NULL are used to encode state information that
 * is used for cleaning up during thread shutdown.
 */
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY

#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
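A quick worked check of what the log2 defaults and "near NULL" sentinels above mean at run time (illustrative only; void * stands in for prof_tdata_t * here):

    #include <assert.h>
    #include <stdint.h>

    #define LG_PROF_SAMPLE_DEFAULT 19
    #define PROF_TDATA_STATE_REINCARNATED ((void *)(uintptr_t)1)
    #define PROF_TDATA_STATE_PURGATORY ((void *)(uintptr_t)2)
    #define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY

    int main(void) {
        /* 2^19 bytes: on average one sampled allocation per 512 KiB allocated. */
        assert((UINT64_C(1) << LG_PROF_SAMPLE_DEFAULT) == 512 * 1024);

        /* Any tdata pointer <= STATE_MAX is a cleanup marker, not a real object. */
        void *tdata = PROF_TDATA_STATE_REINCARNATED;
        assert((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX);
        return 0;
    }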
deps/jemalloc/include/jemalloc/internal/public_namespace.sh
deps/jemalloc/include/jemalloc/internal/ql.h

-/*
- * List definitions.
- */
+#ifndef JEMALLOC_INTERNAL_QL_H
+#define JEMALLOC_INTERNAL_QL_H
+
+#include "jemalloc/internal/qr.h"
+
+/* List definitions. */
 #define ql_head(a_type) \
 struct { \
     a_type *qlh_first; \
...
@@ -81,3 +84,5 @@ struct { \
 #define ql_reverse_foreach(a_var, a_head, a_field) \
     qr_reverse_foreach((a_var), ql_first(a_head), a_field)
+
+#endif /* JEMALLOC_INTERNAL_QL_H */
deps/jemalloc/include/jemalloc/internal/qr.h

+#ifndef JEMALLOC_INTERNAL_QR_H
+#define JEMALLOC_INTERNAL_QR_H
+
 /* Ring definitions. */
 #define qr(a_type) \
 struct { \
...
@@ -22,17 +25,15 @@ struct { \
     (a_qrelm)->a_field.qre_prev = (a_qr); \
 } while (0)
-#define qr_after_insert(a_qrelm, a_qr, a_field) \
-    do \
-    { \
+#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
     (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
     (a_qr)->a_field.qre_prev = (a_qrelm); \
     (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
     (a_qrelm)->a_field.qre_next = (a_qr); \
 } while (0)
-#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
-    void *t; \
+#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
+    a_type *t; \
     (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
     (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
     t = (a_qr_a)->a_field.qre_prev; \
...
@@ -40,10 +41,12 @@ struct { \
     (a_qr_b)->a_field.qre_prev = t; \
 } while (0)
-/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
- * have two copies of the code. */
-#define qr_split(a_qr_a, a_qr_b, a_field) \
-    qr_meld((a_qr_a), (a_qr_b), a_field)
+/*
+ * qr_meld() and qr_split() are functionally equivalent, so there's no need to
+ * have two copies of the code.
+ */
+#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
+    qr_meld((a_qr_a), (a_qr_b), a_type, a_field)
 #define qr_remove(a_qr, a_field) do { \
     (a_qr)->a_field.qre_prev->a_field.qre_next \
...
@@ -65,3 +68,5 @@ struct { \
     (var) != NULL; \
     (var) = (((var) != (a_qr)) \
     ? (var)->a_field.qre_prev : NULL))
+
+#endif /* JEMALLOC_INTERNAL_QR_H */
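A minimal usage sketch of the intrusive ring these macros implement. It assumes the header is reachable on the include path and that the classic qr_new/qr_after_insert/qr_foreach macros are available in this version; treat it as an illustration of the idea rather than verbatim jemalloc usage:

    #include <stdio.h>
    #include "jemalloc/internal/qr.h"   /* ring macros from the header above */

    typedef struct widget_s widget_t;
    struct widget_s {
        int id;
        qr(widget_t) link;   /* embeds qre_next/qre_prev for this ring */
    };

    int main(void) {
        widget_t a = {1}, b = {2}, c = {3};
        widget_t *w;

        qr_new(&a, link);               /* a is a one-element ring */
        qr_after_insert(&a, &b, link);  /* ring: a -> b */
        qr_after_insert(&b, &c, link);  /* ring: a -> b -> c */

        qr_foreach(w, &a, link) {       /* visits a, b, c exactly once */
            printf("%d\n", w->id);
        }
        return 0;
    }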
deps/jemalloc/include/jemalloc/internal/quarantine.h (deleted, 100644 → 0; contents as of parent eaeba1b2)

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct quarantine_obj_s quarantine_obj_t;
typedef struct quarantine_s quarantine_t;

/* Default per thread quarantine size if valgrind is enabled. */
#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct quarantine_obj_s {
    void *ptr;
    size_t usize;
};

struct quarantine_s {
    size_t curbytes;
    size_t curobjs;
    size_t first;
#define LG_MAXOBJS_INIT 10
    size_t lg_maxobjs;
    quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

quarantine_t *quarantine_init(size_t lg_maxobjs);
void quarantine(void *ptr);
void quarantine_cleanup(void *arg);
bool quarantine_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *)

void quarantine_alloc_hook(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
malloc_tsd_externs(quarantine, quarantine_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL,
    quarantine_cleanup)

JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
    quarantine_t *quarantine;

    assert(config_fill && opt_quarantine);

    quarantine = *quarantine_tsd_get();
    if (quarantine == NULL)
        quarantine_init(LG_MAXOBJS_INIT);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
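The quarantine_s layout above is a power-of-two ring buffer indexed by first/curobjs/lg_maxobjs. A hedged sketch of the indexing such a layout implies (illustrative only; this is not the removed quarantine.c code):

    #include <stddef.h>

    typedef struct { void *ptr; size_t usize; } qobj_t;

    typedef struct {
        size_t curbytes;
        size_t curobjs;
        size_t first;       /* index of the oldest entry */
        size_t lg_maxobjs;  /* capacity == 1 << lg_maxobjs */
        qobj_t objs[];      /* flexible array standing in for objs[1] */
    } quar_t;

    /* Slot where the next quarantined object would be stored. */
    static size_t quar_tail(const quar_t *q) {
        return (q->first + q->curobjs) & (((size_t)1 << q->lg_maxobjs) - 1);
    }

    /* Evict the oldest object by advancing first and shrinking the counters. */
    static void quar_drop_oldest(quar_t *q) {
        q->curbytes -= q->objs[q->first].usize;
        q->first = (q->first + 1) & (((size_t)1 << q->lg_maxobjs) - 1);
        q->curobjs--;
    }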
deps/jemalloc/include/jemalloc/internal/rb.h

...
@@ -22,6 +22,10 @@
 #ifndef RB_H_
 #define RB_H_
+#ifndef __PGI
+#define RB_COMPACT
+#endif
 #ifdef RB_COMPACT
 /* Node structure. */
 #define rb_node(a_type) \
...
@@ -42,7 +46,6 @@ struct { \
 #define rb_tree(a_type) \
 struct { \
     a_type *rbt_root; \
-    a_type rbt_nil; \
 }
 /* Left accessors. */
...
@@ -79,6 +82,15 @@ struct { \
     (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
     (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
 } while (0)
+
+/* Node initializer. */
+#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
+    /* Bookkeeping bit cannot be used by node pointer. */ \
+    assert(((uintptr_t)(a_node) & 0x1) == 0); \
+    rbtn_left_set(a_type, a_field, (a_node), NULL); \
+    rbtn_right_set(a_type, a_field, (a_node), NULL); \
+    rbtn_red_set(a_type, a_field, (a_node)); \
+} while (0)
 #else
 /* Right accessors. */
 #define rbtn_right_get(a_type, a_field, a_node) \
...
@@ -99,28 +111,26 @@ struct { \
 #define rbtn_black_set(a_type, a_field, a_node) do { \
     (a_node)->a_field.rbn_red = false; \
 } while (0)
-#endif
 /* Node initializer. */
 #define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
-    rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
-    rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
+    rbtn_left_set(a_type, a_field, (a_node), NULL); \
+    rbtn_right_set(a_type, a_field, (a_node), NULL); \
     rbtn_red_set(a_type, a_field, (a_node)); \
 } while (0)
+#endif
 /* Tree initializer. */
 #define rb_new(a_type, a_field, a_rbt) do { \
-    (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \
-    rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \
-    rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \
+    (a_rbt)->rbt_root = NULL; \
 } while (0)
 /* Internal utility macros. */
 #define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
     (r_node) = (a_root); \
-    if ((r_node) != &(a_rbt)->rbt_nil) { \
+    if ((r_node) != NULL) { \
         for (; \
-            rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil; \
+            rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
             (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
         } \
     } \
...
@@ -128,10 +138,9 @@ struct { \
 #define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
     (r_node) = (a_root); \
-    if ((r_node) != &(a_rbt)->rbt_nil) { \
-        for (; rbtn_right_get(a_type, a_field, (r_node)) != \
-            &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \
-            (r_node))) { \
+    if ((r_node) != NULL) { \
+        for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
+            (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
         } \
     } \
 } while (0)
...
@@ -158,6 +167,8 @@ struct { \
 #define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
 a_attr void \
 a_prefix##new(a_rbt_type *rbtree); \
+a_attr bool \
+a_prefix##empty(a_rbt_type *rbtree); \
 a_attr a_type * \
 a_prefix##first(a_rbt_type *rbtree); \
 a_attr a_type * \
...
@@ -167,11 +178,11 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node); \
 a_attr a_type * \
 a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
 a_attr a_type * \
-a_prefix##search(a_rbt_type *rbtree, a_type *key); \
+a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
 a_attr a_type * \
-a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \
+a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
 a_attr a_type * \
-a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \
+a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
 a_attr void \
 a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
 a_attr void \
...
@@ -181,7 +192,10 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
     a_rbt_type *, a_type *, void *), void *arg); \
 a_attr a_type * \
 a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
-    a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);
+    a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
+a_attr void \
+a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
+    void *arg);
 /*
  * The rb_gen() macro generates a type-specific red-black tree implementation,
...
@@ -198,7 +212,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 * int (a_cmp *)(a_type *a_node, a_type *a_other);
 * ^^^^^^
 * or a_key
-* Interpretation of comparision function return values:
+* Interpretation of comparison function return values:
 * -1 : a_node < a_other
 * 0 : a_node == a_other
 * 1 : a_node > a_other
...
@@ -224,6 +238,13 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 * Args:
 * tree: Pointer to an uninitialized red-black tree object.
 *
+* static bool
+* ex_empty(ex_t *tree);
+* Description: Determine whether tree is empty.
+* Args:
+* tree: Pointer to an initialized red-black tree object.
+* Ret: True if tree is empty, false otherwise.
+*
 * static ex_node_t *
 * ex_first(ex_t *tree);
 * static ex_node_t *
...
@@ -245,7 +266,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 * last/first.
 *
 * static ex_node_t *
-* ex_search(ex_t *tree, ex_node_t *key);
+* ex_search(ex_t *tree, const ex_node_t *key);
 * Description: Search for node that matches key.
 * Args:
 * tree: Pointer to an initialized red-black tree object.
...
@@ -253,9 +274,9 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 * Ret: Node in tree that matches key, or NULL if no match.
 *
 * static ex_node_t *
-* ex_nsearch(ex_t *tree, ex_node_t *key);
+* ex_nsearch(ex_t *tree, const ex_node_t *key);
 * static ex_node_t *
-* ex_psearch(ex_t *tree, ex_node_t *key);
+* ex_psearch(ex_t *tree, const ex_node_t *key);
 * Description: Search for node that matches key.  If no match is found,
 * return what would be key's successor/predecessor, were
 * key in tree.
...
@@ -303,40 +324,52 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 * arg : Opaque pointer passed to cb().
 * Ret: NULL if iteration completed, or the non-NULL callback return value
 * that caused termination of the iteration.
+*
+* static void
+* ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
+* Description: Iterate over the tree with post-order traversal, remove
+* each node, and run the callback if non-null.  This is
+* used for destroying a tree without paying the cost to
+* rebalance it.  The tree must not be otherwise altered
+* during traversal.
+* Args:
+* tree: Pointer to an initialized red-black tree object.
+* cb  : Callback function, which, if non-null, is called for each node
+* during iteration.  There is no way to stop iteration once it
+* has begun.
+* arg : Opaque pointer passed to cb().
 */
 #define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
 a_attr void \
 a_prefix##new(a_rbt_type *rbtree) { \
     rb_new(a_type, a_field, rbtree); \
 } \
+a_attr bool \
+a_prefix##empty(a_rbt_type *rbtree) { \
+    return (rbtree->rbt_root == NULL); \
+} \
 a_attr a_type * \
 a_prefix##first(a_rbt_type *rbtree) { \
     a_type *ret; \
     rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
-    if (ret == &rbtree->rbt_nil) { \
-        ret = NULL; \
-    } \
-    return (ret); \
+    return ret; \
 } \
 a_attr a_type * \
 a_prefix##last(a_rbt_type *rbtree) { \
     a_type *ret; \
     rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
-    if (ret == &rbtree->rbt_nil) { \
-        ret = NULL; \
-    } \
-    return (ret); \
+    return ret; \
 } \
 a_attr a_type * \
 a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
     a_type *ret; \
-    if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
+    if (rbtn_right_get(a_type, a_field, node) != NULL) { \
         rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
             a_field, node), ret); \
     } else { \
         a_type *tnode = rbtree->rbt_root; \
-        assert(tnode != &rbtree->rbt_nil); \
-        ret = &rbtree->rbt_nil; \
+        assert(tnode != NULL); \
+        ret = NULL; \
         while (true) { \
             int cmp = (a_cmp)(node, tnode); \
             if (cmp < 0) { \
...
@@ -347,24 +380,21 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
             } else { \
                 break; \
             } \
-            assert(tnode != &rbtree->rbt_nil); \
+            assert(tnode != NULL); \
         } \
     } \
-    if (ret == &rbtree->rbt_nil) { \
-        ret = (NULL); \
-    } \
-    return (ret); \
+    return ret; \
 } \
 a_attr a_type * \
 a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
     a_type *ret; \
-    if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
+    if (rbtn_left_get(a_type, a_field, node) != NULL) { \
         rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
             a_field, node), ret); \
     } else { \
         a_type *tnode = rbtree->rbt_root; \
-        assert(tnode != &rbtree->rbt_nil); \
-        ret = &rbtree->rbt_nil; \
+        assert(tnode != NULL); \
+        ret = NULL; \
         while (true) { \
             int cmp = (a_cmp)(node, tnode); \
             if (cmp < 0) { \
...
@@ -375,20 +405,17 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
             } else { \
                 break; \
             } \
-            assert(tnode != &rbtree->rbt_nil); \
+            assert(tnode != NULL); \
         } \
     } \
-    if (ret == &rbtree->rbt_nil) { \
-        ret = (NULL); \
-    } \
-    return (ret); \
+    return ret; \
 } \
 a_attr a_type * \
-a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
+a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
     a_type *ret; \
     int cmp; \
     ret = rbtree->rbt_root; \
-    while (ret != &rbtree->rbt_nil \
+    while (ret != NULL \
         && (cmp = (a_cmp)(key, ret)) != 0) { \
         if (cmp < 0) { \
             ret = rbtn_left_get(a_type, a_field, ret); \
...
@@ -396,17 +423,14 @@ a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
             ret = rbtn_right_get(a_type, a_field, ret); \
         } \
     } \
-    if (ret == &rbtree->rbt_nil) { \
-        ret = (NULL); \
-    } \
-    return (ret); \
+    return ret; \
 } \
 a_attr a_type * \
-a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
+a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
     a_type *ret; \
     a_type *tnode = rbtree->rbt_root; \
-    ret = &rbtree->rbt_nil; \
-    while (tnode != &rbtree->rbt_nil) { \
+    ret = NULL; \
+    while (tnode != NULL) { \
         int cmp = (a_cmp)(key, tnode); \
         if (cmp < 0) { \
             ret = tnode; \
...
@@ -418,17 +442,14 @@ a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
             break; \
         } \
     } \
-    if (ret == &rbtree->rbt_nil) { \
-        ret = (NULL); \
-    } \
-    return (ret); \
+    return ret; \
 } \
 a_attr a_type * \
-a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
+a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
     a_type *ret; \
     a_type *tnode = rbtree->rbt_root; \
-    ret = &rbtree->rbt_nil; \
-    while (tnode != &rbtree->rbt_nil) { \
+    ret = NULL; \
+    while (tnode != NULL) { \
         int cmp = (a_cmp)(key, tnode); \
         if (cmp < 0) { \
             tnode = rbtn_left_get(a_type, a_field, tnode); \
...
@@ -440,10 +461,7 @@ a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
             break; \
         } \
     } \
-    if (ret == &rbtree->rbt_nil) { \
-        ret = (NULL); \
-    } \
-    return (ret); \
+    return ret; \
 } \
 a_attr void \
 a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
...
@@ -454,7 +472,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
     rbt_node_new(a_type, a_field, rbtree, node); \
     /* Wind. */ \
     path->node = rbtree->rbt_root; \
-    for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
+    for (pathp = path; pathp->node != NULL; pathp++) { \
         int cmp = pathp->cmp = a_cmp(node, pathp->node); \
         assert(cmp != 0); \
         if (cmp < 0) { \
...
@@ -474,7 +492,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
             rbtn_left_set(a_type, a_field, cnode, left); \
             if (rbtn_red_get(a_type, a_field, left)) { \
                 a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
-                if (rbtn_red_get(a_type, a_field, leftleft)) { \
+                if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
+                    leftleft)) { \
                     /* Fix up 4-node. */ \
                     a_type *tnode; \
                     rbtn_black_set(a_type, a_field, leftleft); \
...
@@ -489,7 +508,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
             rbtn_right_set(a_type, a_field, cnode, right); \
             if (rbtn_red_get(a_type, a_field, right)) { \
                 a_type *left = rbtn_left_get(a_type, a_field, cnode); \
-                if (rbtn_red_get(a_type, a_field, left)) { \
+                if (left != NULL && rbtn_red_get(a_type, a_field, \
+                    left)) { \
                     /* Split 4-node. */ \
                     rbtn_black_set(a_type, a_field, left); \
                     rbtn_black_set(a_type, a_field, right); \
...
@@ -522,7 +542,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
     /* Wind. */ \
     nodep = NULL; /* Silence compiler warning. */ \
     path->node = rbtree->rbt_root; \
-    for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
+    for (pathp = path; pathp->node != NULL; pathp++) { \
         int cmp = pathp->cmp = a_cmp(node, pathp->node); \
         if (cmp < 0) { \
             pathp[1].node = rbtn_left_get(a_type, a_field, \
...
@@ -534,8 +554,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
             /* Find node's successor, in preparation for swap. */ \
             pathp->cmp = 1; \
             nodep = pathp; \
-            for (pathp++; pathp->node != &rbtree->rbt_nil; \
-                pathp++) { \
+            for (pathp++; pathp->node != NULL; pathp++) { \
                 pathp->cmp = -1; \
                 pathp[1].node = rbtn_left_get(a_type, a_field, \
                     pathp->node); \
...
@@ -577,10 +596,10 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
         } \
     } else { \
         a_type *left = rbtn_left_get(a_type, a_field, node); \
-        if (left != &rbtree->rbt_nil) { \
+        if (left != NULL) { \
             /* node has no successor, but it has a left child. */ \
             /* Splice node out, without losing the left child. */ \
-            assert(rbtn_red_get(a_type, a_field, node) == false); \
+            assert(!rbtn_red_get(a_type, a_field, node)); \
             assert(rbtn_red_get(a_type, a_field, left)); \
             rbtn_black_set(a_type, a_field, left); \
             if (pathp == path) { \
...
@@ -597,34 +616,32 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
             return; \
         } else if (pathp == path) { \
             /* The tree only contained one node. */ \
-            rbtree->rbt_root = &rbtree->rbt_nil; \
+            rbtree->rbt_root = NULL; \
             return; \
         } \
     } \
     if (rbtn_red_get(a_type, a_field, pathp->node)) { \
         /* Prune red node, which requires no fixup. */ \
         assert(pathp[-1].cmp < 0); \
-        rbtn_left_set(a_type, a_field, pathp[-1].node, \
-            &rbtree->rbt_nil); \
+        rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
         return; \
     } \
     /* The node to be pruned is black, so unwind until balance is */ \
     /* restored. */ \
-    pathp->node = &rbtree->rbt_nil; \
+    pathp->node = NULL; \
     for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
         assert(pathp->cmp != 0); \
         if (pathp->cmp < 0) { \
             rbtn_left_set(a_type, a_field, pathp->node, \
                 pathp[1].node); \
-            assert(rbtn_red_get(a_type, a_field, pathp[1].node) \
-                == false); \
             if (rbtn_red_get(a_type, a_field, pathp->node)) { \
                 a_type *right = rbtn_right_get(a_type, a_field, \
                     pathp->node); \
                 a_type *rightleft = rbtn_left_get(a_type, a_field, \
                     right); \
                 a_type *tnode; \
-                if (rbtn_red_get(a_type, a_field, rightleft)) { \
+                if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
+                    rightleft)) { \
                     /* In the following diagrams, ||, //, and \\ */ \
                     /* indicate the path to the removed node. */ \
                     /* */ \
...
@@ -667,7 +684,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                     pathp->node); \
                 a_type *rightleft = rbtn_left_get(a_type, a_field, \
                     right); \
-                if (rbtn_red_get(a_type, a_field, rightleft)) { \
+                if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
+                    rightleft)) { \
                     /* || */ \
                     /* pathp(b) */ \
                     /* // \ */ \
...
@@ -681,7 +699,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                     rbtn_rotate_left(a_type, a_field, pathp->node, \
                         tnode); \
                     /* Balance restored, but rotation modified */ \
-                    /* subree root, which may actually be the tree */ \
+                    /* subtree root, which may actually be the tree */ \
                     /* root. */ \
                     if (pathp == path) { \
                         /* Set root. */ \
...
@@ -721,7 +739,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                     left); \
                 a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
                     leftright); \
-                if (rbtn_red_get(a_type, a_field, leftrightleft)) { \
+                if (leftrightleft != NULL && rbtn_red_get(a_type, \
+                    a_field, leftrightleft)) { \
                     /* || */ \
                     /* pathp(b) */ \
                     /* / \\ */ \
...
@@ -747,7 +766,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                     /* (b) */ \
                     /* / */ \
                     /* (b) */ \
-                    assert(leftright != &rbtree->rbt_nil); \
+                    assert(leftright != NULL); \
                     rbtn_red_set(a_type, a_field, leftright); \
                     rbtn_rotate_right(a_type, a_field, pathp->node, \
                         tnode); \
...
@@ -770,7 +789,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                 return; \
             } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
                 a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
-                if (rbtn_red_get(a_type, a_field, leftleft)) { \
+                if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
+                    leftleft)) { \
                     /* || */ \
                     /* pathp(r) */ \
                     /* / \\ */ \
...
@@ -808,7 +828,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
                 } \
             } else { \
                 a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
-                if (rbtn_red_get(a_type, a_field, leftleft)) { \
+                if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
+                    leftleft)) { \
                     /* || */ \
                     /* pathp(b) */ \
                     /* / \\ */ \
...
@@ -849,22 +870,22 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
         } \
     } \
     /* Set root. */ \
     rbtree->rbt_root = path->node; \
-    assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \
+    assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
 } \
 a_attr a_type * \
 a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
     a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
-    if (node == &rbtree->rbt_nil) { \
-        return (&rbtree->rbt_nil); \
+    if (node == NULL) { \
+        return NULL; \
     } else { \
         a_type *ret; \
         if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
-            a_field, node), cb, arg)) != &rbtree->rbt_nil \
-            || (ret = cb(rbtree, node, arg)) != NULL) { \
-            return (ret); \
+            a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
+            arg)) != NULL) { \
+            return ret; \
         } \
-        return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
-            a_field, node), cb, arg)); \
+        return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+            a_field, node), cb, arg); \
     } \
 } \
 a_attr a_type * \
...
@@ -874,22 +895,22 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
     if (cmp < 0) { \
         a_type *ret; \
         if ((ret = a_prefix##iter_start(rbtree, start, \
-            rbtn_left_get(a_type, a_field, node), cb, arg)) != \
-            &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
-            return (ret); \
+            rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
+            (ret = cb(rbtree, node, arg)) != NULL) { \
+            return ret; \
         } \
-        return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
-            a_field, node), cb, arg)); \
+        return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+            a_field, node), cb, arg); \
     } else if (cmp > 0) { \
-        return (a_prefix##iter_start(rbtree, start, \
-            rbtn_right_get(a_type, a_field, node), cb, arg)); \
+        return a_prefix##iter_start(rbtree, start, \
+            rbtn_right_get(a_type, a_field, node), cb, arg); \
     } else { \
         a_type *ret; \
         if ((ret = cb(rbtree, node, arg)) != NULL) { \
-            return (ret); \
+            return ret; \
         } \
-        return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
-            a_field, node), cb, arg)); \
+        return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+            a_field, node), cb, arg); \
     } \
 } \
 a_attr a_type * \
...
@@ -902,25 +923,22 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
     } else { \
         ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
     } \
-    if (ret == &rbtree->rbt_nil) { \
-        ret = NULL; \
-    } \
-    return (ret); \
+    return ret; \
 } \
 a_attr a_type * \
 a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
     a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
-    if (node == &rbtree->rbt_nil) { \
-        return (&rbtree->rbt_nil); \
+    if (node == NULL) { \
+        return NULL; \
     } else { \
         a_type *ret; \
         if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
-            rbtn_right_get(a_type, a_field, node), cb, arg)) != \
-            &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
-            return (ret); \
+            rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
+            (ret = cb(rbtree, node, arg)) != NULL) { \
+            return ret; \
         } \
-        return (a_prefix##reverse_iter_recurse(rbtree, \
-            rbtn_left_get(a_type, a_field, node), cb, arg)); \
+        return a_prefix##reverse_iter_recurse(rbtree, \
+            rbtn_left_get(a_type, a_field, node), cb, arg); \
     } \
 } \
 a_attr a_type * \
...
@@ -931,22 +949,22 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
     if (cmp > 0) { \
         a_type *ret; \
         if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
-            rbtn_right_get(a_type, a_field, node), cb, arg)) != \
-            &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
-            return (ret); \
+            rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
+            (ret = cb(rbtree, node, arg)) != NULL) { \
+            return ret; \
         } \
-        return (a_prefix##reverse_iter_recurse(rbtree, \
-            rbtn_left_get(a_type, a_field, node), cb, arg)); \
+        return a_prefix##reverse_iter_recurse(rbtree, \
+            rbtn_left_get(a_type, a_field, node), cb, arg); \
     } else if (cmp < 0) { \
-        return (a_prefix##reverse_iter_start(rbtree, start, \
-            rbtn_left_get(a_type, a_field, node), cb, arg)); \
+        return a_prefix##reverse_iter_start(rbtree, start, \
+            rbtn_left_get(a_type, a_field, node), cb, arg); \
    } else { \
         a_type *ret; \
         if ((ret = cb(rbtree, node, arg)) != NULL) { \
-            return (ret); \
+            return ret; \
         } \
-        return (a_prefix##reverse_iter_recurse(rbtree, \
-            rbtn_left_get(a_type, a_field, node), cb, arg)); \
+        return a_prefix##reverse_iter_recurse(rbtree, \
+            rbtn_left_get(a_type, a_field, node), cb, arg); \
     } \
 } \
 a_attr a_type * \
...
@@ -960,10 +978,29 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
         ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
             cb, arg); \
     } \
-    if (ret == &rbtree->rbt_nil) { \
-        ret = NULL; \
-    } \
-    return (ret); \
+    return ret; \
+} \
+a_attr void \
+a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
+    a_type *, void *), void *arg) { \
+    if (node == NULL) { \
+        return; \
+    } \
+    a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
+        node), cb, arg); \
+    rbtn_left_set(a_type, a_field, (node), NULL); \
+    a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
+        node), cb, arg); \
+    rbtn_right_set(a_type, a_field, (node), NULL); \
+    if (cb) { \
+        cb(node, arg); \
+    } \
+} \
+a_attr void \
+a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
+    void *arg) { \
+    a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
+    rbtree->rbt_root = NULL; \
 }
 }

 #endif /* RB_H_ */
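The ex_* functions described in the header's comment block come from rb_proto()/rb_gen(). A minimal sketch of wiring them up for a hypothetical ex_node_t keyed by an int (assumes rb.h and an assert definition are on the include path; the ex_ names mirror the example names used in the comments, everything else here is illustrative):

    #include <assert.h>
    #include "jemalloc/internal/rb.h"

    typedef struct ex_node_s ex_node_t;
    struct ex_node_s {
        int key;
        rb_node(ex_node_t) link;   /* intrusive left/right/color linkage */
    };
    typedef rb_tree(ex_node_t) ex_t;

    /* Total order over nodes: -1, 0, or 1, as documented above. */
    static int
    ex_cmp(const ex_node_t *a, const ex_node_t *b) {
        return (a->key > b->key) - (a->key < b->key);
    }

    /* Generates static ex_new(), ex_empty(), ex_first(), ex_search(),
     * ex_insert(), ex_remove(), ex_iter(), ex_destroy(), etc.  Unused
     * generated functions will trigger harmless unused-function warnings. */
    rb_gen(static, ex_, ex_t, ex_node_t, link, ex_cmp)

    void
    ex_demo(void) {
        ex_t tree;
        ex_node_t n = {42};

        ex_new(&tree);
        ex_insert(&tree, &n);
        ex_node_t key = {42};
        ex_node_t *hit = ex_search(&tree, &key);   /* == &n */
        assert(hit == &n);
        ex_remove(&tree, &n);
        assert(ex_empty(&tree));
    }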
deps/jemalloc/include/jemalloc/internal/rtree.h

+#ifndef JEMALLOC_INTERNAL_RTREE_H
+#define JEMALLOC_INTERNAL_RTREE_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree_tsd.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/tsd.h"
+
 /*
  * This radix tree implementation is tailored to the singular purpose of
- * tracking which chunks are currently owned by jemalloc.  This functionality
- * is mandatory for OS X, where jemalloc must be able to respond to object
- * ownership queries.
+ * associating metadata with extents that are currently owned by jemalloc.
  *
  *******************************************************************************
  */
-#ifdef JEMALLOC_H_TYPES
+
+/* Number of high insignificant bits. */
+#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
+/* Number of low insigificant bits. */
+#define RTREE_NLIB LG_PAGE
+/* Number of significant bits. */
+#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
+/* Number of levels in radix tree. */
+#if RTREE_NSB <= 10
+# define RTREE_HEIGHT 1
+#elif RTREE_NSB <= 36
+# define RTREE_HEIGHT 2
+#elif RTREE_NSB <= 52
+# define RTREE_HEIGHT 3
+#else
+# error Unsupported number of significant virtual address bits
+#endif
+/* Use compact leaf representation if virtual address encoding allows. */
+#if RTREE_NHIB >= LG_CEIL_NSIZES
+# define RTREE_LEAF_COMPACT
+#endif
+
+/* Needed for initialization only. */
+#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
+
+typedef struct rtree_node_elm_s rtree_node_elm_t;
+struct rtree_node_elm_s {
+    atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
+};
+
+struct rtree_leaf_elm_s {
+#ifdef RTREE_LEAF_COMPACT
+    /*
+     * Single pointer-width field containing all three leaf element fields.
+     * For example, on a 64-bit x64 system with 48 significant virtual
+     * memory address bits, the index, extent, and slab fields are packed as
+     * such:
+     *
+     * x: index
+     * e: extent
+     * b: slab
+     *
+     *   00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
+     */
+    atomic_p_t le_bits;
+#else
+    atomic_p_t le_extent; /* (extent_t *) */
+    atomic_u_t le_szind;  /* (szind_t) */
+    atomic_b_t le_slab;   /* (bool) */
+#endif
+};
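The packing described in the leaf-element comment (size-class index in the top bits, extent pointer in the middle, slab flag in bit 0) can be sanity-checked with plain integer arithmetic. This sketch assumes a 64-bit build with LG_VADDR == 48 and uses ordinary integers in place of the atomic wrappers; the values are made up for illustration:

    #include <assert.h>
    #include <stdint.h>

    #define LG_VADDR 48   /* assumed: 48 significant virtual-address bits */

    int main(void) {
        uintptr_t extent = (uintptr_t)0x00007f2a4c0e1000; /* aligned pointer */
        uintptr_t szind  = 0x2a;                          /* size-class index */
        uintptr_t slab   = 1;                             /* small allocation */

        /* Pack: index above bit 47, extent in bits 1..47, slab in bit 0. */
        uintptr_t bits = (szind << LG_VADDR)
            | (extent & (((uintptr_t)1 << LG_VADDR) - 1))
            | slab;

        /* Unpack and verify the round trip. */
        assert((bits >> LG_VADDR) == szind);
        assert((bits & 1) == slab);
        assert(((uintptr_t)((intptr_t)(bits << 16) >> 16) & ~(uintptr_t)1)
            == extent);   /* 16 == 64 - LG_VADDR, mirroring RTREE_NHIB */
        return 0;
    }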
+typedef struct rtree_level_s rtree_level_t;
+struct rtree_level_s {
+    /* Number of key bits distinguished by this level. */
+    unsigned bits;
+    /*
+     * Cumulative number of key bits distinguished by traversing to
+     * corresponding tree level.
+     */
+    unsigned cumbits;
+};
+
 typedef struct rtree_s rtree_t;
+struct rtree_s {
+    malloc_mutex_t init_lock;
+    /* Number of elements based on rtree_levels[0].bits. */
+#if RTREE_HEIGHT > 1
+    rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
+#else
+    rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
+#endif
+};
+
 /*
- * Size of each radix tree node (must be a power of 2).  This impacts tree
- * depth.
+ * Split the bits into one to three partitions depending on number of
+ * significant bits.  It the number of bits does not divide evenly into the
+ * number of levels, place one remainder bit per level starting at the leaf
+ * level.
  */
-#define RTREE_NODESIZE (1U << 16)
+static const rtree_level_t rtree_levels[] = {
+#if RTREE_HEIGHT == 1
+    {RTREE_NSB, RTREE_NHIB + RTREE_NSB}
+#elif RTREE_HEIGHT == 2
+    {RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
+    {RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
+#elif RTREE_HEIGHT == 3
+    {RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
+    {RTREE_NSB/3 + RTREE_NSB%3/2,
+        RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
+    {RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
+#else
+# error Unsupported rtree height
+#endif
+};
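For a concrete instance of the level split, here is a worked check assuming LG_VADDR == 48 and LG_PAGE == 12 (values not fixed by this header), which yields RTREE_NSB == 36 and a two-level tree of 18 bits per level:

    #include <assert.h>

    /* Assumed configuration: 64-bit pointers, 48 significant VA bits, 4 KiB pages. */
    #define LG_SIZEOF_PTR 3
    #define LG_VADDR 48
    #define LG_PAGE 12

    #define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)  /* 16 */
    #define RTREE_NLIB LG_PAGE                                 /* 12 */
    #define RTREE_NSB (LG_VADDR - RTREE_NLIB)                  /* 36 */

    int main(void) {
        assert(RTREE_NHIB == 16 && RTREE_NSB == 36);
        /* 36 <= 36, so RTREE_HEIGHT == 2: two levels of 18 bits each. */
        assert(RTREE_NSB / 2 == 18 && RTREE_NSB / 2 + RTREE_NSB % 2 == 18);
        /* Level 0 cumbits: 16 + 18 = 34; level 1 cumbits: 16 + 36 = 52. */
        assert(RTREE_NHIB + RTREE_NSB / 2 == 34 && RTREE_NHIB + RTREE_NSB == 52);
        return 0;
    }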
-typedef void *(rtree_alloc_t)(size_t);
-typedef void (rtree_dalloc_t)(void *);
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-struct rtree_s {
-    rtree_alloc_t *alloc;
-    rtree_dalloc_t *dalloc;
-    malloc_mutex_t mutex;
-    void **root;
-    unsigned height;
-    unsigned level2bits[1]; /* Dynamically sized. */
-};
-#endif /* JEMALLOC_H_STRUCTS */
+bool rtree_new(rtree_t *rtree, bool zeroed);
+
+typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
+extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
+
+typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
+extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;
+
+typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
+extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;
+
+typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
+extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
+#ifdef JEMALLOC_JET
+void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
+#endif
+rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
+    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
-void rtree_delete(rtree_t *rtree);
-void rtree_prefork(rtree_t *rtree);
-void rtree_postfork_parent(rtree_t *rtree);
-void rtree_postfork_child(rtree_t *rtree);
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
+JEMALLOC_ALWAYS_INLINE uintptr_t
+rtree_leafkey(uintptr_t key) {
+    unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
+    unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
+        rtree_levels[RTREE_HEIGHT-1].bits);
+    unsigned maskbits = ptrbits - cumbits;
+    uintptr_t mask = ~((ZU(1) << maskbits) - 1);
+    return (key & mask);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+rtree_cache_direct_map(uintptr_t key) {
+    unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
+    unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
+        rtree_levels[RTREE_HEIGHT-1].bits);
+    unsigned maskbits = ptrbits - cumbits;
+    return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
+}
+
+JEMALLOC_ALWAYS_INLINE uintptr_t
+rtree_subkey(uintptr_t key, unsigned level) {
+    unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
+    unsigned cumbits = rtree_levels[level].cumbits;
+    unsigned shiftbits = ptrbits - cumbits;
+    unsigned maskbits = rtree_levels[level].bits;
+    uintptr_t mask = (ZU(1) << maskbits) - 1;
+    return ((key >> shiftbits) & mask);
+}
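Continuing the same assumed 48-bit, two-level configuration, a numeric check of what rtree_leafkey() and rtree_subkey() compute for one address, written with plain constants in place of rtree_levels[] and ZU():

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        /* Assumed layout: 64-bit key, two 18-bit levels at cumbits 34 and 52. */
        uintptr_t key = (uintptr_t)0x00007f2a4c0e1000;

        /* Leaf key: mask off everything below the leaf level (64 - 52 = 12 bits). */
        uintptr_t leafkey = key & ~(((uintptr_t)1 << 12) - 1);
        assert(leafkey == key);   /* already page-aligned, so nothing masked */

        /* Subkey for level 0: bits [30..47] of the address (shift 64 - 34 = 30). */
        uintptr_t subkey0 = (key >> 30) & (((uintptr_t)1 << 18) - 1);
        /* Subkey for level 1: bits [12..29] of the address (shift 64 - 52 = 12). */
        uintptr_t subkey1 = (key >> 12) & (((uintptr_t)1 << 18) - 1);
        assert(subkey0 == 0x1fca9);
        (void)subkey1;
        return 0;
    }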
-#ifndef JEMALLOC_ENABLE_INLINE
-#ifdef JEMALLOC_DEBUG
-uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
-#endif
-uint8_t rtree_get(rtree_t *rtree, uintptr_t key);
-bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
-#endif
+/*
+ * Atomic getters.
+ *
+ * dependent: Reading a value on behalf of a pointer to a valid allocation
+ *            is guaranteed to be a clean read even without synchronization,
+ *            because the rtree update became visible in memory before the
+ *            pointer came into existence.
+ * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
+ *             dependent on a previous rtree write, which means a stale read
+ *             could result if synchronization were omitted here.
+ */
+# ifdef RTREE_LEAF_COMPACT
+JEMALLOC_ALWAYS_INLINE uintptr_t
+rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
+    bool dependent) {
+    return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
+        ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
+}
+
+JEMALLOC_ALWAYS_INLINE extent_t *
+rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
+# ifdef __aarch64__
+    /*
+     * aarch64 doesn't sign extend the highest virtual address bit to set
+     * the higher ones.  Instead, the high bits gets zeroed.
+     */
+    uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
+    /* Mask off the slab bit. */
+    uintptr_t low_bit_mask = ~(uintptr_t)1;
+    uintptr_t mask = high_bit_mask & low_bit_mask;
+    return (extent_t *)(bits & mask);
+# else
+    /* Restore sign-extended high bits, mask slab bit. */
+    return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
+        RTREE_NHIB) & ~((uintptr_t)0x1));
+# endif
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
+    return (szind_t)(bits >> LG_VADDR);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
+    return (bool)(bits & (uintptr_t)0x1);
+}
+# endif
+
+JEMALLOC_ALWAYS_INLINE extent_t *
+rtree_leaf_elm_extent_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
+    rtree_leaf_elm_t *elm, bool dependent) {
+#ifdef RTREE_LEAF_COMPACT
+    uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
+    return rtree_leaf_elm_bits_extent_get(bits);
+#else
+    extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
+        ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
+    return extent;
+#endif
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+rtree_leaf_elm_szind_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
+    rtree_leaf_elm_t *elm, bool dependent) {
+#ifdef RTREE_LEAF_COMPACT
+    uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
+    return rtree_leaf_elm_bits_szind_get(bits);
+#else
+    return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
+        : ATOMIC_ACQUIRE);
+#endif
+}
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
JEMALLOC_ALWAYS_INLINE
bool
#define RTREE_GET_GENERATE(f) \
rtree_leaf_elm_slab_read
(
UNUSED
tsdn_t
*
tsdn
,
UNUSED
rtree_t
*
rtree
,
/* The least significant bits of the key are ignored. */
\
rtree_leaf_elm_t
*
elm
,
bool
dependent
)
{
JEMALLOC_INLINE uint8_t \
#ifdef RTREE_LEAF_COMPACT
f(rtree_t *rtree, uintptr_t key) \
uintptr_t
bits
=
rtree_leaf_elm_bits_read
(
tsdn
,
rtree
,
elm
,
dependent
);
{ \
return
rtree_leaf_elm_bits_slab_get
(
bits
);
uint8_t ret; \
#else
uintptr_t subkey; \
return
atomic_load_b
(
&
elm
->
le_slab
,
dependent
?
ATOMIC_RELAXED
:
unsigned i, lshift, height, bits; \
ATOMIC_ACQUIRE
);
void **node, **child; \
#endif
\
}
RTREE_LOCK(&rtree->mutex); \
for (i = lshift = 0, height = rtree->height, node = rtree->root;\
static
inline
void
i < height - 1; \
rtree_leaf_elm_extent_write
(
UNUSED
tsdn_t
*
tsdn
,
UNUSED
rtree_t
*
rtree
,
i++, lshift += bits, node = child) { \
rtree_leaf_elm_t
*
elm
,
extent_t
*
extent
)
{
bits = rtree->level2bits[i]; \
#ifdef RTREE_LEAF_COMPACT
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
uintptr_t
old_bits
=
rtree_leaf_elm_bits_read
(
tsdn
,
rtree
,
elm
,
true
);
3)) - bits); \
uintptr_t
bits
=
((
uintptr_t
)
rtree_leaf_elm_bits_szind_get
(
old_bits
)
<<
child = (void**)node[subkey]; \
LG_VADDR
)
|
((
uintptr_t
)
extent
&
(((
uintptr_t
)
0x1
<<
LG_VADDR
)
-
1
))
if (child == NULL) { \
|
((
uintptr_t
)
rtree_leaf_elm_bits_slab_get
(
old_bits
));
RTREE_UNLOCK(&rtree->mutex); \
atomic_store_p
(
&
elm
->
le_bits
,
(
void
*
)
bits
,
ATOMIC_RELEASE
);
return (0); \
#else
} \
atomic_store_p
(
&
elm
->
le_extent
,
extent
,
ATOMIC_RELEASE
);
} \
\
/* \
* node is a leaf, so it contains values rather than node \
* pointers. \
*/
\
bits = rtree->level2bits[i]; \
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
bits); \
{ \
uint8_t *leaf = (uint8_t *)node; \
ret = leaf[subkey]; \
} \
RTREE_UNLOCK(&rtree->mutex); \
\
RTREE_GET_VALIDATE \
return (ret); \
}
#ifdef JEMALLOC_DEBUG
# define RTREE_LOCK(l) malloc_mutex_lock(l)
# define RTREE_UNLOCK(l) malloc_mutex_unlock(l)
# define RTREE_GET_VALIDATE
RTREE_GET_GENERATE
(
rtree_get_locked
)
# undef RTREE_LOCK
# undef RTREE_UNLOCK
# undef RTREE_GET_VALIDATE
#endif
#endif
}
static inline void
rtree_leaf_elm_szind_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
    rtree_leaf_elm_t *elm, szind_t szind) {
	assert(szind <= NSIZES);

#ifdef RTREE_LEAF_COMPACT
	uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
	uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
	    ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
	    (((uintptr_t)0x1 << LG_VADDR) - 1)) |
	    ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
	atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
#endif
}
static inline void
rtree_leaf_elm_slab_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
    rtree_leaf_elm_t *elm, bool slab) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
	uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
	    LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
	    (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
	atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
#endif
}
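The compact-leaf accessors and writers above all manipulate the same packed word: the size-class index in the bits above LG_VADDR, the extent address in the low LG_VADDR bits, and the slab flag in bit 0. As a rough, self-contained sketch of that encoding (the DEMO_* names, the assumed 48-bit address width, 64-bit uintptr_t, and the fake extent address are illustration-only assumptions, not jemalloc definitions):

#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_LG_VADDR 48	/* assumed virtual-address width (64-bit build) */
typedef unsigned demo_szind_t;

/* Pack size-class index (high bits), address (low bits), slab flag (bit 0). */
static uintptr_t
demo_pack(demo_szind_t szind, uintptr_t extent_addr, bool slab) {
	return ((uintptr_t)szind << DEMO_LG_VADDR) |
	    (extent_addr & (((uintptr_t)0x1 << DEMO_LG_VADDR) - 1)) |
	    (uintptr_t)slab;
}

static demo_szind_t
demo_szind_get(uintptr_t bits) {
	return (demo_szind_t)(bits >> DEMO_LG_VADDR);
}

static bool
demo_slab_get(uintptr_t bits) {
	return (bool)(bits & (uintptr_t)0x1);
}

int
main(void) {
	/* A fake, page-aligned address stands in for an extent pointer. */
	uintptr_t addr = (uintptr_t)0x7f0000001000;
	uintptr_t bits = demo_pack(7, addr, true);
	assert(demo_szind_get(bits) == 7);
	assert(demo_slab_get(bits));
	printf("packed word: 0x%" PRIxPTR "\n", bits);
	return 0;
}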
static inline void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    extent_t *extent, szind_t szind, bool slab) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
	    ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
	    ((uintptr_t)slab);
	atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
	rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
	rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
	/*
	 * Write extent last, since the element is atomically considered valid
	 * as soon as the extent field is non-NULL.
	 */
	rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
#endif
}

#define	RTREE_LOCK(l)
#define	RTREE_UNLOCK(l)
#ifdef JEMALLOC_DEBUG
   /*
    * Suppose that it were possible for a jemalloc-allocated chunk to be
    * munmap()ped, followed by a different allocator in another thread re-using
    * overlapping virtual memory, all without invalidating the cached rtree
    * value.  The result would be a false positive (the rtree would claim that
    * jemalloc owns memory that it had actually discarded).  This scenario
    * seems impossible, but the following assertion is a prudent sanity check.
    */
#  define RTREE_GET_VALIDATE						\
	assert(rtree_get_locked(rtree, key) == ret);
#else
#  define RTREE_GET_VALIDATE
#endif
RTREE_GET_GENERATE(rtree_get)
#undef RTREE_LOCK
#undef RTREE_UNLOCK
#undef RTREE_GET_VALIDATE

static inline void
rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
	assert(!slab || szind < NBINS);

	/*
	 * The caller implicitly assures that it is the only writer to the szind
	 * and slab fields, and that the extent field cannot currently change.
	 */
	rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
	rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
}

JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
{
	uintptr_t subkey;
	unsigned i, lshift, height, bits;
	void **node, **child;

	malloc_mutex_lock(&rtree->mutex);
	for (i = lshift = 0, height = rtree->height, node = rtree->root;
	    i < height - 1;
	    i++, lshift += bits, node = child) {
		bits = rtree->level2bits[i];
		subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    bits);
		child = (void**)node[subkey];
		if (child == NULL) {
			size_t size = ((i + 1 < height - 1) ? sizeof(void *)
			    : (sizeof(uint8_t))) << rtree->level2bits[i+1];
			child = (void**)rtree->alloc(size);
			if (child == NULL) {
				malloc_mutex_unlock(&rtree->mutex);
				return (true);
			}
			memset(child, 0, size);
			node[subkey] = child;
		}
	}

	/* node is a leaf, so it contains values rather than node pointers. */
	bits = rtree->level2bits[i];
	subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
	{
		uint8_t *leaf = (uint8_t *)node;
		leaf[subkey] = val;
	}
	malloc_mutex_unlock(&rtree->mutex);

	return (false);
}
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, bool init_missing) {
	assert(key != 0);
	assert(!dependent || !init_missing);

	size_t slot = rtree_cache_direct_map(key);
	uintptr_t leafkey = rtree_leafkey(key);
	assert(leafkey != RTREE_LEAFKEY_INVALID);

	/* Fast path: L1 direct mapped cache. */
	if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
		rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
		assert(leaf != NULL);
		uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT - 1);
		return &leaf[subkey];
	}
	/*
	 * Search the L2 LRU cache.  On hit, swap the matching element into the
	 * slot in L1 cache, and move the position in L2 up by 1.
	 */
#define RTREE_CACHE_CHECK_L2(i) do {					\
	if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) {	\
		rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf;	\
		assert(leaf != NULL);					\
		if (i > 0) {						\
			/* Bubble up by one. */				\
			rtree_ctx->l2_cache[i].leafkey =		\
				rtree_ctx->l2_cache[i - 1].leafkey;	\
			rtree_ctx->l2_cache[i].leaf =			\
				rtree_ctx->l2_cache[i - 1].leaf;	\
			rtree_ctx->l2_cache[i - 1].leafkey =		\
			    rtree_ctx->cache[slot].leafkey;		\
			rtree_ctx->l2_cache[i - 1].leaf =		\
			    rtree_ctx->cache[slot].leaf;		\
		} else {						\
			rtree_ctx->l2_cache[0].leafkey =		\
			    rtree_ctx->cache[slot].leafkey;		\
			rtree_ctx->l2_cache[0].leaf =			\
			    rtree_ctx->cache[slot].leaf;		\
		}							\
		rtree_ctx->cache[slot].leafkey = leafkey;		\
		rtree_ctx->cache[slot].leaf = leaf;			\
		uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);	\
		return &leaf[subkey];					\
	}								\
} while (0)
	/* Check the first cache entry. */
	RTREE_CACHE_CHECK_L2(0);
	/* Search the remaining cache elements. */
	for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
		RTREE_CACHE_CHECK_L2(i);
	}
#undef RTREE_CACHE_CHECK_L2

	return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
	    dependent, init_missing);
}
static inline bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    extent_t *extent, szind_t szind, bool slab) {
	/* Use rtree_clear() to set the extent to NULL. */
	assert(extent != NULL);

	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, false, true);
	if (elm == NULL) {
		return true;
	}

	assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
	rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);

	return false;
}
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    bool dependent) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, dependent, false);
	if (!dependent && elm == NULL) {
		return NULL;
	}
	assert(elm != NULL);
	return elm;
}
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
	    dependent);
	if (!dependent && elm == NULL) {
		return NULL;
	}
	return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
}
JEMALLOC_ALWAYS_INLINE szind_t
rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
	    dependent);
	if (!dependent && elm == NULL) {
		return NSIZES;
	}
	return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
}

/*
 * rtree_slab_read() is intentionally omitted because slab is always read in
 * conjunction with szind, which makes rtree_szind_slab_read() a better choice.
 */
JEMALLOC_ALWAYS_INLINE bool
rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
	    dependent);
	if (!dependent && elm == NULL) {
		return true;
	}
	*r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
	*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
	return false;
}
JEMALLOC_ALWAYS_INLINE bool
rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
	    dependent);
	if (!dependent && elm == NULL) {
		return true;
	}
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
	*r_szind = rtree_leaf_elm_bits_szind_get(bits);
	*r_slab = rtree_leaf_elm_bits_slab_get(bits);
#else
	*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
	*r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
#endif
	return false;
}
static inline void
rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, szind_t szind, bool slab) {
	assert(!slab || szind < NBINS);

	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
	rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
}
static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
	rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
	assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) != NULL);
	rtree_leaf_elm_write(tsdn, rtree, elm, NULL, NSIZES, false);
}

#endif /* JEMALLOC_H_INLINES */
#endif /* JEMALLOC_INTERNAL_RTREE_H */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/rtree_tsd.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H
#define JEMALLOC_INTERNAL_RTREE_CTX_H
/*
* Number of leafkey/leaf pairs to cache in L1 and L2 level respectively. Each
* entry supports an entire leaf, so the cache hit rate is typically high even
* with a small number of entries. In rare cases extent activity will straddle
* the boundary between two leaf nodes. Furthermore, an arena may use a
* combination of dss and mmap. Note that as memory usage grows past the amount
* that this cache can directly cover, the cache will become less effective if
* locality of reference is low, but the consequence is merely cache misses
* while traversing the tree nodes.
*
* The L1 direct mapped cache offers consistent and low cost on cache hit.
* However collision could affect hit rate negatively. This is resolved by
* combining with a L2 LRU cache, which requires linear search and re-ordering
* on access but suffers no collision. Note that, the cache will itself suffer
* cache misses if made overly large, plus the cost of linear search in the LRU
* cache.
*/
#define RTREE_CTX_LG_NCACHE 4
#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
#define RTREE_CTX_NCACHE_L2 8
/*
* Zero initializer required for tsd initialization only. Proper initialization
* done via rtree_ctx_data_init().
*/
#define RTREE_CTX_ZERO_INITIALIZER {{{0}}, {{0}}}
typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;

typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t;
struct rtree_ctx_cache_elm_s {
	uintptr_t		leafkey;
	rtree_leaf_elm_t	*leaf;
};

typedef struct rtree_ctx_s rtree_ctx_t;
struct rtree_ctx_s {
	/* Direct mapped cache. */
	rtree_ctx_cache_elm_t	cache[RTREE_CTX_NCACHE];
	/* L2 LRU cache. */
	rtree_ctx_cache_elm_t	l2_cache[RTREE_CTX_NCACHE_L2];
};

void rtree_ctx_data_init(rtree_ctx_t *ctx);

#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */
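As a rough sketch of the two-level caching policy described in the comment above (a direct-mapped L1 probe, then a linear scan of a small L2 array that promotes a hit into L1 and bubbles it up by one position in L2), the fragment below uses simplified stand-in types and a toy slot hash; it illustrates the policy only and is not jemalloc's rtree_ctx code:

#include <stddef.h>
#include <stdint.h>

#define L1_NCACHE 16	/* mirrors RTREE_CTX_NCACHE (1 << 4) */
#define L2_NCACHE 8	/* mirrors RTREE_CTX_NCACHE_L2 */

typedef struct {
	uintptr_t key;		/* plays the role of a leafkey */
	void *leaf;
} cache_elm_t;

typedef struct {
	cache_elm_t l1[L1_NCACHE];	/* direct mapped */
	cache_elm_t l2[L2_NCACHE];	/* scanned linearly, reordered on hit */
} cache_t;

static void *
cache_lookup(cache_t *c, uintptr_t key) {
	size_t slot = (key >> 12) & (L1_NCACHE - 1);	/* toy direct map */
	if (c->l1[slot].key == key) {
		return c->l1[slot].leaf;	/* L1 hit: cheapest path */
	}
	for (size_t i = 0; i < L2_NCACHE; i++) {
		if (c->l2[i].key != key) {
			continue;
		}
		cache_elm_t hit = c->l2[i];
		if (i > 0) {
			/* Bubble up by one; demote the evicted L1 entry. */
			c->l2[i] = c->l2[i - 1];
			c->l2[i - 1] = c->l1[slot];
		} else {
			c->l2[0] = c->l1[slot];
		}
		c->l1[slot] = hit;
		return hit.leaf;
	}
	return NULL;	/* miss: the real code falls back to a tree walk */
}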
deps/jemalloc/include/jemalloc/internal/size_classes.sh
#!/bin/sh
#
# Usage: size_classes.sh <lg_qarr> <lg_tmin> <lg_parr> <lg_g>

# The following limits are chosen such that they cover all supported platforms.

# Range of quanta.
lg_qmin=3
lg_qmax=4

# Pointer sizes.
lg_zarr="2 3"

# Quanta.
lg_qarr=$1

# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)].
lg_tmin=3
lg_tmin=$2

# Maximum lookup size.
lg_kmax=12

# Page sizes.
lg_parr=`echo $3 | tr ',' ' '`

# Range of page sizes.
lg_pmin=12
lg_pmax=16

# Size class group size (number of size classes for each size doubling).
lg_g=$4

pow2() {
  e=$1
...
@@ -22,68 +31,317 @@ pow2() {
  done
}
cat
<<
EOF
lg
()
{
/* This file was automatically generated by size_classes.sh. */
x
=
$1
/******************************************************************************/
lg_result
=
0
#ifdef JEMALLOC_H_TYPES
while
[
${
x
}
-gt
1
]
;
do
lg_result
=
$((${
lg_result
}
+
1
))
x
=
$((${
x
}
/
2
))
done
}
EOF
lg_ceil
()
{
y
=
$1
lg
${
y
}
;
lg_floor
=
${
lg_result
}
pow2
${
lg_floor
}
;
pow2_floor
=
${
pow2_result
}
if
[
${
pow2_floor
}
-lt
${
y
}
]
;
then
lg_ceil_result
=
$((${
lg_floor
}
+
1
))
else
lg_ceil_result
=
${
lg_floor
}
fi
}
lg_q
=
${
lg_qmin
}
reg_size_compute
()
{
while
[
${
lg_q
}
-le
${
lg_qmax
}
]
;
do
lg_grp
=
$1
lg_t
=
${
lg_tmin
}
lg_delta
=
$2
while
[
${
lg_t
}
-le
${
lg_q
}
]
;
do
ndelta
=
$3
lg_p
=
${
lg_pmin
}
while
[
${
lg_p
}
-le
${
lg_pmax
}
]
;
do
pow2
${
lg_grp
}
;
grp
=
${
pow2_result
}
echo
"#if (LG_TINY_MIN ==
${
lg_t
}
&& LG_QUANTUM ==
${
lg_q
}
&& LG_PAGE ==
${
lg_p
}
)"
pow2
${
lg_delta
}
;
delta
=
${
pow2_result
}
echo
"#define SIZE_CLASSES_DEFINED"
reg_size
=
$((${
grp
}
+
${
delta
}
*
${
ndelta
}))
pow2
${
lg_q
}
;
q
=
${
pow2_result
}
}
pow2
${
lg_t
}
;
t
=
${
pow2_result
}
slab_size
()
{
lg_p
=
$1
lg_grp
=
$2
lg_delta
=
$3
ndelta
=
$4
pow2
${
lg_p
}
;
p
=
${
pow2_result
}
reg_size_compute
${
lg_grp
}
${
lg_delta
}
${
ndelta
}
# Compute smallest slab size that is an integer multiple of reg_size.
try_slab_size
=
${
p
}
try_nregs
=
$((${
try_slab_size
}
/
${
reg_size
}))
perfect
=
0
while
[
${
perfect
}
-eq
0
]
;
do
perfect_slab_size
=
${
try_slab_size
}
perfect_nregs
=
${
try_nregs
}
try_slab_size
=
$((${
try_slab_size
}
+
${
p
}))
try_nregs
=
$((${
try_slab_size
}
/
${
reg_size
}))
if
[
${
perfect_slab_size
}
-eq
$((${
perfect_nregs
}
*
${
reg_size
}))
]
;
then
perfect
=
1
fi
done
slab_size_pgs
=
$((${
perfect_slab_size
}
/
${
p
}))
}
size_class
()
{
index
=
$1
lg_grp
=
$2
lg_delta
=
$3
ndelta
=
$4
lg_p
=
$5
lg_kmax
=
$6
if
[
${
lg_delta
}
-ge
${
lg_p
}
]
;
then
psz
=
"yes"
else
pow2
${
lg_p
}
;
p
=
${
pow2_result
}
pow2
${
lg_p
}
;
p
=
${
pow2_result
}
bin
=
0
pow2
${
lg_grp
}
;
grp
=
${
pow2_result
}
psz
=
0
pow2
${
lg_delta
}
;
delta
=
${
pow2_result
}
sz
=
${
t
}
sz
=
$((${
grp
}
+
${
delta
}
*
${
ndelta
}))
delta
=
$((${
sz
}
-
${
psz
}))
npgs
=
$((${
sz
}
/
${
p
}))
echo
"/* SIZE_CLASS(bin, delta, sz) */"
if
[
${
sz
}
-eq
$((${
npgs
}
*
${
p
}))
]
;
then
psz
=
"yes"
else
psz
=
"no"
fi
fi
lg
${
ndelta
}
;
lg_ndelta
=
${
lg_result
}
;
pow2
${
lg_ndelta
}
if
[
${
pow2_result
}
-lt
${
ndelta
}
]
;
then
rem
=
"yes"
else
rem
=
"no"
fi
lg_size
=
${
lg_grp
}
if
[
$((${
lg_delta
}
+
${
lg_ndelta
}))
-eq
${
lg_grp
}
]
;
then
lg_size
=
$((${
lg_grp
}
+
1
))
else
lg_size
=
${
lg_grp
}
rem
=
"yes"
fi
if
[
${
lg_size
}
-lt
$((${
lg_p
}
+
${
lg_g
}))
]
;
then
bin
=
"yes"
slab_size
${
lg_p
}
${
lg_grp
}
${
lg_delta
}
${
ndelta
}
;
pgs
=
${
slab_size_pgs
}
else
bin
=
"no"
pgs
=
0
fi
if
[
${
lg_size
}
-lt
${
lg_kmax
}
\
-o
${
lg_size
}
-eq
${
lg_kmax
}
-a
${
rem
}
=
"no"
]
;
then
lg_delta_lookup
=
${
lg_delta
}
else
lg_delta_lookup
=
"no"
fi
printf
' SC(%3d, %6d, %8d, %6d, %3s, %3s, %3d, %2s) \\\n'
${
index
}
${
lg_grp
}
${
lg_delta
}
${
ndelta
}
${
psz
}
${
bin
}
${
pgs
}
${
lg_delta_lookup
}
# Defined upon return:
# - psz ("yes" or "no")
# - bin ("yes" or "no")
# - pgs
# - lg_delta_lookup (${lg_delta} or "no")
}
sep_line
()
{
echo
"
\\
"
}
size_classes
()
{
lg_z
=
$1
lg_q
=
$2
lg_t
=
$3
lg_p
=
$4
lg_g
=
$5
pow2
$((${
lg_z
}
+
3
))
;
ptr_bits
=
${
pow2_result
}
pow2
${
lg_g
}
;
g
=
${
pow2_result
}
echo
"#define SIZE_CLASSES
\\
"
echo
"#define SIZE_CLASSES
\\
"
echo
" /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */
\\
"
ntbins
=
0
nlbins
=
0
lg_tiny_maxclass
=
'"NA"'
nbins
=
0
npsizes
=
0
# Tiny size classes.
# Tiny size classes.
while
[
${
sz
}
-lt
${
q
}
]
;
do
ndelta
=
0
echo
" SIZE_CLASS(
${
bin
}
,
${
delta
}
,
${
sz
}
)
\\
"
index
=
0
bin
=
$((${
bin
}
+
1
))
lg_grp
=
${
lg_t
}
psz
=
${
sz
}
lg_delta
=
${
lg_grp
}
sz
=
$((${
sz
}
+
${
sz
}))
while
[
${
lg_grp
}
-lt
${
lg_q
}
]
;
do
delta
=
$((${
sz
}
-
${
psz
}))
size_class
${
index
}
${
lg_grp
}
${
lg_delta
}
${
ndelta
}
${
lg_p
}
${
lg_kmax
}
if
[
${
lg_delta_lookup
}
!=
"no"
]
;
then
nlbins
=
$((${
index
}
+
1
))
fi
if
[
${
psz
}
=
"yes"
]
;
then
npsizes
=
$((${
npsizes
}
+
1
))
fi
if
[
${
bin
}
!=
"no"
]
;
then
nbins
=
$((${
index
}
+
1
))
fi
ntbins
=
$((${
ntbins
}
+
1
))
lg_tiny_maxclass
=
${
lg_grp
}
# Final written value is correct.
index
=
$((${
index
}
+
1
))
lg_delta
=
${
lg_grp
}
lg_grp
=
$((${
lg_grp
}
+
1
))
done
# First non-tiny group.
if
[
${
ntbins
}
-gt
0
]
;
then
sep_line
# The first size class has an unusual encoding, because the size has to be
# split between grp and delta*ndelta.
lg_grp
=
$((${
lg_grp
}
-
1
))
ndelta
=
1
size_class
${
index
}
${
lg_grp
}
${
lg_delta
}
${
ndelta
}
${
lg_p
}
${
lg_kmax
}
index
=
$((${
index
}
+
1
))
lg_grp
=
$((${
lg_grp
}
+
1
))
lg_delta
=
$((${
lg_delta
}
+
1
))
if
[
${
psz
}
=
"yes"
]
;
then
npsizes
=
$((${
npsizes
}
+
1
))
fi
fi
while
[
${
ndelta
}
-lt
${
g
}
]
;
do
size_class
${
index
}
${
lg_grp
}
${
lg_delta
}
${
ndelta
}
${
lg_p
}
${
lg_kmax
}
index
=
$((${
index
}
+
1
))
ndelta
=
$((${
ndelta
}
+
1
))
if
[
${
psz
}
=
"yes"
]
;
then
npsizes
=
$((${
npsizes
}
+
1
))
fi
done
done
# Quantum-multiple size classes. For each doubling of sz, as many as 4
# size classes exist. Their spacing is the greater of:
# All remaining groups.
# - q
lg_grp
=
$((${
lg_grp
}
+
${
lg_g
}))
# - sz/4, where sz is a power of 2
while
[
${
lg_grp
}
-lt
$((${
ptr_bits
}
-
1
))
]
;
do
while
[
${
sz
}
-lt
${
p
}
]
;
do
sep_line
if
[
${
sz
}
-ge
$((${
q
}
*
4
))
]
;
then
ndelta
=
1
i
=
$((${
sz
}
/
4
))
if
[
${
lg_grp
}
-eq
$((${
ptr_bits
}
-
2
))
]
;
then
ndelta_limit
=
$((${
g
}
-
1
))
else
else
i
=
${
q
}
ndelta_limit
=
${
g
}
fi
fi
next_2pow
=
$((${
sz
}
*
2
))
while
[
${
ndelta
}
-le
${
ndelta_limit
}
]
;
do
while
[
${
sz
}
-lt
$next_2pow
]
;
do
size_class
${
index
}
${
lg_grp
}
${
lg_delta
}
${
ndelta
}
${
lg_p
}
${
lg_kmax
}
echo
" SIZE_CLASS(
${
bin
}
,
${
delta
}
,
${
sz
}
)
\\
"
if
[
${
lg_delta_lookup
}
!=
"no"
]
;
then
bin
=
$((${
bin
}
+
1
))
nlbins
=
$((${
index
}
+
1
))
psz
=
${
sz
}
# Final written value is correct:
sz
=
$((${
sz
}
+
${
i
}))
lookup_maxclass
=
"((((size_t)1) <<
${
lg_grp
}
) + (((size_t)
${
ndelta
}
) <<
${
lg_delta
}
))"
delta
=
$((${
sz
}
-
${
psz
}))
fi
if
[
${
psz
}
=
"yes"
]
;
then
npsizes
=
$((${
npsizes
}
+
1
))
fi
if
[
${
bin
}
!=
"no"
]
;
then
nbins
=
$((${
index
}
+
1
))
# Final written value is correct:
small_maxclass
=
"((((size_t)1) <<
${
lg_grp
}
) + (((size_t)
${
ndelta
}
) <<
${
lg_delta
}
))"
if
[
${
lg_g
}
-gt
0
]
;
then
lg_large_minclass
=
$((${
lg_grp
}
+
1
))
else
lg_large_minclass
=
$((${
lg_grp
}
+
2
))
fi
fi
# Final written value is correct:
large_maxclass
=
"((((size_t)1) <<
${
lg_grp
}
) + (((size_t)
${
ndelta
}
) <<
${
lg_delta
}
))"
index
=
$((${
index
}
+
1
))
ndelta
=
$((${
ndelta
}
+
1
))
done
done
lg_grp
=
$((${
lg_grp
}
+
1
))
lg_delta
=
$((${
lg_delta
}
+
1
))
done
done
echo
echo
echo
"#define NBINS
${
bin
}
"
nsizes
=
${
index
}
echo
"#define SMALL_MAXCLASS
${
psz
}
"
lg_ceil
${
nsizes
}
;
lg_ceil_nsizes
=
${
lg_ceil_result
}
# Defined upon completion:
# - ntbins
# - nlbins
# - nbins
# - nsizes
# - lg_ceil_nsizes
# - npsizes
# - lg_tiny_maxclass
# - lookup_maxclass
# - small_maxclass
# - lg_large_minclass
# - large_maxclass
}
cat <<EOF
#ifndef JEMALLOC_INTERNAL_SIZE_CLASSES_H
#define JEMALLOC_INTERNAL_SIZE_CLASSES_H
/* This file was automatically generated by size_classes.sh. */
#include "jemalloc/internal/jemalloc_internal_types.h"
/*
* This header file defines:
*
* LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
* LG_TINY_MIN: Lg of minimum size class to support.
* SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz,
* bin, pgs, lg_delta_lookup) tuples.
* index: Size class index.
* lg_grp: Lg group base size (no deltas added).
* lg_delta: Lg delta to previous size class.
* ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
* psz: 'yes' if a multiple of the page size, 'no' otherwise.
* bin: 'yes' if a small bin size class, 'no' otherwise.
* pgs: Slab page count if a small bin size class, 0 otherwise.
* lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
* otherwise.
* NTBINS: Number of tiny bins.
* NLBINS: Number of bins supported by the lookup table.
* NBINS: Number of small size class bins.
* NSIZES: Number of size classes.
* LG_CEIL_NSIZES: Number of bits required to store NSIZES.
* NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE).
* LG_TINY_MAXCLASS: Lg of maximum tiny size class.
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
* LG_LARGE_MINCLASS: Lg of minimum large size class.
* LARGE_MAXCLASS: Maximum (large) size class.
*/
#define LG_SIZE_CLASS_GROUP	${lg_g}
#define LG_TINY_MIN		${lg_tmin}

EOF
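Per the SC() documentation emitted above, every size class is simply (1 << lg_grp) + ndelta * (1 << lg_delta). A minimal sketch of that arithmetic in C (the example tuple values are for illustration only, not taken from a generated table):

#include <stdint.h>
#include <stdio.h>

/* size == (1 << lg_grp) + (ndelta << lg_delta), per the header comment. */
static uint64_t
sc_size(unsigned lg_grp, unsigned lg_delta, unsigned ndelta) {
	return ((uint64_t)1 << lg_grp) + ((uint64_t)ndelta << lg_delta);
}

int
main(void) {
	/* e.g. lg_grp=6, lg_delta=4, ndelta=3: 64 + 3*16 = 112 bytes. */
	printf("%llu\n", (unsigned long long)sc_size(6, 4, 3));
	return 0;
}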
for
lg_z
in
${
lg_zarr
}
;
do
for
lg_q
in
${
lg_qarr
}
;
do
lg_t
=
${
lg_tmin
}
while
[
${
lg_t
}
-le
${
lg_q
}
]
;
do
# Iterate through page sizes and compute how many bins there are.
for
lg_p
in
${
lg_parr
}
;
do
echo
"#if (LG_SIZEOF_PTR ==
${
lg_z
}
&& LG_TINY_MIN ==
${
lg_t
}
&& LG_QUANTUM ==
${
lg_q
}
&& LG_PAGE ==
${
lg_p
}
)"
size_classes
${
lg_z
}
${
lg_q
}
${
lg_t
}
${
lg_p
}
${
lg_g
}
echo
"#define SIZE_CLASSES_DEFINED"
echo
"#define NTBINS
${
ntbins
}
"
echo
"#define NLBINS
${
nlbins
}
"
echo
"#define NBINS
${
nbins
}
"
echo
"#define NSIZES
${
nsizes
}
"
echo
"#define LG_CEIL_NSIZES
${
lg_ceil_nsizes
}
"
echo
"#define NPSIZES
${
npsizes
}
"
echo
"#define LG_TINY_MAXCLASS
${
lg_tiny_maxclass
}
"
echo
"#define LOOKUP_MAXCLASS
${
lookup_maxclass
}
"
echo
"#define SMALL_MAXCLASS
${
small_maxclass
}
"
echo
"#define LG_LARGE_MINCLASS
${
lg_large_minclass
}
"
echo
"#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)"
echo
"#define LARGE_MAXCLASS
${
large_maxclass
}
"
echo
"#endif"
echo
"#endif"
echo
echo
lg_p
=
$((${
lg_p
}
+
1
))
done
done
lg_t
=
$((${
lg_t
}
+
1
))
lg_t
=
$((${
lg_t
}
+
1
))
done
done
lg_q
=
$((${
lg_q
}
+
1
))
done
done
done
cat <<EOF
...
@@ -92,31 +350,12 @@ cat <<EOF
#endif
#undef SIZE_CLASSES_DEFINED
/*
 * The small_size2bin lookup table uses uint8_t to encode each bin index, so we
 * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
 * cannot support more than 256 small size classes.  Further constrain NBINS to
 * cannot support more than 256 small size classes.
 * 255 to support prof_promote, since all small size classes, plus a "not
 * small" size class must be stored in 8 bits of arena_chunk_map_t's bits
 * field.
 */
#if (NBINS > 255)
#if (NBINS > 256)
#  error "Too many small size classes"
#endif
#endif /* JEMALLOC_H_TYPES */
#endif /* JEMALLOC_INTERNAL_SIZE_CLASSES_H */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
EOF
deps/jemalloc/include/jemalloc/internal/smoothstep.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
/*
* This file was generated by the following command:
* sh smoothstep.sh smoother 200 24 3 15
*/
/******************************************************************************/
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
*
* 3 2
* smoothstep(x) = -2x + 3x
*
* 5 4 3
* smootherstep(x) = 6x - 15x + 10x
*
* 7 6 5 4
* smootheststep(x) = -20x + 70x - 84x + 35x
*/
#define SMOOTHSTEP_VARIANT "smoother"
#define SMOOTHSTEP_NSTEPS 200
#define SMOOTHSTEP_BFP 24
#define SMOOTHSTEP \
 /* STEP(step, h, x, y) */						\
STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
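The table above is the smootherstep polynomial evaluated at x = step/SMOOTHSTEP_NSTEPS and scaled by 2^SMOOTHSTEP_BFP. A small C cross-check of a few entries follows; because the generator works in decimal via dc, a naive double-precision recomputation can in principle differ in the last bit for some steps:

#include <stdint.h>
#include <stdio.h>

#define NSTEPS 200	/* SMOOTHSTEP_NSTEPS */
#define BFP 24		/* SMOOTHSTEP_BFP: binary fixed-point bits */

int
main(void) {
	for (unsigned step = 50; step <= NSTEPS; step += 50) {
		double x = (double)step / NSTEPS;
		/* smootherstep(x) = 6x^5 - 15x^4 + 10x^3 */
		double y = ((6 * x - 15) * x + 10) * x * x * x;
		uint64_t h = (uint64_t)(y * ((uint64_t)1 << BFP));
		printf("STEP(%4u, UINT64_C(0x%016llx), %.3f, %.15f)\n",
		    step, (unsigned long long)h, x, y);
	}
	return 0;
}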
deps/jemalloc/include/jemalloc/internal/smoothstep.sh
0 → 100755
#!/bin/sh
#
# Generate a discrete lookup table for a sigmoid function in the smoothstep
# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode
# the entries using a binary fixed point representation.
#
# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
#
# <variant> is in {smooth, smoother, smoothest}.
# <nsteps> must be greater than zero.
# <bfp> must be in [0..62]; reasonable values are roughly [10..30].
# <xprec> is x decimal precision.
# <yprec> is y decimal precision.
#set -x
cmd="sh smoothstep.sh $*"
variant=$1
nsteps=$2
bfp=$3
xprec=$4
yprec=$5

case "${variant}" in
  smooth)
    ;;
  smoother)
    ;;
  smoothest)
    ;;
  *)
    echo "Unsupported variant"
    exit 1
    ;;
esac
smooth() {
  step=$1
  y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}'`
}

smoother() {
  step=$1
  y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}'`
}

smoothest() {
  step=$1
  y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}'`
}
cat <<EOF
#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
/*
* This file was generated by the following command:
*
$cmd
*/
/******************************************************************************/
/*
* This header defines a precomputed table based on the smoothstep family of
* sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
* to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
* that floating point math can be avoided.
*
* 3 2
* smoothstep(x) = -2x + 3x
*
* 5 4 3
* smootherstep(x) = 6x - 15x + 10x
*
* 7 6 5 4
* smootheststep(x) = -20x + 70x - 84x + 35x
*/
#define SMOOTHSTEP_VARIANT	"${variant}"
#define SMOOTHSTEP_NSTEPS	${nsteps}
#define SMOOTHSTEP_BFP		${bfp}
#define SMOOTHSTEP							\\
 /* STEP(step, h, x, y) */						\\
EOF
s=1
while [ $s -le $nsteps ] ; do
  $variant ${s}
  x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
  printf '  STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y}
  s=$((s+1))
done
echo

cat <<EOF
#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
EOF
deps/jemalloc/include/jemalloc/internal/spin.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_SPIN_H
#define JEMALLOC_INTERNAL_SPIN_H

#define SPIN_INITIALIZER {0U}

typedef struct {
	unsigned iteration;
} spin_t;

static inline void
spin_cpu_spinwait() {
#  if HAVE_CPU_SPINWAIT
	CPU_SPINWAIT;
#  else
	volatile int x = 0;
	x = x;
#  endif
}

static inline void
spin_adaptive(spin_t *spin) {
	volatile uint32_t i;

	if (spin->iteration < 5) {
		for (i = 0; i < (1U << spin->iteration); i++) {
			spin_cpu_spinwait();
		}
		spin->iteration++;
	} else {
#ifdef _WIN32
		SwitchToThread();
#else
		sched_yield();
#endif
	}
}

#undef SPIN_INLINE

#endif /* JEMALLOC_INTERNAL_SPIN_H */
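A sketch of how a caller might drive the adaptive back-off above from a retry loop. The tiny test-and-set lock and the C11 atomics are purely illustrative (and assume spin_t, SPIN_INITIALIZER, and spin_adaptive() from this header are in scope); it is not a jemalloc API:

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_bool locked;
} demo_lock_t;

static void
demo_lock(demo_lock_t *lk) {
	spin_t spin = SPIN_INITIALIZER;
	bool expected = false;
	/* Spin a little harder after every failed acquisition attempt. */
	while (!atomic_compare_exchange_weak(&lk->locked, &expected, true)) {
		expected = false;
		spin_adaptive(&spin);
	}
}

static void
demo_unlock(demo_lock_t *lk) {
	atomic_store(&lk->locked, false);
}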
deps/jemalloc/include/jemalloc/internal/stats.h
/******************************************************************************/
#ifndef JEMALLOC_INTERNAL_STATS_H
#ifdef JEMALLOC_H_TYPES
#define JEMALLOC_INTERNAL_STATS_H
typedef
struct
tcache_bin_stats_s
tcache_bin_stats_t
;
/* OPTION(opt, var_name, default, set_value_to) */
typedef
struct
malloc_bin_stats_s
malloc_bin_stats_t
;
#define STATS_PRINT_OPTIONS \
typedef
struct
malloc_large_stats_s
malloc_large_stats_t
;
OPTION('J', json, false, true) \
typedef
struct
arena_stats_s
arena_stats_t
;
OPTION('g', general, true, false) \
typedef
struct
chunk_stats_s
chunk_stats_t
;
OPTION('m', merged, config_stats, false) \
OPTION('d', destroyed, config_stats, false) \
#endif
/* JEMALLOC_H_TYPES */
OPTION('a', unmerged, config_stats, false) \
/******************************************************************************/
OPTION('b', bins, true, false) \
#ifdef JEMALLOC_H_STRUCTS
OPTION('l', large, true, false) \
OPTION('x', mutex, true, false)
struct
tcache_bin_stats_s
{
/*
enum
{
* Number of allocation requests that corresponded to the size of this
#define OPTION(o, v, d, s) stats_print_option_num_##v,
* bin.
STATS_PRINT_OPTIONS
*/
#undef OPTION
uint64_t
nrequests
;
stats_print_tot_num_options
};
struct
malloc_bin_stats_s
{
/*
* Current number of bytes allocated, including objects currently
* cached by tcache.
*/
size_t
allocated
;
/*
* Total number of allocation/deallocation requests served directly by
* the bin. Note that tcache may allocate an object, then recycle it
* many times, resulting many increments to nrequests, but only one
* each to nmalloc and ndalloc.
*/
uint64_t
nmalloc
;
uint64_t
ndalloc
;
/*
* Number of allocation requests that correspond to the size of this
* bin. This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
uint64_t
nrequests
;
/* Number of tcache fills from this bin. */
uint64_t
nfills
;
/* Number of tcache flushes to this bin. */
uint64_t
nflushes
;
/* Total number of runs created for this bin's size class. */
uint64_t
nruns
;
/*
* Total number of runs reused by extracting them from the runs tree for
* this bin's size class.
*/
uint64_t
reruns
;
/* Current number of runs in this bin. */
size_t
curruns
;
};
};
struct
malloc_large_stats_s
{
/* Options for stats_print. */
/*
* Total number of allocation/deallocation requests served directly by
* the arena. Note that tcache may allocate an object, then recycle it
* many times, resulting many increments to nrequests, but only one
* each to nmalloc and ndalloc.
*/
uint64_t
nmalloc
;
uint64_t
ndalloc
;
/*
* Number of allocation requests that correspond to this size class.
* This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
uint64_t
nrequests
;
/* Current number of runs of this size class. */
size_t
curruns
;
};
struct
arena_stats_s
{
/* Number of bytes currently mapped. */
size_t
mapped
;
/*
* Total number of purge sweeps, total number of madvise calls made,
* and total pages purged in order to keep dirty unused memory under
* control.
*/
uint64_t
npurge
;
uint64_t
nmadvise
;
uint64_t
purged
;
/* Per-size-category statistics. */
size_t
allocated_large
;
uint64_t
nmalloc_large
;
uint64_t
ndalloc_large
;
uint64_t
nrequests_large
;
/*
* One element for each possible size class, including sizes that
* overlap with bin size classes. This is necessary because ipalloc()
* sometimes has to use such large objects in order to assure proper
* alignment.
*/
malloc_large_stats_t
*
lstats
;
};
struct
chunk_stats_s
{
/* Number of chunks that were allocated. */
uint64_t
nchunks
;
/* High-water mark for number of chunks allocated. */
size_t
highchunks
;
/*
* Current number of chunks allocated. This value isn't maintained for
* any other purpose, so keep track of it in order to be able to set
* highchunks.
*/
size_t
curchunks
;
};
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern
bool
opt_stats_print
;
extern
bool
opt_stats_print
;
extern
char
opt_stats_print_opts
[
stats_print_tot_num_options
+
1
];
extern
size_t
stats_cactive
;
/* Implements je_malloc_stats_print. */
void
stats_print
(
void
(
*
write_cb
)(
void
*
,
const
char
*
),
void
*
cbopaque
,
void
stats_print
(
void
(
*
write
)(
void
*
,
const
char
*
),
void
*
cbopaque
,
const
char
*
opts
);
const
char
*
opts
);
#endif
/* JEMALLOC_H_EXTERNS */
#endif
/* JEMALLOC_INTERNAL_STATS_H */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
size_t
stats_cactive_get
(
void
);
void
stats_cactive_add
(
size_t
size
);
void
stats_cactive_sub
(
size_t
size
);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
JEMALLOC_INLINE
size_t
stats_cactive_get
(
void
)
{
return
(
atomic_read_z
(
&
stats_cactive
));
}
JEMALLOC_INLINE
void
stats_cactive_add
(
size_t
size
)
{
atomic_add_z
(
&
stats_cactive
,
size
);
}
JEMALLOC_INLINE
void
stats_cactive_sub
(
size_t
size
)
{
atomic_sub_z
(
&
stats_cactive
,
size
);
}
#endif
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/sz.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_SIZE_H
#define JEMALLOC_INTERNAL_SIZE_H
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"
/*
* sz module: Size computations.
*
* Some abbreviations used here:
* p: Page
* ind: Index
* s, sz: Size
* u: Usable size
* a: Aligned
*
* These are not always used completely consistently, but should be enough to
* interpret function names. E.g. sz_psz2ind converts page size to page size
* index; sz_sa2u converts a (size, alignment) allocation request to the usable
* size that would result from such an allocation.
*/
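As a rough illustration of the rounding these functions implement (each size doubling is split into 2^LG_SIZE_CLASS_GROUP classes that share one lg_delta, so rounding up to a class boundary is a mask operation), here is a self-contained sketch. The DEMO_* group and quantum constants are assumptions for illustration, and the tiny-size and lookup-table fast paths are ignored:

#include <stddef.h>
#include <stdio.h>

#define DEMO_LG_GROUP 2		/* assumed LG_SIZE_CLASS_GROUP */
#define DEMO_LG_QUANTUM 4	/* assumed LG_QUANTUM (16-byte quantum) */

static unsigned
demo_lg_floor(size_t x) {
	unsigned lg = 0;
	while (x >>= 1) {
		lg++;
	}
	return lg;
}

/* Round a request up to its size-class boundary, sz_s2u_compute()-style. */
static size_t
demo_s2u(size_t size) {
	size_t x = demo_lg_floor((size << 1) - 1);
	size_t lg_delta = (x < DEMO_LG_GROUP + DEMO_LG_QUANTUM + 1) ?
	    DEMO_LG_QUANTUM : x - DEMO_LG_GROUP - 1;
	size_t delta_mask = ((size_t)1 << lg_delta) - 1;
	return (size + delta_mask) & ~delta_mask;
}

int
main(void) {
	size_t reqs[] = {1, 17, 33, 100, 4097};
	for (size_t i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
		printf("%zu -> %zu\n", reqs[i], demo_s2u(reqs[i]));
	}
	return 0;
}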
/*
* sz_pind2sz_tab encodes the same information as could be computed by
* sz_pind2sz_compute().
*/
extern size_t const sz_pind2sz_tab[NPSIZES+1];
/*
 * sz_index2size_tab encodes the same information as could be computed (at
 * unacceptable cost in some code paths) by sz_index2size_compute().
 */
extern size_t const sz_index2size_tab[NSIZES];
/*
 * sz_size2index_tab is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via sz_size2index().
 */
extern uint8_t const sz_size2index_tab[];

static const size_t sz_large_pad =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    PAGE
#else
    0
#endif
JEMALLOC_ALWAYS_INLINE
pszind_t
sz_psz2ind
(
size_t
psz
)
{
if
(
unlikely
(
psz
>
LARGE_MAXCLASS
))
{
return
NPSIZES
;
}
{
pszind_t
x
=
lg_floor
((
psz
<<
1
)
-
1
);
pszind_t
shift
=
(
x
<
LG_SIZE_CLASS_GROUP
+
LG_PAGE
)
?
0
:
x
-
(
LG_SIZE_CLASS_GROUP
+
LG_PAGE
);
pszind_t
grp
=
shift
<<
LG_SIZE_CLASS_GROUP
;
pszind_t
lg_delta
=
(
x
<
LG_SIZE_CLASS_GROUP
+
LG_PAGE
+
1
)
?
LG_PAGE
:
x
-
LG_SIZE_CLASS_GROUP
-
1
;
size_t
delta_inverse_mask
=
ZU
(
-
1
)
<<
lg_delta
;
pszind_t
mod
=
((((
psz
-
1
)
&
delta_inverse_mask
)
>>
lg_delta
))
&
((
ZU
(
1
)
<<
LG_SIZE_CLASS_GROUP
)
-
1
);
pszind_t
ind
=
grp
+
mod
;
return
ind
;
}
}
static
inline
size_t
sz_pind2sz_compute
(
pszind_t
pind
)
{
if
(
unlikely
(
pind
==
NPSIZES
))
{
return
LARGE_MAXCLASS
+
PAGE
;
}
{
size_t
grp
=
pind
>>
LG_SIZE_CLASS_GROUP
;
size_t
mod
=
pind
&
((
ZU
(
1
)
<<
LG_SIZE_CLASS_GROUP
)
-
1
);
size_t
grp_size_mask
=
~
((
!!
grp
)
-
1
);
size_t
grp_size
=
((
ZU
(
1
)
<<
(
LG_PAGE
+
(
LG_SIZE_CLASS_GROUP
-
1
)))
<<
grp
)
&
grp_size_mask
;
size_t
shift
=
(
grp
==
0
)
?
1
:
grp
;
size_t
lg_delta
=
shift
+
(
LG_PAGE
-
1
);
size_t
mod_size
=
(
mod
+
1
)
<<
lg_delta
;
size_t
sz
=
grp_size
+
mod_size
;
return
sz
;
}
}
static
inline
size_t
sz_pind2sz_lookup
(
pszind_t
pind
)
{
size_t
ret
=
(
size_t
)
sz_pind2sz_tab
[
pind
];
assert
(
ret
==
sz_pind2sz_compute
(
pind
));
return
ret
;
}
static
inline
size_t
sz_pind2sz
(
pszind_t
pind
)
{
assert
(
pind
<
NPSIZES
+
1
);
return
sz_pind2sz_lookup
(
pind
);
}
static
inline
size_t
sz_psz2u
(
size_t
psz
)
{
if
(
unlikely
(
psz
>
LARGE_MAXCLASS
))
{
return
LARGE_MAXCLASS
+
PAGE
;
}
{
size_t
x
=
lg_floor
((
psz
<<
1
)
-
1
);
size_t
lg_delta
=
(
x
<
LG_SIZE_CLASS_GROUP
+
LG_PAGE
+
1
)
?
LG_PAGE
:
x
-
LG_SIZE_CLASS_GROUP
-
1
;
size_t
delta
=
ZU
(
1
)
<<
lg_delta
;
size_t
delta_mask
=
delta
-
1
;
size_t
usize
=
(
psz
+
delta_mask
)
&
~
delta_mask
;
return
usize
;
}
}
static
inline
szind_t
sz_size2index_compute
(
size_t
size
)
{
if
(
unlikely
(
size
>
LARGE_MAXCLASS
))
{
return
NSIZES
;
}
#if (NTBINS != 0)
if
(
size
<=
(
ZU
(
1
)
<<
LG_TINY_MAXCLASS
))
{
szind_t
lg_tmin
=
LG_TINY_MAXCLASS
-
NTBINS
+
1
;
szind_t
lg_ceil
=
lg_floor
(
pow2_ceil_zu
(
size
));
return
(
lg_ceil
<
lg_tmin
?
0
:
lg_ceil
-
lg_tmin
);
}
#endif
{
szind_t
x
=
lg_floor
((
size
<<
1
)
-
1
);
szind_t
shift
=
(
x
<
LG_SIZE_CLASS_GROUP
+
LG_QUANTUM
)
?
0
:
x
-
(
LG_SIZE_CLASS_GROUP
+
LG_QUANTUM
);
szind_t
grp
=
shift
<<
LG_SIZE_CLASS_GROUP
;
szind_t
lg_delta
=
(
x
<
LG_SIZE_CLASS_GROUP
+
LG_QUANTUM
+
1
)
?
LG_QUANTUM
:
x
-
LG_SIZE_CLASS_GROUP
-
1
;
size_t
delta_inverse_mask
=
ZU
(
-
1
)
<<
lg_delta
;
szind_t
mod
=
((((
size
-
1
)
&
delta_inverse_mask
)
>>
lg_delta
))
&
((
ZU
(
1
)
<<
LG_SIZE_CLASS_GROUP
)
-
1
);
szind_t
index
=
NTBINS
+
grp
+
mod
;
return
index
;
}
}
JEMALLOC_ALWAYS_INLINE
szind_t
sz_size2index_lookup
(
size_t
size
)
{
assert
(
size
<=
LOOKUP_MAXCLASS
);
{
szind_t
ret
=
(
sz_size2index_tab
[(
size
-
1
)
>>
LG_TINY_MIN
]);
assert
(
ret
==
sz_size2index_compute
(
size
));
return
ret
;
}
}
JEMALLOC_ALWAYS_INLINE
szind_t
sz_size2index
(
size_t
size
)
{
assert
(
size
>
0
);
if
(
likely
(
size
<=
LOOKUP_MAXCLASS
))
{
return
sz_size2index_lookup
(
size
);
}
return
sz_size2index_compute
(
size
);
}
static
inline
size_t
sz_index2size_compute
(
szind_t
index
)
{
#if (NTBINS > 0)
if
(
index
<
NTBINS
)
{
return
(
ZU
(
1
)
<<
(
LG_TINY_MAXCLASS
-
NTBINS
+
1
+
index
));
}
#endif
{
size_t
reduced_index
=
index
-
NTBINS
;
size_t
grp
=
reduced_index
>>
LG_SIZE_CLASS_GROUP
;
size_t
mod
=
reduced_index
&
((
ZU
(
1
)
<<
LG_SIZE_CLASS_GROUP
)
-
1
);
size_t
grp_size_mask
=
~
((
!!
grp
)
-
1
);
size_t
grp_size
=
((
ZU
(
1
)
<<
(
LG_QUANTUM
+
(
LG_SIZE_CLASS_GROUP
-
1
)))
<<
grp
)
&
grp_size_mask
;
size_t
shift
=
(
grp
==
0
)
?
1
:
grp
;
size_t
lg_delta
=
shift
+
(
LG_QUANTUM
-
1
);
size_t
mod_size
=
(
mod
+
1
)
<<
lg_delta
;
size_t
usize
=
grp_size
+
mod_size
;
return
usize
;
}
}
JEMALLOC_ALWAYS_INLINE
size_t
sz_index2size_lookup
(
szind_t
index
)
{
size_t
ret
=
(
size_t
)
sz_index2size_tab
[
index
];
assert
(
ret
==
sz_index2size_compute
(
index
));
return
ret
;
}
JEMALLOC_ALWAYS_INLINE
size_t
sz_index2size
(
szind_t
index
)
{
assert
(
index
<
NSIZES
);
return
sz_index2size_lookup
(
index
);
}
JEMALLOC_ALWAYS_INLINE
size_t
sz_s2u_compute
(
size_t
size
)
{
if
(
unlikely
(
size
>
LARGE_MAXCLASS
))
{
return
0
;
}
#if (NTBINS > 0)
if
(
size
<=
(
ZU
(
1
)
<<
LG_TINY_MAXCLASS
))
{
size_t
lg_tmin
=
LG_TINY_MAXCLASS
-
NTBINS
+
1
;
size_t
lg_ceil
=
lg_floor
(
pow2_ceil_zu
(
size
));
return
(
lg_ceil
<
lg_tmin
?
(
ZU
(
1
)
<<
lg_tmin
)
:
(
ZU
(
1
)
<<
lg_ceil
));
}
#endif
{
size_t
x
=
lg_floor
((
size
<<
1
)
-
1
);
size_t
lg_delta
=
(
x
<
LG_SIZE_CLASS_GROUP
+
LG_QUANTUM
+
1
)
?
LG_QUANTUM
:
x
-
LG_SIZE_CLASS_GROUP
-
1
;
size_t
delta
=
ZU
(
1
)
<<
lg_delta
;
size_t
delta_mask
=
delta
-
1
;
size_t
usize
=
(
size
+
delta_mask
)
&
~
delta_mask
;
return
usize
;
}
}
JEMALLOC_ALWAYS_INLINE
size_t
sz_s2u_lookup
(
size_t
size
)
{
size_t
ret
=
sz_index2size_lookup
(
sz_size2index_lookup
(
size
));
assert
(
ret
==
sz_s2u_compute
(
size
));
return
ret
;
}
/*
* Compute usable size that would result from allocating an object with the
* specified size.
*/
JEMALLOC_ALWAYS_INLINE
size_t
sz_s2u
(
size_t
size
)
{
assert
(
size
>
0
);
if
(
likely
(
size
<=
LOOKUP_MAXCLASS
))
{
return
sz_s2u_lookup
(
size
);
}
return
sz_s2u_compute
(
size
);
}
/*
* Compute usable size that would result from allocating an object with the
* specified size and alignment.
*/
JEMALLOC_ALWAYS_INLINE
size_t
sz_sa2u
(
size_t
size
,
size_t
alignment
)
{
size_t
usize
;
assert
(
alignment
!=
0
&&
((
alignment
-
1
)
&
alignment
)
==
0
);
/* Try for a small size class. */
if
(
size
<=
SMALL_MAXCLASS
&&
alignment
<
PAGE
)
{
/*
* Round size up to the nearest multiple of alignment.
*
* This done, we can take advantage of the fact that for each
* small size class, every object is aligned at the smallest
* power of two that is non-zero in the base two representation
* of the size. For example:
*
* Size | Base 2 | Minimum alignment
* -----+----------+------------------
* 96 | 1100000 | 32
* 144 | 10100000 | 32
* 192 | 11000000 | 64
*/
usize
=
sz_s2u
(
ALIGNMENT_CEILING
(
size
,
alignment
));
if
(
usize
<
LARGE_MINCLASS
)
{
return
usize
;
}
}
/* Large size class. Beware of overflow. */
if
(
unlikely
(
alignment
>
LARGE_MAXCLASS
))
{
return
0
;
}
/* Make sure result is a large size class. */
if
(
size
<=
LARGE_MINCLASS
)
{
usize
=
LARGE_MINCLASS
;
}
else
{
usize
=
sz_s2u
(
size
);
if
(
usize
<
size
)
{
/* size_t overflow. */
return
0
;
}
}
/*
* Calculate the multi-page mapping that large_palloc() would need in
* order to guarantee the alignment.
*/
if
(
usize
+
sz_large_pad
+
PAGE_CEILING
(
alignment
)
-
PAGE
<
usize
)
{
/* size_t overflow. */
return
0
;
}
return
usize
;
}
#endif
/* JEMALLOC_INTERNAL_SIZE_H */
deps/jemalloc/include/jemalloc/internal/tcache.h
deleted
100644 → 0
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
 * used for two purposes: preventing thread caching on a per thread basis and
 * cleaning up during thread shutdown.
 */
#define TCACHE_STATE_DISABLED      ((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED  ((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY     ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX           TCACHE_STATE_PURGATORY

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond that imposed as: twice the
 * number of regions per run for this size class.
 *
 * This constant must be an even number.
 */
#define TCACHE_NSLOTS_SMALL_MAX    200

/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE        20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15

/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps.  Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define TCACHE_GC_SWEEP            8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

typedef enum {
    tcache_enabled_false   = 0, /* Enable cast to/from bool. */
    tcache_enabled_true    = 1,
    tcache_enabled_default = 2
} tcache_enabled_t;

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
    unsigned ncached_max;      /* Upper limit on ncached. */
};

struct tcache_bin_s {
    tcache_bin_stats_t tstats;
    int      low_water;        /* Min # cached since last GC. */
    unsigned lg_fill_div;      /* Fill (ncached_max >> lg_fill_div). */
    unsigned ncached;          /* # of cached objects. */
    void     **avail;          /* Stack of available objects. */
};

struct tcache_s {
    ql_elm(tcache_t) link;     /* Used for aggregating stats. */
    uint64_t prof_accumbytes;  /* Cleared after arena_prof_accum() */
    arena_t  *arena;           /* This thread's arena. */
    unsigned ev_cnt;           /* Event count since incremental GC. */
    unsigned next_gc_bin;      /* Next bin to GC. */
    tcache_bin_t tbins[1];     /* Dynamically sized. */
    /*
     * The pointer stacks associated with tbins follow as a contiguous
     * array.  During tcache initialization, the avail pointer in each
     * element of tbins is initialized to point to the proper offset within
     * this array.
     */
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;
extern tcache_bin_info_t *tcache_bin_info;

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern size_t nhbins;

/* Maximum cached size class. */
extern size_t tcache_maxclass;

size_t tcache_salloc(const void *ptr);
void   tcache_event_hard(tcache_t *tcache);
void   *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
    size_t binind);
void   tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void   tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void   tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void   tcache_arena_dissociate(tcache_t *tcache);
tcache_t *tcache_create(arena_t *arena);
void   tcache_destroy(tcache_t *tcache);
void   tcache_thread_cleanup(void *arg);
void   tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool   tcache_boot0(void);
bool   tcache_boot1(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *)
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)

void   tcache_event(tcache_t *tcache);
void   tcache_flush(void);
bool   tcache_enabled_get(void);
tcache_t *tcache_get(bool create);
void   tcache_enabled_set(bool enabled);
void   *tcache_alloc_easy(tcache_bin_t *tbin);
void   *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void   *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void   tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
void   tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Map of thread-specific caches. */
malloc_tsd_externs(tcache, tcache_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL,
    tcache_thread_cleanup)
/* Per thread flag that allows thread caches to be disabled. */
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
    tcache_enabled_default, malloc_tsd_no_cleanup)

JEMALLOC_INLINE void
tcache_flush(void)
{
    tcache_t *tcache;

    cassert(config_tcache);

    tcache = *tcache_tsd_get();
    if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
        return;
    tcache_destroy(tcache);
    tcache = NULL;
    tcache_tsd_set(&tcache);
}

JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
    tcache_enabled_t tcache_enabled;

    cassert(config_tcache);

    tcache_enabled = *tcache_enabled_tsd_get();
    if (tcache_enabled == tcache_enabled_default) {
        tcache_enabled = (tcache_enabled_t)opt_tcache;
        tcache_enabled_tsd_set(&tcache_enabled);
    }

    return ((bool)tcache_enabled);
}

JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
    tcache_enabled_t tcache_enabled;
    tcache_t *tcache;

    cassert(config_tcache);

    tcache_enabled = (tcache_enabled_t)enabled;
    tcache_enabled_tsd_set(&tcache_enabled);
    tcache = *tcache_tsd_get();
    if (enabled) {
        if (tcache == TCACHE_STATE_DISABLED) {
            tcache = NULL;
            tcache_tsd_set(&tcache);
        }
    } else /* disabled */ {
        if (tcache > TCACHE_STATE_MAX) {
            tcache_destroy(tcache);
            tcache = NULL;
        }
        if (tcache == NULL) {
            tcache = TCACHE_STATE_DISABLED;
            tcache_tsd_set(&tcache);
        }
    }
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(bool create)
{
    tcache_t *tcache;

    if (config_tcache == false)
        return (NULL);
    if (config_lazy_lock && isthreaded == false)
        return (NULL);

    tcache = *tcache_tsd_get();
    if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
        if (tcache == TCACHE_STATE_DISABLED)
            return (NULL);
        if (tcache == NULL) {
            if (create == false) {
                /*
                 * Creating a tcache here would cause
                 * allocation as a side effect of free().
                 * Ordinarily that would be okay since
                 * tcache_create() failure is a soft failure
                 * that doesn't propagate.  However, if TLS
                 * data are freed via free() as in glibc,
                 * subtle corruption could result from setting
                 * a TLS variable after its backing memory is
                 * freed.
                 */
                return (NULL);
            }
            if (tcache_enabled_get() == false) {
                tcache_enabled_set(false); /* Memoize. */
                return (NULL);
            }
            return (tcache_create(choose_arena(NULL)));
        }
        if (tcache == TCACHE_STATE_PURGATORY) {
            /*
             * Make a note that an allocator function was called
             * after tcache_thread_cleanup() was called.
             */
            tcache = TCACHE_STATE_REINCARNATED;
            tcache_tsd_set(&tcache);
            return (NULL);
        }
        if (tcache == TCACHE_STATE_REINCARNATED)
            return (NULL);
        not_reached();
    }

    return (tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
{

    if (TCACHE_GC_INCR == 0)
        return;

    tcache->ev_cnt++;
    assert(tcache->ev_cnt <= TCACHE_GC_INCR);
    if (tcache->ev_cnt == TCACHE_GC_INCR)
        tcache_event_hard(tcache);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
    void *ret;

    if (tbin->ncached == 0) {
        tbin->low_water = -1;
        return (NULL);
    }
    tbin->ncached--;
    if ((int)tbin->ncached < tbin->low_water)
        tbin->low_water = tbin->ncached;
    ret = tbin->avail[tbin->ncached];
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
    void *ret;
    size_t binind;
    tcache_bin_t *tbin;

    binind = SMALL_SIZE2BIN(size);
    assert(binind < NBINS);
    tbin = &tcache->tbins[binind];
    size = arena_bin_info[binind].reg_size;
    ret = tcache_alloc_easy(tbin);
    if (ret == NULL) {
        ret = tcache_alloc_small_hard(tcache, tbin, binind);
        if (ret == NULL)
            return (NULL);
    }
    assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);

    if (zero == false) {
        if (config_fill) {
            if (opt_junk) {
                arena_alloc_junk_small(ret,
                    &arena_bin_info[binind], false);
            } else if (opt_zero)
                memset(ret, 0, size);
        }
        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
    } else {
        if (config_fill && opt_junk) {
            arena_alloc_junk_small(ret, &arena_bin_info[binind],
                true);
        }
        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        memset(ret, 0, size);
    }

    if (config_stats)
        tbin->tstats.nrequests++;
    if (config_prof)
        tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
    tcache_event(tcache);
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
    void *ret;
    size_t binind;
    tcache_bin_t *tbin;

    size = PAGE_CEILING(size);
    assert(size <= tcache_maxclass);
    binind = NBINS + (size >> LG_PAGE) - 1;
    assert(binind < nhbins);
    tbin = &tcache->tbins[binind];
    ret = tcache_alloc_easy(tbin);
    if (ret == NULL) {
        /*
         * Only allocate one large object at a time, because it's quite
         * expensive to create one and not use it.
         */
        ret = arena_malloc_large(tcache->arena, size, zero);
        if (ret == NULL)
            return (NULL);
    } else {
        if (config_prof && prof_promote && size == PAGE) {
            arena_chunk_t *chunk =
                (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
            size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
                LG_PAGE);
            arena_mapbits_large_binind_set(chunk, pageind,
                BININD_INVALID);
        }
        if (zero == false) {
            if (config_fill) {
                if (opt_junk)
                    memset(ret, 0xa5, size);
                else if (opt_zero)
                    memset(ret, 0, size);
            }
            VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        } else {
            VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
            memset(ret, 0, size);
        }

        if (config_stats)
            tbin->tstats.nrequests++;
        if (config_prof)
            tcache->prof_accumbytes += size;
    }

    tcache_event(tcache);
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;

    assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

    if (config_fill && opt_junk)
        arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

    tbin = &tcache->tbins[binind];
    tbin_info = &tcache_bin_info[binind];
    if (tbin->ncached == tbin_info->ncached_max) {
        tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
            1), tcache);
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->avail[tbin->ncached] = ptr;
    tbin->ncached++;

    tcache_event(tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
    size_t binind;
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;

    assert((size & PAGE_MASK) == 0);
    assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
    assert(tcache_salloc(ptr) <= tcache_maxclass);

    binind = NBINS + (size >> LG_PAGE) - 1;

    if (config_fill && opt_junk)
        memset(ptr, 0x5a, size);

    tbin = &tcache->tbins[binind];
    tbin_info = &tcache_bin_info[binind];
    if (tbin->ncached == tbin_info->ncached_max) {
        tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
            1), tcache);
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->avail[tbin->ncached] = ptr;
    tbin->ncached++;

    tcache_event(tcache);
}

#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
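The fast paths in this deleted header treat each tcache bin as a bounded LIFO stack: tcache_alloc_easy() pops avail[--ncached], tcache_dalloc_*() pushes into avail[ncached++], and when a bin is full it is first flushed back to the arena down to half capacity. The following is a minimal standalone sketch of that pattern only; the names and the flush policy details here are illustrative, not jemalloc's implementation.

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define TOY_NSLOTS 4

/* Toy cache bin: a bounded LIFO stack of pointers. */
typedef struct {
    unsigned ncached;
    unsigned ncached_max;
    void *avail[TOY_NSLOTS]; /* Stack of available objects. */
} toy_bin_t;

/* Pop the most recently cached object, or NULL if the bin is empty. */
static void *toy_bin_alloc(toy_bin_t *bin) {
    if (bin->ncached == 0) {
        return NULL;         /* Real code falls back to the arena here. */
    }
    bin->ncached--;
    return bin->avail[bin->ncached];
}

/* "Flush": drop the oldest entries, keeping the newest `rem` cached. */
static void toy_bin_flush(toy_bin_t *bin, unsigned rem) {
    memmove(bin->avail, bin->avail + (bin->ncached - rem),
        rem * sizeof(void *));
    bin->ncached = rem;
}

/* Cache a freed object; flush half the bin first if it is full. */
static void toy_bin_dalloc(toy_bin_t *bin, void *ptr) {
    if (bin->ncached == bin->ncached_max) {
        toy_bin_flush(bin, bin->ncached_max >> 1);
    }
    assert(bin->ncached < bin->ncached_max);
    bin->avail[bin->ncached] = ptr;
    bin->ncached++;
}

int main(void) {
    toy_bin_t bin = { 0, TOY_NSLOTS, { 0 } };
    int objs[6];
    for (int i = 0; i < 6; i++) {
        toy_bin_dalloc(&bin, &objs[i]); /* The 5th push triggers a flush. */
    }
    printf("cached after pushes: %u\n", bin.ncached);                /* 4 */
    printf("top is objs[5]: %d\n", toy_bin_alloc(&bin) == &objs[5]); /* 1 */
    return 0;
}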
deps/jemalloc/include/jemalloc/internal/tcache_externs.h
0 → 100644
View file @
f63e81c2
#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H

#include "jemalloc/internal/size_classes.h"

extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;
extern cache_bin_info_t *tcache_bin_info;

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern unsigned nhbins;

/* Maximum cached size class. */
extern size_t tcache_maxclass;

/*
 * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
 * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are
 * completely disjoint from this data structure.  tcaches starts off as a sparse
 * array, so it has no physical memory footprint until individual pages are
 * touched.  This allows the entire array to be allocated the first time an
 * explicit tcache is created without a disproportionate impact on memory usage.
 */
extern tcaches_t *tcaches;

size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
    szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);
void tcache_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn);
void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);
void tcache_flush(tsd_t *tsd);
bool tsd_tcache_data_init(tsd_t *tsd);
bool tsd_tcache_enabled_data_init(tsd_t *tsd);

#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
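The tcaches array above backs jemalloc's explicit-tcache feature, which applications drive through the documented non-standard API (mallctl, mallocx/dallocx, MALLOCX_TCACHE). A minimal usage sketch, with most error handling elided:

#include <jemalloc/jemalloc.h>

int main(void) {
    unsigned tci;
    size_t sz = sizeof(tci);

    /* Reserve an explicit tcache; its index comes back through oldp. */
    if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0) {
        return 1;
    }

    /* Allocate and free through the explicit tcache. */
    void *p = mallocx(4096, MALLOCX_TCACHE(tci));
    if (p != NULL) {
        dallocx(p, MALLOCX_TCACHE(tci));
    }

    /* Return cached objects to the arena, then release the index. */
    mallctl("tcache.flush", NULL, NULL, &tci, sizeof(tci));
    mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
    return 0;
}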
deps/jemalloc/include/jemalloc/internal/tcache_inlines.h
0 → 100644
View file @
f63e81c2
#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H

#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"

static inline bool
tcache_enabled_get(tsd_t *tsd) {
    return tsd_tcache_enabled_get(tsd);
}

static inline void
tcache_enabled_set(tsd_t *tsd, bool enabled) {
    bool was_enabled = tsd_tcache_enabled_get(tsd);

    if (!was_enabled && enabled) {
        tsd_tcache_data_init(tsd);
    } else if (was_enabled && !enabled) {
        tcache_cleanup(tsd);
    }
    /* Commit the state last.  Above calls check current state. */
    tsd_tcache_enabled_set(tsd, enabled);
    tsd_slow_update(tsd);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache) {
    if (TCACHE_GC_INCR == 0) {
        return;
    }

    if (unlikely(ticker_tick(&tcache->gc_ticker))) {
        tcache_event_hard(tsd, tcache);
    }
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    UNUSED size_t size, szind_t binind, bool zero, bool slow_path) {
    void *ret;
    cache_bin_t *bin;
    bool tcache_success;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);

    assert(binind < NBINS);
    bin = tcache_small_bin_get(tcache, binind);
    ret = cache_bin_alloc_easy(bin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        bool tcache_hard_success;
        arena = arena_choose(tsd, arena);
        if (unlikely(arena == NULL)) {
            return NULL;
        }

        ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
            bin, binind, &tcache_hard_success);
        if (tcache_hard_success == false) {
            return NULL;
        }
    }

    assert(ret);
    /*
     * Only compute usize if required.  The checks in the following if
     * statement are all static.
     */
    if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
        usize = sz_index2size(binind);
        assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
    }

    if (likely(!zero)) {
        if (slow_path && config_fill) {
            if (unlikely(opt_junk_alloc)) {
                arena_alloc_junk_small(ret, &bin_infos[binind],
                    false);
            } else if (unlikely(opt_zero)) {
                memset(ret, 0, usize);
            }
        }
    } else {
        if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
            arena_alloc_junk_small(ret, &bin_infos[binind], true);
        }
        memset(ret, 0, usize);
    }

    if (config_stats) {
        bin->tstats.nrequests++;
    }
    if (config_prof) {
        tcache->prof_accumbytes += usize;
    }
    tcache_event(tsd, tcache);
    return ret;
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
    void *ret;
    cache_bin_t *bin;
    bool tcache_success;

    assert(binind >= NBINS && binind < nhbins);
    bin = tcache_large_bin_get(tcache, binind);
    ret = cache_bin_alloc_easy(bin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        /*
         * Only allocate one large object at a time, because it's quite
         * expensive to create one and not use it.
         */
        arena = arena_choose(tsd, arena);
        if (unlikely(arena == NULL)) {
            return NULL;
        }

        ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
        if (ret == NULL) {
            return NULL;
        }
    } else {
        size_t usize JEMALLOC_CC_SILENCE_INIT(0);

        /* Only compute usize on demand */
        if (config_prof || (slow_path && config_fill) ||
            unlikely(zero)) {
            usize = sz_index2size(binind);
            assert(usize <= tcache_maxclass);
        }

        if (likely(!zero)) {
            if (slow_path && config_fill) {
                if (unlikely(opt_junk_alloc)) {
                    memset(ret, JEMALLOC_ALLOC_JUNK, usize);
                } else if (unlikely(opt_zero)) {
                    memset(ret, 0, usize);
                }
            }
        } else {
            memset(ret, 0, usize);
        }

        if (config_stats) {
            bin->tstats.nrequests++;
        }
        if (config_prof) {
            tcache->prof_accumbytes += usize;
        }
    }

    tcache_event(tsd, tcache);
    return ret;
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
    cache_bin_t *bin;
    cache_bin_info_t *bin_info;

    assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);

    if (slow_path && config_fill && unlikely(opt_junk_free)) {
        arena_dalloc_junk_small(ptr, &bin_infos[binind]);
    }

    bin = tcache_small_bin_get(tcache, binind);
    bin_info = &tcache_bin_info[binind];
    if (unlikely(bin->ncached == bin_info->ncached_max)) {
        tcache_bin_flush_small(tsd, tcache, bin, binind,
            (bin_info->ncached_max >> 1));
    }
    assert(bin->ncached < bin_info->ncached_max);
    bin->ncached++;
    *(bin->avail - bin->ncached) = ptr;

    tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
    cache_bin_t *bin;
    cache_bin_info_t *bin_info;

    assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
    assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

    if (slow_path && config_fill && unlikely(opt_junk_free)) {
        large_dalloc_junk(ptr, sz_index2size(binind));
    }

    bin = tcache_large_bin_get(tcache, binind);
    bin_info = &tcache_bin_info[binind];
    if (unlikely(bin->ncached == bin_info->ncached_max)) {
        tcache_bin_flush_large(tsd, bin, binind,
            (bin_info->ncached_max >> 1), tcache);
    }
    assert(bin->ncached < bin_info->ncached_max);
    bin->ncached++;
    *(bin->avail - bin->ncached) = ptr;

    tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
    tcaches_t *elm = &tcaches[ind];
    if (unlikely(elm->tcache == NULL)) {
        elm->tcache = tcache_create_explicit(tsd);
    }
    return elm->tcache;
}

#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */
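One detail worth noting in the new deallocation paths: the deleted header pushed upward into avail[ncached], whereas cache_bin_t keeps avail pointing just past the highest slot and indexes downward, so the i-th cached object lives at avail[-i] and a push is "ncached++; *(avail - ncached) = ptr". Below is a hedged standalone sketch of that indexing with toy types; the real layout and its rationale live in cache_bin.h.

#include <stdio.h>

#define TOY_NSLOTS 8

/* Toy version of the downward-indexed avail stack (illustrative only). */
typedef struct {
    unsigned ncached;
    void **avail;            /* Points just past the highest slot. */
} toy_cache_bin_t;

/* Push: the newest object goes into the lowest occupied slot, avail[-ncached]. */
static void toy_push(toy_cache_bin_t *bin, void *ptr) {
    bin->ncached++;
    *(bin->avail - bin->ncached) = ptr;
}

/* Pop: take the same slot back, preserving LIFO order. */
static void *toy_pop(toy_cache_bin_t *bin) {
    if (bin->ncached == 0) {
        return NULL;
    }
    void *ret = *(bin->avail - bin->ncached);
    bin->ncached--;
    return ret;
}

int main(void) {
    void *slots[TOY_NSLOTS];
    /* avail points one past the end of the backing array. */
    toy_cache_bin_t bin = { 0, slots + TOY_NSLOTS };
    int a, b;

    toy_push(&bin, &a);
    toy_push(&bin, &b);
    printf("%d %d\n", toy_pop(&bin) == &b, toy_pop(&bin) == &a); /* 1 1 */
    return 0;
}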
deps/jemalloc/include/jemalloc/internal/tcache_structs.h
0 → 100644
View file @
f63e81c2
#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H

#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/cache_bin.h"
#include "jemalloc/internal/ticker.h"

struct tcache_s {
    /*
     * To minimize our cache-footprint, we put the frequently accessed data
     * together at the start of this struct.
     */

    /* Cleared after arena_prof_accum(). */
    uint64_t prof_accumbytes;
    /* Drives incremental GC. */
    ticker_t gc_ticker;
    /*
     * The pointer stacks associated with bins follow as a contiguous array.
     * During tcache initialization, the avail pointer in each element of
     * tbins is initialized to point to the proper offset within this array.
     */
    cache_bin_t bins_small[NBINS];

    /*
     * This data is less hot; we can be a little less careful with our
     * footprint here.
     */

    /* Lets us track all the tcaches in an arena. */
    ql_elm(tcache_t) link;
    /*
     * The descriptor lets the arena find our cache bins without seeing the
     * tcache definition.  This enables arenas to aggregate stats across
     * tcaches without having a tcache dependency.
     */
    cache_bin_array_descriptor_t cache_bin_array_descriptor;

    /* The arena this tcache is associated with. */
    arena_t *arena;
    /* Next bin to GC. */
    szind_t next_gc_bin;
    /* For small bins, fill (ncached_max >> lg_fill_div). */
    uint8_t lg_fill_div[NBINS];
    /*
     * We put the cache bins for large size classes at the end of the
     * struct, since some of them might not get used.  This might end up
     * letting us avoid touching an extra page if we don't have to.
     */
    cache_bin_t bins_large[NSIZES - NBINS];
};

/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
    union {
        tcache_t *tcache;
        tcaches_t *next;
    };
};

#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
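tcaches_s is a one-word union: while an explicit tcache ID is live, the slot holds the tcache pointer; once the ID is released, the same word can be relinked into the "list of available (previously used) explicit tcache IDs" the comment describes. The sketch below shows that slot/free-list pattern in isolation with toy names; it is not jemalloc's actual ID bookkeeping.

#include <stdio.h>
#include <stddef.h>

/* Toy slot: either a live entry or a link in a free list of reusable IDs. */
typedef struct slot_s slot_t;
struct slot_s {
    union {
        void   *payload;   /* Live: the object this ID refers to. */
        slot_t *next;      /* Free: next reusable slot. */
    };
};

#define NSLOTS 8
static slot_t slots[NSLOTS];
static slot_t *avail_list;   /* Previously used, now free, slots. */
static unsigned past_end;    /* First never-used index. */

static unsigned slot_acquire(void *payload) {
    slot_t *s;
    if (avail_list != NULL) {          /* Reuse a released ID first. */
        s = avail_list;
        avail_list = s->next;
    } else {
        s = &slots[past_end++];        /* Otherwise grow into fresh space. */
    }
    s->payload = payload;
    return (unsigned)(s - slots);
}

static void slot_release(unsigned ind) {
    slots[ind].next = avail_list;      /* The payload word becomes a link. */
    avail_list = &slots[ind];
}

int main(void) {
    int x, y;
    unsigned a = slot_acquire(&x);     /* 0 */
    unsigned b = slot_acquire(&y);     /* 1 */
    slot_release(a);
    unsigned c = slot_acquire(&y);     /* 0 again: released IDs are recycled. */
    printf("%u %u %u\n", a, b, c);
    return 0;
}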
deps/jemalloc/include/jemalloc/internal/tcache_types.h
0 → 100644
View file @
f63e81c2
#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
#define JEMALLOC_INTERNAL_TCACHE_TYPES_H

#include "jemalloc/internal/size_classes.h"

typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
 * used for two purposes: preventing thread caching on a per thread basis and
 * cleaning up during thread shutdown.
 */
#define TCACHE_STATE_DISABLED      ((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED  ((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY     ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX           TCACHE_STATE_PURGATORY

/*
 * Absolute minimum number of cache slots for each small bin.
 */
#define TCACHE_NSLOTS_SMALL_MIN    20

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond that imposed as: twice the
 * number of regions per slab for this size class.
 *
 * This constant must be an even number.
 */
#define TCACHE_NSLOTS_SMALL_MAX    200

/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE        20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15

/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps.  Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define TCACHE_GC_SWEEP            8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))

/* Used in TSD static initializer only.  Real init in tcache_data_init(). */
#define TCACHE_ZERO_INITIALIZER {0}

/* Used in TSD static initializer only.  Will be initialized to opt_tcache. */
#define TCACHE_ENABLED_ZERO_INITIALIZER false

#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
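To make the rounding comment on TCACHE_GC_SWEEP concrete: with, say, NBINS == 36 (the actual value depends on the size-class configuration), TCACHE_GC_INCR works out to 8192 / 36 + 1 = 227 + 1 = 228, so one bin is garbage-collected every 228 allocation/deallocation events and a full sweep over all 36 bins takes 36 * 228 = 8208 events, slightly more than TCACHE_GC_SWEEP, exactly as the comment warns.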