ruanhaishen / redis · Commit a9951b1b
Authored Oct 06, 2015 by antirez

    Jemalloc updated to 4.0.3.

Parent: e3ded027
Changes: 136 files
deps/jemalloc/include/jemalloc/internal/quarantine.h

@@ -29,36 +29,29 @@ struct quarantine_s {
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-quarantine_t	*quarantine_init(size_t lg_maxobjs);
-void	quarantine(void *ptr);
-void	quarantine_cleanup(void *arg);
-bool	quarantine_boot(void);
+void	quarantine_alloc_hook_work(tsd_t *tsd);
+void	quarantine(tsd_t *tsd, void *ptr);
+void	quarantine_cleanup(tsd_t *tsd);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
-malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *)
-
 void	quarantine_alloc_hook(void);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
-malloc_tsd_externs(quarantine, quarantine_t *)
-malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL,
-    quarantine_cleanup)
-
 JEMALLOC_ALWAYS_INLINE void
 quarantine_alloc_hook(void)
 {
-	quarantine_t *quarantine;
+	tsd_t *tsd;
 
 	assert(config_fill && opt_quarantine);
 
-	quarantine = *quarantine_tsd_get();
-	if (quarantine == NULL)
-		quarantine_init(LG_MAXOBJS_INIT);
+	tsd = tsd_fetch();
+	if (tsd_quarantine_get(tsd) == NULL)
+		quarantine_alloc_hook_work(tsd);
 }
 #endif
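The rewritten quarantine_alloc_hook() above keeps only the cheap check inline (fetch TSD, test for NULL) and pushes initialization into the out-of-line quarantine_alloc_hook_work(). A minimal standalone sketch of the same fast-path/slow-path split, with a hypothetical thread_state_t standing in for jemalloc's tsd_t machinery:

    /* Sketch of the fast-path/slow-path split used above; thread_state_t
     * and its helpers are illustrative stand-ins, not jemalloc's API. */
    #include <stddef.h>

    typedef struct { void *quarantine; } thread_state_t;

    thread_state_t *thread_state_fetch(void);    /* cheap: a TLS read */
    void quarantine_init_slow(thread_state_t *); /* rare: allocates state */

    static inline void
    quarantine_hook(void)
    {
        thread_state_t *ts = thread_state_fetch();

        /* Common case: already initialized, no out-of-line call. */
        if (ts->quarantine == NULL)
            quarantine_init_slow(ts);
    }

Keeping the inline path to a single load and branch is what makes it cheap enough to run on every allocation.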
deps/jemalloc/include/jemalloc/internal/rb.h

@@ -158,6 +158,8 @@ struct {								\
 #define	rb_proto(a_attr, a_prefix, a_rbt_type, a_type)			\
 a_attr void								\
 a_prefix##new(a_rbt_type *rbtree);					\
+a_attr bool								\
+a_prefix##empty(a_rbt_type *rbtree);					\
 a_attr a_type *								\
 a_prefix##first(a_rbt_type *rbtree);					\
 a_attr a_type *								\
@@ -198,7 +200,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start,		\
  *       int (a_cmp *)(a_type *a_node, a_type *a_other);
  *                         ^^^^^^
  *                      or a_key
- *   Interpretation of comparision function return values:
+ *   Interpretation of comparison function return values:
  *     -1 : a_node <  a_other
  *      0 : a_node == a_other
  *      1 : a_node >  a_other
@@ -224,6 +226,13 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start,		\
  *   Args:
  *     tree: Pointer to an uninitialized red-black tree object.
  *
+ *   static bool
+ *   ex_empty(ex_t *tree);
+ *       Description: Determine whether tree is empty.
+ *       Args:
+ *         tree: Pointer to an initialized red-black tree object.
+ *       Ret: True if tree is empty, false otherwise.
+ *
  *   static ex_node_t *
  *   ex_first(ex_t *tree);
  *   static ex_node_t *
@@ -309,6 +318,10 @@ a_attr void								\
 a_prefix##new(a_rbt_type *rbtree) {					\
     rb_new(a_type, a_field, rbtree);					\
 }									\
+a_attr bool								\
+a_prefix##empty(a_rbt_type *rbtree) {					\
+    return (rbtree->rbt_root == &rbtree->rbt_nil);			\
+}									\
 a_attr a_type *								\
 a_prefix##first(a_rbt_type *rbtree) {					\
     a_type *ret;							\
@@ -580,7 +593,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {			\
     if (left != &rbtree->rbt_nil) {					\
 	/* node has no successor, but it has a left child.        */\
 	/* Splice node out, without losing the left child.        */\
-	assert(rbtn_red_get(a_type, a_field, node) == false);		\
+	assert(!rbtn_red_get(a_type, a_field, node));			\
 	assert(rbtn_red_get(a_type, a_field, left));			\
 	rbtn_black_set(a_type, a_field, left);				\
 	if (pathp == path) {						\
@@ -616,8 +629,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {			\
 	    if (pathp->cmp < 0) {					\
 		rbtn_left_set(a_type, a_field, pathp->node,		\
 		    pathp[1].node);					\
-		assert(rbtn_red_get(a_type, a_field, pathp[1].node)	\
-		    == false);						\
+		assert(!rbtn_red_get(a_type, a_field, pathp[1].node));	\
 		if (rbtn_red_get(a_type, a_field, pathp->node)) {	\
 		    a_type *right = rbtn_right_get(a_type, a_field,	\
 			pathp->node);					\
@@ -681,7 +693,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {			\
 			rbtn_rotate_left(a_type, a_field, pathp->node,	\
 			    tnode);					\
 			/* Balance restored, but rotation modified */\
-			/* subree root, which may actually be the tree */\
+			/* subtree root, which may actually be the tree */\
 			/* root.                                   */\
 			if (pathp == path) {				\
 			    /* Set root. */				\
@@ -849,7 +861,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {			\
     }									\
     /* Set root. */							\
     rbtree->rbt_root = path->node;					\
-    assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false);	\
+    assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root));		\
 }									\
 a_attr a_type *								\
 a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node,		\
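The rb.h change adds an O(1) empty() predicate to the set of functions that rb_proto()/rb_gen() generate. A sketch of consuming it, following the ex_* naming from rb.h's own comment block (the address-based comparator and the drain() helper are illustrative, not from the source):

    #include <stddef.h>
    #include "rb.h"

    typedef struct ex_node_s ex_node_t;
    struct ex_node_s {
        rb_node(ex_node_t) link;
    };
    typedef rb_tree(ex_node_t) ex_t;

    static int
    ex_cmp(ex_node_t *a, ex_node_t *b)
    {
        return ((a < b) ? -1 : (a > b));
    }

    rb_gen(static, ex_, ex_t, ex_node_t, link, ex_cmp)

    static size_t
    drain(ex_t *tree)  /* tree must already be ex_new()-initialized */
    {
        size_t n = 0;

        /* ex_empty() is O(1): it compares rbt_root against rbt_nil. */
        while (!ex_empty(tree)) {
            ex_remove(tree, ex_first(tree));
            n++;
        }
        return (n);
    }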
deps/jemalloc/include/jemalloc/internal/rtree.h

 /*
  * This radix tree implementation is tailored to the singular purpose of
- * tracking which chunks are currently owned by jemalloc.  This functionality
- * is mandatory for OS X, where jemalloc must be able to respond to object
- * ownership queries.
+ * associating metadata with chunks that are currently owned by jemalloc.
  *
  *******************************************************************************
  */
 #ifdef JEMALLOC_H_TYPES
 
+typedef struct rtree_node_elm_s rtree_node_elm_t;
+typedef struct rtree_level_s rtree_level_t;
 typedef struct rtree_s rtree_t;
 
 /*
- * Size of each radix tree node (must be a power of 2).  This impacts tree
- * depth.
+ * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
+ * machine address width.
  */
-#define	RTREE_NODESIZE (1U << 16)
+#define	LG_RTREE_BITS_PER_LEVEL	4
+#define	RTREE_BITS_PER_LEVEL	(ZU(1) << LG_RTREE_BITS_PER_LEVEL)
+#define	RTREE_HEIGHT_MAX						\
+    ((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
 
-typedef void *(rtree_alloc_t)(size_t);
-typedef void (rtree_dalloc_t)(void *);
+/* Used for two-stage lock-free node initialization. */
+#define	RTREE_NODE_INITIALIZING	((rtree_node_elm_t *)0x1)
+
+/*
+ * The node allocation callback function's argument is the number of contiguous
+ * rtree_node_elm_t structures to allocate, and the resulting memory must be
+ * zeroed.
+ */
+typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t);
+typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
 
 #endif /* JEMALLOC_H_TYPES */
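As a quick check of the new macros: on a 64-bit platform LG_SIZEOF_PTR is 3, so keys are 1 << (3+3) = 64 bits wide, RTREE_BITS_PER_LEVEL is 1 << 4 = 16, and RTREE_HEIGHT_MAX is 64 / 16 = 4 levels; a 32-bit platform gets 32 / 16 = 2 levels.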
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
+struct rtree_node_elm_s {
+	union {
+		void			*pun;
+		rtree_node_elm_t	*child;
+		extent_node_t		*val;
+	};
+};
+
+struct rtree_level_s {
+	/*
+	 * A non-NULL subtree points to a subtree rooted along the hypothetical
+	 * path to the leaf node corresponding to key 0.  Depending on what keys
+	 * have been used to store to the tree, an arbitrary combination of
+	 * subtree pointers may remain NULL.
+	 *
+	 * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
+	 * This results in a 3-level tree, and the leftmost leaf can be directly
+	 * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding
+	 * 0x00000000) can be accessed via subtrees[1], and the remainder of the
+	 * tree can be accessed via subtrees[0].
+	 *
+	 *   levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
+	 *
+	 *   levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ]
+	 *
+	 *   levels[2] : [val(0x000000000000) | val(0x000000000001) | ...]
+	 *
+	 * This has practical implications on x64, which currently uses only the
+	 * lower 47 bits of virtual address space in userland, thus leaving
+	 * subtrees[0] unused and avoiding a level of tree traversal.
+	 */
+	union {
+		void			*subtree_pun;
+		rtree_node_elm_t	*subtree;
+	};
+	/* Number of key bits distinguished by this level. */
+	unsigned		bits;
+	/*
+	 * Cumulative number of key bits distinguished by traversing to
+	 * corresponding tree level.
+	 */
+	unsigned		cumbits;
+};
+
 struct rtree_s {
-	rtree_alloc_t	*alloc;
-	rtree_dalloc_t	*dalloc;
-	malloc_mutex_t	mutex;
-	void		**root;
-	unsigned	height;
-	unsigned	level2bits[1]; /* Dynamically sized. */
+	rtree_node_alloc_t	*alloc;
+	rtree_node_dalloc_t	*dalloc;
+	unsigned		height;
+	/*
+	 * Precomputed table used to convert from the number of leading 0 key
+	 * bits to which subtree level to start at.
+	 */
+	unsigned		start_level[RTREE_HEIGHT_MAX];
+	rtree_level_t		levels[RTREE_HEIGHT_MAX];
 };
 
 #endif /* JEMALLOC_H_STRUCTS */
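The levels[] comment above is easiest to see numerically. A small standalone program (not jemalloc code) that splits a 64-bit key into the 16-bit digits rtree_subkey() would extract when bits = 16 and cumbits = 16 * (level + 1):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uintptr_t key = (uintptr_t)0x00007fff12345678; /* 47-bit x64 pointer */
        unsigned level;

        for (level = 0; level < 4; level++) {
            unsigned cumbits = 16 * (level + 1);
            uintptr_t subkey = (key >> (64 - cumbits)) & 0xffff;
            printf("level %u: subkey 0x%04lx\n", level,
                (unsigned long)subkey);
        }
        /* Prints 0x0000, 0x7fff, 0x1234, 0x5678. */
        return (0);
    }

The leading 0x0000 digit of every x64 user-space pointer is exactly what the precomputed start_level[] table exploits to skip levels[0].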
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-rtree_t	*rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
+bool	rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
+    rtree_node_dalloc_t *dalloc);
 void	rtree_delete(rtree_t *rtree);
-void	rtree_prefork(rtree_t *rtree);
-void	rtree_postfork_parent(rtree_t *rtree);
-void	rtree_postfork_child(rtree_t *rtree);
+rtree_node_elm_t	*rtree_subtree_read_hard(rtree_t *rtree,
+    unsigned level);
+rtree_node_elm_t	*rtree_child_read_hard(rtree_t *rtree,
+    rtree_node_elm_t *elm, unsigned level);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
-#ifdef JEMALLOC_DEBUG
-uint8_t	rtree_get_locked(rtree_t *rtree, uintptr_t key);
-#endif
-uint8_t	rtree_get(rtree_t *rtree, uintptr_t key);
-bool	rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
+unsigned	rtree_start_level(rtree_t *rtree, uintptr_t key);
+uintptr_t	rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
+
+bool	rtree_node_valid(rtree_node_elm_t *node);
+rtree_node_elm_t	*rtree_child_tryread(rtree_node_elm_t *elm);
+rtree_node_elm_t	*rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
+    unsigned level);
+extent_node_t	*rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
+    bool dependent);
+void	rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
+    const extent_node_t *val);
+rtree_node_elm_t	*rtree_subtree_tryread(rtree_t *rtree, unsigned level);
+rtree_node_elm_t	*rtree_subtree_read(rtree_t *rtree, unsigned level);
+
+extent_node_t	*rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
+bool	rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
 #endif
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
-#define	RTREE_GET_GENERATE(f)						\
-/* The least significant bits of the key are ignored. */		\
-JEMALLOC_INLINE uint8_t							\
-f(rtree_t *rtree, uintptr_t key)					\
-{									\
-	uint8_t ret;							\
-	uintptr_t subkey;						\
-	unsigned i, lshift, height, bits;				\
-	void **node, **child;						\
-									\
-	RTREE_LOCK(&rtree->mutex);					\
-	for (i = lshift = 0, height = rtree->height, node = rtree->root;\
-	    i < height - 1;						\
-	    i++, lshift += bits, node = child) {			\
-		bits = rtree->level2bits[i];				\
-		subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
-		    3)) - bits);					\
-		child = (void**)node[subkey];				\
-		if (child == NULL) {					\
-			RTREE_UNLOCK(&rtree->mutex);			\
-			return (0);					\
-		}							\
-	}								\
-									\
-	/*								\
-	 * node is a leaf, so it contains values rather than node	\
-	 * pointers.							\
-	 */								\
-	bits = rtree->level2bits[i];					\
-	subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -	\
-	    bits);							\
-	{								\
-		uint8_t *leaf = (uint8_t *)node;			\
-		ret = leaf[subkey];					\
-	}								\
-	RTREE_UNLOCK(&rtree->mutex);					\
-									\
-	RTREE_GET_VALIDATE						\
-	return (ret);							\
-}
-
-#ifdef JEMALLOC_DEBUG
-#  define RTREE_LOCK(l)		malloc_mutex_lock(l)
-#  define RTREE_UNLOCK(l)	malloc_mutex_unlock(l)
-#  define RTREE_GET_VALIDATE
-RTREE_GET_GENERATE(rtree_get_locked)
-#  undef RTREE_LOCK
-#  undef RTREE_UNLOCK
-#  undef RTREE_GET_VALIDATE
-#endif
-
-#define	RTREE_LOCK(l)
-#define	RTREE_UNLOCK(l)
-#ifdef JEMALLOC_DEBUG
-   /*
-    * Suppose that it were possible for a jemalloc-allocated chunk to be
-    * munmap()ped, followed by a different allocator in another thread re-using
-    * overlapping virtual memory, all without invalidating the cached rtree
-    * value.  The result would be a false positive (the rtree would claim that
-    * jemalloc owns memory that it had actually discarded).  This scenario
-    * seems impossible, but the following assertion is a prudent sanity check.
-    */
-#  define RTREE_GET_VALIDATE						\
-	assert(rtree_get_locked(rtree, key) == ret);
-#else
-#  define RTREE_GET_VALIDATE
-#endif
-RTREE_GET_GENERATE(rtree_get)
-#undef RTREE_LOCK
-#undef RTREE_UNLOCK
-#undef RTREE_GET_VALIDATE
-
-JEMALLOC_INLINE bool
-rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
-{
-	uintptr_t subkey;
-	unsigned i, lshift, height, bits;
-	void **node, **child;
-
-	malloc_mutex_lock(&rtree->mutex);
-	for (i = lshift = 0, height = rtree->height, node = rtree->root;
-	    i < height - 1;
-	    i++, lshift += bits, node = child) {
-		bits = rtree->level2bits[i];
-		subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
-		    bits);
-		child = (void**)node[subkey];
-		if (child == NULL) {
-			size_t size = ((i + 1 < height - 1) ? sizeof(void *)
-			    : (sizeof(uint8_t))) << rtree->level2bits[i+1];
-			child = (void**)rtree->alloc(size);
-			if (child == NULL) {
-				malloc_mutex_unlock(&rtree->mutex);
-				return (true);
-			}
-			memset(child, 0, size);
-			node[subkey] = child;
-		}
-	}
-
-	/* node is a leaf, so it contains values rather than node pointers. */
-	bits = rtree->level2bits[i];
-	subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
-	{
-		uint8_t *leaf = (uint8_t *)node;
-		leaf[subkey] = val;
-	}
-	malloc_mutex_unlock(&rtree->mutex);
-
-	return (false);
-}
+JEMALLOC_INLINE unsigned
+rtree_start_level(rtree_t *rtree, uintptr_t key)
+{
+	unsigned start_level;
+
+	if (unlikely(key == 0))
+		return (rtree->height - 1);
+
+	start_level = rtree->start_level[lg_floor(key) >>
+	    LG_RTREE_BITS_PER_LEVEL];
+	assert(start_level < rtree->height);
+	return (start_level);
+}
+
+JEMALLOC_INLINE uintptr_t
+rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
+{
+
+	return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
+	    rtree->levels[level].cumbits)) & ((ZU(1) <<
+	    rtree->levels[level].bits) - 1));
+}
+
+JEMALLOC_INLINE bool
+rtree_node_valid(rtree_node_elm_t *node)
+{
+
+	return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
+}
+
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_child_tryread(rtree_node_elm_t *elm)
+{
+	rtree_node_elm_t *child;
+
+	/* Double-checked read (first read may be stale. */
+	child = elm->child;
+	if (!rtree_node_valid(child))
+		child = atomic_read_p(&elm->pun);
+	return (child);
+}
+
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
+{
+	rtree_node_elm_t *child;
+
+	child = rtree_child_tryread(elm);
+	if (unlikely(!rtree_node_valid(child)))
+		child = rtree_child_read_hard(rtree, elm, level);
+	return (child);
+}
+
+JEMALLOC_INLINE extent_node_t *
+rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
+{
+
+	if (dependent) {
+		/*
+		 * Reading a val on behalf of a pointer to a valid allocation is
+		 * guaranteed to be a clean read even without synchronization,
+		 * because the rtree update became visible in memory before the
+		 * pointer came into existence.
+		 */
+		return (elm->val);
+	} else {
+		/*
+		 * An arbitrary read, e.g. on behalf of ivsalloc(), may not be
+		 * dependent on a previous rtree write, which means a stale read
+		 * could result if synchronization were omitted here.
+		 */
+		return (atomic_read_p(&elm->pun));
+	}
+}
+
+JEMALLOC_INLINE void
+rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
+    const extent_node_t *val)
+{
+
+	atomic_write_p(&elm->pun, val);
+}
+
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_subtree_tryread(rtree_t *rtree, unsigned level)
+{
+	rtree_node_elm_t *subtree;
+
+	/* Double-checked read (first read may be stale. */
+	subtree = rtree->levels[level].subtree;
+	if (!rtree_node_valid(subtree))
+		subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
+	return (subtree);
+}
+
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_subtree_read(rtree_t *rtree, unsigned level)
+{
+	rtree_node_elm_t *subtree;
+
+	subtree = rtree_subtree_tryread(rtree, level);
+	if (unlikely(!rtree_node_valid(subtree)))
+		subtree = rtree_subtree_read_hard(rtree, level);
+	return (subtree);
+}
+
+JEMALLOC_INLINE extent_node_t *
+rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
+{
+	uintptr_t subkey;
+	unsigned i, start_level;
+	rtree_node_elm_t *node, *child;
+
+	start_level = rtree_start_level(rtree, key);
+
+	for (i = start_level, node = rtree_subtree_tryread(rtree, start_level);
+	    /**/; i++, node = child) {
+		if (!dependent && unlikely(!rtree_node_valid(node)))
+			return (NULL);
+		subkey = rtree_subkey(rtree, key, i);
+		if (i == rtree->height - 1) {
+			/*
+			 * node is a leaf, so it contains values rather than
+			 * child pointers.
+			 */
+			return (rtree_val_read(rtree, &node[subkey],
+			    dependent));
+		}
+		assert(i < rtree->height - 1);
+		child = rtree_child_tryread(&node[subkey]);
+	}
+	not_reached();
+}
+
+JEMALLOC_INLINE bool
+rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
+{
+	uintptr_t subkey;
+	unsigned i, start_level;
+	rtree_node_elm_t *node, *child;
+
+	start_level = rtree_start_level(rtree, key);
+
+	node = rtree_subtree_read(rtree, start_level);
+	if (node == NULL)
+		return (true);
+	for (i = start_level; /**/; i++, node = child) {
+		subkey = rtree_subkey(rtree, key, i);
+		if (i == rtree->height - 1) {
+			/*
+			 * node is a leaf, so it contains values rather than
+			 * child pointers.
+			 */
+			rtree_val_write(rtree, &node[subkey], val);
+			return (false);
+		}
+		assert(i + 1 < rtree->height);
+		child = rtree_child_read(rtree, &node[subkey], i);
+		if (child == NULL)
+			return (true);
+	}
+	not_reached();
+}
 #endif
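rtree_child_tryread() and rtree_subtree_tryread() above both use a double-checked read against the RTREE_NODE_INITIALIZING sentinel: a cheap plain read first, and an atomic read only when the first one saw an invalid value. A self-contained C11 rendering of that idea (jemalloc uses its own atomic_read_p() rather than <stdatomic.h>; all names here are illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct node_s node_t;
    struct node_s {
        node_t *_Atomic child;
    };

    #define NODE_INITIALIZING ((node_t *)0x1)

    static inline int
    node_valid(node_t *n)
    {
        return ((uintptr_t)n > (uintptr_t)NODE_INITIALIZING);
    }

    static inline node_t *
    child_tryread(node_t *elm)
    {
        /* First read is relaxed; fall back to a synchronizing read only
         * when the cheap read observed NULL or the sentinel. */
        node_t *child = atomic_load_explicit(&elm->child,
            memory_order_relaxed);

        if (!node_valid(child)) {
            child = atomic_load_explicit(&elm->child,
                memory_order_acquire);
        }
        return (child);
    }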
deps/jemalloc/include/jemalloc/internal/size_classes.sh

 #!/bin/sh
+#
+# Usage: size_classes.sh <lg_qarr> <lg_tmin> <lg_parr> <lg_g>
 
 # The following limits are chosen such that they cover all supported platforms.
 
-# Range of quanta.
-lg_qmin=3
-lg_qmax=4
+# Pointer sizes.
+lg_zarr="2 3"
+
+# Quanta.
+lg_qarr=$1
 
 # The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)].
-lg_tmin=3
+lg_tmin=$2
 
-# Range of page sizes.
-lg_pmin=12
-lg_pmax=16
+# Maximum lookup size.
+lg_kmax=12
+
+# Page sizes.
+lg_parr=`echo $3 | tr ',' ' '`
+
+# Size class group size (number of size classes for each size doubling).
+lg_g=$4
 
 pow2() {
   e=$1
@@ -22,68 +31,224 @@ pow2() {
   done
 }
 
-cat <<EOF
-/* This file was automatically generated by size_classes.sh. */
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+lg() {
+  x=$1
+  lg_result=0
+  while [ ${x} -gt 1 ] ; do
+    lg_result=$((${lg_result} + 1))
+    x=$((${x} / 2))
+  done
+}
 
-EOF
+size_class() {
+  index=$1
+  lg_grp=$2
+  lg_delta=$3
+  ndelta=$4
+  lg_p=$5
+  lg_kmax=$6
+
+  lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta}
+  if [ ${pow2_result} -lt ${ndelta} ] ; then
+    rem="yes"
+  else
+    rem="no"
+  fi
+
+  lg_size=${lg_grp}
+  if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then
+    lg_size=$((${lg_grp} + 1))
+  else
+    lg_size=${lg_grp}
+    rem="yes"
+  fi
+
+  if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then
+    bin="yes"
+  else
+    bin="no"
+  fi
+  if [ ${lg_size} -lt ${lg_kmax} \
+      -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then
+    lg_delta_lookup=${lg_delta}
+  else
+    lg_delta_lookup="no"
+  fi
+  printf '    SC(%3d, %6d, %8d, %6d, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${lg_delta_lookup}
+  # Defined upon return:
+  # - lg_delta_lookup (${lg_delta} or "no")
+  # - bin ("yes" or "no")
+}
+
+sep_line() {
+  echo "                                               \\"
+}
+
+size_classes() {
+  lg_z=$1
+  lg_q=$2
+  lg_t=$3
+  lg_p=$4
+  lg_g=$5
+
+  pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result}
+  pow2 ${lg_g}; g=${pow2_result}
+
+  echo "#define	SIZE_CLASSES \\"
+  echo "  /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \\"
+
+  ntbins=0
+  nlbins=0
+  lg_tiny_maxclass='"NA"'
+  nbins=0
+
+  # Tiny size classes.
+  ndelta=0
+  index=0
+  lg_grp=${lg_t}
+  lg_delta=${lg_grp}
+  while [ ${lg_grp} -lt ${lg_q} ] ; do
+    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+    if [ ${lg_delta_lookup} != "no" ] ; then
+      nlbins=$((${index} + 1))
+    fi
+    if [ ${bin} != "no" ] ; then
+      nbins=$((${index} + 1))
+    fi
+    ntbins=$((${ntbins} + 1))
+    lg_tiny_maxclass=${lg_grp} # Final written value is correct.
+    index=$((${index} + 1))
+    lg_delta=${lg_grp}
+    lg_grp=$((${lg_grp} + 1))
+  done
+
+  # First non-tiny group.
+  if [ ${ntbins} -gt 0 ] ; then
+    sep_line
+    # The first size class has an unusual encoding, because the size has to be
+    # split between grp and delta*ndelta.
+    lg_grp=$((${lg_grp} - 1))
+    ndelta=1
+    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+    index=$((${index} + 1))
+    lg_grp=$((${lg_grp} + 1))
+    lg_delta=$((${lg_delta} + 1))
+  fi
+  while [ ${ndelta} -lt ${g} ] ; do
+    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+    index=$((${index} + 1))
+    ndelta=$((${ndelta} + 1))
+  done
+
+  # All remaining groups.
+  lg_grp=$((${lg_grp} + ${lg_g}))
+  while [ ${lg_grp} -lt ${ptr_bits} ] ; do
+    sep_line
+    ndelta=1
+    if [ ${lg_grp} -eq $((${ptr_bits} - 1)) ] ; then
+      ndelta_limit=$((${g} - 1))
+    else
+      ndelta_limit=${g}
+    fi
+    while [ ${ndelta} -le ${ndelta_limit} ] ; do
+      size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+      if [ ${lg_delta_lookup} != "no" ] ; then
+        nlbins=$((${index} + 1))
+        # Final written value is correct:
+        lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+      fi
+      if [ ${bin} != "no" ] ; then
+        nbins=$((${index} + 1))
+        # Final written value is correct:
+        small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+        if [ ${lg_g} -gt 0 ] ; then
+          lg_large_minclass=$((${lg_grp} + 1))
+        else
+          lg_large_minclass=$((${lg_grp} + 2))
+        fi
+      fi
+      # Final written value is correct:
+      huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+      index=$((${index} + 1))
+      ndelta=$((${ndelta} + 1))
+    done
+    lg_grp=$((${lg_grp} + 1))
+    lg_delta=$((${lg_delta} + 1))
+  done
+  echo
+  nsizes=${index}
+
+  # Defined upon completion:
+  # - ntbins
+  # - nlbins
+  # - nbins
+  # - nsizes
+  # - lg_tiny_maxclass
+  # - lookup_maxclass
+  # - small_maxclass
+  # - lg_large_minclass
+  # - huge_maxclass
+}
 
-lg_q=${lg_qmin}
-while [ ${lg_q} -le ${lg_qmax} ] ; do
-  lg_t=${lg_tmin}
-  while [ ${lg_t} -le ${lg_q} ] ; do
-    lg_p=${lg_pmin}
-    while [ ${lg_p} -le ${lg_pmax} ] ; do
-      echo "#if (LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})"
-      echo "#define	SIZE_CLASSES_DEFINED"
-      pow2 ${lg_q}; q=${pow2_result}
-      pow2 ${lg_t}; t=${pow2_result}
-      pow2 ${lg_p}; p=${pow2_result}
-      bin=0
-      psz=0
-      sz=${t}
-      delta=$((${sz} - ${psz}))
-      echo "/*  SIZE_CLASS(bin,	delta,	sz) */"
-      echo "#define	SIZE_CLASSES	\\"
-
-      # Tiny size classes.
-      while [ ${sz} -lt ${q} ] ; do
-        echo "    SIZE_CLASS(${bin},	${delta},	${sz})	\\"
-        bin=$((${bin} + 1))
-        psz=${sz}
-        sz=$((${sz} + ${sz}))
-        delta=$((${sz} - ${psz}))
-      done
-      # Quantum-multiple size classes.  For each doubling of sz, as many as 4
-      # size classes exist.  Their spacing is the greater of:
-      # - q
-      # - sz/4, where sz is a power of 2
-      while [ ${sz} -lt ${p} ] ; do
-        if [ ${sz} -ge $((${q} * 4)) ] ; then
-          i=$((${sz} / 4))
-        else
-          i=${q}
-        fi
-        next_2pow=$((${sz} * 2))
-        while [ ${sz} -lt $next_2pow ] ; do
-          echo "    SIZE_CLASS(${bin},	${delta},	${sz})	\\"
-          bin=$((${bin} + 1))
-          psz=${sz}
-          sz=$((${sz} + ${i}))
-          delta=$((${sz} - ${psz}))
-        done
-      done
-      echo
-      echo "#define	NBINS		${bin}"
-      echo "#define	SMALL_MAXCLASS	${psz}"
-      echo "#endif"
-      echo
-      lg_p=$((${lg_p} + 1))
-    done
-    lg_t=$((${lg_t} + 1))
-  done
-  lg_q=$((${lg_q} + 1))
-done
+cat <<EOF
+/* This file was automatically generated by size_classes.sh. */
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/*
+ * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to
+ * be defined prior to inclusion, and it in turn defines:
+ *
+ *   LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
+ *   SIZE_CLASSES: Complete table of
+ *                 SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)
+ *                 tuples.
+ *     index: Size class index.
+ *     lg_grp: Lg group base size (no deltas added).
+ *     lg_delta: Lg delta to previous size class.
+ *     ndelta: Delta multiplier.  size == 1<<lg_grp + ndelta<<lg_delta
+ *     bin: 'yes' if a small bin size class, 'no' otherwise.
+ *     lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
+ *                      otherwise.
+ *   NTBINS: Number of tiny bins.
+ *   NLBINS: Number of bins supported by the lookup table.
+ *   NBINS: Number of small size class bins.
+ *   NSIZES: Number of size classes.
+ *   LG_TINY_MAXCLASS: Lg of maximum tiny size class.
+ *   LOOKUP_MAXCLASS: Maximum size class included in lookup table.
+ *   SMALL_MAXCLASS: Maximum small size class.
+ *   LG_LARGE_MINCLASS: Lg of minimum large size class.
+ *   HUGE_MAXCLASS: Maximum (huge) size class.
+ */
+
+#define	LG_SIZE_CLASS_GROUP	${lg_g}
+
+EOF
+
+for lg_z in ${lg_zarr} ; do
+  for lg_q in ${lg_qarr} ; do
+    lg_t=${lg_tmin}
+    while [ ${lg_t} -le ${lg_q} ] ; do
+      for lg_p in ${lg_parr} ; do
+        echo "#if (LG_SIZEOF_PTR == ${lg_z} && LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})"
+        size_classes ${lg_z} ${lg_q} ${lg_t} ${lg_p} ${lg_g}
+        echo "#define	SIZE_CLASSES_DEFINED"
+        echo "#define	NTBINS			${ntbins}"
+        echo "#define	NLBINS			${nlbins}"
+        echo "#define	NBINS			${nbins}"
+        echo "#define	NSIZES			${nsizes}"
+        echo "#define	LG_TINY_MAXCLASS	${lg_tiny_maxclass}"
+        echo "#define	LOOKUP_MAXCLASS		${lookup_maxclass}"
+        echo "#define	SMALL_MAXCLASS		${small_maxclass}"
+        echo "#define	LG_LARGE_MINCLASS	${lg_large_minclass}"
+        echo "#define	HUGE_MAXCLASS		${huge_maxclass}"
+        echo "#endif"
+        echo
+      done
+      lg_t=$((${lg_t} + 1))
+    done
+  done
+done
 
 cat <<EOF
@@ -92,11 +257,10 @@ cat <<EOF
 #endif
 
 #undef SIZE_CLASSES_DEFINED
 /*
- * The small_size2bin lookup table uses uint8_t to encode each bin index, so we
+ * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
  * cannot support more than 256 small size classes.  Further constrain NBINS to
- * 255 to support prof_promote, since all small size classes, plus a "not
- * small" size class must be stored in 8 bits of arena_chunk_map_t's bits
- * field.
+ * 255 since all small size classes, plus a "not small" size class must be
+ * stored in 8 bits of arena_chunk_map_bits_t's bits field.
  */
 #if (NBINS > 255)
 #  error "Too many small size classes"
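The SC() tuple encoding is summarized by the formula in the generated header comment: size == (1 << lg_grp) + (ndelta << lg_delta). For example, lg_grp = 6, lg_delta = 4, ndelta = 3 encodes (1 << 6) + (3 << 4) = 64 + 48 = 112 bytes; with LG_SIZE_CLASS_GROUP = 2, the doubling from 64 to 128 is covered by the four classes 80, 96, 112, and 128.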
deps/jemalloc/include/jemalloc/internal/stats.h

@@ -4,6 +4,7 @@
 typedef struct tcache_bin_stats_s tcache_bin_stats_t;
 typedef struct malloc_bin_stats_s malloc_bin_stats_t;
 typedef struct malloc_large_stats_s malloc_large_stats_t;
+typedef struct malloc_huge_stats_s malloc_huge_stats_t;
 typedef struct arena_stats_s arena_stats_t;
 typedef struct chunk_stats_s chunk_stats_t;
@@ -20,12 +21,6 @@ struct tcache_bin_stats_s {
 };
 
 struct malloc_bin_stats_s {
-	/*
-	 * Current number of bytes allocated, including objects currently
-	 * cached by tcache.
-	 */
-	size_t		allocated;
-
 	/*
 	 * Total number of allocation/deallocation requests served directly by
 	 * the bin.  Note that tcache may allocate an object, then recycle it
@@ -42,6 +37,12 @@ struct malloc_bin_stats_s {
 	 */
 	uint64_t	nrequests;
 
+	/*
+	 * Current number of regions of this size class, including regions
+	 * currently cached by tcache.
+	 */
+	size_t		curregs;
+
 	/* Number of tcache fills from this bin. */
 	uint64_t	nfills;
@@ -78,10 +79,25 @@ struct malloc_large_stats_s {
 	 */
 	uint64_t	nrequests;
 
-	/* Current number of runs of this size class. */
+	/*
+	 * Current number of runs of this size class, including runs currently
+	 * cached by tcache.
+	 */
 	size_t		curruns;
 };
 
+struct malloc_huge_stats_s {
+	/*
+	 * Total number of allocation/deallocation requests served directly by
+	 * the arena.
+	 */
+	uint64_t	nmalloc;
+	uint64_t	ndalloc;
+
+	/* Current number of (multi-)chunk allocations of this size class. */
+	size_t		curhchunks;
+};
+
 struct arena_stats_s {
 	/* Number of bytes currently mapped. */
 	size_t		mapped;
@@ -95,34 +111,28 @@ struct arena_stats_s {
 	uint64_t	nmadvise;
 	uint64_t	purged;
 
+	/*
+	 * Number of bytes currently mapped purely for metadata purposes, and
+	 * number of bytes currently allocated for internal metadata.
+	 */
+	size_t		metadata_mapped;
+	size_t		metadata_allocated; /* Protected via atomic_*_z(). */
+
 	/* Per-size-category statistics. */
 	size_t		allocated_large;
 	uint64_t	nmalloc_large;
 	uint64_t	ndalloc_large;
 	uint64_t	nrequests_large;
 
-	/*
-	 * One element for each possible size class, including sizes that
-	 * overlap with bin size classes.  This is necessary because ipalloc()
-	 * sometimes has to use such large objects in order to assure proper
-	 * alignment.
-	 */
-	malloc_large_stats_t	*lstats;
-};
+	size_t		allocated_huge;
+	uint64_t	nmalloc_huge;
+	uint64_t	ndalloc_huge;
 
-struct chunk_stats_s {
-	/* Number of chunks that were allocated. */
-	uint64_t	nchunks;
+	/* One element for each large size class. */
+	malloc_large_stats_t	*lstats;
 
-	/* High-water mark for number of chunks allocated. */
-	size_t		highchunks;
-
-	/*
-	 * Current number of chunks allocated.  This value isn't maintained for
-	 * any other purpose, so keep track of it in order to be able to set
-	 * highchunks.
-	 */
-	size_t		curchunks;
+	/* One element for each huge size class. */
+	malloc_huge_stats_t	*hstats;
 };
 
 #endif /* JEMALLOC_H_STRUCTS */
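The new curregs counter is exported through the stats mallctl namespace. A sketch of reading it from application code, assuming a jemalloc 4.x build with stats enabled (the symbol may carry a je_ prefix depending on build configuration; error handling mostly elided):

    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        size_t curregs;

        /* Refresh jemalloc's stats snapshot before reading. */
        mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

        sz = sizeof(curregs);
        if (mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz,
            NULL, 0) == 0)
            printf("arena 0, bin 0 curregs: %zu\n", curregs);
        return (0);
    }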
deps/jemalloc/include/jemalloc/internal/tcache.h

@@ -4,6 +4,7 @@
 typedef struct tcache_bin_info_s tcache_bin_info_t;
 typedef struct tcache_bin_s tcache_bin_t;
 typedef struct tcache_s tcache_t;
+typedef struct tcaches_s tcaches_t;
 
 /*
  * tcache pointers close to NULL are used to encode state information that is
@@ -15,6 +16,11 @@ typedef struct tcache_s tcache_t;
 #define	TCACHE_STATE_PURGATORY	((tcache_t *)(uintptr_t)3)
 #define	TCACHE_STATE_MAX	TCACHE_STATE_PURGATORY
 
+/*
+ * Absolute minimum number of cache slots for each small bin.
+ */
+#define	TCACHE_NSLOTS_SMALL_MIN		20
+
 /*
  * Absolute maximum number of cache slots for each small bin in the thread
  * cache.  This is an additional constraint beyond that imposed as: twice the
@@ -69,10 +75,9 @@ struct tcache_bin_s {
 struct tcache_s {
 	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
-	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum() */
-	arena_t		*arena;		/* This thread's arena. */
+	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum(). */
 	unsigned	ev_cnt;		/* Event count since incremental GC. */
-	unsigned	next_gc_bin;	/* Next bin to GC. */
+	szind_t		next_gc_bin;	/* Next bin to GC. */
 	tcache_bin_t	tbins[1];	/* Dynamically sized. */
 	/*
 	 * The pointer stacks associated with tbins follow as a contiguous
@@ -82,6 +87,14 @@ struct tcache_s {
 	 */
 };
 
+/* Linkage for list of available (previously used) explicit tcache IDs. */
+struct tcaches_s {
+	union {
+		tcache_t	*tcache;
+		tcaches_t	*next;
+	};
+};
+
 #endif /* JEMALLOC_H_STRUCTS */
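tcaches_s overlays the live tcache pointer and the free-list link in one union, so recycling explicit tcache IDs costs no extra per-slot storage. A minimal sketch of the same slot-recycling pattern, with illustrative names rather than jemalloc's:

    #include <stddef.h>

    typedef struct slot_s slot_t;
    struct slot_s {
        union {
            void   *payload; /* when the slot is in use */
            slot_t *next;    /* when the slot is on the free list */
        };
    };

    static slot_t slots[64];
    static slot_t *avail;      /* head of free list of released slots */
    static unsigned past;      /* first never-used slot */

    static unsigned
    slot_alloc(void)
    {
        slot_t *elm;

        if (avail != NULL) {
            elm = avail;
            avail = avail->next;    /* reuse a previously freed ID */
        } else
            elm = &slots[past++];   /* extend into untouched slots */
        elm->payload = NULL;
        return ((unsigned)(elm - slots));
    }

    static void
    slot_dalloc(unsigned ind)
    {
        slots[ind].next = avail;    /* push the ID back on the list */
        avail = &slots[ind];
    }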
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
@@ -100,79 +113,85 @@ extern size_t nhbins;
 /* Maximum cached size class. */
 extern size_t	tcache_maxclass;
 
+/*
+ * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
+ * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are
+ * completely disjoint from this data structure.  tcaches starts off as a sparse
+ * array, so it has no physical memory footprint until individual pages are
+ * touched.  This allows the entire array to be allocated the first time an
+ * explicit tcache is created without a disproportionate impact on memory usage.
+ */
+extern tcaches_t	*tcaches;
+
 size_t	tcache_salloc(const void *ptr);
-void	tcache_event_hard(tcache_t *tcache);
-void	*tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
-    size_t binind);
-void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
-    tcache_t *tcache);
-void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
-    tcache_t *tcache);
+void	tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
+void	*tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    tcache_bin_t *tbin, szind_t binind);
+void	tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+    szind_t binind, unsigned rem);
+void	tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+    unsigned rem, tcache_t *tcache);
 void	tcache_arena_associate(tcache_t *tcache, arena_t *arena);
-void	tcache_arena_dissociate(tcache_t *tcache);
-tcache_t *tcache_create(arena_t *arena);
-void	tcache_destroy(tcache_t *tcache);
-void	tcache_thread_cleanup(void *arg);
+void	tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
+    arena_t *newarena);
+void	tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
+tcache_t *tcache_get_hard(tsd_t *tsd);
+tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
+void	tcache_cleanup(tsd_t *tsd);
+void	tcache_enabled_cleanup(tsd_t *tsd);
 void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
-bool	tcache_boot0(void);
-bool	tcache_boot1(void);
+bool	tcaches_create(tsd_t *tsd, unsigned *r_ind);
+void	tcaches_flush(tsd_t *tsd, unsigned ind);
+void	tcaches_destroy(tsd_t *tsd, unsigned ind);
+bool	tcache_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
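The tcaches comment above describes the user-visible side of this machinery. A sketch of driving it from application code via the documented mallctls and the MALLOCX_TCACHE() flag (jemalloc 4.x; error handling mostly elided):

    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        unsigned tc;
        size_t sz = sizeof(tc);
        void *p;

        /* Create an explicit tcache; its ID comes back in tc. */
        if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
            return (1);

        /* Allocate and free through the explicit cache. */
        p = mallocx(128, MALLOCX_TCACHE(tc));
        dallocx(p, MALLOCX_TCACHE(tc));

        /* Release cached objects, then retire the ID. */
        mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));
        mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
        return (0);
    }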
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
-malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *)
-malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)
-
-void	tcache_event(tcache_t *tcache);
+void	tcache_event(tsd_t *tsd, tcache_t *tcache);
 void	tcache_flush(void);
 bool	tcache_enabled_get(void);
-tcache_t *tcache_get(bool create);
+tcache_t *tcache_get(tsd_t *tsd, bool create);
 void	tcache_enabled_set(bool enabled);
 void	*tcache_alloc_easy(tcache_bin_t *tbin);
-void	*tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
-void	*tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
-void	tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
-void	tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
+void	*tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    size_t size, bool zero);
+void	*tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    size_t size, bool zero);
+void	tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
+    szind_t binind);
+void	tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
+    size_t size);
+tcache_t	*tcaches_get(tsd_t *tsd, unsigned ind);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
-/* Map of thread-specific caches. */
-malloc_tsd_externs(tcache, tcache_t *)
-malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL,
-    tcache_thread_cleanup)
-/* Per thread flag that allows thread caches to be disabled. */
-malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
-malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
-    tcache_enabled_default, malloc_tsd_no_cleanup)
-
 JEMALLOC_INLINE void
 tcache_flush(void)
 {
-	tcache_t *tcache;
+	tsd_t *tsd;
 
 	cassert(config_tcache);
 
-	tcache = *tcache_tsd_get();
-	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
-		return;
-	tcache_destroy(tcache);
-	tcache = NULL;
-	tcache_tsd_set(&tcache);
+	tsd = tsd_fetch();
+	tcache_cleanup(tsd);
 }
 
 JEMALLOC_INLINE bool
 tcache_enabled_get(void)
 {
+	tsd_t *tsd;
 	tcache_enabled_t tcache_enabled;
 
 	cassert(config_tcache);
 
-	tcache_enabled = *tcache_enabled_tsd_get();
+	tsd = tsd_fetch();
+	tcache_enabled = tsd_tcache_enabled_get(tsd);
 	if (tcache_enabled == tcache_enabled_default) {
 		tcache_enabled = (tcache_enabled_t)opt_tcache;
-		tcache_enabled_tsd_set(&tcache_enabled);
+		tsd_tcache_enabled_set(tsd, tcache_enabled);
 	}
 
 	return ((bool)tcache_enabled);
@@ -181,85 +200,41 @@ tcache_enabled_get(void)
 JEMALLOC_INLINE void
 tcache_enabled_set(bool enabled)
 {
+	tsd_t *tsd;
 	tcache_enabled_t tcache_enabled;
-	tcache_t *tcache;
 
 	cassert(config_tcache);
 
+	tsd = tsd_fetch();
+
 	tcache_enabled = (tcache_enabled_t)enabled;
-	tcache_enabled_tsd_set(&tcache_enabled);
-	tcache = *tcache_tsd_get();
-	if (enabled) {
-		if (tcache == TCACHE_STATE_DISABLED) {
-			tcache = NULL;
-			tcache_tsd_set(&tcache);
-		}
-	} else /* disabled */ {
-		if (tcache > TCACHE_STATE_MAX) {
-			tcache_destroy(tcache);
-			tcache = NULL;
-		}
-		if (tcache == NULL) {
-			tcache = TCACHE_STATE_DISABLED;
-			tcache_tsd_set(&tcache);
-		}
-	}
+	tsd_tcache_enabled_set(tsd, tcache_enabled);
+
+	if (!enabled)
+		tcache_cleanup(tsd);
 }
 
 JEMALLOC_ALWAYS_INLINE tcache_t *
-tcache_get(bool create)
+tcache_get(tsd_t *tsd, bool create)
 {
 	tcache_t *tcache;
 
-	if (config_tcache == false)
-		return (NULL);
-	if (config_lazy_lock && isthreaded == false)
+	if (!config_tcache)
 		return (NULL);
 
-	tcache = *tcache_tsd_get();
-	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
-		if (tcache == TCACHE_STATE_DISABLED)
-			return (NULL);
-		if (tcache == NULL) {
-			if (create == false) {
-				/*
-				 * Creating a tcache here would cause
-				 * allocation as a side effect of free().
-				 * Ordinarily that would be okay since
-				 * tcache_create() failure is a soft failure
-				 * that doesn't propagate.  However, if TLS
-				 * data are freed via free() as in glibc,
-				 * subtle corruption could result from setting
-				 * a TLS variable after its backing memory is
-				 * freed.
-				 */
-				return (NULL);
-			}
-			if (tcache_enabled_get() == false) {
-				tcache_enabled_set(false); /* Memoize. */
-				return (NULL);
-			}
-			return (tcache_create(choose_arena(NULL)));
-		}
-		if (tcache == TCACHE_STATE_PURGATORY) {
-			/*
-			 * Make a note that an allocator function was called
-			 * after tcache_thread_cleanup() was called.
-			 */
-			tcache = TCACHE_STATE_REINCARNATED;
-			tcache_tsd_set(&tcache);
-			return (NULL);
-		}
-		if (tcache == TCACHE_STATE_REINCARNATED)
-			return (NULL);
-		not_reached();
-	}
+	tcache = tsd_tcache_get(tsd);
+	if (!create)
+		return (tcache);
+	if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
+		tcache = tcache_get_hard(tsd);
+		tsd_tcache_set(tsd, tcache);
+	}
 
 	return (tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-tcache_event(tcache_t *tcache)
+tcache_event(tsd_t *tsd, tcache_t *tcache)
 {
 
 	if (TCACHE_GC_INCR == 0)
@@ -267,8 +242,8 @@ tcache_event(tcache_t *tcache)
 	tcache->ev_cnt++;
 	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
-	if (tcache->ev_cnt == TCACHE_GC_INCR)
-		tcache_event_hard(tcache);
+	if (unlikely(tcache->ev_cnt == TCACHE_GC_INCR))
+		tcache_event_hard(tsd, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -276,85 +251,87 @@ tcache_alloc_easy(tcache_bin_t *tbin)
 {
 	void *ret;
 
-	if (tbin->ncached == 0) {
+	if (unlikely(tbin->ncached == 0)) {
 		tbin->low_water = -1;
 		return (NULL);
 	}
 	tbin->ncached--;
-	if ((int)tbin->ncached < tbin->low_water)
+	if (unlikely((int)tbin->ncached < tbin->low_water))
 		tbin->low_water = tbin->ncached;
 	ret = tbin->avail[tbin->ncached];
 	return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
+tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+    bool zero)
 {
 	void *ret;
-	size_t binind;
+	szind_t binind;
+	size_t usize;
 	tcache_bin_t *tbin;
 
-	binind = SMALL_SIZE2BIN(size);
+	binind = size2index(size);
 	assert(binind < NBINS);
 	tbin = &tcache->tbins[binind];
-	size = arena_bin_info[binind].reg_size;
+	usize = index2size(binind);
 	ret = tcache_alloc_easy(tbin);
-	if (ret == NULL) {
-		ret = tcache_alloc_small_hard(tcache, tbin, binind);
+	if (unlikely(ret == NULL)) {
+		ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin,
+		    binind);
 		if (ret == NULL)
 			return (NULL);
 	}
-	assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);
+	assert(tcache_salloc(ret) == usize);
 
-	if (zero == false) {
+	if (likely(!zero)) {
 		if (config_fill) {
-			if (opt_junk) {
+			if (unlikely(opt_junk_alloc)) {
 				arena_alloc_junk_small(ret,
 				    &arena_bin_info[binind], false);
-			} else if (opt_zero)
-				memset(ret, 0, size);
+			} else if (unlikely(opt_zero))
+				memset(ret, 0, usize);
 		}
-		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	} else {
-		if (config_fill && opt_junk) {
+		if (config_fill && unlikely(opt_junk_alloc)) {
 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
 			    true);
 		}
-		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
-		memset(ret, 0, size);
+		memset(ret, 0, usize);
 	}
 
 	if (config_stats)
 		tbin->tstats.nrequests++;
 	if (config_prof)
-		tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
-	tcache_event(tcache);
+		tcache->prof_accumbytes += usize;
+	tcache_event(tsd, tcache);
 	return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
+tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+    bool zero)
 {
 	void *ret;
-	size_t binind;
+	szind_t binind;
+	size_t usize;
 	tcache_bin_t *tbin;
 
-	size = PAGE_CEILING(size);
-	assert(size <= tcache_maxclass);
-	binind = NBINS + (size >> LG_PAGE) - 1;
+	binind = size2index(size);
+	usize = index2size(binind);
+	assert(usize <= tcache_maxclass);
 	assert(binind < nhbins);
 	tbin = &tcache->tbins[binind];
 	ret = tcache_alloc_easy(tbin);
-	if (ret == NULL) {
+	if (unlikely(ret == NULL)) {
 		/*
 		 * Only allocate one large object at a time, because it's quite
 		 * expensive to create one and not use it.
 		 */
-		ret = arena_malloc_large(tcache->arena, size, zero);
+		ret = arena_malloc_large(arena, usize, zero);
 		if (ret == NULL)
 			return (NULL);
 	} else {
-		if (config_prof && prof_promote && size == PAGE) {
+		if (config_prof && usize == LARGE_MINCLASS) {
 			arena_chunk_t *chunk =
 			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
 			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
@@ -362,57 +339,54 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
 			arena_mapbits_large_binind_set(chunk, pageind,
 			    BININD_INVALID);
 		}
-		if (zero == false) {
+		if (likely(!zero)) {
 			if (config_fill) {
-				if (opt_junk)
-					memset(ret, 0xa5, size);
-				else if (opt_zero)
-					memset(ret, 0, size);
+				if (unlikely(opt_junk_alloc))
+					memset(ret, 0xa5, usize);
+				else if (unlikely(opt_zero))
+					memset(ret, 0, usize);
 			}
-			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
-		} else {
-			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
-			memset(ret, 0, size);
-		}
+		} else
+			memset(ret, 0, usize);
 
 		if (config_stats)
 			tbin->tstats.nrequests++;
 		if (config_prof)
-			tcache->prof_accumbytes += size;
+			tcache->prof_accumbytes += usize;
 	}
 
-	tcache_event(tcache);
+	tcache_event(tsd, tcache);
 	return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
+tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind)
 {
 	tcache_bin_t *tbin;
 	tcache_bin_info_t *tbin_info;
 
 	assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
 
-	if (config_fill && opt_junk)
+	if (config_fill && unlikely(opt_junk_free))
 		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
 
 	tbin = &tcache->tbins[binind];
 	tbin_info = &tcache_bin_info[binind];
-	if (tbin->ncached == tbin_info->ncached_max) {
-		tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
-		    1), tcache);
+	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
+		tcache_bin_flush_small(tsd, tcache, tbin, binind,
+		    (tbin_info->ncached_max >> 1));
 	}
 	assert(tbin->ncached < tbin_info->ncached_max);
 	tbin->avail[tbin->ncached] = ptr;
 	tbin->ncached++;
 
-	tcache_event(tcache);
+	tcache_event(tsd, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
+tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size)
 {
-	size_t binind;
+	szind_t binind;
 	tcache_bin_t *tbin;
 	tcache_bin_info_t *tbin_info;
 
@@ -420,22 +394,31 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
 	assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
 	assert(tcache_salloc(ptr) <= tcache_maxclass);
 
-	binind = NBINS + (size >> LG_PAGE) - 1;
+	binind = size2index(size);
 
-	if (config_fill && opt_junk)
-		memset(ptr, 0x5a, size);
+	if (config_fill && unlikely(opt_junk_free))
+		arena_dalloc_junk_large(ptr, size);
 
 	tbin = &tcache->tbins[binind];
 	tbin_info = &tcache_bin_info[binind];
-	if (tbin->ncached == tbin_info->ncached_max) {
-		tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
-		    1), tcache);
+	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
+		tcache_bin_flush_large(tsd, tbin, binind,
+		    (tbin_info->ncached_max >> 1), tcache);
 	}
 	assert(tbin->ncached < tbin_info->ncached_max);
 	tbin->avail[tbin->ncached] = ptr;
 	tbin->ncached++;
 
-	tcache_event(tcache);
+	tcache_event(tsd, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_t *
+tcaches_get(tsd_t *tsd, unsigned ind)
+{
+	tcaches_t *elm = &tcaches[ind];
+	if (unlikely(elm->tcache == NULL))
+		elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
+	return (elm->tcache);
 }
 #endif
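tcache_alloc_small() and tcache_dalloc_large() now derive bin indices with size2index() and usable sizes with index2size() instead of the old SMALL_SIZE2BIN/LG_PAGE arithmetic. The property these inlines rely on, sketched with hypothetical standalone declarations of the pair (jemalloc's internal versions use szind_t):

    #include <assert.h>
    #include <stddef.h>

    size_t size2index(size_t size);  /* request size -> size class index */
    size_t index2size(size_t index); /* size class index -> usable size */

    static void
    check_roundtrip(size_t s)
    {
        size_t usize = index2size(size2index(s));

        /* The returned class is never smaller than the request... */
        assert(usize >= s);
        /* ...and is the canonical size for its own class. */
        assert(index2size(size2index(usize)) == usize);
    }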
deps/jemalloc/include/jemalloc/internal/tsd.h

@@ -2,7 +2,7 @@
 #ifdef JEMALLOC_H_TYPES

 /* Maximum number of malloc_tsd users with cleanup functions. */
-#define	MALLOC_TSD_CLEANUPS_MAX	8
+#define	MALLOC_TSD_CLEANUPS_MAX	2

 typedef bool (*malloc_tsd_cleanup_t)(void);
...
@@ -12,9 +12,18 @@ typedef struct tsd_init_block_s tsd_init_block_t;
 typedef struct tsd_init_head_s tsd_init_head_t;
 #endif

+typedef struct tsd_s tsd_t;
+
+typedef enum {
+	tsd_state_uninitialized,
+	tsd_state_nominal,
+	tsd_state_purgatory,
+	tsd_state_reincarnated
+} tsd_state_t;
+
 /*
  * TLS/TSD-agnostic macro-based implementation of thread-specific data.  There
- * are four macros that support (at least) three use cases: file-private,
+ * are five macros that support (at least) three use cases: file-private,
  * library-private, and library-private inlined.  Following is an example
  * library-private tsd variable:
  *
...
@@ -24,34 +33,36 @@ typedef struct tsd_init_head_s tsd_init_head_t;
  *   int y;
  * } example_t;
  * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
- * malloc_tsd_protos(, example, example_t *)
- * malloc_tsd_externs(example, example_t *)
+ * malloc_tsd_types(example_, example_t)
+ * malloc_tsd_protos(, example_, example_t)
+ * malloc_tsd_externs(example_, example_t)
 * In example.c:
- * malloc_tsd_data(, example, example_t *, EX_INITIALIZER)
- * malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER,
+ * malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
+ * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
 *     example_tsd_cleanup)
 *
 * The result is a set of generated functions, e.g.:
 *
 *   bool example_tsd_boot(void) {...}
- *   example_t **example_tsd_get() {...}
- *   void example_tsd_set(example_t **val) {...}
+ *   example_t *example_tsd_get() {...}
+ *   void example_tsd_set(example_t *val) {...}
 *
 * Note that all of the functions deal in terms of (a_type *) rather than
 * (a_type) so that it is possible to support non-pointer types (unlike
 * pthreads TSD).  example_tsd_cleanup() is passed an (a_type *) pointer that is
- * cast to (void *).  This means that the cleanup function needs to cast *and*
- * dereference the function argument, e.g.:
+ * cast to (void *).  This means that the cleanup function needs to cast the
+ * function argument to (a_type *), then dereference the resulting pointer to
+ * access fields, e.g.
 *
 *   void
 *   example_tsd_cleanup(void *arg)
 *   {
- *       example_t *example = *(example_t **)arg;
+ *       example_t *example = (example_t *)arg;
 *
+ *       example->x = 42;
 *       [...]
- *       if ([want the cleanup function to be called again]) {
- *           example_tsd_set(&example);
- *       }
+ *       if ([want the cleanup function to be called again])
+ *           example_tsd_set(example);
 *   }
 *
 * If example_tsd_set() is called within example_tsd_cleanup(), it will be
...
@@ -60,63 +71,96 @@ typedef struct tsd_init_head_s tsd_init_head_t;
 * non-NULL.
 */
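Taken together, the comment above describes a small code generator: one set of macro invocations expands into boot/get/set functions for a per-thread value. A sketch of how a caller would consume the generated API, reusing the example_ names from the comment (assuming the declarations above are in scope and example_tsd_boot() has succeeded):

/* In example.h, exactly as in the comment: */
typedef struct {
	int x;
	int y;
} example_t;
#define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
malloc_tsd_types(example_, example_t)
malloc_tsd_protos(, example_, example_t)
malloc_tsd_externs(example_, example_t)

/* A caller somewhere in example.c: */
static void
bump_x(void)
{
	example_t *ex = example_tsd_get();	/* This thread's instance. */

	ex->x++;
	example_tsd_set(ex);	/* Marks the slot initialized for cleanup. */
}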
+/* malloc_tsd_types(). */
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+#define	malloc_tsd_types(a_name, a_type)
+#elif (defined(JEMALLOC_TLS))
+#define	malloc_tsd_types(a_name, a_type)
+#elif (defined(_WIN32))
+#define	malloc_tsd_types(a_name, a_type)			\
+typedef struct {						\
+	bool	initialized;					\
+	a_type	val;						\
+} a_name##tsd_wrapper_t;
+#else
+#define	malloc_tsd_types(a_name, a_type)			\
+typedef struct {						\
+	bool	initialized;					\
+	a_type	val;						\
+} a_name##tsd_wrapper_t;
+#endif
+
 /* malloc_tsd_protos(). */
 #define	malloc_tsd_protos(a_attr, a_name, a_type)		\
 a_attr bool							\
-a_name##_tsd_boot(void);					\
+a_name##tsd_boot0(void);					\
+a_attr void							\
+a_name##tsd_boot1(void);					\
+a_attr bool							\
+a_name##tsd_boot(void);						\
 a_attr a_type *							\
-a_name##_tsd_get(void);						\
+a_name##tsd_get(void);						\
 a_attr void							\
-a_name##_tsd_set(a_type *val);
+a_name##tsd_set(a_type *val);

 /* malloc_tsd_externs(). */
 #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
 #define	malloc_tsd_externs(a_name, a_type)			\
-extern __thread a_type	a_name##_tls;				\
-extern __thread bool	a_name##_initialized;			\
-extern bool		a_name##_booted;
+extern __thread a_type	a_name##tsd_tls;			\
+extern __thread bool	a_name##tsd_initialized;		\
+extern bool		a_name##tsd_booted;
 #elif (defined(JEMALLOC_TLS))
 #define	malloc_tsd_externs(a_name, a_type)			\
-extern __thread a_type	a_name##_tls;				\
-extern pthread_key_t	a_name##_tsd;				\
-extern bool		a_name##_booted;
+extern __thread a_type	a_name##tsd_tls;			\
+extern pthread_key_t	a_name##tsd_tsd;			\
+extern bool		a_name##tsd_booted;
 #elif (defined(_WIN32))
 #define	malloc_tsd_externs(a_name, a_type)			\
-extern DWORD		a_name##_tsd;				\
-extern bool		a_name##_booted;
+extern DWORD		a_name##tsd_tsd;			\
+extern a_name##tsd_wrapper_t	a_name##tsd_boot_wrapper;	\
+extern bool		a_name##tsd_booted;
 #else
 #define	malloc_tsd_externs(a_name, a_type)			\
-extern pthread_key_t	a_name##_tsd;				\
-extern tsd_init_head_t	a_name##_tsd_init_head;			\
-extern bool		a_name##_booted;
+extern pthread_key_t	a_name##tsd_tsd;			\
+extern tsd_init_head_t	a_name##tsd_init_head;			\
+extern a_name##tsd_wrapper_t	a_name##tsd_boot_wrapper;	\
+extern bool		a_name##tsd_booted;
 #endif

 /* malloc_tsd_data(). */
 #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
 #define	malloc_tsd_data(a_attr, a_name, a_type, a_initializer)	\
 a_attr __thread a_type JEMALLOC_TLS_MODEL			\
-    a_name##_tls = a_initializer;				\
+    a_name##tsd_tls = a_initializer;				\
 a_attr __thread bool JEMALLOC_TLS_MODEL			\
-    a_name##_initialized = false;				\
-a_attr bool a_name##_booted = false;
+    a_name##tsd_initialized = false;				\
+a_attr bool a_name##tsd_booted = false;
 #elif (defined(JEMALLOC_TLS))
 #define	malloc_tsd_data(a_attr, a_name, a_type, a_initializer)	\
 a_attr __thread a_type JEMALLOC_TLS_MODEL			\
-    a_name##_tls = a_initializer;				\
-a_attr pthread_key_t	a_name##_tsd;				\
-a_attr bool a_name##_booted = false;
+    a_name##tsd_tls = a_initializer;				\
+a_attr pthread_key_t	a_name##tsd_tsd;			\
+a_attr bool a_name##tsd_booted = false;
 #elif (defined(_WIN32))
 #define	malloc_tsd_data(a_attr, a_name, a_type, a_initializer)	\
-a_attr DWORD a_name##_tsd;					\
-a_attr bool a_name##_booted = false;
+a_attr DWORD a_name##tsd_tsd;					\
+a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = {	\
+	false,							\
+	a_initializer						\
+};								\
+a_attr bool a_name##tsd_booted = false;
 #else
 #define	malloc_tsd_data(a_attr, a_name, a_type, a_initializer)	\
-a_attr pthread_key_t	a_name##_tsd;				\
-a_attr tsd_init_head_t	a_name##_tsd_init_head = {		\
+a_attr pthread_key_t	a_name##tsd_tsd;			\
+a_attr tsd_init_head_t	a_name##tsd_init_head = {		\
 	ql_head_initializer(blocks),				\
 	MALLOC_MUTEX_INITIALIZER				\
 };								\
-a_attr bool a_name##_booted = false;
+a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = {	\
+	false,							\
+	a_initializer						\
+};								\
+a_attr bool a_name##tsd_booted = false;
 #endif
 /* malloc_tsd_funcs(). */
...
@@ -125,75 +169,100 @@ a_attr bool a_name##_booted = false;
     a_cleanup)							\
 /* Initialization/cleanup. */					\
 a_attr bool							\
-a_name##_tsd_cleanup_wrapper(void)				\
+a_name##tsd_cleanup_wrapper(void)				\
 {								\
 								\
-	if (a_name##_initialized) {				\
-		a_name##_initialized = false;			\
-		a_cleanup(&a_name##_tls);			\
+	if (a_name##tsd_initialized) {				\
+		a_name##tsd_initialized = false;		\
+		a_cleanup(&a_name##tsd_tls);			\
 	}							\
-	return (a_name##_initialized);				\
+	return (a_name##tsd_initialized);			\
 }								\
 a_attr bool							\
-a_name##_tsd_boot(void)						\
+a_name##tsd_boot0(void)						\
 {								\
 								\
 	if (a_cleanup != malloc_tsd_no_cleanup) {		\
 		malloc_tsd_cleanup_register(			\
-		    &a_name##_tsd_cleanup_wrapper);		\
+		    &a_name##tsd_cleanup_wrapper);		\
 	}							\
-	a_name##_booted = true;					\
+	a_name##tsd_booted = true;				\
 	return (false);						\
 }								\
+a_attr void							\
+a_name##tsd_boot1(void)						\
+{								\
+								\
+	/* Do nothing. */					\
+}								\
+a_attr bool							\
+a_name##tsd_boot(void)						\
+{								\
+								\
+	return (a_name##tsd_boot0());				\
+}								\
 /* Get/set. */							\
 a_attr a_type *							\
-a_name##_tsd_get(void)						\
+a_name##tsd_get(void)						\
 {								\
 								\
-	assert(a_name##_booted);				\
-	return (&a_name##_tls);					\
+	assert(a_name##tsd_booted);				\
+	return (&a_name##tsd_tls);				\
 }								\
 a_attr void							\
-a_name##_tsd_set(a_type *val)					\
+a_name##tsd_set(a_type *val)					\
 {								\
 								\
-	assert(a_name##_booted);				\
-	a_name##_tls = (*val);					\
+	assert(a_name##tsd_booted);				\
+	a_name##tsd_tls = (*val);				\
 	if (a_cleanup != malloc_tsd_no_cleanup)			\
-		a_name##_initialized = true;			\
+		a_name##tsd_initialized = true;			\
 }
 #elif (defined(JEMALLOC_TLS))
 #define	malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer,	\
     a_cleanup)							\
 /* Initialization/cleanup. */					\
 a_attr bool							\
-a_name##_tsd_boot(void)						\
+a_name##tsd_boot0(void)						\
 {								\
 								\
 	if (a_cleanup != malloc_tsd_no_cleanup) {		\
-		if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0)	\
+		if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) !=	\
+		    0)						\
 			return (true);				\
 	}							\
-	a_name##_booted = true;					\
+	a_name##tsd_booted = true;				\
 	return (false);						\
 }								\
+a_attr void							\
+a_name##tsd_boot1(void)						\
+{								\
+								\
+	/* Do nothing. */					\
+}								\
+a_attr bool							\
+a_name##tsd_boot(void)						\
+{								\
+								\
+	return (a_name##tsd_boot0());				\
+}								\
 /* Get/set. */							\
 a_attr a_type *							\
-a_name##_tsd_get(void)						\
+a_name##tsd_get(void)						\
 {								\
 								\
-	assert(a_name##_booted);				\
-	return (&a_name##_tls);					\
+	assert(a_name##tsd_booted);				\
+	return (&a_name##tsd_tls);				\
 }								\
 a_attr void							\
-a_name##_tsd_set(a_type *val)					\
+a_name##tsd_set(a_type *val)					\
 {								\
 								\
-	assert(a_name##_booted);				\
-	a_name##_tls = (*val);					\
+	assert(a_name##tsd_booted);				\
+	a_name##tsd_tls = (*val);				\
 	if (a_cleanup != malloc_tsd_no_cleanup) {		\
-		if (pthread_setspecific(a_name##_tsd,		\
-		    (void *)(&a_name##_tls))) {			\
+		if (pthread_setspecific(a_name##tsd_tsd,	\
+		    (void *)(&a_name##tsd_tls))) {		\
 			malloc_write("<jemalloc>: Error"	\
 			    " setting TSD for "#a_name"\n");	\
 			if (opt_abort)				\
...
@@ -204,27 +273,21 @@ a_name##_tsd_set(a_type *val)				\
 #elif (defined(_WIN32))
 #define	malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer,	\
     a_cleanup)							\
-/* Data structure. */						\
-typedef struct {						\
-	bool	initialized;					\
-	a_type	val;						\
-} a_name##_tsd_wrapper_t;					\
 /* Initialization/cleanup. */					\
 a_attr bool							\
-a_name##_tsd_cleanup_wrapper(void)				\
+a_name##tsd_cleanup_wrapper(void)				\
 {								\
-	a_name##_tsd_wrapper_t *wrapper;			\
+	DWORD error = GetLastError();				\
+	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)	\
+	    TlsGetValue(a_name##tsd_tsd);			\
+	SetLastError(error);					\
 								\
-	wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd);	\
 	if (wrapper == NULL)					\
 		return (false);					\
 	if (a_cleanup != malloc_tsd_no_cleanup &&		\
 	    wrapper->initialized) {				\
-		a_type val = wrapper->val;			\
-		a_type tsd_static_data = a_initializer;		\
 		wrapper->initialized = false;			\
-		wrapper->val = tsd_static_data;			\
-		a_cleanup(&val);				\
+		a_cleanup(&wrapper->val);			\
 		if (wrapper->initialized) {			\
 			/* Trigger another cleanup round. */	\
 			return (true);				\
...
@@ -233,63 +296,95 @@ a_name##_tsd_cleanup_wrapper(void)			\
 	malloc_tsd_dalloc(wrapper);				\
 	return (false);						\
 }								\
-a_attr bool							\
-a_name##_tsd_boot(void)						\
-{								\
-								\
-	a_name##_tsd = TlsAlloc();				\
-	if (a_name##_tsd == TLS_OUT_OF_INDEXES)			\
-		return (true);					\
-	if (a_cleanup != malloc_tsd_no_cleanup) {		\
-		malloc_tsd_cleanup_register(			\
-		    &a_name##_tsd_cleanup_wrapper);		\
-	}							\
-	a_name##_booted = true;					\
-	return (false);						\
-}								\
-/* Get/set. */							\
-a_attr a_name##_tsd_wrapper_t *					\
-a_name##_tsd_get_wrapper(void)					\
-{								\
-	a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)	\
-	    TlsGetValue(a_name##_tsd);				\
-								\
-	if (wrapper == NULL) {					\
+a_attr void							\
+a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper)	\
+{								\
+								\
+	if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) {	\
+		malloc_write("<jemalloc>: Error setting"	\
+		    " TSD for "#a_name"\n");			\
+		abort();					\
+	}							\
+}								\
+a_attr a_name##tsd_wrapper_t *					\
+a_name##tsd_wrapper_get(void)					\
+{								\
+	DWORD error = GetLastError();				\
+	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)	\
+	    TlsGetValue(a_name##tsd_tsd);			\
+	SetLastError(error);					\
+								\
+	if (unlikely(wrapper == NULL)) {			\
 		wrapper = (a_name##tsd_wrapper_t *)		\
 		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t));	\
 		if (wrapper == NULL) {				\
 			malloc_write("<jemalloc>: Error allocating"	\
 			    " TSD for "#a_name"\n");		\
 			abort();				\
 		} else {					\
-			static a_type tsd_static_data = a_initializer;	\
 			wrapper->initialized = false;		\
-			wrapper->val = tsd_static_data;		\
+			wrapper->val = a_initializer;		\
 		}						\
-		if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) {	\
-			malloc_write("<jemalloc>: Error setting"	\
-			    " TSD for "#a_name"\n");		\
-			abort();				\
-		}						\
+		a_name##tsd_wrapper_set(wrapper);		\
 	}							\
 	return (wrapper);					\
 }								\
+a_attr bool							\
+a_name##tsd_boot0(void)						\
+{								\
+								\
+	a_name##tsd_tsd = TlsAlloc();				\
+	if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES)		\
+		return (true);					\
+	if (a_cleanup != malloc_tsd_no_cleanup) {		\
+		malloc_tsd_cleanup_register(			\
+		    &a_name##tsd_cleanup_wrapper);		\
+	}							\
+	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper);	\
+	a_name##tsd_booted = true;				\
+	return (false);						\
+}								\
+a_attr void							\
+a_name##tsd_boot1(void)						\
+{								\
+	a_name##tsd_wrapper_t *wrapper;				\
+	wrapper = (a_name##tsd_wrapper_t *)			\
+	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t));	\
+	if (wrapper == NULL) {					\
+		malloc_write("<jemalloc>: Error allocating"	\
+		    " TSD for "#a_name"\n");			\
+		abort();					\
+	}							\
+	memcpy(wrapper, &a_name##tsd_boot_wrapper,		\
+	    sizeof(a_name##tsd_wrapper_t));			\
+	a_name##tsd_wrapper_set(wrapper);			\
+}								\
+a_attr bool							\
+a_name##tsd_boot(void)						\
+{								\
+								\
+	if (a_name##tsd_boot0())				\
+		return (true);					\
+	a_name##tsd_boot1();					\
+	return (false);						\
+}								\
 /* Get/set. */							\
 a_attr a_type *							\
-a_name##_tsd_get(void)						\
+a_name##tsd_get(void)						\
 {								\
-	a_name##_tsd_wrapper_t *wrapper;			\
+	a_name##tsd_wrapper_t *wrapper;				\
 								\
-	assert(a_name##_booted);				\
-	wrapper = a_name##_tsd_get_wrapper();			\
+	assert(a_name##tsd_booted);				\
+	wrapper = a_name##tsd_wrapper_get();			\
 	return (&wrapper->val);					\
 }								\
 a_attr void							\
-a_name##_tsd_set(a_type *val)					\
+a_name##tsd_set(a_type *val)					\
 {								\
-	a_name##_tsd_wrapper_t *wrapper;			\
+	a_name##tsd_wrapper_t *wrapper;				\
 								\
-	assert(a_name##_booted);				\
-	wrapper = a_name##_tsd_get_wrapper();			\
+	assert(a_name##tsd_booted);				\
+	wrapper = a_name##tsd_wrapper_get();			\
 	wrapper->val = *(val);					\
 	if (a_cleanup != malloc_tsd_no_cleanup)			\
 		wrapper->initialized = true;			\
...
@@ -297,16 +392,11 @@ a_name##_tsd_set(a_type *val)				\
 #else
 #define	malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer,	\
     a_cleanup)							\
-/* Data structure. */						\
-typedef struct {						\
-	bool	initialized;					\
-	a_type	val;						\
-} a_name##_tsd_wrapper_t;					\
 /* Initialization/cleanup. */					\
 a_attr void							\
-a_name##_tsd_cleanup_wrapper(void *arg)				\
+a_name##tsd_cleanup_wrapper(void *arg)				\
 {								\
-	a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;	\
+	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg;	\
 								\
 	if (a_cleanup != malloc_tsd_no_cleanup &&		\
 	    wrapper->initialized) {				\
...
@@ -314,7 +404,7 @@ a_name##_tsd_cleanup_wrapper(void *arg)			\
 		a_cleanup(&wrapper->val);			\
 		if (wrapper->initialized) {			\
 			/* Trigger another cleanup round. */	\
-			if (pthread_setspecific(a_name##_tsd,	\
+			if (pthread_setspecific(a_name##tsd_tsd,	\
 			    (void *)wrapper)) {			\
 				malloc_write("<jemalloc>: Error"	\
 				    " setting TSD for "#a_name"\n");	\
...
@@ -326,67 +416,97 @@ a_name##_tsd_cleanup_wrapper(void *arg)			\
 	}							\
 	malloc_tsd_dalloc(wrapper);				\
 }								\
-a_attr bool							\
-a_name##_tsd_boot(void)						\
-{								\
-								\
-	if (pthread_key_create(&a_name##_tsd,			\
-	    a_name##_tsd_cleanup_wrapper) != 0)			\
-		return (true);					\
-	a_name##_booted = true;					\
-	return (false);						\
-}								\
-/* Get/set. */							\
-a_attr a_name##_tsd_wrapper_t *					\
-a_name##_tsd_get_wrapper(void)					\
+a_attr void							\
+a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper)	\
+{								\
+								\
+	if (pthread_setspecific(a_name##tsd_tsd,		\
+	    (void *)wrapper)) {					\
+		malloc_write("<jemalloc>: Error setting"	\
+		    " TSD for "#a_name"\n");			\
+		abort();					\
+	}							\
+}								\
+a_attr a_name##tsd_wrapper_t *					\
+a_name##tsd_wrapper_get(void)					\
 {								\
-	a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)	\
-	    pthread_getspecific(a_name##_tsd);			\
+	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)	\
+	    pthread_getspecific(a_name##tsd_tsd);		\
 								\
-	if (wrapper == NULL) {					\
+	if (unlikely(wrapper == NULL)) {			\
 		tsd_init_block_t block;				\
 		wrapper = tsd_init_check_recursion(		\
-		    &a_name##_tsd_init_head, &block);		\
+		    &a_name##tsd_init_head, &block);		\
 		if (wrapper)					\
 			return (wrapper);			\
-		wrapper = (a_name##_tsd_wrapper_t *)		\
-		    malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t));	\
+		wrapper = (a_name##tsd_wrapper_t *)		\
+		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t));	\
 		block.data = wrapper;				\
 		if (wrapper == NULL) {				\
 			malloc_write("<jemalloc>: Error allocating"	\
 			    " TSD for "#a_name"\n");		\
 			abort();				\
 		} else {					\
-			static a_type tsd_static_data = a_initializer;	\
 			wrapper->initialized = false;		\
-			wrapper->val = tsd_static_data;		\
+			wrapper->val = a_initializer;		\
 		}						\
-		if (pthread_setspecific(a_name##_tsd,		\
-		    (void *)wrapper)) {				\
-			malloc_write("<jemalloc>: Error setting"	\
-			    " TSD for "#a_name"\n");		\
-			abort();				\
-		}						\
-		tsd_init_finish(&a_name##_tsd_init_head, &block);	\
+		a_name##tsd_wrapper_set(wrapper);		\
+		tsd_init_finish(&a_name##tsd_init_head, &block);	\
 	}							\
 	return (wrapper);					\
 }								\
+a_attr bool							\
+a_name##tsd_boot0(void)						\
+{								\
+								\
+	if (pthread_key_create(&a_name##tsd_tsd,		\
+	    a_name##tsd_cleanup_wrapper) != 0)			\
+		return (true);					\
+	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper);	\
+	a_name##tsd_booted = true;				\
+	return (false);						\
+}								\
+a_attr void							\
+a_name##tsd_boot1(void)						\
+{								\
+	a_name##tsd_wrapper_t *wrapper;				\
+	wrapper = (a_name##tsd_wrapper_t *)			\
+	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t));	\
+	if (wrapper == NULL) {					\
+		malloc_write("<jemalloc>: Error allocating"	\
+		    " TSD for "#a_name"\n");			\
+		abort();					\
+	}							\
+	memcpy(wrapper, &a_name##tsd_boot_wrapper,		\
+	    sizeof(a_name##tsd_wrapper_t));			\
+	a_name##tsd_wrapper_set(wrapper);			\
+}								\
+a_attr bool							\
+a_name##tsd_boot(void)						\
+{								\
+								\
+	if (a_name##tsd_boot0())				\
+		return (true);					\
+	a_name##tsd_boot1();					\
+	return (false);						\
+}								\
 /* Get/set. */							\
 a_attr a_type *							\
-a_name##_tsd_get(void)						\
+a_name##tsd_get(void)						\
 {								\
-	a_name##_tsd_wrapper_t *wrapper;			\
+	a_name##tsd_wrapper_t *wrapper;				\
 								\
-	assert(a_name##_booted);				\
-	wrapper = a_name##_tsd_get_wrapper();			\
+	assert(a_name##tsd_booted);				\
+	wrapper = a_name##tsd_wrapper_get();			\
 	return (&wrapper->val);					\
 }								\
 a_attr void							\
-a_name##_tsd_set(a_type *val)					\
+a_name##tsd_set(a_type *val)					\
 {								\
-	a_name##_tsd_wrapper_t *wrapper;			\
+	a_name##tsd_wrapper_t *wrapper;				\
 								\
-	assert(a_name##_booted);				\
-	wrapper = a_name##_tsd_get_wrapper();			\
+	assert(a_name##tsd_booted);				\
+	wrapper = a_name##tsd_wrapper_get();			\
 	wrapper->val = *(val);					\
 	if (a_cleanup != malloc_tsd_no_cleanup)			\
 		wrapper->initialized = true;			\
...
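Stripped of the macro layering, the pthreads fallback above is the classic boxed-TSD pattern: the value lives in a heap-allocated wrapper keyed by a pthread_key_t and is allocated lazily on first access. A minimal sketch of just that pattern, with none of jemalloc's internals (all names here are illustrative):

#include <pthread.h>
#include <stdlib.h>

typedef struct {
	int	initialized;
	long	val;		/* The per-thread payload. */
} wrapper_t;

static pthread_key_t key;

static void
cleanup(void *arg)
{
	free(arg);	/* Real code may re-register, as described above. */
}

static int
toy_tsd_boot(void)	/* Returns nonzero on failure, like tsd_boot0(). */
{
	return (pthread_key_create(&key, cleanup) != 0);
}

static wrapper_t *
wrapper_get(void)
{
	wrapper_t *w = pthread_getspecific(key);

	if (w == NULL) {	/* First access on this thread. */
		w = calloc(1, sizeof(*w));
		if (w != NULL && pthread_setspecific(key, w) != 0) {
			free(w);
			w = NULL;
		}
	}
	return (w);
}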
@@ -410,25 +530,136 @@ struct tsd_init_head_s {
 };
 #endif

+#define	MALLOC_TSD							\
+/*  O(name,			type) */				\
+    O(tcache,			tcache_t *)				\
+    O(thread_allocated,		uint64_t)				\
+    O(thread_deallocated,	uint64_t)				\
+    O(prof_tdata,		prof_tdata_t *)				\
+    O(arena,			arena_t *)				\
+    O(arenas_cache,		arena_t **)				\
+    O(narenas_cache,		unsigned)				\
+    O(arenas_cache_bypass,	bool)					\
+    O(tcache_enabled,		tcache_enabled_t)			\
+    O(quarantine,		quarantine_t *)				\
+
+#define	TSD_INITIALIZER {						\
+    tsd_state_uninitialized,						\
+    NULL,								\
+    0,									\
+    0,									\
+    NULL,								\
+    NULL,								\
+    NULL,								\
+    0,									\
+    false,								\
+    tcache_enabled_default,						\
+    NULL								\
+}
+
+struct tsd_s {
+	tsd_state_t	state;
+#define	O(n, t)								\
+	t		n;
+MALLOC_TSD
+#undef O
+};
+
+static const tsd_t tsd_initializer = TSD_INITIALIZER;
+
+malloc_tsd_types(, tsd_t)
+
 #endif /* JEMALLOC_H_STRUCTS */
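MALLOC_TSD above is an X-macro: the same O(name, type) table is expanded once to lay out the struct fields, and again (in the inline section below) to stamp out typed accessors, so the field list exists in exactly one place. A reduced illustration of the technique with made-up field names:

#define FIELDS						\
    O(count,	unsigned)				\
    O(label,	const char *)

typedef struct {
#define O(n, t)	t n;
	FIELDS		/* Expands to: unsigned count; const char *label; */
#undef O
} rec_t;

/* Second expansion of the same table: one typed getter per field. */
#define O(n, t)						\
static inline t rec_##n##_get(const rec_t *r) { return (r->n); }
FIELDS
#undef O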
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

 void	*malloc_tsd_malloc(size_t size);
 void	malloc_tsd_dalloc(void *wrapper);
-void	malloc_tsd_no_cleanup(void *);
+void	malloc_tsd_no_cleanup(void *arg);
 void	malloc_tsd_cleanup_register(bool (*f)(void));
-void	malloc_tsd_boot(void);
+bool	malloc_tsd_boot0(void);
+void	malloc_tsd_boot1(void);
 #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
     !defined(_WIN32))
 void	*tsd_init_check_recursion(tsd_init_head_t *head,
     tsd_init_block_t *block);
 void	tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
 #endif
+void	tsd_cleanup(void *arg);

 #endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

+#ifndef JEMALLOC_ENABLE_INLINE
+malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
+
+tsd_t	*tsd_fetch(void);
+bool	tsd_nominal(tsd_t *tsd);
+#define	O(n, t)							\
+t	*tsd_##n##p_get(tsd_t *tsd);				\
+t	tsd_##n##_get(tsd_t *tsd);				\
+void	tsd_##n##_set(tsd_t *tsd, t n);
+MALLOC_TSD
+#undef O
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
+malloc_tsd_externs(, tsd_t)
+malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_fetch(void)
+{
+	tsd_t *tsd = tsd_get();
+
+	if (unlikely(tsd->state != tsd_state_nominal)) {
+		if (tsd->state == tsd_state_uninitialized) {
+			tsd->state = tsd_state_nominal;
+			/* Trigger cleanup handler registration. */
+			tsd_set(tsd);
+		} else if (tsd->state == tsd_state_purgatory) {
+			tsd->state = tsd_state_reincarnated;
+			tsd_set(tsd);
+		} else
+			assert(tsd->state == tsd_state_reincarnated);
+	}
+
+	return (tsd);
+}
+
+JEMALLOC_INLINE bool
+tsd_nominal(tsd_t *tsd)
+{
+
+	return (tsd->state == tsd_state_nominal);
+}
+
+#define	O(n, t)							\
+JEMALLOC_ALWAYS_INLINE t *					\
+tsd_##n##p_get(tsd_t *tsd)					\
+{								\
+								\
+	return (&tsd->n);					\
+}								\
+								\
+JEMALLOC_ALWAYS_INLINE t					\
+tsd_##n##_get(tsd_t *tsd)					\
+{								\
+								\
+	return (*tsd_##n##p_get(tsd));				\
+}								\
+								\
+JEMALLOC_ALWAYS_INLINE void					\
+tsd_##n##_set(tsd_t *tsd, t n)					\
+{								\
+								\
+	assert(tsd->state == tsd_state_nominal);		\
+	tsd->n = n;						\
+}
+MALLOC_TSD
+#undef O
+#endif
+
 #endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
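tsd_fetch() above is a small state machine: a thread starts uninitialized, is promoted to nominal on first use (which registers the cleanup handler via tsd_set()), and a thread that touches TSD again after its cleanup already ran moves purgatory -> reincarnated so cleanup is re-armed exactly once more. A reduced model of the transitions (assumed names, no jemalloc internals):

typedef enum { UNINIT, NOMINAL, PURGATORY, REINCARNATED } state_t;

/* Hypothetical hook standing in for tsd_set()'s side effect. */
void	register_cleanup(void);

static state_t
fetch_transition(state_t s)
{
	switch (s) {
	case UNINIT:
		register_cleanup();	/* First use on this thread. */
		return (NOMINAL);
	case PURGATORY:
		register_cleanup();	/* Use after cleanup already ran. */
		return (REINCARNATED);
	default:
		return (s);	/* NOMINAL / REINCARNATED: no transition. */
	}
}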
deps/jemalloc/include/jemalloc/internal/util.h

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

+#ifdef _WIN32
+#  ifdef _WIN64
+#    define FMT64_PREFIX "ll"
+#    define FMTPTR_PREFIX "ll"
+#  else
+#    define FMT64_PREFIX "ll"
+#    define FMTPTR_PREFIX ""
+#  endif
+#  define FMTd32 "d"
+#  define FMTu32 "u"
+#  define FMTx32 "x"
+#  define FMTd64 FMT64_PREFIX "d"
+#  define FMTu64 FMT64_PREFIX "u"
+#  define FMTx64 FMT64_PREFIX "x"
+#  define FMTdPTR FMTPTR_PREFIX "d"
+#  define FMTuPTR FMTPTR_PREFIX "u"
+#  define FMTxPTR FMTPTR_PREFIX "x"
+#else
+#  include <inttypes.h>
+#  define FMTd32 PRId32
+#  define FMTu32 PRIu32
+#  define FMTx32 PRIx32
+#  define FMTd64 PRId64
+#  define FMTu64 PRIu64
+#  define FMTx64 PRIx64
+#  define FMTdPTR PRIdPTR
+#  define FMTuPTR PRIuPTR
+#  define FMTxPTR PRIxPTR
+#endif
+
 /* Size of stack-allocated buffer passed to buferror(). */
 #define	BUFERROR_BUF		64
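The new FMT* shims abstract over MSVC's nonstandard width prefixes versus C99's <inttypes.h> PRI* macros. They are used by string-literal concatenation, exactly like the PRI* family; a sketch of a translation unit that includes this header:

#include <stdint.h>
#include <stdio.h>

static void
print_stats(uint64_t nmalloc, uintptr_t base)
{
	/* Adjacent string literals splice the width prefix in. */
	printf("allocs: %"FMTu64", base: 0x%"FMTxPTR"\n", nmalloc, base);
}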
...
@@ -27,13 +57,37 @@
 #  define JEMALLOC_CC_SILENCE_INIT(v)
 #endif

+#define	JEMALLOC_GNUC_PREREQ(major, minor)				\
+    (!defined(__clang__) &&						\
+    (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))))
+#ifndef __has_builtin
+#  define __has_builtin(builtin) (0)
+#endif
+#define	JEMALLOC_CLANG_HAS_BUILTIN(builtin)				\
+    (defined(__clang__) && __has_builtin(builtin))
+
+#ifdef __GNUC__
+#  define likely(x)   __builtin_expect(!!(x), 1)
+#  define unlikely(x) __builtin_expect(!!(x), 0)
+#  if JEMALLOC_GNUC_PREREQ(4, 6) ||					\
+	JEMALLOC_CLANG_HAS_BUILTIN(__builtin_unreachable)
+#    define unreachable() __builtin_unreachable()
+#  else
+#    define unreachable()
+#  endif
+#else
+#  define likely(x)   !!(x)
+#  define unlikely(x) !!(x)
+#  define unreachable()
+#endif
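likely()/unlikely() compile to __builtin_expect() under GCC/Clang and degrade to a plain truth test elsewhere, so annotating a branch costs nothing on compilers without the builtin. Typical use mirrors the assert() rewrite just below; a small sketch:

/* Hedged example: hint that the error branch is cold. */
static int
checked_div(int a, int b, int *out)
{
	if (unlikely(b == 0))
		return (-1);	/* Rare path, moved off the hot trace. */
	*out = a / b;
	return (0);
}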
 /*
  * Define a custom assert() in order to reduce the chances of deadlock during
  * assertion failure.
  */
 #ifndef assert
 #define	assert(e) do {							\
-	if (config_debug && !(e)) {					\
+	if (unlikely(config_debug && !(e))) {				\
 		malloc_printf(						\
 		    "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n",	\
 		    __FILE__, __LINE__, #e);				\
...
@@ -50,6 +104,7 @@
 		    __FILE__, __LINE__);				\
 		abort();						\
 	}								\
+	unreachable();							\
 } while (0)
 #endif
...
@@ -65,14 +120,14 @@
 #ifndef assert_not_implemented
 #define	assert_not_implemented(e) do {					\
-	if (config_debug && !(e))					\
+	if (unlikely(config_debug && !(e)))				\
 		not_implemented();					\
 } while (0)
 #endif

 /* Use to assert a particular configuration, e.g., cassert(config_debug). */
 #define	cassert(c) do {							\
-	if ((c) == false)						\
+	if (unlikely(!(c)))						\
 		not_reached();						\
 } while (0)
...
@@ -96,25 +151,47 @@ void	malloc_write(const char *s);
 int	malloc_vsnprintf(char *str, size_t size, const char *format,
     va_list ap);
 int	malloc_snprintf(char *str, size_t size, const char *format, ...)
-    JEMALLOC_ATTR(format(printf, 3, 4));
+    JEMALLOC_FORMAT_PRINTF(3, 4);
 void	malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
     const char *format, va_list ap);
 void	malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
-    const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
+    const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
 void	malloc_printf(const char *format, ...)
-    JEMALLOC_ATTR(format(printf, 1, 2));
+    JEMALLOC_FORMAT_PRINTF(1, 2);

 #endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

 #ifndef JEMALLOC_ENABLE_INLINE
+int	jemalloc_ffsl(long bitmap);
+int	jemalloc_ffs(int bitmap);
 size_t	pow2_ceil(size_t x);
+size_t	lg_floor(size_t x);
 void	set_errno(int errnum);
 int	get_errno(void);
 #endif

 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
+
+/* Sanity check. */
+#if !defined(JEMALLOC_INTERNAL_FFSL) || !defined(JEMALLOC_INTERNAL_FFS)
+#  error Both JEMALLOC_INTERNAL_FFSL && JEMALLOC_INTERNAL_FFS should have been defined by configure
+#endif
+
+JEMALLOC_ALWAYS_INLINE int
+jemalloc_ffsl(long bitmap)
+{
+
+	return (JEMALLOC_INTERNAL_FFSL(bitmap));
+}
+
+JEMALLOC_ALWAYS_INLINE int
+jemalloc_ffs(int bitmap)
+{
+
+	return (JEMALLOC_INTERNAL_FFS(bitmap));
+}
+
 /* Compute the smallest power of 2 that is >= x. */
 JEMALLOC_INLINE size_t
 pow2_ceil(size_t x)
...
@@ -133,7 +210,82 @@ pow2_ceil(size_t x)
 	return (x);
 }
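pow2_ceil()'s body is elided in this hunk; the standard implementation of "smallest power of 2 >= x" is the bit-smearing trick, and jemalloc's version is of this shape (guarded on pointer width). A hedged standalone sketch:

#include <stddef.h>
#include <stdint.h>

static size_t
pow2_ceil_sketch(size_t x)
{
	x--;			/* So exact powers of 2 map to themselves. */
	x |= x >> 1;		/* Smear the high bit rightward... */
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
#if (SIZE_MAX > 0xffffffffU)	/* 64-bit size_t. */
	x |= x >> 32;
#endif
	x++;			/* ...then round up to the next power. */
	return (x);		/* pow2_ceil_sketch(5) == 8, (8) == 8. */
}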
-/* Sets error code */
+#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+JEMALLOC_INLINE size_t
+lg_floor(size_t x)
+{
+	size_t ret;
+
+	assert(x != 0);
+
+	asm ("bsr %1, %0"
+	    : "=r"(ret) // Outputs.
+	    : "r"(x)    // Inputs.
+	    );
+	return (ret);
+}
+#elif (defined(_MSC_VER))
+JEMALLOC_INLINE size_t
+lg_floor(size_t x)
+{
+	unsigned long ret;
+
+	assert(x != 0);
+
+#if (LG_SIZEOF_PTR == 3)
+	_BitScanReverse64(&ret, x);
+#elif (LG_SIZEOF_PTR == 2)
+	_BitScanReverse(&ret, x);
+#else
+#  error "Unsupported type sizes for lg_floor()"
+#endif
+	return (ret);
+}
+#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
+JEMALLOC_INLINE size_t
+lg_floor(size_t x)
+{
+
+	assert(x != 0);
+
+#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
+	return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x));
+#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
+	return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
+#else
+#  error "Unsupported type sizes for lg_floor()"
+#endif
+}
+#else
+JEMALLOC_INLINE size_t
+lg_floor(size_t x)
+{
+
+	assert(x != 0);
+
+	x |= (x >> 1);
+	x |= (x >> 2);
+	x |= (x >> 4);
+	x |= (x >> 8);
+	x |= (x >> 16);
+#if (LG_SIZEOF_PTR == 3 && LG_SIZEOF_PTR == LG_SIZEOF_LONG)
+	x |= (x >> 32);
+	if (x == KZU(0xffffffffffffffff))
+		return (63);
+	x++;
+	return (jemalloc_ffsl(x) - 2);
+#elif (LG_SIZEOF_PTR == 2)
+	if (x == KZU(0xffffffff))
+		return (31);
+	x++;
+	return (jemalloc_ffs(x) - 2);
+#else
+#  error "Unsupported type sizes for lg_floor()"
+#endif
+}
+#endif
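Four lg_floor() implementations are provided: BSR inline asm on x86, _BitScanReverse on MSVC, __builtin_clz where available, and a portable smear-then-ffs fallback. All must agree on the index of the highest set bit; a naive reference that any of them can be checked against:

#include <assert.h>
#include <stddef.h>

/* Reference: index of the highest set bit, by repeated shifting. */
static size_t
lg_floor_ref(size_t x)
{
	size_t lg = 0;

	assert(x != 0);
	while ((x >>= 1) != 0)
		lg++;
	return (lg);	/* lg_floor_ref(1) == 0, (4096) == 12, (4097) == 12. */
}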
+/* Set error code. */
 JEMALLOC_INLINE void
 set_errno(int errnum)
 {
...
@@ -145,7 +297,7 @@ set_errno(int errnum)
 #endif
 }

-/* Get last error code */
+/* Get last error code. */
 JEMALLOC_INLINE int
 get_errno(void)
 {
...
deps/jemalloc/include/jemalloc/internal/valgrind.h (new file)
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
/*
* The size that is reported to Valgrind must be consistent through a chain of
* malloc..realloc..realloc calls. Request size isn't recorded anywhere in
* jemalloc, so it is critical that all callers of these macros provide usize
* rather than request size. As a result, buffer overflow detection is
* technically weakened for the standard API, though it is generally accepted
* practice to consider any extra bytes reported by malloc_usable_size() as
* usable space.
*/
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \
if (unlikely(in_valgrind)) \
valgrind_make_mem_noaccess(ptr, usize); \
} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \
if (unlikely(in_valgrind)) \
valgrind_make_mem_undefined(ptr, usize); \
} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \
if (unlikely(in_valgrind)) \
valgrind_make_mem_defined(ptr, usize); \
} while (0)
/*
* The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
* calls must be embedded in macros rather than in functions so that when
* Valgrind reports errors, there are no extra stack frames in the backtraces.
*/
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
if (unlikely(in_valgrind && cond)) \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do { \
if (unlikely(in_valgrind)) { \
size_t rzsize = p2rz(ptr); \
\
if (!maybe_moved || ptr == old_ptr) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
usize, rzsize); \
if (zero && old_usize < usize) { \
valgrind_make_mem_defined( \
(void *)((uintptr_t)ptr + \
old_usize), usize - old_usize); \
} \
} else { \
if (!old_ptr_maybe_null || old_ptr != NULL) { \
valgrind_freelike_block(old_ptr, \
old_rzsize); \
} \
if (!ptr_maybe_null || ptr != NULL) { \
size_t copy_size = (old_usize < usize) \
? old_usize : usize; \
size_t tail_size = usize - copy_size; \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
rzsize, false); \
if (copy_size > 0) { \
valgrind_make_mem_defined(ptr, \
copy_size); \
} \
if (zero && tail_size > 0) { \
valgrind_make_mem_defined( \
(void *)((uintptr_t)ptr + \
copy_size), tail_size); \
} \
} \
} \
} \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
if (unlikely(in_valgrind)) \
valgrind_freelike_block(ptr, rzsize); \
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_VALGRIND
void	valgrind_make_mem_noaccess(void *ptr, size_t usize);
void	valgrind_make_mem_undefined(void *ptr, size_t usize);
void	valgrind_make_mem_defined(void *ptr, size_t usize);
void	valgrind_freelike_block(void *ptr, size_t usize);
#endif

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
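Inside the allocator these macros bracket every ownership change: memory handed to the application is announced as a malloc-like block, redzones stay noaccess, and frees are reported symmetrically, all compiling to nothing when Valgrind support is disabled. A hedged sketch of the call pattern at an allocation site (toy_* backends are hypothetical; the real call sites live in the arena code):

#include <stdbool.h>
#include <stddef.h>

void	*toy_arena_malloc(size_t usize);	/* Hypothetical backend. */
void	toy_arena_dalloc(void *ptr);		/* Hypothetical backend. */

void *
toy_alloc_site(size_t usize, bool zero)
{
	void *ret = toy_arena_malloc(usize);

	if (ret != NULL) {
		/* Tell Valgrind a malloc-like block of usize now exists. */
		JEMALLOC_VALGRIND_MALLOC(true, ret, usize, zero);
	}
	return (ret);
}

void
toy_free_site(void *ptr, size_t rzsize)
{
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);	/* Symmetric teardown. */
	toy_arena_dalloc(ptr);
}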
deps/jemalloc/include/jemalloc/jemalloc.sh

@@ -12,7 +12,7 @@ extern "C" {
 EOF

 for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
-           jemalloc_protos.h jemalloc_mangle.h ; do
+           jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do
   cat "${objroot}include/jemalloc/${hdr}" \
       | grep -v 'Generated from .* by configure\.' \
       | sed -e 's/^#define /#define /g' \
...
@@ -22,7 +22,7 @@ done
 cat <<EOF
 #ifdef __cplusplus
-};
+}
 #endif
 #endif /* JEMALLOC_H_ */
 EOF
deps/jemalloc/include/jemalloc/jemalloc_defs.h.in

 /* Defined if __attribute__((...)) syntax is supported. */
 #undef JEMALLOC_HAVE_ATTR

-/* Support the experimental API. */
-#undef JEMALLOC_EXPERIMENTAL
+/* Defined if alloc_size attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+
+/* Defined if format(gnu_printf, ...) attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+
+/* Defined if format(printf, ...) attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF

 /*
  * Define overrides for non-standard allocator-related functions if they are
...
@@ -20,5 +26,12 @@
  */
 #undef JEMALLOC_USABLE_SIZE_CONST

+/*
+ * If defined, specify throw() for the public function prototypes when compiling
+ * with C++.  The only justification for this is to match the prototypes that
+ * glibc defines.
+ */
+#undef JEMALLOC_USE_CXX_THROW
+
 /* sizeof(void *) == 2^LG_SIZEOF_PTR. */
 #undef LG_SIZEOF_PTR
deps/jemalloc/include/jemalloc/jemalloc_macros.h.in

+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
 #include <limits.h>
 #include <strings.h>
...
@@ -16,46 +19,84 @@
      ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
 #  endif
 #  define MALLOCX_ZERO	((int)0x40)
-/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
-#  define MALLOCX_ARENA(a)	((int)(((a)+1) << 8))
+/*
+ * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
+ * encodes MALLOCX_TCACHE_NONE.
+ */
+#  define MALLOCX_TCACHE(tc)	((int)(((tc)+2) << 8))
+#  define MALLOCX_TCACHE_NONE	MALLOCX_TCACHE(-1)
+/*
+ * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
+ */
+#  define MALLOCX_ARENA(a)	((int)(((a)+1) << 20))

-#ifdef JEMALLOC_EXPERIMENTAL
-#  define ALLOCM_LG_ALIGN(la)	(la)
-#  if LG_SIZEOF_PTR == 2
-#    define ALLOCM_ALIGN(a)	(ffs(a)-1)
-#  else
-#    define ALLOCM_ALIGN(a)	\
-       ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
-#  endif
-#  define ALLOCM_ZERO		((int)0x40)
-#  define ALLOCM_NO_MOVE	((int)0x80)
-/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
-#  define ALLOCM_ARENA(a)	((int)(((a)+1) << 8))
-#  define ALLOCM_SUCCESS		0
-#  define ALLOCM_ERR_OOM		1
-#  define ALLOCM_ERR_NOT_MOVED		2
-#endif
+#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
+#  define JEMALLOC_CXX_THROW throw()
+#else
+#  define JEMALLOC_CXX_THROW
+#endif

 #ifdef JEMALLOC_HAVE_ATTR
 #  define JEMALLOC_ATTR(s) __attribute__((s))
 #  define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
+#  ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+#    define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
+#    define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
+#  else
+#    define JEMALLOC_ALLOC_SIZE(s)
+#    define JEMALLOC_ALLOC_SIZE2(s1, s2)
+#  endif
+#  ifndef JEMALLOC_EXPORT
+#    define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
+#  endif
+#  ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
+#  elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
+#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
+#  else
+#    define JEMALLOC_FORMAT_PRINTF(s, i)
+#  endif
 #  define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
+#  define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
 #  define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
+#  define JEMALLOC_RESTRICT_RETURN
+#  define JEMALLOC_ALLOCATOR
 #elif _MSC_VER
 #  define JEMALLOC_ATTR(s)
 #  define JEMALLOC_ALIGNED(s) __declspec(align(s))
+#  define JEMALLOC_ALLOC_SIZE(s)
+#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
+#  ifndef JEMALLOC_EXPORT
 #    ifdef DLLEXPORT
 #      define JEMALLOC_EXPORT __declspec(dllexport)
 #    else
 #      define JEMALLOC_EXPORT __declspec(dllimport)
 #    endif
+#  endif
+#  define JEMALLOC_FORMAT_PRINTF(s, i)
 #  define JEMALLOC_NOINLINE __declspec(noinline)
+#  ifdef __cplusplus
+#    define JEMALLOC_NOTHROW __declspec(nothrow)
+#  else
+#    define JEMALLOC_NOTHROW
+#  endif
 #  define JEMALLOC_SECTION(s) __declspec(allocate(s))
+#  define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
+#  if _MSC_VER >= 1900 && !defined(__EDG__)
+#    define JEMALLOC_ALLOCATOR __declspec(allocator)
+#  else
+#    define JEMALLOC_ALLOCATOR
+#  endif
 #else
 #  define JEMALLOC_ATTR(s)
 #  define JEMALLOC_ALIGNED(s)
+#  define JEMALLOC_ALLOC_SIZE(s)
+#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
 #  define JEMALLOC_EXPORT
+#  define JEMALLOC_FORMAT_PRINTF(s, i)
 #  define JEMALLOC_NOINLINE
+#  define JEMALLOC_NOTHROW
 #  define JEMALLOC_SECTION(s)
+#  define JEMALLOC_RESTRICT_RETURN
+#  define JEMALLOC_ALLOCATOR
 #endif
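The biased encodings above pack three independent controls into one int flags word: low bits for alignment and zeroing, bits 8..19 for the tcache selector (+2 bias), and bits 20 and up for the arena (+1 bias), with 0 meaning "default" for each. A fully specified request therefore composes by OR; a sketch, assuming an unprefixed build where the public entry point is mallocx():

#include <stddef.h>

void *
alloc_on_arena(size_t size, unsigned arena_ind)
{
	/* Zeroed memory, bypass the tcache, explicit arena. */
	int flags = MALLOCX_ZERO | MALLOCX_TCACHE_NONE |
	    MALLOCX_ARENA(arena_ind);

	return (mallocx(size, flags));	/* flags == 0 would mean defaults. */
}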
deps/jemalloc/include/jemalloc/jemalloc_protos.h.in

@@ -7,52 +7,60 @@ extern JEMALLOC_EXPORT const char *@je_@malloc_conf;
 extern JEMALLOC_EXPORT void (*@je_@malloc_message)(void *cbopaque,
     const char *s);

-JEMALLOC_EXPORT void	*@je_@malloc(size_t size) JEMALLOC_ATTR(malloc);
-JEMALLOC_EXPORT void	*@je_@calloc(size_t num, size_t size)
-    JEMALLOC_ATTR(malloc);
-JEMALLOC_EXPORT int	@je_@posix_memalign(void **memptr, size_t alignment,
-    size_t size) JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT void	*@je_@aligned_alloc(size_t alignment, size_t size)
-    JEMALLOC_ATTR(malloc);
-JEMALLOC_EXPORT void	*@je_@realloc(void *ptr, size_t size);
-JEMALLOC_EXPORT void	@je_@free(void *ptr);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW	*@je_@malloc(size_t size)
+    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW	*@je_@calloc(size_t num, size_t size)
+    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW	@je_@posix_memalign(void **memptr,
+    size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW	*@je_@aligned_alloc(size_t alignment,
+    size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
+    JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW	*@je_@realloc(void *ptr, size_t size)
+    JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW	@je_@free(void *ptr)
+    JEMALLOC_CXX_THROW;

-JEMALLOC_EXPORT void	*@je_@mallocx(size_t size, int flags);
-JEMALLOC_EXPORT void	*@je_@rallocx(void *ptr, size_t size, int flags);
-JEMALLOC_EXPORT size_t	@je_@xallocx(void *ptr, size_t size, size_t extra,
-    int flags);
-JEMALLOC_EXPORT size_t	@je_@sallocx(const void *ptr, int flags);
-JEMALLOC_EXPORT void	@je_@dallocx(void *ptr, int flags);
-JEMALLOC_EXPORT size_t	@je_@nallocx(size_t size, int flags);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW	*@je_@mallocx(size_t size, int flags)
+    JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW	*@je_@rallocx(void *ptr, size_t size,
+    int flags) JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	@je_@xallocx(void *ptr, size_t size,
+    size_t extra, int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	@je_@sallocx(const void *ptr,
+    int flags) JEMALLOC_ATTR(pure);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW	@je_@dallocx(void *ptr, int flags);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW	@je_@sdallocx(void *ptr, size_t size,
+    int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	@je_@nallocx(size_t size, int flags)
+    JEMALLOC_ATTR(pure);

-JEMALLOC_EXPORT int	@je_@mallctl(const char *name, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int	@je_@mallctlnametomib(const char *name, size_t *mibp,
-    size_t *miblenp);
-JEMALLOC_EXPORT int	@je_@mallctlbymib(const size_t *mib, size_t miblen,
-    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT void	@je_@malloc_stats_print(void (*write_cb)(void *,
-    const char *), void *@je_@cbopaque, const char *opts);
-JEMALLOC_EXPORT size_t	@je_@malloc_usable_size(
-    JEMALLOC_USABLE_SIZE_CONST void *ptr);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW	@je_@mallctl(const char *name,
+    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW	@je_@mallctlnametomib(const char *name,
+    size_t *mibp, size_t *miblenp);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW	@je_@mallctlbymib(const size_t *mib,
+    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW	@je_@malloc_stats_print(
+    void (*write_cb)(void *, const char *), void *@je_@cbopaque,
+    const char *opts);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	@je_@malloc_usable_size(
+    JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;

 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT void *	@je_@memalign(size_t alignment, size_t size)
-    JEMALLOC_ATTR(malloc);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW	*@je_@memalign(size_t alignment, size_t size)
+    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
 #endif

 #ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT void *	@je_@valloc(size_t size) JEMALLOC_ATTR(malloc);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+    void JEMALLOC_NOTHROW	*@je_@valloc(size_t size) JEMALLOC_CXX_THROW
+    JEMALLOC_ATTR(malloc);
 #endif
-
-#ifdef JEMALLOC_EXPERIMENTAL
-JEMALLOC_EXPORT int	@je_@allocm(void **ptr, size_t *rsize, size_t size,
-    int flags) JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT int	@je_@rallocm(void **ptr, size_t *rsize, size_t size,
-    size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT int	@je_@sallocm(const void *ptr, size_t *rsize, int flags)
-    JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT int	@je_@dallocm(void *ptr, int flags)
-    JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT int	@je_@nallocm(size_t *rsize, size_t size, int flags);
-#endif
deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in (new file)
/*
* void *
* chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
* bool *commit, unsigned arena_ind);
*/
typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned);
/*
* bool
* chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind);
*/
typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned);
/*
* bool
* chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
* unsigned arena_ind);
*/
typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned);
/*
* bool
* chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
* unsigned arena_ind);
*/
typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned);
/*
* bool
* chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
* unsigned arena_ind);
*/
typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);
/*
* bool
* chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
* bool committed, unsigned arena_ind);
*/
typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);
/*
* bool
* chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
* bool committed, unsigned arena_ind);
*/
typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned);
typedef struct {
chunk_alloc_t *alloc;
chunk_dalloc_t *dalloc;
chunk_commit_t *commit;
chunk_decommit_t *decommit;
chunk_purge_t *purge;
chunk_split_t *split;
chunk_merge_t *merge;
} chunk_hooks_t;
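chunk_hooks_t lets an application take over chunk management wholesale; in jemalloc 4.x the table is installed per arena through the "arena.<i>.chunk_hooks" mallctl. A hedged sketch of a pass-through alloc hook built on mmap (error handling and, notably, the alignment contract are simplified; a production hook must return chunks aligned to `alignment`):

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/* Matches the chunk_alloc_t signature documented above. */
static void *
my_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
	void *ret = mmap(new_addr, size, PROT_READ|PROT_WRITE,
	    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

	if (ret == MAP_FAILED)
		return (NULL);
	*zero = true;	/* Fresh anonymous pages are zero-filled. */
	*commit = true;
	return (ret);
}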
deps/jemalloc/include/msvc_compat/stdbool.h → deps/jemalloc/include/msvc_compat/C99/stdbool.h

@@ -5,7 +5,11 @@
 /* MSVC doesn't define _Bool or bool in C, but does have BOOL */
 /* Note this doesn't pass autoconf's test because (bool) 0.5 != true */
+/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as
+ * a built-in type. */
+#ifndef __clang__
 typedef BOOL _Bool;
+#endif

 #define bool _Bool
 #define true 1
...
deps/jemalloc/include/msvc_compat/stdint.h → deps/jemalloc/include/msvc_compat/C99/stdint.h (file moved)
deps/jemalloc/include/msvc_compat/inttypes.h (deleted)
// ISO C9x compliant inttypes.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_INTTYPES_H_ // [
#define _MSC_INTTYPES_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include "stdint.h"
// 7.8 Format conversion of integer types
typedef struct {
	intmax_t quot;
	intmax_t rem;
} imaxdiv_t;
// 7.8.1 Macros for format specifiers
#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
#ifdef _WIN64
# define __PRI64_PREFIX "l"
# define __PRIPTR_PREFIX "l"
#else
# define __PRI64_PREFIX "ll"
# define __PRIPTR_PREFIX
#endif
// The fprintf macros for signed integers are:
#define PRId8 "d"
#define PRIi8 "i"
#define PRIdLEAST8 "d"
#define PRIiLEAST8 "i"
#define PRIdFAST8 "d"
#define PRIiFAST8 "i"
#define PRId16 "hd"
#define PRIi16 "hi"
#define PRIdLEAST16 "hd"
#define PRIiLEAST16 "hi"
#define PRIdFAST16 "hd"
#define PRIiFAST16 "hi"
#define PRId32 "d"
#define PRIi32 "i"
#define PRIdLEAST32 "d"
#define PRIiLEAST32 "i"
#define PRIdFAST32 "d"
#define PRIiFAST32 "i"
#define PRId64 __PRI64_PREFIX "d"
#define PRIi64 __PRI64_PREFIX "i"
#define PRIdLEAST64 __PRI64_PREFIX "d"
#define PRIiLEAST64 __PRI64_PREFIX "i"
#define PRIdFAST64 __PRI64_PREFIX "d"
#define PRIiFAST64 __PRI64_PREFIX "i"
#define PRIdMAX __PRI64_PREFIX "d"
#define PRIiMAX __PRI64_PREFIX "i"
#define PRIdPTR __PRIPTR_PREFIX "d"
#define PRIiPTR __PRIPTR_PREFIX "i"
// The fprintf macros for unsigned integers are:
#define PRIo8 "o"
#define PRIu8 "u"
#define PRIx8 "x"
#define PRIX8 "X"
#define PRIoLEAST8 "o"
#define PRIuLEAST8 "u"
#define PRIxLEAST8 "x"
#define PRIXLEAST8 "X"
#define PRIoFAST8 "o"
#define PRIuFAST8 "u"
#define PRIxFAST8 "x"
#define PRIXFAST8 "X"
#define PRIo16 "ho"
#define PRIu16 "hu"
#define PRIx16 "hx"
#define PRIX16 "hX"
#define PRIoLEAST16 "ho"
#define PRIuLEAST16 "hu"
#define PRIxLEAST16 "hx"
#define PRIXLEAST16 "hX"
#define PRIoFAST16 "ho"
#define PRIuFAST16 "hu"
#define PRIxFAST16 "hx"
#define PRIXFAST16 "hX"
#define PRIo32 "o"
#define PRIu32 "u"
#define PRIx32 "x"
#define PRIX32 "X"
#define PRIoLEAST32 "o"
#define PRIuLEAST32 "u"
#define PRIxLEAST32 "x"
#define PRIXLEAST32 "X"
#define PRIoFAST32 "o"
#define PRIuFAST32 "u"
#define PRIxFAST32 "x"
#define PRIXFAST32 "X"
#define PRIo64 __PRI64_PREFIX "o"
#define PRIu64 __PRI64_PREFIX "u"
#define PRIx64 __PRI64_PREFIX "x"
#define PRIX64 __PRI64_PREFIX "X"
#define PRIoLEAST64 __PRI64_PREFIX "o"
#define PRIuLEAST64 __PRI64_PREFIX "u"
#define PRIxLEAST64 __PRI64_PREFIX "x"
#define PRIXLEAST64 __PRI64_PREFIX "X"
#define PRIoFAST64 __PRI64_PREFIX "o"
#define PRIuFAST64 __PRI64_PREFIX "u"
#define PRIxFAST64 __PRI64_PREFIX "x"
#define PRIXFAST64 __PRI64_PREFIX "X"
#define PRIoMAX __PRI64_PREFIX "o"
#define PRIuMAX __PRI64_PREFIX "u"
#define PRIxMAX __PRI64_PREFIX "x"
#define PRIXMAX __PRI64_PREFIX "X"
#define PRIoPTR __PRIPTR_PREFIX "o"
#define PRIuPTR __PRIPTR_PREFIX "u"
#define PRIxPTR __PRIPTR_PREFIX "x"
#define PRIXPTR __PRIPTR_PREFIX "X"
// The fscanf macros for signed integers are:
#define SCNd8 "d"
#define SCNi8 "i"
#define SCNdLEAST8 "d"
#define SCNiLEAST8 "i"
#define SCNdFAST8 "d"
#define SCNiFAST8 "i"
#define SCNd16 "hd"
#define SCNi16 "hi"
#define SCNdLEAST16 "hd"
#define SCNiLEAST16 "hi"
#define SCNdFAST16 "hd"
#define SCNiFAST16 "hi"
#define SCNd32 "ld"
#define SCNi32 "li"
#define SCNdLEAST32 "ld"
#define SCNiLEAST32 "li"
#define SCNdFAST32 "ld"
#define SCNiFAST32 "li"
#define SCNd64 "I64d"
#define SCNi64 "I64i"
#define SCNdLEAST64 "I64d"
#define SCNiLEAST64 "I64i"
#define SCNdFAST64 "I64d"
#define SCNiFAST64 "I64i"
#define SCNdMAX "I64d"
#define SCNiMAX "I64i"
#ifdef _WIN64 // [
# define SCNdPTR "I64d"
# define SCNiPTR "I64i"
#else // _WIN64 ][
# define SCNdPTR "ld"
# define SCNiPTR "li"
#endif // _WIN64 ]
// The fscanf macros for unsigned integers are:
#define SCNo8 "o"
#define SCNu8 "u"
#define SCNx8 "x"
#define SCNX8 "X"
#define SCNoLEAST8 "o"
#define SCNuLEAST8 "u"
#define SCNxLEAST8 "x"
#define SCNXLEAST8 "X"
#define SCNoFAST8 "o"
#define SCNuFAST8 "u"
#define SCNxFAST8 "x"
#define SCNXFAST8 "X"
#define SCNo16 "ho"
#define SCNu16 "hu"
#define SCNx16 "hx"
#define SCNX16 "hX"
#define SCNoLEAST16 "ho"
#define SCNuLEAST16 "hu"
#define SCNxLEAST16 "hx"
#define SCNXLEAST16 "hX"
#define SCNoFAST16 "ho"
#define SCNuFAST16 "hu"
#define SCNxFAST16 "hx"
#define SCNXFAST16 "hX"
#define SCNo32 "lo"
#define SCNu32 "lu"
#define SCNx32 "lx"
#define SCNX32 "lX"
#define SCNoLEAST32 "lo"
#define SCNuLEAST32 "lu"
#define SCNxLEAST32 "lx"
#define SCNXLEAST32 "lX"
#define SCNoFAST32 "lo"
#define SCNuFAST32 "lu"
#define SCNxFAST32 "lx"
#define SCNXFAST32 "lX"
#define SCNo64 "I64o"
#define SCNu64 "I64u"
#define SCNx64 "I64x"
#define SCNX64 "I64X"
#define SCNoLEAST64 "I64o"
#define SCNuLEAST64 "I64u"
#define SCNxLEAST64 "I64x"
#define SCNXLEAST64 "I64X"
#define SCNoFAST64 "I64o"
#define SCNuFAST64 "I64u"
#define SCNxFAST64 "I64x"
#define SCNXFAST64 "I64X"
#define SCNoMAX "I64o"
#define SCNuMAX "I64u"
#define SCNxMAX "I64x"
#define SCNXMAX "I64X"
#ifdef _WIN64 // [
# define SCNoPTR "I64o"
# define SCNuPTR "I64u"
# define SCNxPTR "I64x"
# define SCNXPTR "I64X"
#else // _WIN64 ][
# define SCNoPTR "lo"
# define SCNuPTR "lu"
# define SCNxPTR "lx"
# define SCNXPTR "lX"
#endif // _WIN64 ]
#endif // __STDC_FORMAT_MACROS ]
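Taken together, the PRI*/SCN* macros above are plain string literals, so they are spliced into format strings by adjacent-literal concatenation. A minimal usage sketch (the values are illustrative, not from the source):

#include <inttypes.h> // or this compat header on older MSVC
#include <stdio.h>

int main(void)
{
	int64_t big = 1234567890123;
	uintptr_t addr = (uintptr_t)&big;
	int64_t parsed;

	// Adjacent string literals merge: "%" PRId64 expands to the
	// correctly length-prefixed specifier for this target.
	printf("big  = %" PRId64 "\n", big);
	printf("addr = 0x%" PRIxPTR "\n", addr);

	if (sscanf("12345", "%" SCNd64, &parsed) == 1)
		printf("parsed = %" PRId64 "\n", parsed);
	return 0;
}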
// 7.8.2 Functions for greatest-width integer types
// 7.8.2.1 The imaxabs function
#define imaxabs _abs64
// 7.8.2.2 The imaxdiv function
// This is a modified version of the div() function from Microsoft's div.c,
// found in %MSVC.NET%\crt\src\div.c
#ifdef STATIC_IMAXDIV // [
static
#else // STATIC_IMAXDIV ][
_inline
#endif // STATIC_IMAXDIV ]
imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
{
	imaxdiv_t result;

	result.quot = numer / denom;
	result.rem = numer % denom;

	if (numer < 0 && result.rem > 0) {
		// did division wrong; must fix up
		++result.quot;
		result.rem -= denom;
	}

	return result;
}
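The fix-up above enforces C99 semantics: division truncates toward zero, so the remainder keeps the sign of the numerator. A small sketch showing the resulting contract for imaxdiv() and imaxabs():

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	// Truncating division: -7 / 2 yields quot == -3, rem == -1,
	// and the identity numer == quot * denom + rem always holds.
	imaxdiv_t d = imaxdiv(-7, 2);

	printf("quot = %" PRIdMAX ", rem = %" PRIdMAX "\n", d.quot, d.rem);
	printf("imaxabs(-42) = %" PRIdMAX "\n", imaxabs((intmax_t)-42));
	return 0;
}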
// 7.8.2.3 The strtoimax and strtoumax functions
#define strtoimax _strtoi64
#define strtoumax _strtoui64
// 7.8.2.4 The wcstoimax and wcstoumax functions
#define wcstoimax _wcstoi64
#define wcstoumax _wcstoui64
#endif // _MSC_INTTYPES_H_ ]
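The strtoimax/strtoumax mappings simply alias MSVC's _strtoi64/_strtoui64, so they follow the usual strto* contract: optional base auto-detection and an end-pointer out-parameter. A brief sketch:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	const char *s = "0x1fff rest";
	char *end;

	// Base 0 auto-detects the 0x prefix; `end` points past the digits.
	intmax_t v = strtoimax(s, &end, 0);
	printf("v = %" PRIdMAX ", rest = \"%s\"\n", v, end);

	uintmax_t u = strtoumax("18446744073709551615", NULL, 10);
	printf("u = %" PRIuMAX "\n", u);
	return 0;
}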
deps/jemalloc/include/msvc_compat/strings.h
View file @ a9951b1b
...
@@ -3,8 +3,9 @@
/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided
 * for both */
#ifdef _MSC_VER
#  include <intrin.h>
#  pragma intrinsic(_BitScanForward)
static __forceinline int ffsl(long x)
{
	unsigned long i;
...
@@ -20,4 +21,9 @@ static __forceinline int ffs(int x)
	return (ffsl(x));
}
#else
#  define ffsl(x) __builtin_ffsl(x)
#  define ffs(x) __builtin_ffs(x)
#endif

#endif /* strings_h */
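The net effect of this change: on MSVC, ffsl() is built on the _BitScanForward intrinsic, while every other compiler uses the GCC-style builtins directly. Either way the find-first-set contract is the same, as this sketch illustrates (it assumes the msvc_compat directory is on the include path):

#include <stdio.h>
#include "strings.h" /* this compat header */

int main(void)
{
	// ffs/ffsl return the 1-based index of the least significant
	// set bit, or 0 when no bit is set.
	printf("ffs(0)     = %d\n", ffs(0));      /* 0 */
	printf("ffs(1)     = %d\n", ffs(1));      /* 1 */
	printf("ffsl(0x50) = %d\n", ffsl(0x50L)); /* 5: lowest set bit is bit 4 */
	return 0;
}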
deps/jemalloc/include/msvc_compat/windows_extra.h
0 → 100644
View file @ a9951b1b
#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
#define MSVC_COMPAT_WINDOWS_EXTRA_H
#ifndef ENOENT
# define ENOENT ERROR_PATH_NOT_FOUND
#endif
#ifndef EINVAL
# define EINVAL ERROR_BAD_ARGUMENTS
#endif
#ifndef EAGAIN
# define EAGAIN ERROR_OUTOFMEMORY
#endif
#ifndef EPERM
# define EPERM ERROR_WRITE_FAULT
#endif
#ifndef EFAULT
# define EFAULT ERROR_INVALID_ADDRESS
#endif
#ifndef ENOMEM
# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#endif
#ifndef ERANGE
# define ERANGE ERROR_INVALID_DATA
#endif
#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
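These guards only fire when the corresponding errno name is missing, in which case portable code that returns, say, ENOMEM ends up yielding a WinAPI ERROR_* value rather than a CRT errno. The practical consequence, sketched below, is that callers must compare against the macro, never a hard-coded number:

#include <stdio.h>
#include <windows.h>
#include "windows_extra.h" /* assumes the msvc_compat directory is on the include path */

int main(void)
{
	// Depending on what <errno.h> already defined, ENOMEM here may be
	// the CRT value or ERROR_NOT_ENOUGH_MEMORY; only the symbolic
	// comparison is portable across the two.
	int err = ENOMEM;

	if (err == ENOMEM)
		printf("allocation failure reported (code %d)\n", err);
	return 0;
}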
deps/jemalloc/jemalloc.pc.in
0 → 100644
View file @ a9951b1b
prefix=@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@
includedir=@includedir@
install_suffix=@install_suffix@
Name: jemalloc
Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support.
URL: http://www.canonware.com/jemalloc
Version: @jemalloc_version@
Cflags: -I${includedir}
Libs: -L${libdir} -ljemalloc${install_suffix}
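With this template installed as jemalloc.pc, consumers can pull the right flags from pkg-config, e.g. cc demo.c $(pkg-config --cflags --libs jemalloc). A minimal sketch, assuming a default build with no function prefix or install suffix:

/* demo.c — build with: cc demo.c $(pkg-config --cflags --libs jemalloc) */
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int main(void)
{
	void *p = malloc(1024); /* routed through jemalloc when linked this way */

	/* jemalloc-specific API: dump allocator statistics to stderr. */
	malloc_stats_print(NULL, NULL, NULL);
	free(p);
	return 0;
}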