ruanhaishen / redis · Commits

Commit fceef8e0, authored Jun 20, 2014 by antirez
Parent: fe596d67 · Changes: 143

Jemalloc updated to 3.6.0.

Not a single bug in about 3 months, and our previous version was too old (3.2.0).
deps/jemalloc/include/jemalloc/internal/huge.h
@@ -17,14 +17,20 @@ extern size_t huge_allocated;

 /* Protects chunk-related data structures. */
 extern malloc_mutex_t	huge_mtx;

-void	*huge_malloc(size_t size, bool zero);
-void	*huge_palloc(size_t size, size_t alignment, bool zero);
-void	*huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+void	*huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
+void	*huge_palloc(size_t size, size_t alignment, bool zero,
+    dss_prec_t dss_prec);
+bool	huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra);
 void	*huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache_dalloc);
+    size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
+#ifdef JEMALLOC_JET
+typedef void (huge_dalloc_junk_t)(void *, size_t);
+extern huge_dalloc_junk_t *huge_dalloc_junk;
+#endif
 void	huge_dalloc(void *ptr, bool unmap);
 size_t	huge_salloc(const void *ptr);
 dss_prec_t	huge_dss_prec_get(arena_t *arena);
 prof_ctx_t	*huge_prof_ctx_get(const void *ptr);
 void	huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
 bool	huge_boot(void);
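Note: the substance of this hunk is that the huge-allocation entry points now take an explicit dss_prec_t, so each arena's sbrk(2)-versus-mmap(2) precedence reaches huge allocations instead of a single global setting; huge_ralloc_no_move() also now returns bool, true signalling that the in-place resize could not be done (matching ixalloc() later in this commit). A minimal caller sketch using only names declared in this header (the wrapper itself is illustrative, not part of the commit):

/* Illustrative: thread an arena's DSS precedence into a huge allocation. */
static void *
example_huge_alloc(arena_t *arena, size_t size)
{

	return (huge_malloc(size, false, huge_dss_prec_get(arena)));
}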
...

deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H

#include <math.h>
#ifdef _WIN32
#  include <windows.h>
...

@@ -54,8 +54,7 @@ typedef intptr_t ssize_t;
 #endif
 #include <fcntl.h>

-#define JEMALLOC_NO_DEMANGLE
-#include "../jemalloc@install_suffix@.h"
+#include "jemalloc_internal_defs.h"

 #ifdef JEMALLOC_UTRACE
 #include <sys/ktrace.h>
...

@@ -66,13 +65,18 @@ typedef intptr_t ssize_t;
 #include <valgrind/memcheck.h>
 #endif

-#include "jemalloc/internal/private_namespace.h"
-
-#ifdef JEMALLOC_CC_SILENCE
-#define UNUSED JEMALLOC_ATTR(unused)
+#define JEMALLOC_NO_DEMANGLE
+#ifdef JEMALLOC_JET
+#  define JEMALLOC_N(n) jet_##n
+#  include "jemalloc/internal/public_namespace.h"
+#  define JEMALLOC_NO_RENAME
+#  include "../jemalloc@install_suffix@.h"
+#  undef JEMALLOC_NO_RENAME
 #else
-#define UNUSED
+#  define JEMALLOC_N(n) @private_namespace@##n
+#  include "../jemalloc@install_suffix@.h"
 #endif
+#include "jemalloc/internal/private_namespace.h"

 static const bool config_debug =
 #ifdef JEMALLOC_DEBUG
...

@@ -221,27 +225,12 @@ static const bool config_ivsalloc =
  * JEMALLOC_H_INLINES : Inline functions.
  */
 /******************************************************************************/
 #define JEMALLOC_H_TYPES

-#define ALLOCM_LG_ALIGN_MASK	((int)0x3f)
-
-#define ZU(z)	((size_t)z)
-
-#ifndef __DECONST
-#  define __DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
-#endif
-
-#ifdef JEMALLOC_DEBUG
-   /* Disable inlining to make debugging easier. */
-#  define JEMALLOC_INLINE
-#  define inline
-#else
-#  define JEMALLOC_ENABLE_INLINE
-#  define JEMALLOC_INLINE static inline
-#  ifdef _MSC_VER
-#    define inline _inline
-#  endif
-#endif
+#include "jemalloc/internal/jemalloc_internal_macros.h"
+
+#define MALLOCX_LG_ALIGN_MASK	((int)0x3f)
+#define ALLOCM_LG_ALIGN_MASK	((int)0x3f)

 /* Smallest size class to support. */
 #define LG_TINY_MIN		3
...

@@ -270,6 +259,9 @@ static const bool config_ivsalloc =
 #  ifdef __arm__
 #    define LG_QUANTUM		3
 #  endif
+#  ifdef __aarch64__
+#    define LG_QUANTUM		4
+#  endif
 #  ifdef __hppa__
 #    define LG_QUANTUM		4
 #  endif
...

@@ -279,7 +271,7 @@ static const bool config_ivsalloc =
 #  ifdef __powerpc__
 #    define LG_QUANTUM		4
 #  endif
-#  ifdef __s390x__
+#  ifdef __s390__
 #    define LG_QUANTUM		4
 #  endif
 #  ifdef __SH4__
...

@@ -359,7 +351,11 @@ static const bool config_ivsalloc =
 #    include <malloc.h>
 #    define alloca _alloca
 #  else
-#    include <alloca.h>
+#    ifdef JEMALLOC_HAS_ALLOCA_H
+#      include <alloca.h>
+#    else
+#      include <stdlib.h>
+#    endif
 #  endif
 #  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * count)
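Note: VARIABLE_ARRAY is the consumer of this change: on compilers without C99 variable-length arrays it expands to an alloca() call, and the new JEMALLOC_HAS_ALLOCA_H probe (declared in jemalloc_internal_defs.h.in, added below) selects the header that declares alloca(). A hypothetical use, just to show the expansion:

/* Illustrative: stack-scoped scratch space via VARIABLE_ARRAY. */
static uint64_t
example_sum(const uint64_t *src, size_t n)
{
	size_t i;
	uint64_t sum;
	VARIABLE_ARRAY(uint64_t, scratch, n);	/* uint64_t *scratch = alloca(...) */

	for (i = 0; i < n; i++)
		scratch[i] = src[i];
	for (sum = 0, i = 0; i < n; i++)
		sum += scratch[i];
	return (sum);
}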
...

@@ -428,15 +424,18 @@ static const bool config_ivsalloc =
 } while (0)
 #else
 #define RUNNING_ON_VALGRIND	((unsigned)0)
-#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
-#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
-#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
-#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
-#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
-#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
+    do {} while (0)
+#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
+    do {} while (0)
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
+#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
+#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
+#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
+#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
 #define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
-    old_rzsize, zero)
-#define JEMALLOC_VALGRIND_FREE(ptr, rzsize)
+    old_rzsize, zero) do {} while (0)
+#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
 #endif

 #include "jemalloc/internal/util.h"
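Note: the no-op stubs are padded out to do {} while (0) rather than expanding to nothing so that every use remains a single, complete C statement: the macro consumes its trailing semicolon and stays quiet under warnings such as -Wempty-body, behaving exactly like the real Valgrind macros it replaces. A standalone illustration (not code from this commit):

#include <stdio.h>

#define NOOP_EMPTY
#define NOOP_STMT	do {} while (0)

int
main(void)
{
	int cond = 1;

	if (cond)
		NOOP_STMT;	/* one real statement; the else binds cleanly */
	else
		printf("else branch\n");
	/*
	 * NOOP_EMPTY here would leave a bare ";" as the if body, which
	 * compilers flag as a (possibly accidental) empty statement.
	 */
	return (0);
}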
...

@@ -463,7 +462,7 @@ static const bool config_ivsalloc =  (whitespace-only change)
 #undef JEMALLOC_H_TYPES
 /******************************************************************************/
 #define JEMALLOC_H_STRUCTS

 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
...

@@ -492,14 +491,14 @@ typedef struct {
 	uint64_t	deallocated;
 } thread_allocated_t;

 /*
- * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
+ * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
  * argument.
  */
-#define THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})
+#define THREAD_ALLOCATED_INITIALIZER	JEMALLOC_ARG_CONCAT({0, 0})

 #undef JEMALLOC_H_STRUCTS
 /******************************************************************************/
 #define JEMALLOC_H_EXTERNS

 extern bool	opt_abort;
 extern bool	opt_junk;
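Note on the rename above: the wrapper is needed because {0, 0} contains a comma, which the preprocessor would otherwise split into two macro arguments; a variadic pass-through macro reassembles the pieces. A minimal illustration of the trick (this ARG_CONCAT definition is a sketch of the idea, not necessarily jemalloc's exact one):

/* Illustrative: forwarding a braced initializer through one macro argument. */
#define ARG_CONCAT(...) __VA_ARGS__
#define EXAMPLE_INITIALIZER ARG_CONCAT({0, 0})

thread_allocated_t example = EXAMPLE_INITIALIZER;	/* expands to {0, 0} */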
...

@@ -559,7 +558,7 @@ void jemalloc_postfork_child(void);  (whitespace-only change)
 #undef JEMALLOC_H_EXTERNS
 /******************************************************************************/
 #define JEMALLOC_H_INLINES

 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
...

@@ -591,13 +590,14 @@ arena_t *choose_arena(arena_t *arena);
  * for allocations.
  */
 malloc_tsd_externs(arenas, arena_t *)
-malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)
+malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
+    arenas_cleanup)

 /*
  * Compute usable size that would result from allocating an object with the
  * specified size.
  */
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
 s2u(size_t size)
 {
...

@@ -612,7 +612,7 @@ s2u(size_t size)
  * Compute usable size that would result from allocating an object with the
  * specified size and alignment.
  */
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
 sa2u(size_t size, size_t alignment)
 {
 	size_t usize;
...

@@ -733,32 +733,36 @@ choose_arena(arena_t *arena)
 #include "jemalloc/internal/quarantine.h"

 #ifndef JEMALLOC_ENABLE_INLINE
-void	*imallocx(size_t size, bool try_tcache, arena_t *arena);
+void	*imalloct(size_t size, bool try_tcache, arena_t *arena);
 void	*imalloc(size_t size);
-void	*icallocx(size_t size, bool try_tcache, arena_t *arena);
+void	*icalloct(size_t size, bool try_tcache, arena_t *arena);
 void	*icalloc(size_t size);
-void	*ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+void	*ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
     arena_t *arena);
 void	*ipalloc(size_t usize, size_t alignment, bool zero);
 size_t	isalloc(const void *ptr, bool demote);
 size_t	ivsalloc(const void *ptr, bool demote);
 size_t	u2rz(size_t usize);
 size_t	p2rz(const void *ptr);
-void	idallocx(void *ptr, bool try_tcache);
+void	idalloct(void *ptr, bool try_tcache);
 void	idalloc(void *ptr);
-void	iqallocx(void *ptr, bool try_tcache);
+void	iqalloct(void *ptr, bool try_tcache);
 void	iqalloc(void *ptr);
-void	*irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
-    bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
-    arena_t *arena);
+void	*iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
+    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
+    arena_t *arena);
+void	*iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
+    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
 void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
-    bool zero, bool no_move);
+    bool zero);
+bool	ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
+    bool zero);
 malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
 #endif

 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_INLINE void *
-imallocx(size_t size, bool try_tcache, arena_t *arena)
+JEMALLOC_ALWAYS_INLINE void *
+imalloct(size_t size, bool try_tcache, arena_t *arena)
 {

 	assert(size != 0);
...

@@ -766,35 +770,35 @@ imallocx(size_t size, bool try_tcache, arena_t *arena)
 	if (size <= arena_maxclass)
 		return (arena_malloc(arena, size, false, try_tcache));
 	else
-		return (huge_malloc(size, false));
+		return (huge_malloc(size, false, huge_dss_prec_get(arena)));
 }

-JEMALLOC_INLINE void *
+JEMALLOC_ALWAYS_INLINE void *
 imalloc(size_t size)
 {

-	return (imallocx(size, true, NULL));
+	return (imalloct(size, true, NULL));
 }

-JEMALLOC_INLINE void *
-icallocx(size_t size, bool try_tcache, arena_t *arena)
+JEMALLOC_ALWAYS_INLINE void *
+icalloct(size_t size, bool try_tcache, arena_t *arena)
 {

 	if (size <= arena_maxclass)
 		return (arena_malloc(arena, size, true, try_tcache));
 	else
-		return (huge_malloc(size, true));
+		return (huge_malloc(size, true, huge_dss_prec_get(arena)));
 }

-JEMALLOC_INLINE void *
+JEMALLOC_ALWAYS_INLINE void *
 icalloc(size_t size)
 {

-	return (icallocx(size, true, NULL));
+	return (icalloct(size, true, NULL));
 }

-JEMALLOC_INLINE void *
-ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+JEMALLOC_ALWAYS_INLINE void *
+ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
     arena_t *arena)
 {
 	void *ret;
...

@@ -809,20 +813,20 @@ ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
 			ret = arena_palloc(choose_arena(arena), usize,
 			    alignment, zero);
 		} else if (alignment <= chunksize)
-			ret = huge_malloc(usize, zero);
+			ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
 		else
-			ret = huge_palloc(usize, alignment, zero);
+			ret = huge_palloc(usize, alignment, zero,
+			    huge_dss_prec_get(arena));
 	}

 	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
 	return (ret);
 }

-JEMALLOC_INLINE void *
+JEMALLOC_ALWAYS_INLINE void *
 ipalloc(size_t usize, size_t alignment, bool zero)
 {

-	return (ipallocx(usize, alignment, zero, true, NULL));
+	return (ipalloct(usize, alignment, zero, true, NULL));
 }

 /*
...

@@ -830,7 +834,7 @@ ipalloc(size_t usize, size_t alignment, bool zero)
  *   void *ptr = [...]
  *   size_t sz = isalloc(ptr, config_prof);
  */
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
 isalloc(const void *ptr, bool demote)
 {
 	size_t ret;
...

@@ -849,12 +853,12 @@ isalloc(const void *ptr, bool demote)
 	return (ret);
 }

-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
 ivsalloc(const void *ptr, bool demote)
 {

 	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
-	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
+	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
 		return (0);

 	return (isalloc(ptr, demote));
...

@@ -882,8 +886,8 @@ p2rz(const void *ptr)
 	return (u2rz(usize));
 }

-JEMALLOC_INLINE void
-idallocx(void *ptr, bool try_tcache)
+JEMALLOC_ALWAYS_INLINE void
+idalloct(void *ptr, bool try_tcache)
 {
 	arena_chunk_t *chunk;
...

@@ -896,35 +900,67 @@ idallocx(void *ptr, bool try_tcache)
 		huge_dalloc(ptr, true);
 }

-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
 idalloc(void *ptr)
 {

-	idallocx(ptr, true);
+	idalloct(ptr, true);
 }

-JEMALLOC_INLINE void
-iqallocx(void *ptr, bool try_tcache)
+JEMALLOC_ALWAYS_INLINE void
+iqalloct(void *ptr, bool try_tcache)
 {

 	if (config_fill && opt_quarantine)
 		quarantine(ptr);
 	else
-		idallocx(ptr, try_tcache);
+		idalloct(ptr, try_tcache);
 }

-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
 iqalloc(void *ptr)
 {

-	iqallocx(ptr, true);
+	iqalloct(ptr, true);
 }

-JEMALLOC_INLINE void *
-irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
-    bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
+JEMALLOC_ALWAYS_INLINE void *
+iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
+    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
+    arena_t *arena)
+{
+	void *p;
+	size_t usize, copysize;
+
+	usize = sa2u(size + extra, alignment);
+	if (usize == 0)
+		return (NULL);
+	p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
+	if (p == NULL) {
+		if (extra == 0)
+			return (NULL);
+		/* Try again, without extra this time. */
+		usize = sa2u(size, alignment);
+		if (usize == 0)
+			return (NULL);
+		p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
+		if (p == NULL)
+			return (NULL);
+	}
+	/*
+	 * Copy at most size bytes (not size+extra), since the caller has no
+	 * expectation that the extra bytes will be reliably preserved.
+	 */
+	copysize = (size < oldsize) ? size : oldsize;
+	memcpy(p, ptr, copysize);
+	iqalloct(ptr, try_tcache_dalloc);
+	return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
+    bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
 {
-	void *ret;
 	size_t oldsize;

 	assert(ptr != NULL);
...

@@ -934,72 +970,54 @@ irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
 	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
 	    != 0) {
-		size_t usize, copysize;
-
 		/*
 		 * Existing object alignment is inadequate; allocate new space
 		 * and copy.
 		 */
-		if (no_move)
-			return (NULL);
-		usize = sa2u(size + extra, alignment);
-		if (usize == 0)
-			return (NULL);
-		ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
-		if (ret == NULL) {
-			if (extra == 0)
-				return (NULL);
-			/* Try again, without extra this time. */
-			usize = sa2u(size, alignment);
-			if (usize == 0)
-				return (NULL);
-			ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
-			    arena);
-			if (ret == NULL)
-				return (NULL);
-		}
-		/*
-		 * Copy at most size bytes (not size+extra), since the caller
-		 * has no expectation that the extra bytes will be reliably
-		 * preserved.
-		 */
-		copysize = (size < oldsize) ? size : oldsize;
-		memcpy(ret, ptr, copysize);
-		iqallocx(ptr, try_tcache_dalloc);
-		return (ret);
+		return (iralloct_realign(ptr, oldsize, size, extra, alignment,
+		    zero, try_tcache_alloc, try_tcache_dalloc, arena));
 	}

-	if (no_move) {
-		if (size <= arena_maxclass) {
-			return (arena_ralloc_no_move(ptr, oldsize, size,
-			    extra, zero));
-		} else {
-			return (huge_ralloc_no_move(ptr, oldsize, size,
-			    extra));
-		}
+	if (size + extra <= arena_maxclass) {
+		return (arena_ralloc(arena, ptr, oldsize, size, extra,
+		    alignment, zero, try_tcache_alloc, try_tcache_dalloc));
 	} else {
-		if (size + extra <= arena_maxclass) {
-			return (arena_ralloc(arena, ptr, oldsize, size, extra,
-			    alignment, zero, try_tcache_alloc,
-			    try_tcache_dalloc));
-		} else {
-			return (huge_ralloc(ptr, oldsize, size, extra,
-			    alignment, zero, try_tcache_dalloc));
-		}
+		return (huge_ralloc(ptr, oldsize, size, extra,
+		    alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
 	}
 }

-JEMALLOC_INLINE void *
-iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
-    bool no_move)
+JEMALLOC_ALWAYS_INLINE void *
+iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
 {
+
+	return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
+{
+	size_t oldsize;

 	assert(ptr != NULL);
 	assert(size != 0);

-	return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
-	    NULL));
+	oldsize = isalloc(ptr, config_prof);
+	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+	    != 0) {
+		/* Existing object alignment is inadequate. */
+		return (true);
+	}
+
+	if (size <= arena_maxclass)
+		return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
+	else
+		return (huge_ralloc_no_move(ptr, oldsize, size, extra));
 }

 malloc_tsd_externs(thread_allocated, thread_allocated_t)
-malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
+malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
 	THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
 #endif
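Note: the net effect of the two hunks above is to split the old no_move flag into two entry points: iralloc() may move the object, while ixalloc() only ever resizes in place and reports failure as true. A sketch of a hypothetical caller (not from this commit) that prefers in-place growth:

/* Illustrative: grow an allocation in place when possible, else move it. */
static void *
example_grow(void *ptr, size_t newsize)
{

	if (ixalloc(ptr, newsize, 0, 0, false) == false)
		return (ptr);	/* resized in place; pointer unchanged */
	return (iralloc(ptr, newsize, 0, 0, false));	/* may move */
}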
...

deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in  (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
#undef JEMALLOC_PREFIX
#undef JEMALLOC_CPREFIX
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#undef JEMALLOC_PRIVATE_NAMESPACE
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#undef CPU_SPINWAIT
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
#undef JEMALLOC_ATOMIC9
/*
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
* documented in the atomic(3) manual page.
*/
#undef JEMALLOC_OSATOMIC
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
#undef JEMALLOC_OSSPIN
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
#undef JEMALLOC_MALLOC_THREAD_CLEANUP
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
#undef JEMALLOC_THREADED_INIT
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
#undef JEMALLOC_MUTEX_INIT_CB
/* Defined if sbrk() is supported. */
#undef JEMALLOC_HAVE_SBRK
/* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL
/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
#undef JEMALLOC_CC_SILENCE
/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
#undef JEMALLOC_CODE_COVERAGE
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
#undef JEMALLOC_DEBUG
/* JEMALLOC_STATS enables statistics calculation. */
#undef JEMALLOC_STATS
/* JEMALLOC_PROF enables allocation profiling. */
#undef JEMALLOC_PROF
/* Use libunwind for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBUNWIND
/* Use libgcc for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBGCC
/* Use gcc intrinsics for profile backtracing if defined. */
#undef JEMALLOC_PROF_GCC
/*
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
* This makes it possible to allocate/deallocate objects without any locking
* when the cache is in the steady state.
*/
#undef JEMALLOC_TCACHE
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
* segment (DSS).
*/
#undef JEMALLOC_DSS
/* Support memory filling (junk/zero/quarantine/redzone). */
#undef JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
#undef JEMALLOC_UTRACE
/* Support Valgrind. */
#undef JEMALLOC_VALGRIND
/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC
/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK
/* One page is 2^STATIC_PAGE_SHIFT bytes. */
#undef STATIC_PAGE_SHIFT
/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
* later reuse. This is disabled by default on Linux because common sequences
* of mmap()/munmap() calls will cause virtual memory map holes.
*/
#undef JEMALLOC_MUNMAP
/*
* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is
* disabled by default because it is Linux-specific and it will cause virtual
* memory map holes, much like munmap(2) does.
*/
#undef JEMALLOC_MREMAP
/* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS
/*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them.
*/
#undef JEMALLOC_IVSALLOC
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
#undef JEMALLOC_ZONE_VERSION
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched.
* madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
* unused, such that they will be discarded rather
* than swapped out.
*/
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_FREE
/*
* Define if operating system has alloca.h header.
*/
#undef JEMALLOC_HAS_ALLOCA_H
/* C99 restrict keyword supported. */
#undef JEMALLOC_HAS_RESTRICT
/* For use by hash code. */
#undef JEMALLOC_BIG_ENDIAN
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#undef LG_SIZEOF_INT
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
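Note: this header is an autoconf template in the usual autoheader style: each #undef above is a slot that configure rewrites when generating jemalloc_internal_defs.h. A sketch of plausible generated output (values illustrative, not taken from any platform in this commit):

/* jemalloc_internal_defs.h -- illustrative generated values */
#define JEMALLOC_PRIVATE_NAMESPACE je_
#define CPU_SPINWAIT __asm__ volatile("pause")
#define JEMALLOC_TLS
#define JEMALLOC_HAS_ALLOCA_H 1
#define STATIC_PAGE_SHIFT 12	/* 4 KiB pages */
#define LG_SIZEOF_INT 2		/* sizeof(int) == 4 */
#define LG_SIZEOF_LONG 3	/* sizeof(long) == 8 */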
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h  (new file, mode 100644)
/*
* JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
* functions that are static inline functions if inlining is enabled, and
* single-definition library-private functions if inlining is disabled.
*
* JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
* which case the denoted functions are always static, regardless of whether
* inlining is enabled.
*/
#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
/* Disable inlining to make debugging/profiling easier. */
# define JEMALLOC_ALWAYS_INLINE
# define JEMALLOC_ALWAYS_INLINE_C static
# define JEMALLOC_INLINE
# define JEMALLOC_INLINE_C static
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ALWAYS_INLINE \
static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
# define JEMALLOC_ALWAYS_INLINE_C \
static inline JEMALLOC_ATTR(always_inline)
# else
# define JEMALLOC_ALWAYS_INLINE static inline
# define JEMALLOC_ALWAYS_INLINE_C static inline
# endif
# define JEMALLOC_INLINE static inline
# define JEMALLOC_INLINE_C static inline
# ifdef _MSC_VER
# define inline _inline
# endif
#endif
#ifdef JEMALLOC_CC_SILENCE
# define UNUSED JEMALLOC_ATTR(unused)
#else
# define UNUSED
#endif
#define ZU(z) ((size_t)z)
#define QU(q) ((uint64_t)q)
#define QI(q) ((int64_t)q)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#ifndef JEMALLOC_HAS_RESTRICT
# define restrict
#endif
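Note: a minimal sketch of how a header-inline function consumes these macros, mirroring the prototype/definition pattern visible in jemalloc_internal.h.in above (the function itself is hypothetical):

#ifndef JEMALLOC_ENABLE_INLINE
size_t	example_round8(size_t size);	/* debug: real, single-definition symbol */
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE size_t
example_round8(size_t size)
{

	/* Round up to a multiple of 8; purely illustrative. */
	return ((size + 7) & ~(size_t)7);
}
#endif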
deps/jemalloc/include/jemalloc/internal/private_namespace.h  (deleted, mode 100644 → 0; former contents follow)
#define a0calloc JEMALLOC_N(a0calloc)
#define a0free JEMALLOC_N(a0free)
#define a0malloc JEMALLOC_N(a0malloc)
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
#define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_new JEMALLOC_N(arena_new)
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
#define arena_prefork JEMALLOC_N(arena_prefork)
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
#define arena_purge_all JEMALLOC_N(arena_purge_all)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas JEMALLOC_N(arenas)
#define arenas_booted JEMALLOC_N(arenas_booted)
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
#define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_initialized JEMALLOC_N(arenas_initialized)
#define arenas_lock JEMALLOC_N(arenas_lock)
#define arenas_tls JEMALLOC_N(arenas_tls)
#define arenas_tsd JEMALLOC_N(arenas_tsd)
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
#define atomic_add_u JEMALLOC_N(atomic_add_u)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
#define atomic_add_z JEMALLOC_N(atomic_add_z)
#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
#define base_alloc JEMALLOC_N(base_alloc)
#define base_boot JEMALLOC_N(base_boot)
#define base_calloc JEMALLOC_N(base_calloc)
#define base_node_alloc JEMALLOC_N(base_node_alloc)
#define base_node_dealloc JEMALLOC_N(base_node_dealloc)
#define base_postfork_child JEMALLOC_N(base_postfork_child)
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
#define base_prefork JEMALLOC_N(base_prefork)
#define bitmap_full JEMALLOC_N(bitmap_full)
#define bitmap_get JEMALLOC_N(bitmap_get)
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups)
#define bitmap_init JEMALLOC_N(bitmap_init)
#define bitmap_set JEMALLOC_N(bitmap_set)
#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
#define bitmap_size JEMALLOC_N(bitmap_size)
#define bitmap_unset JEMALLOC_N(bitmap_unset)
#define bt_init JEMALLOC_N(bt_init)
#define buferror JEMALLOC_N(buferror)
#define choose_arena JEMALLOC_N(choose_arena)
#define choose_arena_hard JEMALLOC_N(choose_arena_hard)
#define chunk_alloc JEMALLOC_N(chunk_alloc)
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
#define chunk_boot JEMALLOC_N(chunk_boot)
#define chunk_dealloc JEMALLOC_N(chunk_dealloc)
#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_npages JEMALLOC_N(chunk_npages)
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
#define chunk_prefork JEMALLOC_N(chunk_prefork)
#define chunk_unmap JEMALLOC_N(chunk_unmap)
#define chunks_mtx JEMALLOC_N(chunks_mtx)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize)
#define chunksize_mask JEMALLOC_N(chunksize_mask)
#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
#define ckh_count JEMALLOC_N(ckh_count)
#define ckh_delete JEMALLOC_N(ckh_delete)
#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert)
#define ckh_insert JEMALLOC_N(ckh_insert)
#define ckh_isearch JEMALLOC_N(ckh_isearch)
#define ckh_iter JEMALLOC_N(ckh_iter)
#define ckh_new JEMALLOC_N(ckh_new)
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
#define ckh_rebuild JEMALLOC_N(ckh_rebuild)
#define ckh_remove JEMALLOC_N(ckh_remove)
#define ckh_search JEMALLOC_N(ckh_search)
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
#define ckh_try_insert JEMALLOC_N(ckh_try_insert)
#define ctl_boot JEMALLOC_N(ctl_boot)
#define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname)
#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
#define ctl_prefork JEMALLOC_N(ctl_prefork)
#define dss_prec_names JEMALLOC_N(dss_prec_names)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse)
#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start)
#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last)
#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new)
#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next)
#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch)
#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev)
#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch)
#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove)
#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter)
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse)
#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start)
#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last)
#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new)
#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next)
#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch)
#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev)
#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch)
#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove)
#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter)
#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
#define get_errno JEMALLOC_N(get_errno)
#define hash JEMALLOC_N(hash)
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_mtx JEMALLOC_N(huge_mtx)
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
#define huge_palloc JEMALLOC_N(huge_palloc)
#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
#define huge_prefork JEMALLOC_N(huge_prefork)
#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
#define huge_ralloc JEMALLOC_N(huge_ralloc)
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iallocm JEMALLOC_N(iallocm)
#define icalloc JEMALLOC_N(icalloc)
#define icallocx JEMALLOC_N(icallocx)
#define idalloc JEMALLOC_N(idalloc)
#define idallocx JEMALLOC_N(idallocx)
#define imalloc JEMALLOC_N(imalloc)
#define imallocx JEMALLOC_N(imallocx)
#define ipalloc JEMALLOC_N(ipalloc)
#define ipallocx JEMALLOC_N(ipallocx)
#define iqalloc JEMALLOC_N(iqalloc)
#define iqallocx JEMALLOC_N(iqallocx)
#define iralloc JEMALLOC_N(iralloc)
#define irallocx JEMALLOC_N(irallocx)
#define isalloc JEMALLOC_N(isalloc)
#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
#define malloc_printf JEMALLOC_N(malloc_printf)
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup)
#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
#define malloc_write JEMALLOC_N(malloc_write)
#define map_bias JEMALLOC_N(map_bias)
#define mb_write JEMALLOC_N(mb_write)
#define mutex_boot JEMALLOC_N(mutex_boot)
#define narenas_auto JEMALLOC_N(narenas_auto)
#define narenas_total JEMALLOC_N(narenas_total)
#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define opt_abort JEMALLOC_N(opt_abort)
#define opt_junk JEMALLOC_N(opt_junk)
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
#define opt_narenas JEMALLOC_N(opt_narenas)
#define opt_prof JEMALLOC_N(opt_prof)
#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
#define opt_prof_active JEMALLOC_N(opt_prof_active)
#define opt_prof_final JEMALLOC_N(opt_prof_final)
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
#define opt_quarantine JEMALLOC_N(opt_quarantine)
#define opt_redzone JEMALLOC_N(opt_redzone)
#define opt_stats_print JEMALLOC_N(opt_stats_print)
#define opt_tcache JEMALLOC_N(opt_tcache)
#define opt_utrace JEMALLOC_N(opt_utrace)
#define opt_valgrind JEMALLOC_N(opt_valgrind)
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
#define p2rz JEMALLOC_N(p2rz)
#define pages_purge JEMALLOC_N(pages_purge)
#define pow2_ceil JEMALLOC_N(pow2_ceil)
#define prof_backtrace JEMALLOC_N(prof_backtrace)
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
#define prof_free JEMALLOC_N(prof_free)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_idump JEMALLOC_N(prof_idump)
#define prof_interval JEMALLOC_N(prof_interval)
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
#define prof_prefork JEMALLOC_N(prof_prefork)
#define prof_promote JEMALLOC_N(prof_promote)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
#define quarantine JEMALLOC_N(quarantine)
#define quarantine_boot JEMALLOC_N(quarantine_boot)
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
#define register_zone JEMALLOC_N(register_zone)
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new)
#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
#define rtree_prefork JEMALLOC_N(rtree_prefork)
#define rtree_set JEMALLOC_N(rtree_set)
#define s2u JEMALLOC_N(s2u)
#define sa2u JEMALLOC_N(sa2u)
#define set_errno JEMALLOC_N(set_errno)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
#define stats_chunks JEMALLOC_N(stats_chunks)
#define stats_print JEMALLOC_N(stats_print)
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
#define tcache_boot0 JEMALLOC_N(tcache_boot0)
#define tcache_boot1 JEMALLOC_N(tcache_boot1)
#define tcache_booted JEMALLOC_N(tcache_booted)
#define tcache_create JEMALLOC_N(tcache_create)
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
#define tcache_destroy JEMALLOC_N(tcache_destroy)
#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
#define tcache_flush JEMALLOC_N(tcache_flush)
#define tcache_get JEMALLOC_N(tcache_get)
#define tcache_initialized JEMALLOC_N(tcache_initialized)
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
#define tcache_salloc JEMALLOC_N(tcache_salloc)
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
#define tcache_tls JEMALLOC_N(tcache_tls)
#define tcache_tsd JEMALLOC_N(tcache_tsd)
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
#define u2rz JEMALLOC_N(u2rz)
deps/jemalloc/include/jemalloc/internal/private_namespace.sh  (new file, mode 100755)
#!/bin/sh

for symbol in `cat $1` ; do
	echo "#define ${symbol} JEMALLOC_N(${symbol})"
done
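Note: at build time this script is presumably run over private_symbols.txt (added below) to generate the private_namespace.h that this commit deletes as a hand-maintained file, emitting one "#define symbol JEMALLOC_N(symbol)" line per symbol; the companion private_unnamespace.sh further down emits the matching #undef lines. The exact Makefile invocation is not part of this diff.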
deps/jemalloc/include/jemalloc/internal/private_symbols.txt  (new file, mode 100644)
a0calloc
a0free
a0malloc
arena_alloc_junk_small
arena_bin_index
arena_bin_info
arena_boot
arena_dalloc
arena_dalloc_bin
arena_dalloc_bin_locked
arena_dalloc_junk_large
arena_dalloc_junk_small
arena_dalloc_large
arena_dalloc_large_locked
arena_dalloc_small
arena_dss_prec_get
arena_dss_prec_set
arena_malloc
arena_malloc_large
arena_malloc_small
arena_mapbits_allocated_get
arena_mapbits_binind_get
arena_mapbits_dirty_get
arena_mapbits_get
arena_mapbits_large_binind_set
arena_mapbits_large_get
arena_mapbits_large_set
arena_mapbits_large_size_get
arena_mapbits_small_runind_get
arena_mapbits_small_set
arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
arena_mapbits_unzeroed_set
arena_mapbitsp_get
arena_mapbitsp_read
arena_mapbitsp_write
arena_mapp_get
arena_maxclass
arena_new
arena_palloc
arena_postfork_child
arena_postfork_parent
arena_prefork
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
arena_prof_ctx_get
arena_prof_ctx_set
arena_prof_promoted
arena_ptr_small_binind_get
arena_purge_all
arena_quarantine_junk_small
arena_ralloc
arena_ralloc_junk_large
arena_ralloc_no_move
arena_redzone_corruption
arena_run_regind
arena_salloc
arena_stats_merge
arena_tcache_fill_small
arenas
arenas_booted
arenas_cleanup
arenas_extend
arenas_initialized
arenas_lock
arenas_tls
arenas_tsd
arenas_tsd_boot
arenas_tsd_cleanup_wrapper
arenas_tsd_get
arenas_tsd_get_wrapper
arenas_tsd_init_head
arenas_tsd_set
atomic_add_u
atomic_add_uint32
atomic_add_uint64
atomic_add_z
atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
base_alloc
base_boot
base_calloc
base_node_alloc
base_node_dealloc
base_postfork_child
base_postfork_parent
base_prefork
bitmap_full
bitmap_get
bitmap_info_init
bitmap_info_ngroups
bitmap_init
bitmap_set
bitmap_sfu
bitmap_size
bitmap_unset
bt_init
buferror
choose_arena
choose_arena_hard
chunk_alloc
chunk_alloc_dss
chunk_alloc_mmap
chunk_boot
chunk_dealloc
chunk_dealloc_mmap
chunk_dss_boot
chunk_dss_postfork_child
chunk_dss_postfork_parent
chunk_dss_prec_get
chunk_dss_prec_set
chunk_dss_prefork
chunk_in_dss
chunk_npages
chunk_postfork_child
chunk_postfork_parent
chunk_prefork
chunk_unmap
chunks_mtx
chunks_rtree
chunksize
chunksize_mask
ckh_bucket_search
ckh_count
ckh_delete
ckh_evict_reloc_insert
ckh_insert
ckh_isearch
ckh_iter
ckh_new
ckh_pointer_hash
ckh_pointer_keycomp
ckh_rebuild
ckh_remove
ckh_search
ckh_string_hash
ckh_string_keycomp
ckh_try_bucket_insert
ckh_try_insert
ctl_boot
ctl_bymib
ctl_byname
ctl_nametomib
ctl_postfork_child
ctl_postfork_parent
ctl_prefork
dss_prec_names
extent_tree_ad_first
extent_tree_ad_insert
extent_tree_ad_iter
extent_tree_ad_iter_recurse
extent_tree_ad_iter_start
extent_tree_ad_last
extent_tree_ad_new
extent_tree_ad_next
extent_tree_ad_nsearch
extent_tree_ad_prev
extent_tree_ad_psearch
extent_tree_ad_remove
extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
extent_tree_szad_first
extent_tree_szad_insert
extent_tree_szad_iter
extent_tree_szad_iter_recurse
extent_tree_szad_iter_start
extent_tree_szad_last
extent_tree_szad_new
extent_tree_szad_next
extent_tree_szad_nsearch
extent_tree_szad_prev
extent_tree_szad_psearch
extent_tree_szad_remove
extent_tree_szad_reverse_iter
extent_tree_szad_reverse_iter_recurse
extent_tree_szad_reverse_iter_start
extent_tree_szad_search
get_errno
hash
hash_fmix_32
hash_fmix_64
hash_get_block_32
hash_get_block_64
hash_rotl_32
hash_rotl_64
hash_x64_128
hash_x86_128
hash_x86_32
huge_allocated
huge_boot
huge_dalloc
huge_dalloc_junk
huge_dss_prec_get
huge_malloc
huge_mtx
huge_ndalloc
huge_nmalloc
huge_palloc
huge_postfork_child
huge_postfork_parent
huge_prefork
huge_prof_ctx_get
huge_prof_ctx_set
huge_ralloc
huge_ralloc_no_move
huge_salloc
iallocm
icalloc
icalloct
idalloc
idalloct
imalloc
imalloct
ipalloc
ipalloct
iqalloc
iqalloct
iralloc
iralloct
iralloct_realign
isalloc
isthreaded
ivsalloc
ixalloc
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
malloc_cprintf
malloc_mutex_init
malloc_mutex_lock
malloc_mutex_postfork_child
malloc_mutex_postfork_parent
malloc_mutex_prefork
malloc_mutex_unlock
malloc_printf
malloc_snprintf
malloc_strtoumax
malloc_tsd_boot
malloc_tsd_cleanup_register
malloc_tsd_dalloc
malloc_tsd_malloc
malloc_tsd_no_cleanup
malloc_vcprintf
malloc_vsnprintf
malloc_write
map_bias
mb_write
mutex_boot
narenas_auto
narenas_total
narenas_total_get
ncpus
nhbins
opt_abort
opt_dss
opt_junk
opt_lg_chunk
opt_lg_dirty_mult
opt_lg_prof_interval
opt_lg_prof_sample
opt_lg_tcache_max
opt_narenas
opt_prof
opt_prof_accum
opt_prof_active
opt_prof_final
opt_prof_gdump
opt_prof_leak
opt_prof_prefix
opt_quarantine
opt_redzone
opt_stats_print
opt_tcache
opt_utrace
opt_valgrind
opt_xmalloc
opt_zero
p2rz
pages_purge
pow2_ceil
prof_backtrace
prof_boot0
prof_boot1
prof_boot2
prof_bt_count
prof_ctx_get
prof_ctx_set
prof_dump_open
prof_free
prof_gdump
prof_idump
prof_interval
prof_lookup
prof_malloc
prof_mdump
prof_postfork_child
prof_postfork_parent
prof_prefork
prof_promote
prof_realloc
prof_sample_accum_update
prof_sample_threshold_update
prof_tdata_booted
prof_tdata_cleanup
prof_tdata_get
prof_tdata_init
prof_tdata_initialized
prof_tdata_tls
prof_tdata_tsd
prof_tdata_tsd_boot
prof_tdata_tsd_cleanup_wrapper
prof_tdata_tsd_get
prof_tdata_tsd_get_wrapper
prof_tdata_tsd_init_head
prof_tdata_tsd_set
quarantine
quarantine_alloc_hook
quarantine_boot
quarantine_booted
quarantine_cleanup
quarantine_init
quarantine_tls
quarantine_tsd
quarantine_tsd_boot
quarantine_tsd_cleanup_wrapper
quarantine_tsd_get
quarantine_tsd_get_wrapper
quarantine_tsd_init_head
quarantine_tsd_set
register_zone
rtree_delete
rtree_get
rtree_get_locked
rtree_new
rtree_postfork_child
rtree_postfork_parent
rtree_prefork
rtree_set
s2u
sa2u
set_errno
small_size2bin
stats_cactive
stats_cactive_add
stats_cactive_get
stats_cactive_sub
stats_chunks
stats_print
tcache_alloc_easy
tcache_alloc_large
tcache_alloc_small
tcache_alloc_small_hard
tcache_arena_associate
tcache_arena_dissociate
tcache_bin_flush_large
tcache_bin_flush_small
tcache_bin_info
tcache_boot0
tcache_boot1
tcache_booted
tcache_create
tcache_dalloc_large
tcache_dalloc_small
tcache_destroy
tcache_enabled_booted
tcache_enabled_get
tcache_enabled_initialized
tcache_enabled_set
tcache_enabled_tls
tcache_enabled_tsd
tcache_enabled_tsd_boot
tcache_enabled_tsd_cleanup_wrapper
tcache_enabled_tsd_get
tcache_enabled_tsd_get_wrapper
tcache_enabled_tsd_init_head
tcache_enabled_tsd_set
tcache_event
tcache_event_hard
tcache_flush
tcache_get
tcache_initialized
tcache_maxclass
tcache_salloc
tcache_stats_merge
tcache_thread_cleanup
tcache_tls
tcache_tsd
tcache_tsd_boot
tcache_tsd_cleanup_wrapper
tcache_tsd_get
tcache_tsd_get_wrapper
tcache_tsd_init_head
tcache_tsd_set
thread_allocated_booted
thread_allocated_initialized
thread_allocated_tls
thread_allocated_tsd
thread_allocated_tsd_boot
thread_allocated_tsd_cleanup_wrapper
thread_allocated_tsd_get
thread_allocated_tsd_get_wrapper
thread_allocated_tsd_init_head
thread_allocated_tsd_set
tsd_init_check_recursion
tsd_init_finish
u2rz
deps/jemalloc/include/jemalloc/internal/private_unnamespace.sh  (new file, mode 100755)
#!/bin/sh

for symbol in `cat $1` ; do
	echo "#undef ${symbol}"
done
deps/jemalloc/include/jemalloc/internal/prng.h
...

@@ -25,7 +25,7 @@  (whitespace-only change to the #define line)
  * uint32_t state : Seed value.
  * const uint32_t a, c : See above discussion.
  */
 #define prng32(r, lg_range, state, a, c) do {	\
 	assert(lg_range > 0);			\
 	assert(lg_range <= 32);			\

...

@@ -35,7 +35,7 @@  (whitespace-only change to the #define line)
 } while (false)

 /* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
 #define prng64(r, lg_range, state, a, c) do {	\
 	assert(lg_range > 0);			\
 	assert(lg_range <= 64);			\
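Note: the prng32()/prng64() macro bodies are collapsed by the diff view, but the surrounding comments describe a standard linear congruential generator. Its step, for the 32-bit variant, is

$$\mathit{state}_{n+1} = (a\,\mathit{state}_n + c) \bmod 2^{32}, \qquad r = \left\lfloor \mathit{state}_{n+1} \,/\, 2^{\,32-\mathit{lg\_range}} \right\rfloor,$$

i.e. the caller receives the high lg_range bits of the new state (the low bits of an LCG cycle with short periods, so they are discarded); prng64() is the same with modulus 2^64.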
...

deps/jemalloc/include/jemalloc/internal/prof.h

...
@@ -8,7 +8,11 @@ typedef struct prof_ctx_s prof_ctx_t;
 typedef struct prof_tdata_s prof_tdata_t;

 /* Option defaults. */
-#define PROF_PREFIX_DEFAULT	"jeprof"
+#ifdef JEMALLOC_PROF
+#  define PROF_PREFIX_DEFAULT	"jeprof"
+#else
+#  define PROF_PREFIX_DEFAULT	""
+#endif
 #define LG_PROF_SAMPLE_DEFAULT	19
 #define LG_PROF_INTERVAL_DEFAULT	-1
...

@@ -129,6 +133,7 @@ struct prof_ctx_s {
 	 * limbo due to one of:
 	 *   - Initializing per thread counters associated with this ctx.
 	 *   - Preparing to destroy this ctx.
+	 *   - Dumping a heap profile that includes this ctx.
 	 * nlimbo must be 1 (single destroyer) in order to safely destroy the
 	 * ctx.
 	 */
...

@@ -145,7 +150,11 @@ struct prof_ctx_s {
 	 * this context.
 	 */
 	ql_head(prof_thr_cnt_t) cnts_ql;
+
+	/* Linkage for list of contexts to be dumped. */
+	ql_elm(prof_ctx_t) dump_link;
 };
+typedef ql_head(prof_ctx_t) prof_ctx_list_t;

 struct prof_tdata_s {
 	/*
...

@@ -195,7 +204,12 @@ extern bool opt_prof_gdump; /* High-water memory dumping. */
 extern bool	opt_prof_final;  /* Final profile dumping. */
 extern bool	opt_prof_leak;   /* Dump leak summary at exit. */
 extern bool	opt_prof_accum;  /* Report cumulative bytes. */
-extern char	opt_prof_prefix[PATH_MAX + 1];
+extern char	opt_prof_prefix[
+    /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+    PATH_MAX +
+#endif
+    1];

 /*
  * Profile dump interval, measured in bytes allocated.  Each arena triggers a
...

@@ -215,6 +229,11 @@ extern bool prof_promote;
 void	bt_init(prof_bt_t *bt, void **vec);
 void	prof_backtrace(prof_bt_t *bt, unsigned nignore);
 prof_thr_cnt_t	*prof_lookup(prof_bt_t *bt);
+#ifdef JEMALLOC_JET
+size_t	prof_bt_count(void);
+typedef int (prof_dump_open_t)(bool, const char *);
+extern prof_dump_open_t *prof_dump_open;
+#endif
 void	prof_idump(void);
 bool	prof_mdump(const char *filename);
 void	prof_gdump(void);
...

@@ -237,7 +256,7 @@ void prof_postfork_child(void);
 \
 	assert(size == s2u(size));					\
 \
-	prof_tdata = prof_tdata_get();					\
+	prof_tdata = prof_tdata_get(true);				\
 	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) {	\
 		if (prof_tdata != NULL)					\
 			ret = (prof_thr_cnt_t *)(uintptr_t)1U;		\
...

@@ -286,14 +305,14 @@ void prof_postfork_child(void);
 #ifndef JEMALLOC_ENABLE_INLINE
 malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)

-prof_tdata_t	*prof_tdata_get(void);
+prof_tdata_t	*prof_tdata_get(bool create);
 void	prof_sample_threshold_update(prof_tdata_t *prof_tdata);
 prof_ctx_t	*prof_ctx_get(const void *ptr);
-void	prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
+void	prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
 bool	prof_sample_accum_update(size_t size);
-void	prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt);
-void	prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
-    size_t old_size, prof_ctx_t *old_ctx);
+void	prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt);
+void	prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
+    size_t old_usize, prof_ctx_t *old_ctx);
 void	prof_free(const void *ptr, size_t size);
 #endif
...

@@ -304,17 +323,15 @@ malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
     prof_tdata_cleanup)

 JEMALLOC_INLINE prof_tdata_t *
-prof_tdata_get(void)
+prof_tdata_get(bool create)
 {
 	prof_tdata_t *prof_tdata;

 	cassert(config_prof);

 	prof_tdata = *prof_tdata_tsd_get();
-	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) {
-		if (prof_tdata == NULL)
-			prof_tdata = prof_tdata_init();
-	}
+	if (create && prof_tdata == NULL)
+		prof_tdata = prof_tdata_init();

 	return (prof_tdata);
 }
...

@@ -322,6 +339,20 @@ prof_tdata_get(void)
 JEMALLOC_INLINE void
 prof_sample_threshold_update(prof_tdata_t *prof_tdata)
 {
+	/*
+	 * The body of this function is compiled out unless heap profiling is
+	 * enabled, so that it is possible to compile jemalloc with floating
+	 * point support completely disabled.  Avoiding floating point code is
+	 * important on memory-constrained systems, but it also enables a
+	 * workaround for versions of glibc that don't properly save/restore
+	 * floating point registers during dynamic lazy symbol loading (which
+	 * internally calls into whatever malloc implementation happens to be
+	 * integrated into the application).  Note that some compilers (e.g.
+	 * gcc 4.8) may use floating point registers for fast memory moves, so
+	 * jemalloc must be compiled with such optimizations disabled (e.g.
+	 * -mno-sse) in order for the workaround to be complete.
+	 */
+#ifdef JEMALLOC_PROF
 	uint64_t r;
 	double u;
...
...
@@ -343,7 +374,7 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
	 *   Luc Devroye
	 *   Springer-Verlag, New York, 1986
	 *   pp 500
-	 *   (http://cg.scs.carleton.ca/~luc/rnbookindex.html)
+	 *   (http://luc.devroye.org/rnbookindex.html)
	 */
	prng64(r, 53, prof_tdata->prng_state, UINT64_C(6364136223846793005),
	    UINT64_C(1442695040888963407));
...
...
@@ -351,6 +382,7 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
	prof_tdata->threshold = (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
	    + (uint64_t)1U;
+#endif
}
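The threshold computed above is an exponentially distributed interval whose mean is 2^opt_lg_prof_sample bytes, per the Devroye reference cited in the code. A minimal standalone sketch of the same computation, with an assumed seed and sample exponent (not jemalloc code):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t state = 42;		/* assumed PRNG seed */
	unsigned lg_sample = 19;	/* mean interval: 2^19 = 512 KiB */

	/* Same LCG step and constants as the prng64() call above. */
	state = state * UINT64_C(6364136223846793005) +
	    UINT64_C(1442695040888963407);
	uint64_t r = state >> (64 - 53);			/* 53 random bits */
	double u = (double)r * (1.0 / 9007199254740992.0);	/* r / 2^53 */

	uint64_t threshold = (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_sample))))
	    + (uint64_t)1U;
	printf("next sample after %llu bytes\n",
	    (unsigned long long)threshold);
	return (0);
}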
JEMALLOC_INLINE prof_ctx_t *
...
...
@@ -373,7 +405,7 @@ prof_ctx_get(const void *ptr)
}

JEMALLOC_INLINE void
-prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
+prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
{
	arena_chunk_t *chunk;
...
...
@@ -383,7 +415,7 @@ prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr) {
		/* Region. */
-		arena_prof_ctx_set(ptr, ctx);
+		arena_prof_ctx_set(ptr, usize, ctx);
	} else
		huge_prof_ctx_set(ptr, ctx);
}
...
...
@@ -397,7 +429,7 @@ prof_sample_accum_update(size_t size)
	/* Sampling logic is unnecessary if the interval is 1. */
	assert(opt_lg_prof_sample != 0);

-	prof_tdata = *prof_tdata_tsd_get();
+	prof_tdata = prof_tdata_get(false);
	if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
		return (true);
...
...
@@ -418,20 +450,20 @@ prof_sample_accum_update(size_t size)
}

JEMALLOC_INLINE void
-prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
+prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt)
{

	cassert(config_prof);
	assert(ptr != NULL);
-	assert(size == isalloc(ptr, true));
+	assert(usize == isalloc(ptr, true));

	if (opt_lg_prof_sample != 0) {
-		if (prof_sample_accum_update(size)) {
+		if (prof_sample_accum_update(usize)) {
			/*
			 * Don't sample.  For malloc()-like allocation, it is
			 * always possible to tell in advance how large an
			 * object's usable size will be, so there should never
-			 * be a difference between the size passed to
+			 * be a difference between the usize passed to
			 * PROF_ALLOC_PREP() and prof_malloc().
			 */
			assert((uintptr_t)cnt == (uintptr_t)1U);
...
...
@@ -439,17 +471,17 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
	}

	if ((uintptr_t)cnt > (uintptr_t)1U) {
-		prof_ctx_set(ptr, cnt->ctx);
+		prof_ctx_set(ptr, usize, cnt->ctx);

		cnt->epoch++;
		/*********/
		mb_write();
		/*********/
		cnt->cnts.curobjs++;
-		cnt->cnts.curbytes += size;
+		cnt->cnts.curbytes += usize;
		if (opt_prof_accum) {
			cnt->cnts.accumobjs++;
-			cnt->cnts.accumbytes += size;
+			cnt->cnts.accumbytes += usize;
		}
		/*********/
		mb_write();
...
...
@@ -459,12 +491,12 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
		mb_write();
		/*********/
	} else
-		prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
+		prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
}

JEMALLOC_INLINE void
-prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
-    size_t old_size, prof_ctx_t *old_ctx)
+prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
+    size_t old_usize, prof_ctx_t *old_ctx)
{
	prof_thr_cnt_t *told_cnt;
...
...
@@ -472,15 +504,15 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
	assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);

	if (ptr != NULL) {
-		assert(size == isalloc(ptr, true));
+		assert(usize == isalloc(ptr, true));
		if (opt_lg_prof_sample != 0) {
-			if (prof_sample_accum_update(size)) {
+			if (prof_sample_accum_update(usize)) {
				/*
-				 * Don't sample.  The size passed to
+				 * Don't sample.  The usize passed to
				 * PROF_ALLOC_PREP() was larger than what
				 * actually got allocated, so a backtrace was
				 * captured for this allocation, even though
-				 * its actual size was insufficient to cross
+				 * its actual usize was insufficient to cross
				 * the sample threshold.
				 */
				cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
...
...
@@ -497,7 +529,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
			 */
			malloc_mutex_lock(old_ctx->lock);
			old_ctx->cnt_merged.curobjs--;
-			old_ctx->cnt_merged.curbytes -= old_size;
+			old_ctx->cnt_merged.curbytes -= old_usize;
			malloc_mutex_unlock(old_ctx->lock);
			told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
		}
...
...
@@ -507,23 +539,23 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
	if ((uintptr_t)told_cnt > (uintptr_t)1U)
		told_cnt->epoch++;
	if ((uintptr_t)cnt > (uintptr_t)1U) {
-		prof_ctx_set(ptr, cnt->ctx);
+		prof_ctx_set(ptr, usize, cnt->ctx);
		cnt->epoch++;
	} else if (ptr != NULL)
-		prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
+		prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
	/*********/
	mb_write();
	/*********/
	if ((uintptr_t)told_cnt > (uintptr_t)1U) {
		told_cnt->cnts.curobjs--;
-		told_cnt->cnts.curbytes -= old_size;
+		told_cnt->cnts.curbytes -= old_usize;
	}
	if ((uintptr_t)cnt > (uintptr_t)1U) {
		cnt->cnts.curobjs++;
-		cnt->cnts.curbytes += size;
+		cnt->cnts.curbytes += usize;
		if (opt_prof_accum) {
			cnt->cnts.accumobjs++;
-			cnt->cnts.accumbytes += size;
+			cnt->cnts.accumbytes += usize;
		}
	}
	/*********/
...
...
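The paired epoch increments and mb_write() barriers in prof_malloc() and prof_realloc() form the writer half of a seqlock-style protocol: the heap-profile dumper can copy a thread's counters without locking, retrying whenever it observes an odd or changed epoch. A sketch of the matching reader side, assuming the prof_thr_cnt_t layout used above (illustrative, not part of this diff):

	prof_cnt_t tcnt;
	unsigned epoch0;

	while (true) {
		epoch0 = cnt->epoch;
		if (epoch0 & 1U)
			continue;	/* writer in progress; try again */
		memcpy(&tcnt, &cnt->cnts, sizeof(prof_cnt_t));
		if (cnt->epoch == epoch0)
			break;		/* consistent snapshot taken */
	}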
deps/jemalloc/include/jemalloc/internal/public_namespace.sh
0 → 100755
View file @
fceef8e0
#!/bin/sh

for nm in `cat $1` ; do
	n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
	echo "#define je_${n} JEMALLOC_N(${n})"
done
deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh
0 → 100755
View file @
fceef8e0
#!/bin/sh

for nm in `cat $1` ; do
	n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
	echo "#undef je_${n}"
done
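Both scripts read a symbol list, one name per line with a colon-separated suffix; the awk step keeps only the part before the colon. Given an input line starting with malloc:, they would emit, respectively:

#define je_malloc JEMALLOC_N(malloc)
#undef je_malloc

where JEMALLOC_N() is the build's name-mangling macro, so a test build can temporarily re-point the public je_-prefixed names and later restore them.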
deps/jemalloc/include/jemalloc/internal/ql.h
View file @
fceef8e0
/*
* List definitions.
*/
#define ql_head(a_type)						\
struct {							\
	a_type *qlh_first;					\
}

#define ql_head_initializer(a_head) {NULL}

#define ql_elm(a_type) qr(a_type)

/* List functions. */
#define ql_new(a_head) do {					\
	(a_head)->qlh_first = NULL;				\
} while (0)

#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)

#define ql_first(a_head) ((a_head)->qlh_first)

#define ql_last(a_head, a_field)				\
	((ql_first(a_head) != NULL)				\
	    ? qr_prev(ql_first(a_head), a_field) : NULL)

#define ql_next(a_head, a_elm, a_field)				\
	((ql_last(a_head, a_field) != (a_elm))			\
	    ? qr_next((a_elm), a_field) : NULL)

#define ql_prev(a_head, a_elm, a_field)				\
	((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
	    : NULL)

#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do {	\
	qr_before_insert((a_qlelm), (a_elm), a_field);		\
	if (ql_first(a_head) == (a_qlelm)) {			\
		ql_first(a_head) = (a_elm);			\
	}							\
} while (0)

#define ql_after_insert(a_qlelm, a_elm, a_field)		\
	qr_after_insert((a_qlelm), (a_elm), a_field)

#define ql_head_insert(a_head, a_elm, a_field) do {		\
	if (ql_first(a_head) != NULL) {				\
		qr_before_insert(ql_first(a_head), (a_elm), a_field); \
	}							\
	ql_first(a_head) = (a_elm);				\
} while (0)

#define ql_tail_insert(a_head, a_elm, a_field) do {		\
	if (ql_first(a_head) != NULL) {				\
		qr_before_insert(ql_first(a_head), (a_elm), a_field); \
	}							\
	ql_first(a_head) = qr_next((a_elm), a_field);		\
} while (0)

#define ql_remove(a_head, a_elm, a_field) do {			\
	if (ql_first(a_head) == (a_elm)) {			\
		ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
	}							\
...
...
@@ -66,18 +66,18 @@ struct { \
} \
} while (0)
#define ql_head_remove(a_head, a_type, a_field) do {		\
	a_type *t = ql_first(a_head);				\
	ql_remove((a_head), t, a_field);			\
} while (0)

#define ql_tail_remove(a_head, a_type, a_field) do {		\
	a_type *t = ql_last(a_head, a_field);			\
	ql_remove((a_head), t, a_field);			\
} while (0)

#define ql_foreach(a_var, a_head, a_field)			\
	qr_foreach((a_var), ql_first(a_head), a_field)

#define ql_reverse_foreach(a_var, a_head, a_field)		\
	qr_reverse_foreach((a_var), ql_first(a_head), a_field)
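Since ql.h is macro-only, a brief usage sketch may help; the widget type and its field names are hypothetical, and the include path is assumed:

#include "ql.h"

typedef struct widget_s widget_t;
struct widget_s {
	int			id;
	ql_elm(widget_t)	link;	/* embedded linkage; no allocation */
};

static void
widget_demo(widget_t *a, widget_t *b)
{
	ql_head(widget_t) head;
	widget_t *w;

	ql_new(&head);
	ql_elm_new(a, link);
	ql_elm_new(b, link);
	ql_tail_insert(&head, a, link);
	ql_tail_insert(&head, b, link);
	ql_foreach(w, &head, link) {
		/* visits a, then b */
	}
	ql_remove(&head, a, link);
}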
deps/jemalloc/include/jemalloc/internal/qr.h
View file @
fceef8e0
/* Ring definitions. */
#define qr(a_type)						\
struct {							\
	a_type *qre_next;					\
	a_type *qre_prev;					\
}

/* Ring functions. */
#define qr_new(a_qr, a_field) do {				\
	(a_qr)->a_field.qre_next = (a_qr);			\
	(a_qr)->a_field.qre_prev = (a_qr);			\
} while (0)

#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)

#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)

#define qr_before_insert(a_qrelm, a_qr, a_field) do {		\
	(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev;	\
	(a_qr)->a_field.qre_next = (a_qrelm);			\
	(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr);	\
	(a_qrelm)->a_field.qre_prev = (a_qr);			\
} while (0)

#define qr_after_insert(a_qrelm, a_qr, a_field)			\
	do							\
	{							\
		(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next;\
...
...
@@ -31,7 +31,7 @@ struct { \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
#define qr_meld(a_qr_a, a_qr_b, a_field) do {			\
void *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
...
...
@@ -42,10 +42,10 @@ struct { \
/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code. */
#define qr_split(a_qr_a, a_qr_b, a_field)			\
	qr_meld((a_qr_a), (a_qr_b), a_field)

#define qr_remove(a_qr, a_field) do {				\
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
...
...
@@ -54,13 +54,13 @@ struct { \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_foreach(var, a_qr, a_field)				\
	for ((var) = (a_qr);					\
	    (var) != NULL;					\
	    (var) = (((var)->a_field.qre_next != (a_qr))	\
	    ? (var)->a_field.qre_next : NULL))

#define qr_reverse_foreach(var, a_qr, a_field)			\
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
...
...
deps/jemalloc/include/jemalloc/internal/quarantine.h
View file @
fceef8e0
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct quarantine_obj_s quarantine_obj_t;
typedef struct quarantine_s quarantine_t;
/* Default per thread quarantine size if valgrind is enabled. */
#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
...
...
@@ -8,17 +11,57 @@
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct quarantine_obj_s {
	void	*ptr;
	size_t	usize;
};

struct quarantine_s {
	size_t			curbytes;
	size_t			curobjs;
	size_t			first;
#define LG_MAXOBJS_INIT 10
	size_t			lg_maxobjs;
	quarantine_obj_t	objs[1]; /* Dynamically sized ring buffer. */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
quarantine_t	*quarantine_init(size_t lg_maxobjs);
void	quarantine(void *ptr);
void	quarantine_cleanup(void *arg);
bool	quarantine_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *)

void	quarantine_alloc_hook(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
malloc_tsd_externs(quarantine, quarantine_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL,
    quarantine_cleanup)

JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
	quarantine_t *quarantine;

	assert(config_fill && opt_quarantine);

	quarantine = *quarantine_tsd_get();
	if (quarantine == NULL)
		quarantine_init(LG_MAXOBJS_INIT);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
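The objs[1] member above is a C89-style flexible tail: quarantine_init() is expected to allocate the struct with room for 1 << lg_maxobjs entries. A sketch of the ring arithmetic such a layout implies (illustrative stand-ins, not the jemalloc implementation):

#include <stddef.h>

typedef struct {
	void	*ptr;
	size_t	usize;
} qobj_t;			/* stand-in for quarantine_obj_t */

typedef struct {
	size_t	curbytes;
	size_t	curobjs;
	size_t	first;		/* index of the oldest entry */
	size_t	lg_maxobjs;
	qobj_t	objs[1];	/* allocated with (1 << lg_maxobjs) slots */
} q_t;				/* stand-in for quarantine_t */

/* Slot where the next quarantined pointer would be stored. */
static size_t
q_next_slot(const q_t *q)
{
	return ((q->first + q->curobjs) & (((size_t)1 << q->lg_maxobjs) - 1));
}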
deps/jemalloc/include/jemalloc/internal/rb.h
View file @
fceef8e0
...
...
@@ -22,10 +22,6 @@
#ifndef RB_H_
#define RB_H_
#if 0
__FBSDID("$FreeBSD: head/lib/libc/stdlib/rb.h 204493 2010-02-28 22:57:13Z jasone $");
#endif
#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \
...
...
deps/jemalloc/include/jemalloc/internal/rtree.h
View file @
fceef8e0
...
...
@@ -14,17 +14,18 @@ typedef struct rtree_s rtree_t;
* Size of each radix tree node (must be a power of 2). This impacts tree
* depth.
*/
-#if (LG_SIZEOF_PTR == 2)
-#  define RTREE_NODESIZE (1U << 14)
-#else
-#  define RTREE_NODESIZE CACHELINE
-#endif
+#define RTREE_NODESIZE (1U << 16)
+
+typedef void *(rtree_alloc_t)(size_t);
+typedef void (rtree_dalloc_t)(void *);

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct rtree_s {
+	rtree_alloc_t	*alloc;
+	rtree_dalloc_t	*dalloc;
	malloc_mutex_t	mutex;
	void		**root;
	unsigned	height;
...
@@ -35,7 +36,8 @@ struct rtree_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-rtree_t	*rtree_new(unsigned bits);
+rtree_t	*rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
void	rtree_delete(rtree_t *rtree);
void	rtree_prefork(rtree_t *rtree);
void	rtree_postfork_parent(rtree_t *rtree);
void	rtree_postfork_child(rtree_t *rtree);
...
...
@@ -45,20 +47,20 @@ void rtree_postfork_child(rtree_t *rtree);
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-#ifndef JEMALLOC_DEBUG
-void	*rtree_get_locked(rtree_t *rtree, uintptr_t key);
+#ifdef JEMALLOC_DEBUG
+uint8_t	rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
-void	*rtree_get(rtree_t *rtree, uintptr_t key);
-bool	rtree_set(rtree_t *rtree, uintptr_t key, void *val);
+uint8_t	rtree_get(rtree_t *rtree, uintptr_t key);
+bool	rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
#define RTREE_GET_GENERATE(f)					\
/* The least significant bits of the key are ignored. */	\
-JEMALLOC_INLINE void *						\
+JEMALLOC_INLINE uint8_t						\
f(rtree_t *rtree, uintptr_t key)				\
{								\
-	void *ret;						\
+	uint8_t ret;						\
	uintptr_t subkey;					\
	unsigned i, lshift, height, bits;			\
	void **node, **child;					\
...
@@ -68,12 +70,12 @@ f(rtree_t *rtree, uintptr_t key) \
	    i < height - 1;					\
	    i++, lshift += bits, node = child) {		\
		bits = rtree->level2bits[i];			\
		subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR +	\
		    3)) - bits);				\
		child = (void**)node[subkey];			\
		if (child == NULL) {				\
			RTREE_UNLOCK(&rtree->mutex);		\
-			return (NULL);				\
+			return (0);				\
		}						\
	}							\
								\
...
@@ -84,7 +86,10 @@ f(rtree_t *rtree, uintptr_t key) \
	bits = rtree->level2bits[i];				\
	subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
	    bits);						\
-	ret = node[subkey];					\
+	{							\
+		uint8_t *leaf = (uint8_t *)node;		\
+		ret = leaf[subkey];				\
+	}							\
	RTREE_UNLOCK(&rtree->mutex);				\
								\
	RTREE_GET_VALIDATE					\
...
@@ -123,7 +128,7 @@ RTREE_GET_GENERATE(rtree_get)
#undef RTREE_GET_VALIDATE

JEMALLOC_INLINE bool
-rtree_set(rtree_t *rtree, uintptr_t key, void *val)
+rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
{
	uintptr_t subkey;
	unsigned i, lshift, height, bits;
...
...
@@ -138,14 +143,14 @@ rtree_set(rtree_t *rtree, uintptr_t key, void *val)
		    bits);
		child = (void**)node[subkey];
		if (child == NULL) {
-			child = (void**)base_alloc(sizeof(void *) <<
-			    rtree->level2bits[i+1]);
+			size_t size = ((i + 1 < height - 1) ? sizeof(void *)
+			    : (sizeof(uint8_t))) << rtree->level2bits[i+1];
+			child = (void**)rtree->alloc(size);
			if (child == NULL) {
				malloc_mutex_unlock(&rtree->mutex);
				return (true);
			}
-			memset(child, 0, sizeof(void *) <<
-			    rtree->level2bits[i+1]);
+			memset(child, 0, size);
			node[subkey] = child;
		}
	}
...
...
@@ -153,7 +158,10 @@ rtree_set(rtree_t *rtree, uintptr_t key, void *val)
	/* node is a leaf, so it contains values rather than node pointers. */
	bits = rtree->level2bits[i];
	subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
-	node[subkey] = val;
+	{
+		uint8_t *leaf = (uint8_t *)node;
+		leaf[subkey] = val;
+	}
	malloc_mutex_unlock(&rtree->mutex);

	return (false);
...
...
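The rtree changes above replace void * leaf values with bare uint8_t entries (interior levels still hold void * children), shrinking leaf nodes eightfold on 64-bit targets, and they route node allocation through pluggable alloc/dalloc hooks instead of base_alloc(). A standalone sketch of the per-level subkey extraction the macros perform (hypothetical helper; mirrors the shift arithmetic in the diff):

#include <stdint.h>

static unsigned
subkey_at_level(uintptr_t key, unsigned lshift, unsigned bits)
{
	/* (LG_SIZEOF_PTR + 3) in the diff is log2 of the pointer width. */
	unsigned ptr_bits = (unsigned)(sizeof(uintptr_t) * 8);

	return ((unsigned)((key << lshift) >> (ptr_bits - bits)));
}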
deps/jemalloc/include/jemalloc/internal/tcache.h
View file @
fceef8e0
...
...
@@ -140,11 +140,11 @@ void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Map of thread-specific caches. */
malloc_tsd_externs(tcache, tcache_t *)
-malloc_tsd_funcs(JEMALLOC_INLINE, tcache, tcache_t *, NULL,
+malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL,
    tcache_thread_cleanup)
/* Per thread flag that allows thread caches to be disabled. */
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
-malloc_tsd_funcs(JEMALLOC_INLINE, tcache_enabled, tcache_enabled_t,
+malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
    tcache_enabled_default, malloc_tsd_no_cleanup)

JEMALLOC_INLINE void
...
...
@@ -206,7 +206,7 @@ tcache_enabled_set(bool enabled)
	}
}

-JEMALLOC_INLINE tcache_t *
+JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(bool create)
{
	tcache_t *tcache;
...
...
@@ -258,7 +258,7 @@ tcache_get(bool create)
	return (tcache);
}

-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
{
...
...
@@ -271,7 +271,7 @@ tcache_event(tcache_t *tcache)
		tcache_event_hard(tcache);
}

-JEMALLOC_INLINE void *
+JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
	void *ret;
...
...
@@ -287,7 +287,7 @@ tcache_alloc_easy(tcache_bin_t *tbin)
	return (ret);
}

-JEMALLOC_INLINE void *
+JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
...
...
@@ -297,6 +297,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
	binind = SMALL_SIZE2BIN(size);
	assert(binind < NBINS);
	tbin = &tcache->tbins[binind];
+	size = arena_bin_info[binind].reg_size;
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		ret = tcache_alloc_small_hard(tcache, tbin, binind);
...
...
@@ -313,6 +314,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
		} else if (opt_zero)
			memset(ret, 0, size);
		}
+		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	} else {
		if (config_fill && opt_junk) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
...
...
@@ -330,7 +332,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
	return (ret);
}

-JEMALLOC_INLINE void *
+JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
...
...
@@ -367,6 +369,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
			else if (opt_zero)
				memset(ret, 0, size);
		}
+		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	} else {
		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
		memset(ret, 0, size);
...
...
@@ -382,7 +385,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
	return (ret);
}

-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
	tcache_bin_t *tbin;
...
...
@@ -406,7 +409,7 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
	tcache_event(tcache);
}

-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
	size_t binind;
...
...
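Besides promoting the hot paths to JEMALLOC_ALWAYS_INLINE, the tcache diff adds size = arena_bin_info[binind].reg_size; so that junk/zero filling and the Valgrind annotations operate on the bin's true region size rather than the caller's request. A toy illustration of that request-to-bin rounding (table values are hypothetical; the real ones come from arena_bin_info):

#include <stddef.h>

static const size_t reg_size[] = {8, 16, 32, 48, 64};

static size_t
round_to_bin(size_t request)
{
	size_t i;

	for (i = 0; i < sizeof(reg_size) / sizeof(reg_size[0]); i++) {
		if (request <= reg_size[i])
			return (reg_size[i]);	/* usable size handed out */
	}
	return (0);	/* too large for a small bin */
}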
deps/jemalloc/include/jemalloc/internal/tsd.h
View file @
fceef8e0
...
...
@@ -6,6 +6,12 @@
typedef bool (*malloc_tsd_cleanup_t)(void);

+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+    !defined(_WIN32))
+typedef struct tsd_init_block_s tsd_init_block_t;
+typedef struct tsd_init_head_s tsd_init_head_t;
+#endif
/*
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
* are four macros that support (at least) three use cases: file-private,
...
...
@@ -75,12 +81,13 @@ extern __thread a_type a_name##_tls; \
extern pthread_key_t a_name##_tsd; \
extern bool a_name##_booted;
#elif (defined(_WIN32))
#define malloc_tsd_externs(a_name, a_type)			\
extern DWORD a_name##_tsd; \
extern bool a_name##_booted;
#else
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##_tsd; \
extern tsd_init_head_t a_name##_tsd_init_head; \
extern bool a_name##_booted;
#endif
...
...
@@ -105,6 +112,10 @@ a_attr bool a_name##_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##_tsd; \
a_attr tsd_init_head_t a_name##_tsd_init_head = { \
ql_head_initializer(blocks), \
MALLOC_MUTEX_INITIALIZER \
}; \
a_attr bool a_name##_booted = false;
#endif
...
...
@@ -333,8 +344,14 @@ a_name##_tsd_get_wrapper(void) \
pthread_getspecific(a_name##_tsd); \
\
if (wrapper == NULL) { \
tsd_init_block_t block; \
wrapper = tsd_init_check_recursion( \
&a_name##_tsd_init_head, &block); \
if (wrapper) \
return (wrapper); \
wrapper = (a_name##_tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
block.data = wrapper; \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
...
...
@@ -350,6 +367,7 @@ a_name##_tsd_get_wrapper(void) \
" TSD for "#a_name"\n"); \
abort(); \
} \
tsd_init_finish(&a_name##_tsd_init_head, &block); \
} \
return (wrapper); \
} \
...
...
@@ -379,6 +397,19 @@ a_name##_tsd_set(a_type *val) \
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
struct tsd_init_block_s {
	ql_elm(tsd_init_block_t)	link;
	pthread_t			thread;
	void				*data;
};
struct tsd_init_head_s {
	ql_head(tsd_init_block_t)	blocks;
	malloc_mutex_t			lock;
};
#endif

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
...
...
@@ -388,6 +419,12 @@ void malloc_tsd_dalloc(void *wrapper);
void	malloc_tsd_no_cleanup(void *);
void	malloc_tsd_cleanup_register(bool (*f)(void));
void	malloc_tsd_boot(void);
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+    !defined(_WIN32))
+void	*tsd_init_check_recursion(tsd_init_head_t *head,
+    tsd_init_block_t *block);
+void	tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
+#endif

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
...
...
deps/jemalloc/include/jemalloc/internal/util.h
View file @
fceef8e0
...
...
@@ -14,7 +14,7 @@
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
*/
-#define JEMALLOC_CONCAT(...) __VA_ARGS__
+#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
/*
* Silence compiler warnings due to uninitialized values. This is used
...
...
@@ -42,12 +42,6 @@
} while (0)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
if ((c) == false) \
assert(false); \
} while (0)
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
...
...
@@ -69,10 +63,18 @@
} while (0)
#endif
#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (config_debug && !(e)) \
not_implemented(); \
} while (0)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
if ((c) == false) \
not_reached(); \
} while (0)
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
...
...
@@ -82,8 +84,9 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-int	buferror(char *buf, size_t buflen);
-uintmax_t	malloc_strtoumax(const char *nptr, char **endptr, int base);
+int	buferror(int err, char *buf, size_t buflen);
+uintmax_t	malloc_strtoumax(const char *restrict nptr,
+    char **restrict endptr, int base);
void	malloc_write(const char *s);
/*
...
...
@@ -107,7 +110,6 @@ void malloc_printf(const char *format, ...)
#ifndef JEMALLOC_ENABLE_INLINE
size_t	pow2_ceil(size_t x);
-void	malloc_write(const char *s);
void	set_errno(int errnum);
int	get_errno(void);
#endif
...
...