ruanhaishen / redis · Commits · f63e81c2
Unverified commit f63e81c2, authored Aug 25, 2018 by Chris Lamb, committed by GitHub on Aug 25, 2018.
Merge branch 'unstable' into config-set-maxmemory-grammar
Parents: eaeba1b2, 39c70e72
Changes: 209 files (too many changes to show; only 209 of 209+ changed files are displayed to preserve performance).
deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in (new file, mode 100644)
#ifndef JEMALLOC_PREAMBLE_H
#define JEMALLOC_PREAMBLE_H
#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# undef JEMALLOC_IS_MALLOC
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
# define JEMALLOC_NO_RENAME
# include "../jemalloc@install_suffix@.h"
# undef JEMALLOC_NO_RENAME
#else
# define JEMALLOC_N(n) @private_namespace@##n
# include "../jemalloc@install_suffix@.h"
#endif
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif
#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#endif
#include "jemalloc/internal/jemalloc_internal_macros.h"
/*
* Note that the ordering matters here; the hook itself is name-mangled. We
* want the inclusion of hooks to happen early, so that we hook as much as
* possible.
*/
#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
# ifndef JEMALLOC_JET
# include "jemalloc/internal/private_namespace.h"
# else
# include "jemalloc/internal/private_namespace_jet.h"
# endif
#endif
#include "jemalloc/internal/hooks.h"
#ifdef JEMALLOC_DEFINE_MADVISE_FREE
# define JEMALLOC_MADV_FREE 8
#endif
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
true
#else
false
#endif
;
static const bool have_dss =
#ifdef JEMALLOC_DSS
true
#else
false
#endif
;
static const bool have_madvise_huge =
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
true
#else
false
#endif
;
static const bool config_fill =
#ifdef JEMALLOC_FILL
true
#else
false
#endif
;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
true
#else
false
#endif
;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
#else
false
#endif
;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
true
#else
false
#endif
;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
true
#else
false
#endif
;
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
true
#else
false
#endif
;
static const bool config_stats =
#ifdef JEMALLOC_STATS
true
#else
false
#endif
;
static const bool config_tls =
#ifdef JEMALLOC_TLS
true
#else
false
#endif
;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
true
#else
false
#endif
;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
true
#else
false
#endif
;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
true
#else
false
#endif
;
/*
* Undocumented, for jemalloc development use only at the moment. See the note
* in jemalloc/internal/log.h.
*/
static const bool config_log =
#ifdef JEMALLOC_LOG
true
#else
false
#endif
;
#ifdef JEMALLOC_HAVE_SCHED_GETCPU
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
#endif
static const bool have_percpu_arena =
#ifdef JEMALLOC_PERCPU_ARENA
true
#else
false
#endif
;
/*
* Undocumented, and not recommended; the application should take full
* responsibility for tracking provenance.
*/
static const bool force_ivsalloc =
#ifdef JEMALLOC_FORCE_IVSALLOC
true
#else
false
#endif
;
static const bool have_background_thread =
#ifdef JEMALLOC_BACKGROUND_THREAD
true
#else
false
#endif
;
#endif /* JEMALLOC_PREAMBLE_H */
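The config_* constants above are the only place the JEMALLOC_* feature macros are consulted; the rest of the code branches on the constants so the optimizer can discard disabled paths. A minimal sketch of that pattern, assuming the usual jemalloc style (the function name is hypothetical; config_debug comes from the header above):

/* Hedged sketch; demo_check is hypothetical, config_debug is defined above. */
static void
demo_check(bool invariant_holds) {
    if (config_debug) {
        /* Constant-folded away entirely when JEMALLOC_DEBUG is not set. */
        assert(invariant_holds);
    }
}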
deps/jemalloc/include/jemalloc/internal/large_externs.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H

void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero);
bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_max, bool zero);
void *large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache);

typedef void (large_dalloc_junk_t)(void *, size_t);
extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;

typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;

void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
void large_dalloc(tsdn_t *tsdn, extent_t *extent);
size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);

#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/log.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_LOG_H
#define JEMALLOC_INTERNAL_LOG_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#ifdef JEMALLOC_LOG
# define JEMALLOC_LOG_VAR_BUFSIZE 1000
#else
# define JEMALLOC_LOG_VAR_BUFSIZE 1
#endif
#define JEMALLOC_LOG_BUFSIZE 4096
/*
* The log malloc_conf option is a '|'-delimited list of log_var name segments
* which should be logged. The names are themselves hierarchical, with '.' as
* the delimiter (a "segment" is just a prefix in the log namespace). So, if
* you have:
*
* log("arena", "log msg for arena"); // 1
* log("arena.a", "log msg for arena.a"); // 2
* log("arena.b", "log msg for arena.b"); // 3
* log("arena.a.a", "log msg for arena.a.a"); // 4
* log("extent.a", "log msg for extent.a"); // 5
* log("extent.b", "log msg for extent.b"); // 6
*
* And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and
* 6 will print at runtime. You can enable logging from all log vars by
* writing "log=.".
*
* None of this should be regarded as a stable API for right now. It's intended
* as a debugging interface, to let us keep around some of our printf-debugging
* statements.
*/
extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
extern atomic_b_t log_init_done;

typedef struct log_var_s log_var_t;
struct log_var_s {
    /*
     * Lowest bit is "inited", second lowest is "enabled".  Putting them in
     * a single word lets us avoid any fences on weak architectures.
     */
    atomic_u_t state;
    const char *name;
};
#define LOG_NOT_INITIALIZED 0U
#define LOG_INITIALIZED_NOT_ENABLED 1U
#define LOG_ENABLED 2U
#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str}
/*
* Returns the value we should assume for state (which is not necessarily
* accurate; if logging is done before logging has finished initializing, then
* we default to doing the safe thing by logging everything).
*/
unsigned log_var_update_state(log_var_t *log_var);
/* We factor out the metadata management to allow us to test more easily. */
#define log_do_begin(log_var) \
if (config_log) { \
unsigned log_state = atomic_load_u(&(log_var).state, \
ATOMIC_RELAXED); \
if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \
log_state = log_var_update_state(&(log_var)); \
assert(log_state != LOG_NOT_INITIALIZED); \
} \
if (log_state == LOG_ENABLED) { \
{
/* User code executes here. */
#define log_do_end(log_var) \
} \
} \
}
/*
* MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during
* preprocessing. To work around this, we take all potential extra arguments in
* a var-args functions. Since a varargs macro needs at least one argument in
* the "...", we accept the format string there, and require that the first
* argument in this "..." is a const char *.
*/
static inline void
log_impl_varargs(const char *name, ...) {
    char buf[JEMALLOC_LOG_BUFSIZE];
    va_list ap;

    va_start(ap, name);
    const char *format = va_arg(ap, const char *);
    size_t dst_offset = 0;
    dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name);
    dst_offset += malloc_vsnprintf(buf + dst_offset,
        JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap);
    dst_offset += malloc_snprintf(buf + dst_offset,
        JEMALLOC_LOG_BUFSIZE - dst_offset, "\n");
    va_end(ap);

    malloc_write(buf);
}
/* Call as log("log.var.str", "format_string %d", arg_for_format_string); */
#define LOG(log_var_str, ...) \
do { \
static log_var_t log_var = LOG_VAR_INIT(log_var_str); \
log_do_begin(log_var) \
log_impl_varargs((log_var).name, __VA_ARGS__); \
log_do_end(log_var) \
} while (0)
#endif /* JEMALLOC_INTERNAL_LOG_H */
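Putting the pieces together, a caller emits a message through LOG() and enables it at run time through the (unstable) log option documented above. A hedged sketch; the log name "extent.split" and the helper function are made up for illustration:

/* Hedged sketch; name segment and helper are illustrative only. */
static void
demo_log_split(void *ptr, size_t usize) {
    LOG("extent.split", "split %p at usize %zu", ptr, usize);
}

/* Enabled at run time, e.g. MALLOC_CONF="log=extent.split" for this name
 * segment only, or MALLOC_CONF="log=." for every log var. */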
deps/jemalloc/include/jemalloc/internal/malloc_io.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
#define JEMALLOC_INTERNAL_MALLOC_IO_H
#ifdef _WIN32
# ifdef _WIN64
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX "ll"
# else
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX ""
# endif
# define FMTd32 "d"
# define FMTu32 "u"
# define FMTx32 "x"
# define FMTd64 FMT64_PREFIX "d"
# define FMTu64 FMT64_PREFIX "u"
# define FMTx64 FMT64_PREFIX "x"
# define FMTdPTR FMTPTR_PREFIX "d"
# define FMTuPTR FMTPTR_PREFIX "u"
# define FMTxPTR FMTPTR_PREFIX "x"
#else
# include <inttypes.h>
# define FMTd32 PRId32
# define FMTu32 PRIu32
# define FMTx32 PRIx32
# define FMTd64 PRId64
# define FMTu64 PRIu64
# define FMTx64 PRIx64
# define FMTdPTR PRIdPTR
# define FMTuPTR PRIuPTR
# define FMTxPTR PRIxPTR
#endif
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
/*
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
* large enough for all possible uses within jemalloc.
*/
#define MALLOC_PRINTF_BUFSIZE 4096
int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
    int base);
void malloc_write(const char *s);

/*
 * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
 * point math.
 */
size_t malloc_vsnprintf(char *str, size_t size, const char *format,
    va_list ap);
size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
    JEMALLOC_FORMAT_PRINTF(3, 4);
/*
 * The caller can set write_cb and cbopaque to null to choose to print with the
 * je_malloc_message hook.
 */
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap);
void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
static inline ssize_t
malloc_write_fd(int fd, const void *buf, size_t count) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
    /*
     * Use syscall(2) rather than write(2) when possible in order to avoid
     * the possibility of memory allocation within libc.  This is necessary
     * on FreeBSD; most operating systems do not have this problem though.
     *
     * syscall() returns long or int, depending on platform, so capture the
     * result in the widest plausible type to avoid compiler warnings.
     */
    long result = syscall(SYS_write, fd, buf, count);
#else
    ssize_t result = (ssize_t)write(fd, buf,
#ifdef _WIN32
        (unsigned int)
#endif
        count);
#endif
    return (ssize_t)result;
}

static inline ssize_t
malloc_read_fd(int fd, void *buf, size_t count) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
    long result = syscall(SYS_read, fd, buf, count);
#else
    ssize_t result = read(fd, buf,
#ifdef _WIN32
        (unsigned int)
#endif
        count);
#endif
    return (ssize_t)result;
}

#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */
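The FMT* macros exist so that 64-bit and pointer-sized integers can be formatted portably through the allocator's own printf subset. A hedged sketch of the intended use (the helper and counter value are illustrative):

/* Hedged sketch; demo_report and nmalloc are illustrative. */
static void
demo_report(uint64_t nmalloc) {
    char buf[MALLOC_PRINTF_BUFSIZE];

    malloc_snprintf(buf, sizeof(buf), "nmalloc: %"FMTu64"\n", nmalloc);
    malloc_write(buf);
}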
deps/jemalloc/include/jemalloc/internal/mb.h (deleted, mode 100644 → 0)
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void mb_write(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
#ifdef __i386__
/*
* According to the Intel Architecture Software Developer's Manual, current
* processors execute instructions in order from the perspective of other
* processors in a multiprocessor system, but 1) Intel reserves the right to
* change that, and 2) the compiler's optimizer could re-order instructions if
* there weren't some form of barrier. Therefore, even if running on an
* architecture that does not need memory barriers (everything through at least
* i686), an "optimizer barrier" is necessary.
*/
JEMALLOC_INLINE void
mb_write(void)
{

#  if 0
	/* This is a true memory barrier. */
	asm volatile ("pusha;"
	    "xor %%eax,%%eax;"
	    "cpuid;"
	    "popa;"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
#else
	/*
	 * This is hopefully enough to keep the compiler from reordering
	 * instructions around this one.
	 */
	asm volatile ("nop;"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
#endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
mb_write(void)
{

	asm volatile ("sfence"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
#elif defined(__powerpc__)
JEMALLOC_INLINE void
mb_write(void)
{

	asm volatile ("eieio"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
#elif defined(__sparc64__)
JEMALLOC_INLINE void
mb_write(void)
{

	asm volatile ("membar #StoreStore"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
#elif defined(__tile__)
JEMALLOC_INLINE void
mb_write(void)
{

	__sync_synchronize();
}
#else
/*
 * This is much slower than a simple memory barrier, but the semantics of mutex
 * unlock make this work.
 */
JEMALLOC_INLINE void
mb_write(void)
{
	malloc_mutex_t mtx;

	malloc_mutex_init(&mtx);
	malloc_mutex_lock(&mtx);
	malloc_mutex_unlock(&mtx);
}
#endif
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/mutex.h (changed)
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_MUTEX_H
#define JEMALLOC_INTERNAL_MUTEX_H

typedef struct malloc_mutex_s malloc_mutex_t;

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"

typedef enum {
    /* Can only acquire one mutex of a given witness rank at a time. */
    malloc_mutex_rank_exclusive,
    /*
     * Can acquire multiple mutexes of the same witness rank, but in
     * address-ascending order only.
     */
    malloc_mutex_address_ordered
} malloc_mutex_lock_order_t;

typedef struct malloc_mutex_s malloc_mutex_t;

struct malloc_mutex_s {
    union {
        struct {
            /*
             * prof_data is defined first to reduce cacheline
             * bouncing: the data is not touched by the mutex holder
             * during unlocking, while might be modified by
             * contenders.  Having it before the mutex itself could
             * avoid prefetching a modified cacheline (for the
             * unlocking thread).
             */
            mutex_prof_data_t prof_data;
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
# if _WIN32_WINNT >= 0x0600
            SRWLOCK lock;
# else
            CRITICAL_SECTION lock;
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
            os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER {0}
            OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
            pthread_mutex_t lock;
            malloc_mutex_t *postponed_next;
#else
# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \
      defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
            pthread_mutex_t lock;
#endif
        };
        /*
         * We only touch witness when configured w/ debug.  However we
         * keep the field in a union when !debug so that we don't have
         * to pollute the code base with #ifdefs, while avoid paying the
         * memory cost.
         */
#if !defined(JEMALLOC_DEBUG)
        witness_t witness;
        malloc_mutex_lock_order_t lock_order;
#endif
    };
#if defined(JEMALLOC_DEBUG)
    witness_t witness;
    malloc_mutex_lock_order_t lock_order;
#endif
};

/*
 * Based on benchmark results, a fixed spin with this amount of retries works
 * well for our critical sections.
 */
#define MALLOC_MUTEX_MAX_SPIN 250
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_LOCK(m) OSSpinLockLock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) OSSpinLockUnlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock))
#else
# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
#endif
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#define LOCK_PROF_DATA_INITIALIZER \
    {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
    ATOMIC_INIT(0), 0, NULL, 0}

struct malloc_mutex_s {
#ifdef _WIN32
    CRITICAL_SECTION lock;
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_INITIALIZER \
    {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \
    WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
    OSSpinLock lock;
# define MALLOC_MUTEX_INITIALIZER \
    {{{LOCK_PROF_DATA_INITIALIZER, 0}}, \
    WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
    pthread_mutex_t lock;
    malloc_mutex_t *postponed_next;
# define MALLOC_MUTEX_INITIALIZER \
    {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \
    WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#else
    pthread_mutex_t lock;
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER \
    {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
    WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#endif
};
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
...
...
@@ -48,52 +126,123 @@ extern bool isthreaded;
# define isthreaded true
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex);
void malloc_mutex_prefork(malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
bool mutex_boot(void);
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_boot(void);
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
#ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(malloc_mutex_t *mutex);
void malloc_mutex_unlock(malloc_mutex_t *mutex);
#endif

static inline void
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
    MALLOC_MUTEX_LOCK(mutex);
}

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex)
{
static inline bool
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
    return MALLOC_MUTEX_TRYLOCK(mutex);
}

static inline void
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    if (config_stats) {
        mutex_prof_data_t *data = &mutex->prof_data;
        data->n_lock_ops++;
        if (data->prev_owner != tsdn) {
            data->prev_owner = tsdn;
            data->n_owner_switches++;
        }
    }
}

/* Trylock: return false if the lock is successfully acquired. */
static inline bool
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
#ifdef _WIN32
        EnterCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
        OSSpinLockLock(&mutex->lock);
#else
        pthread_mutex_lock(&mutex->lock);
#endif
        if (malloc_mutex_trylock_final(mutex)) {
            return true;
        }
        mutex_owner_stats_update(tsdn, mutex);
    }
    witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    return false;
}
JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex)
{

/* Aggregate lock prof data. */
static inline void
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
    nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
    if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
        nstime_copy(&sum->max_wait_time, &data->max_wait_time);
    }
    sum->n_wait_times += data->n_wait_times;
    sum->n_spin_acquired += data->n_spin_acquired;
    if (sum->max_n_thds < data->max_n_thds) {
        sum->max_n_thds = data->max_n_thds;
    }
    uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
        ATOMIC_RELAXED);
    uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
        &data->n_waiting_thds, ATOMIC_RELAXED);
    atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
        ATOMIC_RELAXED);
    sum->n_owner_switches += data->n_owner_switches;
    sum->n_lock_ops += data->n_lock_ops;
}

static inline void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
#ifdef _WIN32
        LeaveCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
        OSSpinLockUnlock(&mutex->lock);
#else
        pthread_mutex_unlock(&mutex->lock);
#endif
        if (malloc_mutex_trylock_final(mutex)) {
            malloc_mutex_lock_slow(mutex);
        }
        mutex_owner_stats_update(tsdn, mutex);
    }
    witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

static inline void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
        MALLOC_MUTEX_UNLOCK(mutex);
    }
}

static inline void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

static inline void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

/* Copy the prof data from mutex for processing. */
static inline void
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
    malloc_mutex_t *mutex) {
    mutex_prof_data_t *source = &mutex->prof_data;
    /* Can only read holding the mutex. */
    malloc_mutex_assert_owner(tsdn, mutex);

    /*
     * Not *really* allowed (we shouldn't be doing non-atomic loads of
     * atomic data), but the mutex protection makes this safe, and writing
     * a member-for-member copy is tedious for this situation.
     */
    *data = *source;
    /* n_wait_thds is not reported (modified w/o locking). */
    atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_MUTEX_H */
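The new API threads a tsdn_t * through every lock operation so that witness ordering and contention profiling can be tracked. A hedged usage sketch based on the prototypes above; the mutex variable, witness name, and helper are illustrative:

/* Hedged sketch; demo_mtx, "demo", and demo_critical_section are made up. */
static malloc_mutex_t demo_mtx;

/* One-time setup (returns true on failure, per jemalloc convention):
 *   malloc_mutex_init(&demo_mtx, "demo", WITNESS_RANK_OMIT,
 *       malloc_mutex_rank_exclusive);
 */
static void
demo_critical_section(tsdn_t *tsdn) {
    malloc_mutex_lock(tsdn, &demo_mtx);
    /* ... state protected by demo_mtx ... */
    malloc_mutex_unlock(tsdn, &demo_mtx);
}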
deps/jemalloc/include/jemalloc/internal/mutex_pool.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H
#define JEMALLOC_INTERNAL_MUTEX_POOL_H
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/witness.h"
/* We do mod reductions by this value, so it should be kept a power of 2. */
#define MUTEX_POOL_SIZE 256
typedef struct mutex_pool_s mutex_pool_t;

struct mutex_pool_s {
    malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
};

bool mutex_pool_init(mutex_pool_t *pool, const char *name,
    witness_rank_t rank);

/* Internal helper - not meant to be called outside this module. */
static inline malloc_mutex_t *
mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) {
    size_t hash_result[2];
    hash(&key, sizeof(key), 0xd50dcc1b, hash_result);
    return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE];
}

static inline void
mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) {
    for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
        malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]);
    }
}

/*
 * Note that a mutex pool doesn't work exactly the way an embedded mutex would.
 * You're not allowed to acquire mutexes in the pool one at a time.  You have to
 * acquire all the mutexes you'll need in a single function call, and then
 * release them all in a single function call.
 */
static inline void
mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    mutex_pool_assert_not_held(tsdn, pool);

    malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
    malloc_mutex_lock(tsdn, mutex);
}

static inline void
mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
    malloc_mutex_unlock(tsdn, mutex);

    mutex_pool_assert_not_held(tsdn, pool);
}

static inline void
mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
    mutex_pool_assert_not_held(tsdn, pool);

    malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
    malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
    if ((uintptr_t)mutex1 < (uintptr_t)mutex2) {
        malloc_mutex_lock(tsdn, mutex1);
        malloc_mutex_lock(tsdn, mutex2);
    } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) {
        malloc_mutex_lock(tsdn, mutex1);
    } else {
        malloc_mutex_lock(tsdn, mutex2);
        malloc_mutex_lock(tsdn, mutex1);
    }
}

static inline void
mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
    malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
    malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
    if (mutex1 == mutex2) {
        malloc_mutex_unlock(tsdn, mutex1);
    } else {
        malloc_mutex_unlock(tsdn, mutex1);
        malloc_mutex_unlock(tsdn, mutex2);
    }

    mutex_pool_assert_not_held(tsdn, pool);
}

static inline void
mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key));
}

#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */
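As the comment above notes, callers must take every pool mutex they need in a single call. A hedged sketch of guarding two objects keyed by their addresses (the pool, object pointers, and helper are illustrative):

/* Hedged sketch; demo_pool, obj_a, obj_b are illustrative. */
static mutex_pool_t demo_pool;   /* set up once via mutex_pool_init() */

static void
demo_update_pair(tsdn_t *tsdn, void *obj_a, void *obj_b) {
    /* Take both stripes up front, in deadlock-safe (address) order. */
    mutex_pool_lock2(tsdn, &demo_pool, (uintptr_t)obj_a, (uintptr_t)obj_b);
    /* ... mutate obj_a and obj_b together ... */
    mutex_pool_unlock2(tsdn, &demo_pool, (uintptr_t)obj_a, (uintptr_t)obj_b);
}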
deps/jemalloc/include/jemalloc/internal/mutex_prof.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
#define JEMALLOC_INTERNAL_MUTEX_PROF_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/tsd_types.h"
#define MUTEX_PROF_GLOBAL_MUTEXES \
OP(background_thread) \
OP(ctl) \
OP(prof)
typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
    MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
    mutex_prof_num_global_mutexes
} mutex_prof_global_ind_t;
#define MUTEX_PROF_ARENA_MUTEXES \
OP(large) \
OP(extent_avail) \
OP(extents_dirty) \
OP(extents_muzzy) \
OP(extents_retained) \
OP(decay_dirty) \
OP(decay_muzzy) \
OP(base) \
OP(tcache_list)
typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
    MUTEX_PROF_ARENA_MUTEXES
#undef OP
    mutex_prof_num_arena_mutexes
} mutex_prof_arena_ind_t;
#define MUTEX_PROF_UINT64_COUNTERS \
OP(num_ops, uint64_t, "n_lock_ops") \
OP(num_wait, uint64_t, "n_waiting") \
OP(num_spin_acq, uint64_t, "n_spin_acq") \
OP(num_owner_switch, uint64_t, "n_owner_switch") \
OP(total_wait_time, uint64_t, "total_wait_ns") \
OP(max_wait_time, uint64_t, "max_wait_ns")
#define MUTEX_PROF_UINT32_COUNTERS \
OP(max_num_thds, uint32_t, "max_n_thds")
#define MUTEX_PROF_COUNTERS \
MUTEX_PROF_UINT64_COUNTERS \
MUTEX_PROF_UINT32_COUNTERS
#define OP(counter, type, human) mutex_counter_##counter,
#define COUNTER_ENUM(counter_list, t) \
typedef enum { \
counter_list \
mutex_prof_num_##t##_counters \
} mutex_prof_##t##_counter_ind_t;
COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)
#undef COUNTER_ENUM
#undef OP

typedef struct {
    /*
     * Counters touched on the slow path, i.e. when there is lock
     * contention.  We update them once we have the lock.
     */
    /* Total time (in nano seconds) spent waiting on this mutex. */
    nstime_t tot_wait_time;
    /* Max time (in nano seconds) spent on a single lock operation. */
    nstime_t max_wait_time;
    /* # of times have to wait for this mutex (after spinning). */
    uint64_t n_wait_times;
    /* # of times acquired the mutex through local spinning. */
    uint64_t n_spin_acquired;
    /* Max # of threads waiting for the mutex at the same time. */
    uint32_t max_n_thds;
    /* Current # of threads waiting on the lock.  Atomic synced. */
    atomic_u32_t n_waiting_thds;

    /*
     * Data touched on the fast path.  These are modified right after we
     * grab the lock, so it's placed closest to the end (i.e. right before
     * the lock) so that we have a higher chance of them being on the same
     * cacheline.
     */
    /* # of times the mutex holder is different than the previous one. */
    uint64_t n_owner_switches;
    /* Previous mutex holder, to facilitate n_owner_switches. */
    tsdn_t *prev_owner;
    /* # of lock() operations in total. */
    uint64_t n_lock_ops;
} mutex_prof_data_t;

#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
deps/jemalloc/include/jemalloc/internal/nstime.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_NSTIME_H
#define JEMALLOC_INTERNAL_NSTIME_H
/* Maximum supported number of seconds (~584 years). */
#define NSTIME_SEC_MAX KQU(18446744072)
#define NSTIME_ZERO_INITIALIZER {0}
typedef struct {
    uint64_t ns;
} nstime_t;

void nstime_init(nstime_t *time, uint64_t ns);
void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
uint64_t nstime_ns(const nstime_t *time);
uint64_t nstime_sec(const nstime_t *time);
uint64_t nstime_msec(const nstime_t *time);
uint64_t nstime_nsec(const nstime_t *time);
void nstime_copy(nstime_t *time, const nstime_t *source);
int nstime_compare(const nstime_t *a, const nstime_t *b);
void nstime_add(nstime_t *time, const nstime_t *addend);
void nstime_iadd(nstime_t *time, uint64_t addend);
void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
void nstime_imultiply(nstime_t *time, uint64_t multiplier);
void nstime_idivide(nstime_t *time, uint64_t divisor);
uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);

typedef bool (nstime_monotonic_t)(void);
extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;

typedef bool (nstime_update_t)(nstime_t *);
extern nstime_update_t *JET_MUTABLE nstime_update;

#endif /* JEMALLOC_INTERNAL_NSTIME_H */
deps/jemalloc/include/jemalloc/internal/pages.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
/* Page size. LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the page base address for the page containing address a. */
#define PAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~PAGE_MASK))
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
/* Huge page size. LG_HUGEPAGE is determined by the configure script. */
#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
/* Return the huge page base address for the huge page containing address a. */
#define HUGEPAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
/* Return the smallest pagesize multiple that is >= s. */
#define HUGEPAGE_CEILING(s) \
(((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
# define PAGES_CAN_PURGE_LAZY
#endif
/*
* PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
*
* The only supported way to hard-purge on Windows is to decommit and then
* re-commit, but doing so is racy, and if re-commit fails it's a pain to
* propagate the "poisoned" memory state. Since we typically decommit as the
* next step after purging on Windows anyway, there's no point in adding such
* complexity.
*/
#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
defined(JEMALLOC_MAPS_COALESCE))
# define PAGES_CAN_PURGE_FORCED
#endif
static const bool pages_can_purge_lazy =
#ifdef PAGES_CAN_PURGE_LAZY
    true
#else
    false
#endif
    ;
static const bool pages_can_purge_forced =
#ifdef PAGES_CAN_PURGE_FORCED
    true
#else
    false
#endif
    ;

typedef enum {
    thp_mode_default       = 0, /* Do not change hugepage settings. */
    thp_mode_always        = 1, /* Always set MADV_HUGEPAGE. */
    thp_mode_never         = 2, /* Always set MADV_NOHUGEPAGE. */

    thp_mode_names_limit   = 3, /* Used for option processing. */
    thp_mode_not_supported = 3  /* No THP support detected. */
} thp_mode_t;

#define THP_MODE_DEFAULT thp_mode_default
extern thp_mode_t opt_thp;
extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
extern const char *thp_mode_names[];

void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
void pages_unmap(void *addr, size_t size);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge_lazy(void *addr, size_t size);
bool pages_purge_forced(void *addr, size_t size);
bool pages_huge(void *addr, size_t size);
bool pages_nohuge(void *addr, size_t size);
bool pages_dontdump(void *addr, size_t size);
bool pages_dodump(void *addr, size_t size);
bool pages_boot(void);
void pages_set_thp_state(void *ptr, size_t size);

#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
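For concreteness, a worked example of the PAGE_ADDR2BASE/PAGE_CEILING macros defined above, assuming LG_PAGE == 12 (4 KiB pages):

/* With LG_PAGE == 12: PAGE == 4096, PAGE_MASK == 0xfff.
 *
 *   PAGE_ADDR2BASE((void *)0x12345678)  -> (void *)0x12345000
 *   PAGE_CEILING(1)                     -> 4096
 *   PAGE_CEILING(4096)                  -> 4096
 *   PAGE_CEILING(4097)                  -> 8192
 *
 * HUGEPAGE_ADDR2BASE/HUGEPAGE_CEILING behave the same way with LG_HUGEPAGE. */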
deps/jemalloc/include/jemalloc/internal/ph.h (new file, mode 100644)
/*
* A Pairing Heap implementation.
*
* "The Pairing Heap: A New Form of Self-Adjusting Heap"
* https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
*
* With auxiliary twopass list, described in a follow on paper.
*
* "Pairing Heaps: Experiments and Analysis"
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
*
*******************************************************************************
*/
#ifndef PH_H_
#define PH_H_
/* Node structure. */
#define phn(a_type) \
struct { \
a_type *phn_prev; \
a_type *phn_next; \
a_type *phn_lchild; \
}
/* Root structure. */
#define ph(a_type) \
struct { \
a_type *ph_root; \
}
/* Internal utility macros. */
#define phn_lchild_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_lchild)
#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
a_phn->a_field.phn_lchild = a_lchild; \
} while (0)
#define phn_next_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_next)
#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
a_phn->a_field.phn_prev = a_prev; \
} while (0)
#define phn_prev_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_prev)
#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
a_phn->a_field.phn_next = a_next; \
} while (0)
#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
a_type *phn0child; \
\
assert(a_phn0 != NULL); \
assert(a_phn1 != NULL); \
assert(a_cmp(a_phn0, a_phn1) <= 0); \
\
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
phn_next_set(a_type, a_field, a_phn1, phn0child); \
if (phn0child != NULL) { \
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
} \
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)
#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
if (a_phn0 == NULL) { \
r_phn = a_phn1; \
} else if (a_phn1 == NULL) { \
r_phn = a_phn0; \
} else if (a_cmp(a_phn0, a_phn1) < 0) { \
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
a_cmp); \
r_phn = a_phn0; \
} else { \
phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
a_cmp); \
r_phn = a_phn1; \
} \
} while (0)
#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *head = NULL; \
a_type *tail = NULL; \
a_type *phn0 = a_phn; \
a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
\
/* \
* Multipass merge, wherein the first two elements of a FIFO \
* are repeatedly merged, and each result is appended to the \
* singly linked FIFO, until the FIFO contains only a single \
* element. We start with a sibling list but no reference to \
* its tail, so we do a single pass over the sibling list to \
* populate the FIFO. \
*/
\
if (phn1 != NULL) { \
a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, NULL); \
phn_next_set(a_type, a_field, phn0, NULL); \
phn_prev_set(a_type, a_field, phn1, NULL); \
phn_next_set(a_type, a_field, phn1, NULL); \
phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
head = tail = phn0; \
phn0 = phnrest; \
while (phn0 != NULL) { \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
phnrest = phn_next_get(a_type, a_field, \
phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, \
phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, \
NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
phn_prev_set(a_type, a_field, phn1, \
NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = phnrest; \
} else { \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = NULL; \
} \
} \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
while (true) { \
head = phn_next_get(a_type, a_field, \
phn1); \
assert(phn_prev_get(a_type, a_field, \
phn0) == NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
assert(phn_prev_get(a_type, a_field, \
phn1) == NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
if (head == NULL) { \
break; \
} \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, \
phn0); \
} \
} \
} \
r_phn = phn0; \
} while (0)
#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
if (phn != NULL) { \
phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_prev_set(a_type, a_field, phn, NULL); \
ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
assert(phn_next_get(a_type, a_field, phn) == NULL); \
phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
a_ph->ph_root); \
} \
} while (0)
#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
if (lchild == NULL) { \
r_phn = NULL; \
} else { \
ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
r_phn); \
} \
} while (0)
/*
* The ph_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to ph_gen().
*/
#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
a_attr void a_prefix##new(a_ph_type *ph); \
a_attr bool a_prefix##empty(a_ph_type *ph); \
a_attr a_type *a_prefix##first(a_ph_type *ph); \
a_attr a_type *a_prefix##any(a_ph_type *ph); \
a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \
a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
/*
* The ph_gen() macro generates a type-specific pairing heap implementation,
* based on the above cpp macros.
*/
#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_ph_type *ph) { \
memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
a_prefix##empty(a_ph_type *ph) { \
return (ph->ph_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_ph_type *ph) { \
if (ph->ph_root == NULL) { \
return NULL; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
return ph->ph_root; \
} \
a_attr a_type * \
a_prefix##any(a_ph_type *ph) { \
if (ph->ph_root == NULL) { \
return NULL; \
} \
a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \
if (aux != NULL) { \
return aux; \
} \
return ph->ph_root; \
} \
a_attr void \
a_prefix##insert(a_ph_type *ph, a_type *phn) { \
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
/* \
* Treat the root as an aux list during insertion, and lazily \
* merge during a_prefix##remove_first(). For elements that \
* are inserted, then removed via a_prefix##remove() before the \
* aux list is ever processed, this makes insert/remove \
* constant-time, whereas eager merging would make insert \
* O(log n). \
*/
\
if (ph->ph_root == NULL) { \
ph->ph_root = phn; \
} else { \
phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
a_field, ph->ph_root)); \
if (phn_next_get(a_type, a_field, ph->ph_root) != \
NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, ph->ph_root), \
phn); \
} \
phn_prev_set(a_type, a_field, phn, ph->ph_root); \
phn_next_set(a_type, a_field, ph->ph_root, phn); \
} \
} \
a_attr a_type * \
a_prefix##remove_first(a_ph_type *ph) { \
a_type *ret; \
\
if (ph->ph_root == NULL) { \
return NULL; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
\
ret = ph->ph_root; \
\
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
\
return ret; \
} \
a_attr a_type * \
a_prefix##remove_any(a_ph_type *ph) { \
/* \
* Remove the most recently inserted aux list element, or the \
* root if the aux list is empty. This has the effect of \
* behaving as a LIFO (and insertion/removal is therefore \
* constant-time) if a_prefix##[remove_]first() are never \
* called. \
*/
\
if (ph->ph_root == NULL) { \
return NULL; \
} \
a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \
if (ret != NULL) { \
a_type *aux = phn_next_get(a_type, a_field, ret); \
phn_next_set(a_type, a_field, ph->ph_root, aux); \
if (aux != NULL) { \
phn_prev_set(a_type, a_field, aux, \
ph->ph_root); \
} \
return ret; \
} \
ret = ph->ph_root; \
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
return ret; \
} \
a_attr void \
a_prefix##remove(a_ph_type *ph, a_type *phn) { \
a_type *replace, *parent; \
\
if (ph->ph_root == phn) { \
/* \
* We can delete from aux list without merging it, but \
* we need to merge if we are dealing with the root \
* node and it has children. \
*/
\
if (phn_lchild_get(a_type, a_field, phn) == NULL) { \
ph->ph_root = phn_next_get(a_type, a_field, \
phn); \
if (ph->ph_root != NULL) { \
phn_prev_set(a_type, a_field, \
ph->ph_root, NULL); \
} \
return; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
if (ph->ph_root == phn) { \
ph_merge_children(a_type, a_field, ph->ph_root, \
a_cmp, ph->ph_root); \
return; \
} \
} \
\
/* Get parent (if phn is leftmost child) before mutating. */
\
if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
if (phn_lchild_get(a_type, a_field, parent) != phn) { \
parent = NULL; \
} \
} \
/* Find a possible replacement node, and link to parent. */
\
ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
/* Set next/prev for sibling linked list. */
\
if (replace != NULL) { \
if (parent != NULL) { \
phn_prev_set(a_type, a_field, replace, parent); \
phn_lchild_set(a_type, a_field, parent, \
replace); \
} else { \
phn_prev_set(a_type, a_field, replace, \
phn_prev_get(a_type, a_field, phn)); \
if (phn_prev_get(a_type, a_field, phn) != \
NULL) { \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
replace); \
} \
} \
phn_next_set(a_type, a_field, replace, \
phn_next_get(a_type, a_field, phn)); \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
replace); \
} \
} else { \
if (parent != NULL) { \
a_type *next = phn_next_get(a_type, a_field, \
phn); \
phn_lchild_set(a_type, a_field, parent, next); \
if (next != NULL) { \
phn_prev_set(a_type, a_field, next, \
parent); \
} \
} else { \
assert(phn_prev_get(a_type, a_field, phn) != \
NULL); \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
phn_next_get(a_type, a_field, phn)); \
} \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
phn_prev_get(a_type, a_field, phn)); \
} \
} \
}
#endif /* PH_H_ */
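The ph_proto()/ph_gen() macros are meant to be stamped out once per node type. A hedged sketch of generating a heap of hypothetical node_t objects ordered by a key field (all names below are illustrative):

/* Hedged sketch; node_t, node_cmp, and node_heap_ are made up for
 * illustration of the macro interface above. */
typedef struct node_s node_t;
struct node_s {
    uint64_t    key;
    phn(node_t) link;   /* intrusive pairing-heap linkage */
};

static int
node_cmp(const node_t *a, const node_t *b) {
    return (a->key > b->key) - (a->key < b->key);
}

typedef ph(node_t) node_heap_t;
ph_proto(static, node_heap_, node_heap_t, node_t)
ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)

/* Usage: node_heap_new(&heap); node_heap_insert(&heap, n);
 * node_t *min = node_heap_remove_first(&heap); */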
deps/jemalloc/include/jemalloc/internal/private_namespace.sh (changed)
#!/bin/sh
for symbol in `cat $1` ; do
  echo "#define ${symbol} JEMALLOC_N(${symbol})"
for symbol in `cat "$@"` ; do
  echo "#define ${symbol} JEMALLOC_N(${symbol})"
done
deps/jemalloc/include/jemalloc/internal/private_symbols.sh (new file, mode 100755)
#!/bin/sh
#
# Generate private_symbols[_jet].awk.
#
# Usage: private_symbols.sh <sym_prefix> <sym>*
#
# <sym_prefix> is typically "" or "_".
sym_prefix=$1
shift

cat <<EOF
#!/usr/bin/env awk -f

BEGIN {
  sym_prefix = "${sym_prefix}"
  split("\\
EOF

for public_sym in "$@" ; do
  cat <<EOF
${sym_prefix}${public_sym} \\
EOF
done

cat <<"EOF"
", exported_symbol_names)
  # Store exported symbol names as keys in exported_symbols.
  for (i in exported_symbol_names) {
    exported_symbols[exported_symbol_names[i]] = 1
  }
}

# Process 'nm -a <c_source.o>' output.
#
# Handle lines like:
#   0000000000000008 D opt_junk
#   0000000000007574 T malloc_initialized
(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~
    /^[A-Za-z0-9_]+$/) {
  print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
}

# Process 'dumpbin /SYMBOLS <c_source.obj>' output.
#
# Handle lines like:
#   353 00008098 SECT4  notype       External     | opt_junk
#   3F1 00000000 SECT7  notype ()    External     | malloc_initialized
($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
  print $NF
}
EOF
deps/jemalloc/include/jemalloc/internal/private_symbols.txt (deleted, mode 100644 → 0)
a0calloc
a0free
a0malloc
arena_alloc_junk_small
arena_bin_index
arena_bin_info
arena_boot
arena_dalloc
arena_dalloc_bin
arena_dalloc_bin_locked
arena_dalloc_junk_large
arena_dalloc_junk_small
arena_dalloc_large
arena_dalloc_large_locked
arena_dalloc_small
arena_dss_prec_get
arena_dss_prec_set
arena_malloc
arena_malloc_large
arena_malloc_small
arena_mapbits_allocated_get
arena_mapbits_binind_get
arena_mapbits_dirty_get
arena_mapbits_get
arena_mapbits_large_binind_set
arena_mapbits_large_get
arena_mapbits_large_set
arena_mapbits_large_size_get
arena_mapbits_small_runind_get
arena_mapbits_small_set
arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
arena_mapbits_unzeroed_set
arena_mapbitsp_get
arena_mapbitsp_read
arena_mapbitsp_write
arena_mapp_get
arena_maxclass
arena_new
arena_palloc
arena_postfork_child
arena_postfork_parent
arena_prefork
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
arena_prof_ctx_get
arena_prof_ctx_set
arena_prof_promoted
arena_ptr_small_binind_get
arena_purge_all
arena_quarantine_junk_small
arena_ralloc
arena_ralloc_junk_large
arena_ralloc_no_move
arena_redzone_corruption
arena_run_regind
arena_salloc
arena_stats_merge
arena_tcache_fill_small
arenas
arenas_booted
arenas_cleanup
arenas_extend
arenas_initialized
arenas_lock
arenas_tls
arenas_tsd
arenas_tsd_boot
arenas_tsd_cleanup_wrapper
arenas_tsd_get
arenas_tsd_get_wrapper
arenas_tsd_init_head
arenas_tsd_set
atomic_add_u
atomic_add_uint32
atomic_add_uint64
atomic_add_z
atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
base_alloc
base_boot
base_calloc
base_node_alloc
base_node_dealloc
base_postfork_child
base_postfork_parent
base_prefork
bitmap_full
bitmap_get
bitmap_info_init
bitmap_info_ngroups
bitmap_init
bitmap_set
bitmap_sfu
bitmap_size
bitmap_unset
bt_init
buferror
choose_arena
choose_arena_hard
chunk_alloc
chunk_alloc_dss
chunk_alloc_mmap
chunk_boot
chunk_dealloc
chunk_dealloc_mmap
chunk_dss_boot
chunk_dss_postfork_child
chunk_dss_postfork_parent
chunk_dss_prec_get
chunk_dss_prec_set
chunk_dss_prefork
chunk_in_dss
chunk_npages
chunk_postfork_child
chunk_postfork_parent
chunk_prefork
chunk_unmap
chunks_mtx
chunks_rtree
chunksize
chunksize_mask
ckh_bucket_search
ckh_count
ckh_delete
ckh_evict_reloc_insert
ckh_insert
ckh_isearch
ckh_iter
ckh_new
ckh_pointer_hash
ckh_pointer_keycomp
ckh_rebuild
ckh_remove
ckh_search
ckh_string_hash
ckh_string_keycomp
ckh_try_bucket_insert
ckh_try_insert
ctl_boot
ctl_bymib
ctl_byname
ctl_nametomib
ctl_postfork_child
ctl_postfork_parent
ctl_prefork
dss_prec_names
extent_tree_ad_first
extent_tree_ad_insert
extent_tree_ad_iter
extent_tree_ad_iter_recurse
extent_tree_ad_iter_start
extent_tree_ad_last
extent_tree_ad_new
extent_tree_ad_next
extent_tree_ad_nsearch
extent_tree_ad_prev
extent_tree_ad_psearch
extent_tree_ad_remove
extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
extent_tree_szad_first
extent_tree_szad_insert
extent_tree_szad_iter
extent_tree_szad_iter_recurse
extent_tree_szad_iter_start
extent_tree_szad_last
extent_tree_szad_new
extent_tree_szad_next
extent_tree_szad_nsearch
extent_tree_szad_prev
extent_tree_szad_psearch
extent_tree_szad_remove
extent_tree_szad_reverse_iter
extent_tree_szad_reverse_iter_recurse
extent_tree_szad_reverse_iter_start
extent_tree_szad_search
get_errno
hash
hash_fmix_32
hash_fmix_64
hash_get_block_32
hash_get_block_64
hash_rotl_32
hash_rotl_64
hash_x64_128
hash_x86_128
hash_x86_32
huge_allocated
huge_boot
huge_dalloc
huge_dalloc_junk
huge_dss_prec_get
huge_malloc
huge_mtx
huge_ndalloc
huge_nmalloc
huge_palloc
huge_postfork_child
huge_postfork_parent
huge_prefork
huge_prof_ctx_get
huge_prof_ctx_set
huge_ralloc
huge_ralloc_no_move
huge_salloc
iallocm
icalloc
icalloct
idalloc
idalloct
imalloc
imalloct
ipalloc
ipalloct
iqalloc
iqalloct
iralloc
iralloct
iralloct_realign
isalloc
isthreaded
ivsalloc
ixalloc
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
malloc_cprintf
malloc_mutex_init
malloc_mutex_lock
malloc_mutex_postfork_child
malloc_mutex_postfork_parent
malloc_mutex_prefork
malloc_mutex_unlock
malloc_printf
malloc_snprintf
malloc_strtoumax
malloc_tsd_boot
malloc_tsd_cleanup_register
malloc_tsd_dalloc
malloc_tsd_malloc
malloc_tsd_no_cleanup
malloc_vcprintf
malloc_vsnprintf
malloc_write
map_bias
mb_write
mutex_boot
narenas_auto
narenas_total
narenas_total_get
ncpus
nhbins
opt_abort
opt_dss
opt_junk
opt_lg_chunk
opt_lg_dirty_mult
opt_lg_prof_interval
opt_lg_prof_sample
opt_lg_tcache_max
opt_narenas
opt_prof
opt_prof_accum
opt_prof_active
opt_prof_final
opt_prof_gdump
opt_prof_leak
opt_prof_prefix
opt_quarantine
opt_redzone
opt_stats_print
opt_tcache
opt_utrace
opt_valgrind
opt_xmalloc
opt_zero
p2rz
pages_purge
pow2_ceil
prof_backtrace
prof_boot0
prof_boot1
prof_boot2
prof_bt_count
prof_ctx_get
prof_ctx_set
prof_dump_open
prof_free
prof_gdump
prof_idump
prof_interval
prof_lookup
prof_malloc
prof_mdump
prof_postfork_child
prof_postfork_parent
prof_prefork
prof_promote
prof_realloc
prof_sample_accum_update
prof_sample_threshold_update
prof_tdata_booted
prof_tdata_cleanup
prof_tdata_get
prof_tdata_init
prof_tdata_initialized
prof_tdata_tls
prof_tdata_tsd
prof_tdata_tsd_boot
prof_tdata_tsd_cleanup_wrapper
prof_tdata_tsd_get
prof_tdata_tsd_get_wrapper
prof_tdata_tsd_init_head
prof_tdata_tsd_set
quarantine
quarantine_alloc_hook
quarantine_boot
quarantine_booted
quarantine_cleanup
quarantine_init
quarantine_tls
quarantine_tsd
quarantine_tsd_boot
quarantine_tsd_cleanup_wrapper
quarantine_tsd_get
quarantine_tsd_get_wrapper
quarantine_tsd_init_head
quarantine_tsd_set
register_zone
rtree_delete
rtree_get
rtree_get_locked
rtree_new
rtree_postfork_child
rtree_postfork_parent
rtree_prefork
rtree_set
s2u
sa2u
set_errno
small_size2bin
stats_cactive
stats_cactive_add
stats_cactive_get
stats_cactive_sub
stats_chunks
stats_print
tcache_alloc_easy
tcache_alloc_large
tcache_alloc_small
tcache_alloc_small_hard
tcache_arena_associate
tcache_arena_dissociate
tcache_bin_flush_large
tcache_bin_flush_small
tcache_bin_info
tcache_boot0
tcache_boot1
tcache_booted
tcache_create
tcache_dalloc_large
tcache_dalloc_small
tcache_destroy
tcache_enabled_booted
tcache_enabled_get
tcache_enabled_initialized
tcache_enabled_set
tcache_enabled_tls
tcache_enabled_tsd
tcache_enabled_tsd_boot
tcache_enabled_tsd_cleanup_wrapper
tcache_enabled_tsd_get
tcache_enabled_tsd_get_wrapper
tcache_enabled_tsd_init_head
tcache_enabled_tsd_set
tcache_event
tcache_event_hard
tcache_flush
tcache_get
tcache_initialized
tcache_maxclass
tcache_salloc
tcache_stats_merge
tcache_thread_cleanup
tcache_tls
tcache_tsd
tcache_tsd_boot
tcache_tsd_cleanup_wrapper
tcache_tsd_get
tcache_tsd_get_wrapper
tcache_tsd_init_head
tcache_tsd_set
thread_allocated_booted
thread_allocated_initialized
thread_allocated_tls
thread_allocated_tsd
thread_allocated_tsd_boot
thread_allocated_tsd_cleanup_wrapper
thread_allocated_tsd_get
thread_allocated_tsd_get_wrapper
thread_allocated_tsd_init_head
thread_allocated_tsd_set
tsd_init_check_recursion
tsd_init_finish
u2rz
deps/jemalloc/include/jemalloc/internal/private_unnamespace.sh (deleted, mode 100755 → 0)
#!/bin/sh
for symbol in `cat $1` ; do
  echo "#undef ${symbol}"
done
deps/jemalloc/include/jemalloc/internal/prng.h (changed)
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_PRNG_H
#define JEMALLOC_INTERNAL_PRNG_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
/*
* Simple linear congruential pseudo-random number generator:
...
...
@@ -15,46 +18,168 @@
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
 * proportional to bit position.  For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*
* Macro parameters:
* uint32_t r : Result.
* unsigned lg_range : (0..32], number of least significant bits to return.
* uint32_t state : Seed value.
* const uint32_t a, c : See above discussion.
*/
#define prng32(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 32); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (32 - lg_range); \
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 64); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (64 - lg_range); \
} while (false)
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)
#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)
JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state) {
    return (state * PRNG_A_32) + PRNG_C_32;
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state) {
    return (state * PRNG_A_64) + PRNG_C_64;
}
JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state) {
#if LG_SIZEOF_PTR == 2
    return (state * PRNG_A_32) + PRNG_C_32;
#elif LG_SIZEOF_PTR == 3
    return (state * PRNG_A_64) + PRNG_C_64;
#else
#error Unsupported pointer size
#endif
}
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
/*
* The prng_lg_range functions give a uniform int in the half-open range [0,
* 2**lg_range). If atomic is true, they do so safely from multiple threads.
* Multithreaded 64-bit prngs aren't supported.
*/
JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
    uint32_t ret, state0, state1;

    assert(lg_range > 0);
    assert(lg_range <= 32);

    state0 = atomic_load_u32(state, ATOMIC_RELAXED);

    if (atomic) {
        do {
            state1 = prng_state_next_u32(state0);
        } while (!atomic_compare_exchange_weak_u32(state, &state0, state1,
            ATOMIC_RELAXED, ATOMIC_RELAXED));
    } else {
        state1 = prng_state_next_u32(state0);
        atomic_store_u32(state, state1, ATOMIC_RELAXED);
    }
    ret = state1 >> (32 - lg_range);

    return ret;
}
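The compare-exchange retry loop above is the standard lock-free read-modify-write pattern. A standalone analogue using plain C11 atomics (jemalloc's atomic_u32_t wrappers behave like these, but the snippet below is illustrative and not jemalloc code) looks like this:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t g_state = 42;

/* Draw the upper lg_range bits, advancing the shared state via CAS retry. */
static uint32_t lg_range_draw(unsigned lg_range) {
    uint32_t s0 = atomic_load_explicit(&g_state, memory_order_relaxed);
    uint32_t s1;
    do {
        s1 = (s0 * 1103515241U) + 12347U;   /* same LCG step as above */
    } while (!atomic_compare_exchange_weak_explicit(&g_state, &s0, s1,
        memory_order_relaxed, memory_order_relaxed));
    return s1 >> (32 - lg_range);
}

int main(void) {
    for (int i = 0; i < 3; i++) {
        printf("%u\n", (unsigned)lg_range_draw(8));
    }
    return 0;
}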
JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
    uint64_t ret, state1;

    assert(lg_range > 0);
    assert(lg_range <= 64);

    state1 = prng_state_next_u64(*state);
    *state = state1;
    ret = state1 >> (64 - lg_range);

    return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
    size_t ret, state0, state1;

    assert(lg_range > 0);
    assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));

    state0 = atomic_load_zu(state, ATOMIC_RELAXED);

    if (atomic) {
        do {
            state1 = prng_state_next_zu(state0);
        } while (!atomic_compare_exchange_weak_zu(state, &state0, state1,
            ATOMIC_RELAXED, ATOMIC_RELAXED));
    } else {
        state1 = prng_state_next_zu(state0);
        atomic_store_zu(state, state1, ATOMIC_RELAXED);
    }
    ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);

    return ret;
}
/*
* The prng_range functions behave like the prng_lg_range, but return a result
* in [0, range) instead of [0, 2**lg_range).
*/
JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
    uint32_t ret;
    unsigned lg_range;

    assert(range > 1);

    /* Compute the ceiling of lg(range). */
    lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;

    /* Generate a result in [0..range) via repeated trial. */
    do {
        ret = prng_lg_range_u32(state, lg_range, atomic);
    } while (ret >= range);

    return ret;
}
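The "repeated trial" above is rejection sampling: draw ceil(lg(range)) bits, and redraw whenever the value lands at or above range, rather than taking a modulus (which would bias small values). A minimal standalone sketch of the same idea, using rand() only as a stand-in bit source:

#include <stdio.h>
#include <stdlib.h>

/* Uniform draw of k bits, backed by rand() purely for illustration. */
static unsigned draw_bits(unsigned k) {
    return (unsigned)rand() & ((1U << k) - 1U);
}

/* Reject-and-retry mapping of a power-of-two draw onto [0, range),
 * mirroring prng_range_*() and avoiding modulo bias. */
static unsigned range_draw(unsigned range) {
    unsigned k = 0;
    while ((1U << k) < range) {
        k++;                  /* ceil(lg(range)); range = 5 -> k = 3 */
    }
    unsigned r;
    do {
        r = draw_bits(k);     /* uniform in [0, 2^k) */
    } while (r >= range);     /* e.g. rejects 5, 6, 7 when range = 5 */
    return r;
}

int main(void) {
    for (int i = 0; i < 5; i++) {
        printf("%u ", range_draw(5));
    }
    printf("\n");
    return 0;
}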
JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range) {
    uint64_t ret;
    unsigned lg_range;

    assert(range > 1);

    /* Compute the ceiling of lg(range). */
    lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

    /* Generate a result in [0..range) via repeated trial. */
    do {
        ret = prng_lg_range_u64(state, lg_range);
    } while (ret >= range);

    return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
    size_t ret;
    unsigned lg_range;

    assert(range > 1);

    /* Compute the ceiling of lg(range). */
    lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

    /* Generate a result in [0..range) via repeated trial. */
    do {
        ret = prng_lg_range_zu(state, lg_range, atomic);
    } while (ret >= range);

    return ret;
}
#endif
/* JEMALLOC_INTERNAL_PRNG_H */
deps/jemalloc/include/jemalloc/internal/prof.h
deleted
100644 → 0
View file @
eaeba1b2
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_thr_cnt_s prof_thr_cnt_t;
typedef struct prof_ctx_s prof_ctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
/*
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
* is based on __builtin_return_address() necessarily has a hard-coded number
* of backtrace frame handlers, and should be kept in sync with this setting.
*/
#define PROF_BT_MAX 128
/* Maximum number of backtraces to store in each per thread LRU cache. */
#define PROF_TCMAX 1024
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all ctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
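Because the sentinel "pointers" above are just the small integers 1 and 2, callers distinguish a usable tdata from NULL/REINCARNATED/PURGATORY with a single unsigned comparison against PROF_TDATA_STATE_MAX. A standalone demonstration of that check (illustrative only; the names below are stand-ins, not jemalloc identifiers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the sentinels defined above (values 1 and 2). */
#define STATE_REINCARNATED ((void *)(uintptr_t)1)
#define STATE_PURGATORY    ((void *)(uintptr_t)2)
#define STATE_MAX          STATE_PURGATORY

/* A pointer is a usable tdata only if it is above all sentinel values. */
static bool is_real_tdata(const void *tdata) {
    return (uintptr_t)tdata > (uintptr_t)STATE_MAX;
}

int main(void) {
    int real_object;
    printf("%d %d %d %d\n",
        is_real_tdata(NULL),                /* 0 */
        is_real_tdata(STATE_REINCARNATED),  /* 0 */
        is_real_tdata(STATE_PURGATORY),     /* 0 */
        is_real_tdata(&real_object));       /* 1 */
    return 0;
}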
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct prof_bt_s {
    /* Backtrace, stored as len program counters. */
    void        **vec;
    unsigned    len;
};

#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
    prof_bt_t   *bt;
    unsigned    nignore;
    unsigned    max;
} prof_unwind_data_t;
#endif

struct prof_cnt_s {
    /*
     * Profiling counters. An allocation/deallocation pair can operate on
     * different prof_thr_cnt_t objects that are linked into the same
     * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
     * negative. In principle it is possible for the *bytes counters to
     * overflow/underflow, but a general solution would require something
     * like 128-bit counters; this implementation doesn't bother to solve
     * that problem.
     */
    int64_t     curobjs;
    int64_t     curbytes;
    uint64_t    accumobjs;
    uint64_t    accumbytes;
};

struct prof_thr_cnt_s {
    /* Linkage into prof_ctx_t's cnts_ql. */
    ql_elm(prof_thr_cnt_t)  cnts_link;

    /* Linkage into thread's LRU. */
    ql_elm(prof_thr_cnt_t)  lru_link;

    /*
     * Associated context. If a thread frees an object that it did not
     * allocate, it is possible that the context is not cached in the
     * thread's hash table, in which case it must be able to look up the
     * context, insert a new prof_thr_cnt_t into the thread's hash table,
     * and link it into the prof_ctx_t's cnts_ql.
     */
    prof_ctx_t  *ctx;

    /*
     * Threads use memory barriers to update the counters. Since there is
     * only ever one writer, the only challenge is for the reader to get a
     * consistent read of the counters.
     *
     * The writer uses this series of operations:
     *
     * 1) Increment epoch to an odd number.
     * 2) Update counters.
     * 3) Increment epoch to an even number.
     *
     * The reader must assure 1) that the epoch is even while it reads the
     * counters, and 2) that the epoch doesn't change between the time it
     * starts and finishes reading the counters.
     */
    unsigned    epoch;

    /* Profiling counters. */
    prof_cnt_t  cnts;
};

struct prof_ctx_s {
    /* Associated backtrace. */
    prof_bt_t   *bt;

    /* Protects nlimbo, cnt_merged, and cnts_ql. */
    malloc_mutex_t  *lock;

    /*
     * Number of threads that currently cause this ctx to be in a state of
     * limbo due to one of:
     *   - Initializing per thread counters associated with this ctx.
     *   - Preparing to destroy this ctx.
     *   - Dumping a heap profile that includes this ctx.
     * nlimbo must be 1 (single destroyer) in order to safely destroy the
     * ctx.
     */
    unsigned    nlimbo;

    /* Temporary storage for summation during dump. */
    prof_cnt_t  cnt_summed;

    /* When threads exit, they merge their stats into cnt_merged. */
    prof_cnt_t  cnt_merged;

    /*
     * List of profile counters, one for each thread that has allocated in
     * this context.
     */
    ql_head(prof_thr_cnt_t) cnts_ql;

    /* Linkage for list of contexts to be dumped. */
    ql_elm(prof_ctx_t)  dump_link;
};
typedef ql_head(prof_ctx_t) prof_ctx_list_t;

struct prof_tdata_s {
    /*
     * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a
     * cache of backtraces, with associated thread-specific prof_thr_cnt_t
     * objects. Other threads may read the prof_thr_cnt_t contents, but no
     * others will ever write them.
     *
     * Upon thread exit, the thread must merge all the prof_thr_cnt_t
     * counter data into the associated prof_ctx_t objects, and unlink/free
     * the prof_thr_cnt_t objects.
     */
    ckh_t       bt2cnt;

    /* LRU for contents of bt2cnt. */
    ql_head(prof_thr_cnt_t) lru_ql;

    /* Backtrace vector, used for calls to prof_backtrace(). */
    void        **vec;

    /* Sampling state. */
    uint64_t    prng_state;
    uint64_t    threshold;
    uint64_t    accum;

    /* State used to avoid dumping while operating on prof internals. */
    bool        enq;
    bool        enq_idump;
    bool        enq_gdump;
};
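The prof_thr_cnt_s comment above describes a seqlock-style protocol: the single writer makes epoch odd, updates cnts, then makes it even again. A reader that wants a consistent snapshot can be sketched as below. This is illustrative only (jemalloc's actual readers live in prof.c), and mb_read() is a hypothetical read-barrier stand-in; jemalloc's mb.h only provides mb_write().

/* Hypothetical reader sketch for the epoch protocol described above. */
static prof_cnt_t
prof_thr_cnt_read(const prof_thr_cnt_t *cnt) {
    prof_cnt_t snapshot;
    unsigned e0, e1;

    do {
        e0 = cnt->epoch;
        mb_read();               /* hypothetical barrier pairing with mb_write() */
        snapshot = cnt->cnts;
        mb_read();
        e1 = cnt->epoch;
    } while (e0 != e1 || (e0 & 1) != 0);   /* retry on odd or changed epoch */

    return snapshot;
}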
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_prof;
/*
 * Even if opt_prof is true, sampling can be temporarily disabled by setting
 * opt_prof_active to false. No locking is used when updating opt_prof_active,
 * so there are no guarantees regarding how long it will take for all threads
 * to notice state changes.
 */
extern bool opt_prof_active;
extern size_t opt_lg_prof_sample;     /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval;  /* lg(prof_interval). */
extern bool opt_prof_gdump;           /* High-water memory dumping. */
extern bool opt_prof_final;           /* Final profile dumping. */
extern bool opt_prof_leak;            /* Dump leak summary at exit. */
extern bool opt_prof_accum;           /* Report cumulative bytes. */
extern char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/*
 * Profile dump interval, measured in bytes allocated. Each arena triggers a
 * profile dump when it reaches this threshold. The effect is that the
 * interval between profile dumps averages prof_interval, though the actual
 * interval between dumps will tend to be sporadic, and the interval will be a
 * maximum of approximately (prof_interval * narenas).
 */
extern uint64_t prof_interval;

/*
 * If true, promote small sampled objects to large objects, since small run
 * headers do not have embedded profile context pointers.
 */
extern bool prof_promote;

void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt, unsigned nignore);
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_bt_count(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
#endif
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
prof_tdata_t *prof_tdata_init(void);
void prof_tdata_cleanup(void *arg);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);
void prof_prefork(void);
void prof_postfork_parent(void);
void prof_postfork_child(void);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#define PROF_ALLOC_PREP(nignore, size, ret) do { \
prof_tdata_t *prof_tdata; \
prof_bt_t bt; \
\
assert(size == s2u(size)); \
\
prof_tdata = prof_tdata_get(true); \
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
if (prof_tdata != NULL) \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
else \
ret = NULL; \
break; \
} \
\
if (opt_prof_active == false) { \
/* Sampling is currently inactive, so avoid sampling. */
\
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
} else if (opt_lg_prof_sample == 0) { \
/* Don't bother with sampling logic, since sampling */
\
/* interval is 1. */
\
bt_init(&bt, prof_tdata->vec); \
prof_backtrace(&bt, nignore); \
ret = prof_lookup(&bt); \
} else { \
if (prof_tdata->threshold == 0) { \
/* Initialize. Seed the prng differently for */
\
/* each thread. */
\
prof_tdata->prng_state = \
(uint64_t)(uintptr_t)&size; \
prof_sample_threshold_update(prof_tdata); \
} \
\
/* Determine whether to capture a backtrace based on */
\
/* whether size is enough for prof_accum to reach */
\
/* prof_tdata->threshold. However, delay updating */
\
/* these variables until prof_{m,re}alloc(), because */
\
/* we don't know for sure that the allocation will */
\
/* succeed. */
\
/* */
\
/* Use subtraction rather than addition to avoid */
\
/* potential integer overflow. */
\
if (size >= prof_tdata->threshold - \
prof_tdata->accum) { \
bt_init(&bt, prof_tdata->vec); \
prof_backtrace(&bt, nignore); \
ret = prof_lookup(&bt); \
} else \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
} \
} while (0)
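The macro's "use subtraction rather than addition" note is about unsigned wraparound: accum is always below threshold, so threshold - accum cannot underflow, whereas accum + size can wrap for a huge size. A tiny standalone illustration (values are hypothetical):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t threshold = 1ULL << 19;   /* 512 KiB, the default mean */
    uint64_t accum = 1000;
    uint64_t size = UINT64_MAX - 8;    /* pathological request size */

    /* Overflow-prone form: the sum wraps around to a small value. */
    printf("wrapped sum: %" PRIu64 "\n", accum + size);

    /* Overflow-safe form used above: accum < threshold, so the
     * subtraction cannot underflow, and the comparison is exact. */
    printf("sample? %d\n", size >= threshold - accum);
    return 0;
}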
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)

prof_tdata_t *prof_tdata_get(bool create);
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
prof_ctx_t *prof_ctx_get(const void *ptr);
void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
bool prof_sample_accum_update(size_t size);
void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt);
void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
    size_t old_usize, prof_ctx_t *old_ctx);
void prof_free(const void *ptr, size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
malloc_tsd_externs(prof_tdata, prof_tdata_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
    prof_tdata_cleanup)

JEMALLOC_INLINE prof_tdata_t *
prof_tdata_get(bool create)
{
    prof_tdata_t *prof_tdata;

    cassert(config_prof);

    prof_tdata = *prof_tdata_tsd_get();
    if (create && prof_tdata == NULL)
        prof_tdata = prof_tdata_init();

    return (prof_tdata);
}
JEMALLOC_INLINE void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
{
    /*
     * The body of this function is compiled out unless heap profiling is
     * enabled, so that it is possible to compile jemalloc with floating
     * point support completely disabled. Avoiding floating point code is
     * important on memory-constrained systems, but it also enables a
     * workaround for versions of glibc that don't properly save/restore
     * floating point registers during dynamic lazy symbol loading (which
     * internally calls into whatever malloc implementation happens to be
     * integrated into the application). Note that some compilers (e.g.
     * gcc 4.8) may use floating point registers for fast memory moves, so
     * jemalloc must be compiled with such optimizations disabled (e.g.
     * -mno-sse) in order for the workaround to be complete.
     */
#ifdef JEMALLOC_PROF
    uint64_t r;
    double u;

    cassert(config_prof);

    /*
     * Compute sample threshold as a geometrically distributed random
     * variable with mean (2^opt_lg_prof_sample).
     *
     *                         __        __
     *                         |  log(u)  |                     1
     * prof_tdata->threshold = | -------- |, where p = -------------------
     *                         | log(1-p) |             opt_lg_prof_sample
     *                                                 2
     *
     * For more information on the math, see:
     *
     *   Non-Uniform Random Variate Generation
     *   Luc Devroye
     *   Springer-Verlag, New York, 1986
     *   pp 500
     *   (http://luc.devroye.org/rnbookindex.html)
     */
    prng64(r, 53, prof_tdata->prng_state, UINT64_C(6364136223846793005),
        UINT64_C(1442695040888963407));
    u = (double)r * (1.0/9007199254740992.0L);
    prof_tdata->threshold = (uint64_t)(log(u) /
        log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
        + (uint64_t)1U;
#endif
}
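To get a feel for the formula above, the following standalone program (compile with -lm; not jemalloc code) evaluates it for the default LG_PROF_SAMPLE_DEFAULT of 19 and a uniform draw of u = 0.5: the median threshold comes out around 363,000 bytes, while the mean of the geometric distribution is 2^19 = 524,288 bytes.

#include <inttypes.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    unsigned lg_sample = 19;                 /* LG_PROF_SAMPLE_DEFAULT */
    double p = 1.0 / (double)((uint64_t)1U << lg_sample);
    double u = 0.5;                          /* one uniform draw in (0, 1) */

    uint64_t threshold = (uint64_t)(log(u) / log(1.0 - p)) + 1;
    printf("p = %.9f, threshold for u=0.5: %" PRIu64 " bytes\n", p, threshold);
    printf("mean threshold: %u bytes\n", 1U << lg_sample);
    return 0;
}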
JEMALLOC_INLINE prof_ctx_t *
prof_ctx_get(const void *ptr)
{
    prof_ctx_t *ret;
    arena_chunk_t *chunk;

    cassert(config_prof);
    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr) {
        /* Region. */
        ret = arena_prof_ctx_get(ptr);
    } else
        ret = huge_prof_ctx_get(ptr);

    return (ret);
}
JEMALLOC_INLINE void
prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
{
    arena_chunk_t *chunk;

    cassert(config_prof);
    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr) {
        /* Region. */
        arena_prof_ctx_set(ptr, usize, ctx);
    } else
        huge_prof_ctx_set(ptr, ctx);
}
JEMALLOC_INLINE bool
prof_sample_accum_update(size_t size)
{
    prof_tdata_t *prof_tdata;

    cassert(config_prof);
    /* Sampling logic is unnecessary if the interval is 1. */
    assert(opt_lg_prof_sample != 0);

    prof_tdata = prof_tdata_get(false);
    if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
        return (true);

    /* Take care to avoid integer overflow. */
    if (size >= prof_tdata->threshold - prof_tdata->accum) {
        prof_tdata->accum -= (prof_tdata->threshold - size);
        /* Compute new sample threshold. */
        prof_sample_threshold_update(prof_tdata);
        while (prof_tdata->accum >= prof_tdata->threshold) {
            prof_tdata->accum -= prof_tdata->threshold;
            prof_sample_threshold_update(prof_tdata);
        }
        return (false);
    } else {
        prof_tdata->accum += size;
        return (true);
    }
}
JEMALLOC_INLINE void
prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt)
{

    cassert(config_prof);
    assert(ptr != NULL);
    assert(usize == isalloc(ptr, true));

    if (opt_lg_prof_sample != 0) {
        if (prof_sample_accum_update(usize)) {
            /*
             * Don't sample. For malloc()-like allocation, it is
             * always possible to tell in advance how large an
             * object's usable size will be, so there should never
             * be a difference between the usize passed to
             * PROF_ALLOC_PREP() and prof_malloc().
             */
            assert((uintptr_t)cnt == (uintptr_t)1U);
        }
    }

    if ((uintptr_t)cnt > (uintptr_t)1U) {
        prof_ctx_set(ptr, usize, cnt->ctx);

        cnt->epoch++;
        /*********/
        mb_write();
        /*********/
        cnt->cnts.curobjs++;
        cnt->cnts.curbytes += usize;
        if (opt_prof_accum) {
            cnt->cnts.accumobjs++;
            cnt->cnts.accumbytes += usize;
        }
        /*********/
        mb_write();
        /*********/
        cnt->epoch++;
        /*********/
        mb_write();
        /*********/
    } else
        prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
}
JEMALLOC_INLINE void
prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
    size_t old_usize, prof_ctx_t *old_ctx)
{
    prof_thr_cnt_t *told_cnt;

    cassert(config_prof);
    assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);

    if (ptr != NULL) {
        assert(usize == isalloc(ptr, true));
        if (opt_lg_prof_sample != 0) {
            if (prof_sample_accum_update(usize)) {
                /*
                 * Don't sample. The usize passed to
                 * PROF_ALLOC_PREP() was larger than what
                 * actually got allocated, so a backtrace was
                 * captured for this allocation, even though
                 * its actual usize was insufficient to cross
                 * the sample threshold.
                 */
                cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
            }
        }
    }

    if ((uintptr_t)old_ctx > (uintptr_t)1U) {
        told_cnt = prof_lookup(old_ctx->bt);
        if (told_cnt == NULL) {
            /*
             * It's too late to propagate OOM for this realloc(),
             * so operate directly on old_cnt->ctx->cnt_merged.
             */
            malloc_mutex_lock(old_ctx->lock);
            old_ctx->cnt_merged.curobjs--;
            old_ctx->cnt_merged.curbytes -= old_usize;
            malloc_mutex_unlock(old_ctx->lock);
            told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
        }
    } else
        told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;

    if ((uintptr_t)told_cnt > (uintptr_t)1U)
        told_cnt->epoch++;
    if ((uintptr_t)cnt > (uintptr_t)1U) {
        prof_ctx_set(ptr, usize, cnt->ctx);
        cnt->epoch++;
    } else if (ptr != NULL)
        prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
    /*********/
    mb_write();
    /*********/
    if ((uintptr_t)told_cnt > (uintptr_t)1U) {
        told_cnt->cnts.curobjs--;
        told_cnt->cnts.curbytes -= old_usize;
    }
    if ((uintptr_t)cnt > (uintptr_t)1U) {
        cnt->cnts.curobjs++;
        cnt->cnts.curbytes += usize;
        if (opt_prof_accum) {
            cnt->cnts.accumobjs++;
            cnt->cnts.accumbytes += usize;
        }
    }
    /*********/
    mb_write();
    /*********/
    if ((uintptr_t)told_cnt > (uintptr_t)1U)
        told_cnt->epoch++;
    if ((uintptr_t)cnt > (uintptr_t)1U)
        cnt->epoch++;
    /*********/
    mb_write(); /* Not strictly necessary. */
}
JEMALLOC_INLINE void
prof_free(const void *ptr, size_t size)
{
    prof_ctx_t *ctx = prof_ctx_get(ptr);

    cassert(config_prof);

    if ((uintptr_t)ctx > (uintptr_t)1) {
        prof_thr_cnt_t *tcnt;
        assert(size == isalloc(ptr, true));
        tcnt = prof_lookup(ctx->bt);

        if (tcnt != NULL) {
            tcnt->epoch++;
            /*********/
            mb_write();
            /*********/
            tcnt->cnts.curobjs--;
            tcnt->cnts.curbytes -= size;
            /*********/
            mb_write();
            /*********/
            tcnt->epoch++;
            /*********/
            mb_write();
            /*********/
        } else {
            /*
             * OOM during free() cannot be propagated, so operate
             * directly on cnt->ctx->cnt_merged.
             */
            malloc_mutex_lock(ctx->lock);
            ctx->cnt_merged.curobjs--;
            ctx->cnt_merged.curbytes -= size;
            malloc_mutex_unlock(ctx->lock);
        }
    }
}
#endif
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/prof_externs.h
0 → 100644
View file @
f63e81c2
#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
#define JEMALLOC_INTERNAL_PROF_EXTERNS_H
#include "jemalloc/internal/mutex.h"
extern malloc_mutex_t bt2gctx_mtx;

extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample;     /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval;  /* lg(prof_interval). */
extern bool opt_prof_gdump;           /* High-water memory dumping. */
extern bool opt_prof_final;           /* Final profile dumping. */
extern bool opt_prof_leak;            /* Dump leak summary at exit. */
extern bool opt_prof_accum;           /* Report cumulative bytes. */
extern char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;

/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;

/*
 * Profile dump interval, measured in bytes allocated. Each arena triggers a
 * profile dump when it reaches this threshold. The effect is that the
 * interval between profile dumps averages prof_interval, though the actual
 * interval between dumps will tend to be sporadic, and the interval will be a
 * maximum of approximately (prof_interval * narenas).
 */
extern uint64_t prof_interval;

/*
 * Initialized as opt_lg_prof_sample, and potentially modified during profiling
 * resets.
 */
extern size_t lg_prof_sample;

void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
#endif
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *JET_MUTABLE prof_dump_open;

typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *JET_MUTABLE prof_dump_header;
#ifdef JEMALLOC_JET
void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
    uint64_t *accumbytes);
#endif
bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum);
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif
/* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
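To put the prof_interval comment above into numbers, a tiny standalone calculation (hypothetical configuration values; not jemalloc code): with lg_prof_interval set to 30 and 8 arenas, dumps occur on average about every 1 GiB allocated, with a worst case of roughly 8 GiB between dumps.

#include <stdio.h>

int main(void) {
    unsigned lg_prof_interval = 30;   /* e.g. MALLOC_CONF "lg_prof_interval:30" */
    unsigned narenas = 8;             /* hypothetical arena count */
    unsigned long long interval = 1ULL << lg_prof_interval;

    printf("average bytes between dumps: %llu\n", interval);
    printf("worst case (approx):         %llu\n", interval * narenas);
    return 0;
}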
deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h
0 → 100644
View file @
f63e81c2
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H
#include "jemalloc/internal/mutex.h"
static inline bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
    cassert(config_prof);

    bool overflow;
    uint64_t a0, a1;

    /*
     * If the application allocates fast enough (and/or if idump is slow
     * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
     * idump trigger coalescing. This is an intentional mechanism that
     * avoids rate-limiting allocation.
     */
#ifdef JEMALLOC_ATOMIC_U64
    a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
    do {
        a1 = a0 + accumbytes;
        assert(a1 >= a0);
        overflow = (a1 >= prof_interval);
        if (overflow) {
            a1 %= prof_interval;
        }
    } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
        a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
    malloc_mutex_lock(tsdn, &prof_accum->mtx);
    a0 = prof_accum->accumbytes;
    a1 = a0 + accumbytes;
    overflow = (a1 >= prof_interval);
    if (overflow) {
        a1 %= prof_interval;
    }
    prof_accum->accumbytes = a1;
    malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
    return overflow;
}
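A standalone C11 analogue of the accumulation pattern above (illustrative only; the names and the 1 MiB interval are stand-ins, not jemalloc identifiers): bytes are added modulo the interval under a CAS retry loop, and the boolean result tells the caller that the interval boundary was crossed and a dump is due. Multiple crossings in one large add coalesce into a single trigger, as the comment above describes.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t accum_bytes;
static const uint64_t interval = 1ULL << 20;   /* stand-in for prof_interval */

/* Add bytes modulo the interval; report whether the boundary was crossed. */
static bool accum_add(uint64_t bytes) {
    uint64_t a0 = atomic_load_explicit(&accum_bytes, memory_order_relaxed);
    uint64_t a1;
    bool overflow;
    do {
        a1 = a0 + bytes;
        overflow = (a1 >= interval);
        if (overflow) {
            a1 %= interval;   /* coalesce multiple crossings into one trigger */
        }
    } while (!atomic_compare_exchange_weak_explicit(&accum_bytes, &a0, a1,
        memory_order_relaxed, memory_order_relaxed));
    return overflow;
}

int main(void) {
    bool first = accum_add(700 << 10);    /* 0: still below the interval */
    bool second = accum_add(700 << 10);   /* 1: crossed 1 MiB, dump is due */
    printf("%d %d\n", first, second);
    return 0;
}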
static inline void
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
    cassert(config_prof);

    /*
     * Cancel out as much of the excessive prof_accumbytes increase as
     * possible without underflowing. Interval-triggered dumps occur
     * slightly more often than intended as a result of incomplete
     * canceling.
     */
    uint64_t a0, a1;
#ifdef JEMALLOC_ATOMIC_U64
    a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
    do {
        a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - usize) :
            0;
    } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
        a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
    malloc_mutex_lock(tsdn, &prof_accum->mtx);
    a0 = prof_accum->accumbytes;
    a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - usize) : 0;
    prof_accum->accumbytes = a1;
    malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
}
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
    /*
     * Even if opt_prof is true, sampling can be temporarily disabled by
     * setting prof_active to false. No locking is used when reading
     * prof_active in the fast path, so there are no guarantees regarding
     * how long it will take for all threads to notice state changes.
     */
    return prof_active;
}
#endif
/* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h
0 → 100644
View file @
f63e81c2
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
#define JEMALLOC_INTERNAL_PROF_INLINES_B_H
#include "jemalloc/internal/sz.h"
JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void) {
    /*
     * No locking is used when reading prof_gdump_val in the fast path, so
     * there are no guarantees regarding how long it will take for all
     * threads to notice state changes.
     */
    return prof_gdump_val;
}
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create) {
    prof_tdata_t *tdata;

    cassert(config_prof);

    tdata = tsd_prof_tdata_get(tsd);
    if (create) {
        if (unlikely(tdata == NULL)) {
            if (tsd_nominal(tsd)) {
                tdata = prof_tdata_init(tsd);
                tsd_prof_tdata_set(tsd, tdata);
            }
        } else if (unlikely(tdata->expired)) {
            tdata = prof_tdata_reinit(tsd, tdata);
            tsd_prof_tdata_set(tsd, tdata);
        }
        assert(tdata == NULL || tdata->attached);
    }

    return tdata;
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
    cassert(config_prof);
    assert(ptr != NULL);

    return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_reset(tsdn, ptr, tctx);
}
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    prof_tdata_t **tdata_out) {
    prof_tdata_t *tdata;

    cassert(config_prof);

    tdata = prof_tdata_get(tsd, true);
    if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
        tdata = NULL;
    }

    if (tdata_out != NULL) {
        *tdata_out = tdata;
    }

    if (unlikely(tdata == NULL)) {
        return true;
    }

    if (likely(tdata->bytes_until_sample >= usize)) {
        if (update) {
            tdata->bytes_until_sample -= usize;
        }
        return true;
    } else {
        if (tsd_reentrancy_level_get(tsd) > 0) {
            return true;
        }
        /* Compute new sample threshold. */
        if (update) {
            prof_sample_threshold_update(tdata);
        }
        return !tdata->active;
    }
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
    prof_tctx_t *ret;
    prof_tdata_t *tdata;
    prof_bt_t bt;

    assert(usize == sz_s2u(usize));

    if (!prof_active ||
        likely(prof_sample_accum_update(tsd, usize, update, &tdata))) {
        ret = (prof_tctx_t *)(uintptr_t)1U;
    } else {
        bt_init(&bt, tdata->vec);
        prof_backtrace(&bt);
        ret = prof_lookup(tsd, &bt);
    }

    return ret;
}
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
    prof_tctx_t *tctx) {
    cassert(config_prof);
    assert(ptr != NULL);
    assert(usize == isalloc(tsdn, ptr));

    if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
        prof_malloc_sample_object(tsdn, ptr, usize, tctx);
    } else {
        prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
            (prof_tctx_t *)(uintptr_t)1U);
    }
}
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
    prof_tctx_t *old_tctx) {
    bool sampled, old_sampled, moved;

    cassert(config_prof);
    assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

    if (prof_active && !updated && ptr != NULL) {
        assert(usize == isalloc(tsd_tsdn(tsd), ptr));
        if (prof_sample_accum_update(tsd, usize, true, NULL)) {
            /*
             * Don't sample. The usize passed to prof_alloc_prep()
             * was larger than what actually got allocated, so a
             * backtrace was captured for this allocation, even
             * though its actual usize was insufficient to cross the
             * sample threshold.
             */
            prof_alloc_rollback(tsd, tctx, true);
            tctx = (prof_tctx_t *)(uintptr_t)1U;
        }
    }

    sampled = ((uintptr_t)tctx > (uintptr_t)1U);
    old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
    moved = (ptr != old_ptr);

    if (unlikely(sampled)) {
        prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
    } else if (moved) {
        prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
            (prof_tctx_t *)(uintptr_t)1U);
    } else if (unlikely(old_sampled)) {
        /*
         * prof_tctx_set() would work for the !moved case as well, but
         * prof_tctx_reset() is slightly cheaper, and the proper thing
         * to do here in the presence of explicit knowledge re: moved
         * state.
         */
        prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
    } else {
        assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
            (uintptr_t)1U);
    }

    /*
     * The prof_free_sampled_object() call must come after the
     * prof_malloc_sample_object() call, because tctx and old_tctx may be
     * the same, in which case reversing the call order could cause the tctx
     * to be prematurely destroyed as a side effect of momentarily zeroed
     * counters.
     */
    if (unlikely(old_sampled)) {
        prof_free_sampled_object(tsd, old_usize, old_tctx);
    }
}
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
    prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);

    cassert(config_prof);
    assert(usize == isalloc(tsd_tsdn(tsd), ptr));

    if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
        prof_free_sampled_object(tsd, usize, tctx);
    }
}
#endif
/* JEMALLOC_INTERNAL_PROF_INLINES_B_H */
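For orientation, here is a simplified sketch of how the inlines above are meant to compose on an allocation path. This is not jemalloc's actual fast path (that lives in src/jemalloc.c); sketch_prof_alloc and alloc_impl are hypothetical names, and passing NULL for alloc_ctx follows the prof_realloc() usage above.

/* Hypothetical composition sketch, not the real jemalloc allocation path. */
static void *
sketch_prof_alloc(tsd_t *tsd, size_t size, void *(*alloc_impl)(size_t)) {
    size_t usize = sz_s2u(size);
    bool active = prof_active_get_unlocked();

    /* Decide up front whether this allocation will be sampled. */
    prof_tctx_t *tctx = prof_alloc_prep(tsd, usize, active, true);

    void *ptr = alloc_impl(usize);      /* hypothetical backing allocator */
    if (ptr == NULL) {
        prof_alloc_rollback(tsd, tctx, true);
        return NULL;
    }

    /* Record the allocation against tctx, or mark it as unsampled. */
    prof_malloc(tsd_tsdn(tsd), ptr, usize, NULL, tctx);
    return ptr;
}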