ruanhaishen/redis, commit fb1f4f4e (unverified)

Authored Oct 25, 2019 by Wander Hillen; committed by GitHub on Oct 25, 2019.

Merge branch 'unstable' into minor-typos

Parents: dda8cc18, 6e98214f
Changes: 205 (too many changes to show; to preserve performance only 205 of 205+ files are displayed)
deps/jemalloc/include/jemalloc/internal/log.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_LOG_H
#define JEMALLOC_INTERNAL_LOG_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#ifdef JEMALLOC_LOG
# define JEMALLOC_LOG_VAR_BUFSIZE 1000
#else
# define JEMALLOC_LOG_VAR_BUFSIZE 1
#endif
#define JEMALLOC_LOG_BUFSIZE 4096
/*
* The log malloc_conf option is a '|'-delimited list of log_var name segments
* which should be logged. The names are themselves hierarchical, with '.' as
* the delimiter (a "segment" is just a prefix in the log namespace). So, if
* you have:
*
* log("arena", "log msg for arena"); // 1
* log("arena.a", "log msg for arena.a"); // 2
* log("arena.b", "log msg for arena.b"); // 3
* log("arena.a.a", "log msg for arena.a.a"); // 4
* log("extent.a", "log msg for extent.a"); // 5
* log("extent.b", "log msg for extent.b"); // 6
*
* And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and
* 6 will print at runtime. You can enable logging from all log vars by
* writing "log=.".
*
* None of this should be regarded as a stable API for right now. It's intended
* as a debugging interface, to let us keep around some of our printf-debugging
* statements.
*/
extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
extern atomic_b_t log_init_done;

typedef struct log_var_s log_var_t;
struct log_var_s {
    /*
     * Lowest bit is "inited", second lowest is "enabled".  Putting them in
     * a single word lets us avoid any fences on weak architectures.
     */
    atomic_u_t state;
    const char *name;
};
#define LOG_NOT_INITIALIZED 0U
#define LOG_INITIALIZED_NOT_ENABLED 1U
#define LOG_ENABLED 2U
#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str}
/*
* Returns the value we should assume for state (which is not necessarily
* accurate; if logging is done before logging has finished initializing, then
* we default to doing the safe thing by logging everything).
*/
unsigned log_var_update_state(log_var_t *log_var);
/* We factor out the metadata management to allow us to test more easily. */
#define log_do_begin(log_var) \
if (config_log) { \
unsigned log_state = atomic_load_u(&(log_var).state, \
ATOMIC_RELAXED); \
if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \
log_state = log_var_update_state(&(log_var)); \
assert(log_state != LOG_NOT_INITIALIZED); \
} \
if (log_state == LOG_ENABLED) { \
{
/* User code executes here. */
#define log_do_end(log_var) \
} \
} \
}
/*
* MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during
* preprocessing. To work around this, we take all potential extra arguments in
* a var-args functions. Since a varargs macro needs at least one argument in
* the "...", we accept the format string there, and require that the first
* argument in this "..." is a const char *.
*/
static inline void
log_impl_varargs(const char *name, ...) {
    char buf[JEMALLOC_LOG_BUFSIZE];
    va_list ap;

    va_start(ap, name);
    const char *format = va_arg(ap, const char *);
    size_t dst_offset = 0;
    dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name);
    dst_offset += malloc_vsnprintf(buf + dst_offset,
        JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap);
    dst_offset += malloc_snprintf(buf + dst_offset,
        JEMALLOC_LOG_BUFSIZE - dst_offset, "\n");
    va_end(ap);

    malloc_write(buf);
}
/* Call as log("log.var.str", "format_string %d", arg_for_format_string); */
#define LOG(log_var_str, ...) \
do { \
static log_var_t log_var = LOG_VAR_INIT(log_var_str); \
log_do_begin(log_var) \
log_impl_varargs((log_var).name, __VA_ARGS__); \
log_do_end(log_var) \
} while (0)
#endif /* JEMALLOC_INTERNAL_LOG_H */
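To make the logging interface above concrete, here is a minimal, hypothetical call site; the log name "arena.grow", the function, and its arguments are invented for illustration and are not part of this commit.

/* Illustrative sketch: prints only when malloc_conf enables it, e.g. "log=arena.grow" or "log=.". */
static void
arena_grow_example(unsigned arena_ind, size_t new_size) {
    LOG("arena.grow", "arena %u grew to %zu bytes", arena_ind, new_size);
}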
deps/jemalloc/include/jemalloc/internal/malloc_io.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
#define JEMALLOC_INTERNAL_MALLOC_IO_H
#ifdef _WIN32
# ifdef _WIN64
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX "ll"
# else
# define FMT64_PREFIX "ll"
# define FMTPTR_PREFIX ""
# endif
# define FMTd32 "d"
# define FMTu32 "u"
# define FMTx32 "x"
# define FMTd64 FMT64_PREFIX "d"
# define FMTu64 FMT64_PREFIX "u"
# define FMTx64 FMT64_PREFIX "x"
# define FMTdPTR FMTPTR_PREFIX "d"
# define FMTuPTR FMTPTR_PREFIX "u"
# define FMTxPTR FMTPTR_PREFIX "x"
#else
# include <inttypes.h>
# define FMTd32 PRId32
# define FMTu32 PRIu32
# define FMTx32 PRIx32
# define FMTd64 PRId64
# define FMTu64 PRIu64
# define FMTx64 PRIx64
# define FMTdPTR PRIdPTR
# define FMTuPTR PRIuPTR
# define FMTxPTR PRIxPTR
#endif
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
/*
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
* large enough for all possible uses within jemalloc.
*/
#define MALLOC_PRINTF_BUFSIZE 4096
int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
    int base);
void malloc_write(const char *s);
/*
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
size_t malloc_vsnprintf(char *str, size_t size, const char *format,
    va_list ap);
size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
    JEMALLOC_FORMAT_PRINTF(3, 4);
/*
* The caller can set write_cb and cbopaque to null to choose to print with the
* je_malloc_message hook.
*/
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap);
void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
static inline ssize_t
malloc_write_fd(int fd, const void *buf, size_t count) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
    /*
     * Use syscall(2) rather than write(2) when possible in order to avoid
     * the possibility of memory allocation within libc.  This is necessary
     * on FreeBSD; most operating systems do not have this problem though.
     *
     * syscall() returns long or int, depending on platform, so capture the
     * result in the widest plausible type to avoid compiler warnings.
     */
    long result = syscall(SYS_write, fd, buf, count);
#else
    ssize_t result = (ssize_t)write(fd, buf,
#ifdef _WIN32
        (unsigned int)
#endif
        count);
#endif
    return (ssize_t)result;
}
static inline ssize_t
malloc_read_fd(int fd, void *buf, size_t count) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
    long result = syscall(SYS_read, fd, buf, count);
#else
    ssize_t result = read(fd, buf,
#ifdef _WIN32
        (unsigned int)
#endif
        count);
#endif
    return (ssize_t)result;
}
#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */
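As a brief orientation, the FMT macros above are meant to be concatenated into format strings for these printf-style helpers. A hedged sketch follows; the buffer size and counter value are invented for the example.

/* Illustrative only: format a 64-bit counter portably and emit it. */
char buf[128];
uint64_t nmalloc = 42;
malloc_snprintf(buf, sizeof(buf), "nmalloc: %"FMTu64"\n", nmalloc);
malloc_write(buf);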
deps/jemalloc/include/jemalloc/internal/mb.h (deleted, mode 100644 → 0)
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void	mb_write(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
#ifdef __i386__
/*
* According to the Intel Architecture Software Developer's Manual, current
* processors execute instructions in order from the perspective of other
* processors in a multiprocessor system, but 1) Intel reserves the right to
* change that, and 2) the compiler's optimizer could re-order instructions if
* there weren't some form of barrier. Therefore, even if running on an
* architecture that does not need memory barriers (everything through at least
* i686), an "optimizer barrier" is necessary.
*/
JEMALLOC_INLINE void
mb_write(void) {
# if 0
    /* This is a true memory barrier. */
    asm volatile (
        "pusha;"
        "xor %%eax,%%eax;"
        "cpuid;"
        "popa;"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
#else
    /*
     * This is hopefully enough to keep the compiler from reordering
     * instructions around this one.
     */
    asm volatile (
        "nop;"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
#endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
mb_write(void) {
    asm volatile (
        "sfence"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
#elif defined(__powerpc__)
JEMALLOC_INLINE void
mb_write(void) {
    asm volatile (
        "eieio"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
#elif defined(__sparc64__)
JEMALLOC_INLINE void
mb_write(void) {
    asm volatile (
        "membar #StoreStore"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
#elif defined(__tile__)
JEMALLOC_INLINE void
mb_write(void) {
    __sync_synchronize();
}
#else
/*
* This is much slower than a simple memory barrier, but the semantics of mutex
* unlock make this work.
*/
JEMALLOC_INLINE void
mb_write(void) {
    malloc_mutex_t mtx;

    malloc_mutex_init(&mtx);
    malloc_mutex_lock(&mtx);
    malloc_mutex_unlock(&mtx);
}
#endif
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/mutex.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_MUTEX_H
#define JEMALLOC_INTERNAL_MUTEX_H

typedef struct malloc_mutex_s malloc_mutex_t;
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER {0}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
#else
# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
# endif
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
typedef enum {
    /* Can only acquire one mutex of a given witness rank at a time. */
    malloc_mutex_rank_exclusive,
    /*
     * Can acquire multiple mutexes of the same witness rank, but in
     * address-ascending order only.
     */
    malloc_mutex_address_ordered
} malloc_mutex_lock_order_t;

typedef struct malloc_mutex_s malloc_mutex_t;
struct malloc_mutex_s {
    union {
        struct {
            /*
             * prof_data is defined first to reduce cacheline
             * bouncing: the data is not touched by the mutex holder
             * during unlocking, while it might be modified by
             * contenders.  Having it before the mutex itself could
             * avoid prefetching a modified cacheline (for the
             * unlocking thread).
             */
            mutex_prof_data_t	prof_data;
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
            SRWLOCK			lock;
# else
            CRITICAL_SECTION	lock;
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
            os_unfair_lock		lock;
#elif (defined(JEMALLOC_OSSPIN))
            OSSpinLock		lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
...
...
@@ -38,12 +44,80 @@ struct malloc_mutex_s {
            malloc_mutex_t		*postponed_next;
#else
            pthread_mutex_t		lock;
#endif
        };
        /*
         * We only touch witness when configured w/ debug.  However we
         * keep the field in a union when !debug so that we don't have
         * to pollute the code base with #ifdefs, while avoiding the
         * memory cost.
         */
#if !defined(JEMALLOC_DEBUG)
        witness_t			witness;
        malloc_mutex_lock_order_t	lock_order;
#endif
    };

#if defined(JEMALLOC_DEBUG)
    witness_t			witness;
    malloc_mutex_lock_order_t	lock_order;
#endif
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/*
* Based on benchmark results, a fixed spin with this amount of retries works
* well for our critical sections.
*/
#define MALLOC_MUTEX_MAX_SPIN 250
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
# else
# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_LOCK(m) OSSpinLockLock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) OSSpinLockUnlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock))
#else
# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
#endif
#define LOCK_PROF_DATA_INITIALIZER \
{NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
ATOMIC_INIT(0), 0, NULL, 0}
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, 0}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#endif
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
...
...
@@ -52,60 +126,123 @@ extern bool isthreaded;
# define isthreaded true
#endif
bool	malloc_mutex_init(malloc_mutex_t *mutex);
void	malloc_mutex_prefork(malloc_mutex_t *mutex);
void	malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
void	malloc_mutex_postfork_child(malloc_mutex_t *mutex);
bool	mutex_boot(void);
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_boot(void);
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
void malloc_mutex_lock_slow(malloc_mutex_t *mutex);

#ifndef JEMALLOC_ENABLE_INLINE
void	malloc_mutex_lock(malloc_mutex_t *mutex);
void	malloc_mutex_unlock(malloc_mutex_t *mutex);
#endif
static inline void
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
    MALLOC_MUTEX_LOCK(mutex);
}
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex) {
static inline bool
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
    return MALLOC_MUTEX_TRYLOCK(mutex);
}
static inline void
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    if (config_stats) {
        mutex_prof_data_t *data = &mutex->prof_data;
        data->n_lock_ops++;
        if (data->prev_owner != tsdn) {
            data->prev_owner = tsdn;
            data->n_owner_switches++;
        }
    }
}
/* Trylock: return false if the lock is successfully acquired. */
static inline bool
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
        AcquireSRWLockExclusive(&mutex->lock);
# else
        EnterCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OSSPIN))
        OSSpinLockLock(&mutex->lock);
#else
        pthread_mutex_lock(&mutex->lock);
#endif
        if (malloc_mutex_trylock_final(mutex)) {
            return true;
        }
        mutex_owner_stats_update(tsdn, mutex);
    }
    witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);

    return false;
}
JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex) {
/* Aggregate lock prof data. */
static inline void
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
    nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
    if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
        nstime_copy(&sum->max_wait_time, &data->max_wait_time);
    }

    sum->n_wait_times += data->n_wait_times;
    sum->n_spin_acquired += data->n_spin_acquired;

    if (sum->max_n_thds < data->max_n_thds) {
        sum->max_n_thds = data->max_n_thds;
    }
    uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
        ATOMIC_RELAXED);
    uint32_t new_n_waiting_thds = cur_n_waiting_thds +
        atomic_load_u32(&data->n_waiting_thds, ATOMIC_RELAXED);
    atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
        ATOMIC_RELAXED);
    sum->n_owner_switches += data->n_owner_switches;
    sum->n_lock_ops += data->n_lock_ops;
}
static inline void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
        ReleaseSRWLockExclusive(&mutex->lock);
# else
        LeaveCriticalSection(&mutex->lock);
# endif
#elif (defined(JEMALLOC_OSSPIN))
        OSSpinLockUnlock(&mutex->lock);
#else
        pthread_mutex_unlock(&mutex->lock);
#endif
        if (malloc_mutex_trylock_final(mutex)) {
            malloc_mutex_lock_slow(mutex);
        }
        mutex_owner_stats_update(tsdn, mutex);
    }
    witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
static inline void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
        MALLOC_MUTEX_UNLOCK(mutex);
    }
}

static inline void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

static inline void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
/* Copy the prof data from mutex for processing. */
static inline void
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
    malloc_mutex_t *mutex) {
    mutex_prof_data_t *source = &mutex->prof_data;
    /* Can only read holding the mutex. */
    malloc_mutex_assert_owner(tsdn, mutex);

    /*
     * Not *really* allowed (we shouldn't be doing non-atomic loads of
     * atomic data), but the mutex protection makes this safe, and writing
     * a member-for-member copy is tedious for this situation.
     */
    *data = *source;
    /* n_wait_thds is not reported (modified w/o locking). */
    atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_MUTEX_H */
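For orientation, a sketch of how the new tsdn-aware locking API above is typically used; the mutex name, witness rank, and function are illustrative assumptions, not taken from this diff.

/* Illustrative sketch: assumes the mutex was set up elsewhere with
 * malloc_mutex_init(&example_mtx, "example", WITNESS_RANK_OMIT,
 * malloc_mutex_rank_exclusive). */
static malloc_mutex_t example_mtx;

static void
example_critical_section(tsdn_t *tsdn) {
    malloc_mutex_lock(tsdn, &example_mtx);
    /* ... touch state protected by example_mtx ... */
    malloc_mutex_unlock(tsdn, &example_mtx);
}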
deps/jemalloc/include/jemalloc/internal/mutex_pool.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H
#define JEMALLOC_INTERNAL_MUTEX_POOL_H
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/witness.h"
/* We do mod reductions by this value, so it should be kept a power of 2. */
#define MUTEX_POOL_SIZE 256
typedef struct mutex_pool_s mutex_pool_t;

struct mutex_pool_s {
    malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
};

bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank);
/* Internal helper - not meant to be called outside this module. */
static inline malloc_mutex_t *
mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) {
    size_t hash_result[2];
    hash(&key, sizeof(key), 0xd50dcc1b, hash_result);
    return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE];
}
static inline void
mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) {
    for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
        malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]);
    }
}
/*
* Note that a mutex pool doesn't work exactly the way an embedded mutex would.
* You're not allowed to acquire mutexes in the pool one at a time. You have to
* acquire all the mutexes you'll need in a single function call, and then
* release them all in a single function call.
*/
static inline void
mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    mutex_pool_assert_not_held(tsdn, pool);

    malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
    malloc_mutex_lock(tsdn, mutex);
}
static inline void
mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
    malloc_mutex_unlock(tsdn, mutex);

    mutex_pool_assert_not_held(tsdn, pool);
}
static inline void
mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
    mutex_pool_assert_not_held(tsdn, pool);

    malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
    malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
    if ((uintptr_t)mutex1 < (uintptr_t)mutex2) {
        malloc_mutex_lock(tsdn, mutex1);
        malloc_mutex_lock(tsdn, mutex2);
    } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) {
        malloc_mutex_lock(tsdn, mutex1);
    } else {
        malloc_mutex_lock(tsdn, mutex2);
        malloc_mutex_lock(tsdn, mutex1);
    }
}
static inline void
mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
    malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
    malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
    if (mutex1 == mutex2) {
        malloc_mutex_unlock(tsdn, mutex1);
    } else {
        malloc_mutex_unlock(tsdn, mutex1);
        malloc_mutex_unlock(tsdn, mutex2);
    }

    mutex_pool_assert_not_held(tsdn, pool);
}
static inline void
mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key));
}

#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */
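To make the usage rule in the comment above concrete (acquire everything you need in one call, release in one call), a hypothetical caller guarding two objects keyed by their addresses might look like this; the pool, its initialization, and the function are invented for the example.

/* Illustrative sketch: obj_mtx_pool is assumed to have been set up with mutex_pool_init(). */
static mutex_pool_t obj_mtx_pool;

static void
update_pair(tsdn_t *tsdn, void *a, void *b) {
    /* Both pool mutexes are taken in a single call (ordered internally). */
    mutex_pool_lock2(tsdn, &obj_mtx_pool, (uintptr_t)a, (uintptr_t)b);
    /* ... operate on a and b ... */
    mutex_pool_unlock2(tsdn, &obj_mtx_pool, (uintptr_t)a, (uintptr_t)b);
}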
deps/jemalloc/include/jemalloc/internal/mutex_prof.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
#define JEMALLOC_INTERNAL_MUTEX_PROF_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/tsd_types.h"
#define MUTEX_PROF_GLOBAL_MUTEXES \
OP(background_thread) \
OP(ctl) \
OP(prof)
typedef
enum
{
#define OP(mtx) global_prof_mutex_##mtx,
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
mutex_prof_num_global_mutexes
}
mutex_prof_global_ind_t
;
#define MUTEX_PROF_ARENA_MUTEXES \
OP(large) \
OP(extent_avail) \
OP(extents_dirty) \
OP(extents_muzzy) \
OP(extents_retained) \
OP(decay_dirty) \
OP(decay_muzzy) \
OP(base) \
OP(tcache_list)
typedef
enum
{
#define OP(mtx) arena_prof_mutex_##mtx,
MUTEX_PROF_ARENA_MUTEXES
#undef OP
mutex_prof_num_arena_mutexes
}
mutex_prof_arena_ind_t
;
#define MUTEX_PROF_UINT64_COUNTERS \
OP(num_ops, uint64_t, "n_lock_ops") \
OP(num_wait, uint64_t, "n_waiting") \
OP(num_spin_acq, uint64_t, "n_spin_acq") \
OP(num_owner_switch, uint64_t, "n_owner_switch") \
OP(total_wait_time, uint64_t, "total_wait_ns") \
OP(max_wait_time, uint64_t, "max_wait_ns")
#define MUTEX_PROF_UINT32_COUNTERS \
OP(max_num_thds, uint32_t, "max_n_thds")
#define MUTEX_PROF_COUNTERS \
MUTEX_PROF_UINT64_COUNTERS \
MUTEX_PROF_UINT32_COUNTERS
#define OP(counter, type, human) mutex_counter_##counter,
#define COUNTER_ENUM(counter_list, t) \
typedef enum { \
counter_list \
mutex_prof_num_##t##_counters \
} mutex_prof_##t##_counter_ind_t;
COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)
#undef COUNTER_ENUM
#undef OP
typedef struct {
    /*
     * Counters touched on the slow path, i.e. when there is lock
     * contention.  We update them once we have the lock.
     */
    /* Total time (in nanoseconds) spent waiting on this mutex. */
    nstime_t		tot_wait_time;
    /* Max time (in nanoseconds) spent on a single lock operation. */
    nstime_t		max_wait_time;
    /* # of times have to wait for this mutex (after spinning). */
    uint64_t		n_wait_times;
    /* # of times acquired the mutex through local spinning. */
    uint64_t		n_spin_acquired;
    /* Max # of threads waiting for the mutex at the same time. */
    uint32_t		max_n_thds;
    /* Current # of threads waiting on the lock.  Atomic synced. */
    atomic_u32_t	n_waiting_thds;

    /*
     * Data touched on the fast path.  These are modified right after we
     * grab the lock, so it's placed closest to the end (i.e. right before
     * the lock) so that we have a higher chance of them being on the same
     * cacheline.
     */
    /* # of times the mutex holder is different than the previous one. */
    uint64_t		n_owner_switches;
    /* Previous mutex holder, to facilitate n_owner_switches. */
    tsdn_t		*prev_owner;
    /* # of lock() operations in total. */
    uint64_t		n_lock_ops;
} mutex_prof_data_t;
#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
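As an aside on the X-macro pattern above: expanding COUNTER_ENUM over MUTEX_PROF_UINT32_COUNTERS yields an enum equivalent to the following, shown only as an illustration of the preprocessor output.

/* Expansion of COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t). */
typedef enum {
    mutex_counter_max_num_thds,
    mutex_prof_num_uint32_t_counters
} mutex_prof_uint32_t_counter_ind_t;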
deps/jemalloc/include/jemalloc/internal/nstime.h (new file, mode 100644)
#ifndef JEMALLOC_INTERNAL_NSTIME_H
#define JEMALLOC_INTERNAL_NSTIME_H
/* Maximum supported number of seconds (~584 years). */
#define NSTIME_SEC_MAX KQU(18446744072)
#define NSTIME_ZERO_INITIALIZER {0}
typedef struct {
    uint64_t ns;
} nstime_t;

void nstime_init(nstime_t *time, uint64_t ns);
void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
uint64_t nstime_ns(const nstime_t *time);
uint64_t nstime_sec(const nstime_t *time);
uint64_t nstime_msec(const nstime_t *time);
uint64_t nstime_nsec(const nstime_t *time);
void nstime_copy(nstime_t *time, const nstime_t *source);
int nstime_compare(const nstime_t *a, const nstime_t *b);
void nstime_add(nstime_t *time, const nstime_t *addend);
void nstime_iadd(nstime_t *time, uint64_t addend);
void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
void nstime_imultiply(nstime_t *time, uint64_t multiplier);
void nstime_idivide(nstime_t *time, uint64_t divisor);
uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);

typedef bool (nstime_monotonic_t)(void);
extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;

typedef bool (nstime_update_t)(nstime_t *);
extern nstime_update_t *JET_MUTABLE nstime_update;

#endif /* JEMALLOC_INTERNAL_NSTIME_H */
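A brief, hypothetical sketch of how these prototypes compose, for example to measure an elapsed interval; nothing beyond the declarations above is implied by the diff.

/* Illustrative only: measure elapsed time between two nstime_update() calls. */
nstime_t before, after;
nstime_init(&before, 0);
nstime_update(&before);
/* ... do some work ... */
nstime_copy(&after, &before);
nstime_update(&after);
nstime_subtract(&after, &before);        /* after now holds the elapsed time. */
uint64_t elapsed_ns = nstime_ns(&after);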
deps/jemalloc/include/jemalloc/internal/pages.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Page size. LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the page base address for the page containing address a. */
#define PAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~PAGE_MASK))
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/* Huge page size. LG_HUGEPAGE is determined by the configure script. */
#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
/* Return the huge page base address for the huge page containing address a. */
#define HUGEPAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
/* Return the smallest pagesize multiple that is >= s. */
#define HUGEPAGE_CEILING(s) \
(((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
void	*pages_map(void *addr, size_t size);
/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
# define PAGES_CAN_PURGE_LAZY
#endif
/*
* PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
*
* The only supported way to hard-purge on Windows is to decommit and then
* re-commit, but doing so is racy, and if re-commit fails it's a pain to
* propagate the "poisoned" memory state. Since we typically decommit as the
* next step after purging on Windows anyway, there's no point in adding such
* complexity.
*/
#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
defined(JEMALLOC_MAPS_COALESCE))
# define PAGES_CAN_PURGE_FORCED
#endif
static const bool pages_can_purge_lazy =
#ifdef PAGES_CAN_PURGE_LAZY
    true
#else
    false
#endif
    ;
static const bool pages_can_purge_forced =
#ifdef PAGES_CAN_PURGE_FORCED
    true
#else
    false
#endif
    ;
typedef enum {
    thp_mode_default       = 0, /* Do not change hugepage settings. */
    thp_mode_always        = 1, /* Always set MADV_HUGEPAGE. */
    thp_mode_never         = 2, /* Always set MADV_NOHUGEPAGE. */

    thp_mode_names_limit   = 3, /* Used for option processing. */
    thp_mode_not_supported = 3  /* No THP support detected. */
} thp_mode_t;

#define THP_MODE_DEFAULT thp_mode_default
extern thp_mode_t opt_thp;
extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
extern const char *thp_mode_names[];
void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
void pages_unmap(void *addr, size_t size);
void	*pages_trim(void *addr, size_t alloc_size, size_t leadsize,
    size_t size);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool	pages_purge(void *addr, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
bool pages_purge_lazy(void *addr, size_t size);
bool pages_purge_forced(void *addr, size_t size);
bool pages_huge(void *addr, size_t size);
bool pages_nohuge(void *addr, size_t size);
bool pages_dontdump(void *addr, size_t size);
bool pages_dodump(void *addr, size_t size);
bool pages_boot(void);
void pages_set_thp_state(void *ptr, size_t size);
#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
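For concreteness, with a hypothetical LG_PAGE of 12 (4 KiB pages) the PAGE macros in this header behave as follows; the values are an illustration, not part of the diff.

/* Illustration only, assuming LG_PAGE == 12, so PAGE == 4096 and PAGE_MASK == 0xfff. */
#include <assert.h>
static void
page_macro_examples(void) {
    assert(PAGE_CEILING(5000) == 8192);                         /* rounds up to a page multiple */
    assert(PAGE_ADDR2BASE((void *)0x12345) == (void *)0x12000); /* base of the containing page */
}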
deps/jemalloc/include/jemalloc/internal/ph.h (new file, mode 100644)
/*
* A Pairing Heap implementation.
*
* "The Pairing Heap: A New Form of Self-Adjusting Heap"
* https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
*
* With an auxiliary twopass list, described in a follow-on paper.
*
* "Pairing Heaps: Experiments and Analysis"
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
*
*******************************************************************************
*/
#ifndef PH_H_
#define PH_H_
/* Node structure. */
#define phn(a_type) \
struct { \
a_type *phn_prev; \
a_type *phn_next; \
a_type *phn_lchild; \
}
/* Root structure. */
#define ph(a_type) \
struct { \
a_type *ph_root; \
}
/* Internal utility macros. */
#define phn_lchild_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_lchild)
#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
a_phn->a_field.phn_lchild = a_lchild; \
} while (0)
#define phn_next_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_next)
#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
a_phn->a_field.phn_prev = a_prev; \
} while (0)
#define phn_prev_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_prev)
#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
a_phn->a_field.phn_next = a_next; \
} while (0)
#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
a_type *phn0child; \
\
assert(a_phn0 != NULL); \
assert(a_phn1 != NULL); \
assert(a_cmp(a_phn0, a_phn1) <= 0); \
\
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
phn_next_set(a_type, a_field, a_phn1, phn0child); \
if (phn0child != NULL) { \
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
} \
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)
#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
if (a_phn0 == NULL) { \
r_phn = a_phn1; \
} else if (a_phn1 == NULL) { \
r_phn = a_phn0; \
} else if (a_cmp(a_phn0, a_phn1) < 0) { \
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
a_cmp); \
r_phn = a_phn0; \
} else { \
phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
a_cmp); \
r_phn = a_phn1; \
} \
} while (0)
#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *head = NULL; \
a_type *tail = NULL; \
a_type *phn0 = a_phn; \
a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
\
/* \
* Multipass merge, wherein the first two elements of a FIFO \
* are repeatedly merged, and each result is appended to the \
* singly linked FIFO, until the FIFO contains only a single \
* element. We start with a sibling list but no reference to \
* its tail, so we do a single pass over the sibling list to \
* populate the FIFO. \
*/
\
if (phn1 != NULL) { \
a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, NULL); \
phn_next_set(a_type, a_field, phn0, NULL); \
phn_prev_set(a_type, a_field, phn1, NULL); \
phn_next_set(a_type, a_field, phn1, NULL); \
phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
head = tail = phn0; \
phn0 = phnrest; \
while (phn0 != NULL) { \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
phnrest = phn_next_get(a_type, a_field, \
phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, \
phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, \
NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
phn_prev_set(a_type, a_field, phn1, \
NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = phnrest; \
} else { \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = NULL; \
} \
} \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
while (true) { \
head = phn_next_get(a_type, a_field, \
phn1); \
assert(phn_prev_get(a_type, a_field, \
phn0) == NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
assert(phn_prev_get(a_type, a_field, \
phn1) == NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
if (head == NULL) { \
break; \
} \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, \
phn0); \
} \
} \
} \
r_phn = phn0; \
} while (0)
#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
if (phn != NULL) { \
phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_prev_set(a_type, a_field, phn, NULL); \
ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
assert(phn_next_get(a_type, a_field, phn) == NULL); \
phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
a_ph->ph_root); \
} \
} while (0)
#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
if (lchild == NULL) { \
r_phn = NULL; \
} else { \
ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
r_phn); \
} \
} while (0)
/*
* The ph_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to ph_gen().
*/
#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
a_attr void a_prefix##new(a_ph_type *ph); \
a_attr bool a_prefix##empty(a_ph_type *ph); \
a_attr a_type *a_prefix##first(a_ph_type *ph); \
a_attr a_type *a_prefix##any(a_ph_type *ph); \
a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \
a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
/*
* The ph_gen() macro generates a type-specific pairing heap implementation,
* based on the above cpp macros.
*/
#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_ph_type *ph) { \
memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
a_prefix##empty(a_ph_type *ph) { \
return (ph->ph_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_ph_type *ph) { \
if (ph->ph_root == NULL) { \
return NULL; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
return ph->ph_root; \
} \
a_attr a_type * \
a_prefix##any(a_ph_type *ph) { \
if (ph->ph_root == NULL) { \
return NULL; \
} \
a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \
if (aux != NULL) { \
return aux; \
} \
return ph->ph_root; \
} \
a_attr void \
a_prefix##insert(a_ph_type *ph, a_type *phn) { \
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
/* \
* Treat the root as an aux list during insertion, and lazily \
* merge during a_prefix##remove_first(). For elements that \
* are inserted, then removed via a_prefix##remove() before the \
* aux list is ever processed, this makes insert/remove \
* constant-time, whereas eager merging would make insert \
* O(log n). \
*/
\
if (ph->ph_root == NULL) { \
ph->ph_root = phn; \
} else { \
phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
a_field, ph->ph_root)); \
if (phn_next_get(a_type, a_field, ph->ph_root) != \
NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, ph->ph_root), \
phn); \
} \
phn_prev_set(a_type, a_field, phn, ph->ph_root); \
phn_next_set(a_type, a_field, ph->ph_root, phn); \
} \
} \
a_attr a_type * \
a_prefix##remove_first(a_ph_type *ph) { \
a_type *ret; \
\
if (ph->ph_root == NULL) { \
return NULL; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
\
ret = ph->ph_root; \
\
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
\
return ret; \
} \
a_attr a_type * \
a_prefix##remove_any(a_ph_type *ph) { \
/* \
* Remove the most recently inserted aux list element, or the \
* root if the aux list is empty. This has the effect of \
* behaving as a LIFO (and insertion/removal is therefore \
* constant-time) if a_prefix##[remove_]first() are never \
* called. \
*/
\
if (ph->ph_root == NULL) { \
return NULL; \
} \
a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \
if (ret != NULL) { \
a_type *aux = phn_next_get(a_type, a_field, ret); \
phn_next_set(a_type, a_field, ph->ph_root, aux); \
if (aux != NULL) { \
phn_prev_set(a_type, a_field, aux, \
ph->ph_root); \
} \
return ret; \
} \
ret = ph->ph_root; \
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
return ret; \
} \
a_attr void \
a_prefix##remove(a_ph_type *ph, a_type *phn) { \
a_type *replace, *parent; \
\
if (ph->ph_root == phn) { \
/* \
* We can delete from aux list without merging it, but \
* we need to merge if we are dealing with the root \
* node and it has children. \
*/
\
if (phn_lchild_get(a_type, a_field, phn) == NULL) { \
ph->ph_root = phn_next_get(a_type, a_field, \
phn); \
if (ph->ph_root != NULL) { \
phn_prev_set(a_type, a_field, \
ph->ph_root, NULL); \
} \
return; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
if (ph->ph_root == phn) { \
ph_merge_children(a_type, a_field, ph->ph_root, \
a_cmp, ph->ph_root); \
return; \
} \
} \
\
/* Get parent (if phn is leftmost child) before mutating. */
\
if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
if (phn_lchild_get(a_type, a_field, parent) != phn) { \
parent = NULL; \
} \
} \
/* Find a possible replacement node, and link to parent. */
\
ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
/* Set next/prev for sibling linked list. */
\
if (replace != NULL) { \
if (parent != NULL) { \
phn_prev_set(a_type, a_field, replace, parent); \
phn_lchild_set(a_type, a_field, parent, \
replace); \
} else { \
phn_prev_set(a_type, a_field, replace, \
phn_prev_get(a_type, a_field, phn)); \
if (phn_prev_get(a_type, a_field, phn) != \
NULL) { \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
replace); \
} \
} \
phn_next_set(a_type, a_field, replace, \
phn_next_get(a_type, a_field, phn)); \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
replace); \
} \
} else { \
if (parent != NULL) { \
a_type *next = phn_next_get(a_type, a_field, \
phn); \
phn_lchild_set(a_type, a_field, parent, next); \
if (next != NULL) { \
phn_prev_set(a_type, a_field, next, \
parent); \
} \
} else { \
assert(phn_prev_get(a_type, a_field, phn) != \
NULL); \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
phn_next_get(a_type, a_field, phn)); \
} \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
phn_prev_get(a_type, a_field, phn)); \
} \
} \
}
#endif /* PH_H_ */
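To illustrate how ph_proto()/ph_gen() are meant to be instantiated, a hypothetical node type keyed by an integer priority is shown below; the type, field, and comparison function are invented for the example.

/* Illustrative sketch of instantiating the pairing heap for a made-up type. */
typedef struct job_s job_t;
struct job_s {
    int prio;
    phn(job_t) ph_link;           /* intrusive pairing-heap linkage */
};
typedef ph(job_t) job_heap_t;     /* heap root type */

static int
job_prio_cmp(const job_t *a, const job_t *b) {
    return (a->prio > b->prio) - (a->prio < b->prio);
}

/* Generates job_heap_new(), job_heap_insert(), job_heap_remove_first(), ... */
ph_gen(static, job_heap_, job_heap_t, job_t, ph_link, job_prio_cmp)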
deps/jemalloc/include/jemalloc/internal/private_namespace.sh
#!/bin/sh

for symbol in `cat "$@"` ; do      # (previously: `cat $1`)
  echo "#define ${symbol} JEMALLOC_N(${symbol})"
done
deps/jemalloc/include/jemalloc/internal/private_symbols.sh (new file, mode 100755)
#!/bin/sh
#
# Generate private_symbols[_jet].awk.
#
# Usage: private_symbols.sh <sym_prefix> <sym>*
#
# <sym_prefix> is typically "" or "_".

sym_prefix=$1
shift

cat <<EOF
#!/usr/bin/env awk -f

BEGIN {
  sym_prefix = "${sym_prefix}"
  split("\\
EOF

for public_sym in "$@" ; do
  cat <<EOF
${sym_prefix}${public_sym} \\
EOF
done

cat <<"EOF"
", exported_symbol_names)
  # Store exported symbol names as keys in exported_symbols.
  for (i in exported_symbol_names) {
    exported_symbols[exported_symbol_names[i]] = 1
  }
}

# Process 'nm -a <c_source.o>' output.
#
# Handle lines like:
#   0000000000000008 D opt_junk
#   0000000000007574 T malloc_initialized
(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) &&
    $3 ~ /^[A-Za-z0-9_]+$/) {
  print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
}

# Process 'dumpbin /SYMBOLS <c_source.obj>' output.
#
# Handle lines like:
#   353 00008098 SECT4  notype       External     | opt_junk
#   3F1 00000000 SECT7  notype ()    External     | malloc_initialized
($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
  print $NF
}
EOF
deps/jemalloc/include/jemalloc/internal/private_symbols.txt (deleted, mode 100644 → 0)
a0dalloc
a0get
a0malloc
arena_aalloc
arena_alloc_junk_small
arena_bin_index
arena_bin_info
arena_bitselm_get
arena_boot
arena_choose
arena_choose_hard
arena_chunk_alloc_huge
arena_chunk_cache_maybe_insert
arena_chunk_cache_maybe_remove
arena_chunk_dalloc_huge
arena_chunk_ralloc_huge_expand
arena_chunk_ralloc_huge_shrink
arena_chunk_ralloc_huge_similar
arena_cleanup
arena_dalloc
arena_dalloc_bin
arena_dalloc_bin_junked_locked
arena_dalloc_junk_large
arena_dalloc_junk_small
arena_dalloc_large
arena_dalloc_large_junked_locked
arena_dalloc_small
arena_dss_prec_get
arena_dss_prec_set
arena_get
arena_get_hard
arena_init
arena_lg_dirty_mult_default_get
arena_lg_dirty_mult_default_set
arena_lg_dirty_mult_get
arena_lg_dirty_mult_set
arena_malloc
arena_malloc_large
arena_malloc_small
arena_mapbits_allocated_get
arena_mapbits_binind_get
arena_mapbits_decommitted_get
arena_mapbits_dirty_get
arena_mapbits_get
arena_mapbits_internal_set
arena_mapbits_large_binind_set
arena_mapbits_large_get
arena_mapbits_large_set
arena_mapbits_large_size_get
arena_mapbitsp_get
arena_mapbitsp_read
arena_mapbitsp_write
arena_mapbits_size_decode
arena_mapbits_size_encode
arena_mapbits_small_runind_get
arena_mapbits_small_set
arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
arena_maxrun
arena_maybe_purge
arena_metadata_allocated_add
arena_metadata_allocated_get
arena_metadata_allocated_sub
arena_migrate
arena_miscelm_get
arena_miscelm_to_pageind
arena_miscelm_to_rpages
arena_nbound
arena_new
arena_node_alloc
arena_node_dalloc
arena_palloc
arena_postfork_child
arena_postfork_parent
arena_prefork
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
arena_prof_promoted
arena_prof_tctx_get
arena_prof_tctx_reset
arena_prof_tctx_set
arena_ptr_small_binind_get
arena_purge_all
arena_quarantine_junk_small
arena_ralloc
arena_ralloc_junk_large
arena_ralloc_no_move
arena_rd_to_miscelm
arena_redzone_corruption
arena_run_regind
arena_run_to_miscelm
arena_salloc
arenas_cache_bypass_cleanup
arenas_cache_cleanup
arena_sdalloc
arena_stats_merge
arena_tcache_fill_small
atomic_add_p
atomic_add_u
atomic_add_uint32
atomic_add_uint64
atomic_add_z
atomic_cas_p
atomic_cas_u
atomic_cas_uint32
atomic_cas_uint64
atomic_cas_z
atomic_sub_p
atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
base_alloc
base_boot
base_postfork_child
base_postfork_parent
base_prefork
base_stats_get
bitmap_full
bitmap_get
bitmap_info_init
bitmap_info_ngroups
bitmap_init
bitmap_set
bitmap_sfu
bitmap_size
bitmap_unset
bootstrap_calloc
bootstrap_free
bootstrap_malloc
bt_init
buferror
chunk_alloc_base
chunk_alloc_cache
chunk_alloc_dss
chunk_alloc_mmap
chunk_alloc_wrapper
chunk_boot
chunk_dalloc_arena
chunk_dalloc_cache
chunk_dalloc_mmap
chunk_dalloc_wrapper
chunk_deregister
chunk_dss_boot
chunk_dss_postfork_child
chunk_dss_postfork_parent
chunk_dss_prec_get
chunk_dss_prec_set
chunk_dss_prefork
chunk_hooks_default
chunk_hooks_get
chunk_hooks_set
chunk_in_dss
chunk_lookup
chunk_npages
chunk_postfork_child
chunk_postfork_parent
chunk_prefork
chunk_purge_arena
chunk_purge_wrapper
chunk_register
chunksize
chunksize_mask
chunks_rtree
ckh_count
ckh_delete
ckh_insert
ckh_iter
ckh_new
ckh_pointer_hash
ckh_pointer_keycomp
ckh_remove
ckh_search
ckh_string_hash
ckh_string_keycomp
ctl_boot
ctl_bymib
ctl_byname
ctl_nametomib
ctl_postfork_child
ctl_postfork_parent
ctl_prefork
dss_prec_names
extent_node_achunk_get
extent_node_achunk_set
extent_node_addr_get
extent_node_addr_set
extent_node_arena_get
extent_node_arena_set
extent_node_dirty_insert
extent_node_dirty_linkage_init
extent_node_dirty_remove
extent_node_init
extent_node_prof_tctx_get
extent_node_prof_tctx_set
extent_node_size_get
extent_node_size_set
extent_node_zeroed_get
extent_node_zeroed_set
extent_tree_ad_empty
extent_tree_ad_first
extent_tree_ad_insert
extent_tree_ad_iter
extent_tree_ad_iter_recurse
extent_tree_ad_iter_start
extent_tree_ad_last
extent_tree_ad_new
extent_tree_ad_next
extent_tree_ad_nsearch
extent_tree_ad_prev
extent_tree_ad_psearch
extent_tree_ad_remove
extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
extent_tree_szad_empty
extent_tree_szad_first
extent_tree_szad_insert
extent_tree_szad_iter
extent_tree_szad_iter_recurse
extent_tree_szad_iter_start
extent_tree_szad_last
extent_tree_szad_new
extent_tree_szad_next
extent_tree_szad_nsearch
extent_tree_szad_prev
extent_tree_szad_psearch
extent_tree_szad_remove
extent_tree_szad_reverse_iter
extent_tree_szad_reverse_iter_recurse
extent_tree_szad_reverse_iter_start
extent_tree_szad_search
get_errno
hash
hash_fmix_32
hash_fmix_64
hash_get_block_32
hash_get_block_64
hash_rotl_32
hash_rotl_64
hash_x64_128
hash_x86_128
hash_x86_32
huge_aalloc
huge_dalloc
huge_dalloc_junk
huge_malloc
huge_palloc
huge_prof_tctx_get
huge_prof_tctx_reset
huge_prof_tctx_set
huge_ralloc
huge_ralloc_no_move
huge_salloc
iaalloc
iallocztm
icalloc
icalloct
idalloc
idalloct
idalloctm
imalloc
imalloct
index2size
index2size_compute
index2size_lookup
index2size_tab
in_valgrind
ipalloc
ipalloct
ipallocztm
iqalloc
iralloc
iralloct
iralloct_realign
isalloc
isdalloct
isqalloc
isthreaded
ivsalloc
ixalloc
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
large_maxclass
lg_floor
malloc_cprintf
malloc_mutex_init
malloc_mutex_lock
malloc_mutex_postfork_child
malloc_mutex_postfork_parent
malloc_mutex_prefork
malloc_mutex_unlock
malloc_printf
malloc_snprintf
malloc_strtoumax
malloc_tsd_boot0
malloc_tsd_boot1
malloc_tsd_cleanup_register
malloc_tsd_dalloc
malloc_tsd_malloc
malloc_tsd_no_cleanup
malloc_vcprintf
malloc_vsnprintf
malloc_write
map_bias
map_misc_offset
mb_write
mutex_boot
narenas_cache_cleanup
narenas_total_get
ncpus
nhbins
opt_abort
opt_dss
opt_junk
opt_junk_alloc
opt_junk_free
opt_lg_chunk
opt_lg_dirty_mult
opt_lg_prof_interval
opt_lg_prof_sample
opt_lg_tcache_max
opt_narenas
opt_prof
opt_prof_accum
opt_prof_active
opt_prof_final
opt_prof_gdump
opt_prof_leak
opt_prof_prefix
opt_prof_thread_active_init
opt_quarantine
opt_redzone
opt_stats_print
opt_tcache
opt_utrace
opt_xmalloc
opt_zero
p2rz
pages_commit
pages_decommit
pages_map
pages_purge
pages_trim
pages_unmap
pow2_ceil
prof_active_get
prof_active_get_unlocked
prof_active_set
prof_alloc_prep
prof_alloc_rollback
prof_backtrace
prof_boot0
prof_boot1
prof_boot2
prof_dump_header
prof_dump_open
prof_free
prof_free_sampled_object
prof_gdump
prof_gdump_get
prof_gdump_get_unlocked
prof_gdump_set
prof_gdump_val
prof_idump
prof_interval
prof_lookup
prof_malloc
prof_malloc_sample_object
prof_mdump
prof_postfork_child
prof_postfork_parent
prof_prefork
prof_realloc
prof_reset
prof_sample_accum_update
prof_sample_threshold_update
prof_tctx_get
prof_tctx_reset
prof_tctx_set
prof_tdata_cleanup
prof_tdata_get
prof_tdata_init
prof_tdata_reinit
prof_thread_active_get
prof_thread_active_init_get
prof_thread_active_init_set
prof_thread_active_set
prof_thread_name_get
prof_thread_name_set
quarantine
quarantine_alloc_hook
quarantine_alloc_hook_work
quarantine_cleanup
register_zone
rtree_child_read
rtree_child_read_hard
rtree_child_tryread
rtree_delete
rtree_get
rtree_new
rtree_node_valid
rtree_set
rtree_start_level
rtree_subkey
rtree_subtree_read
rtree_subtree_read_hard
rtree_subtree_tryread
rtree_val_read
rtree_val_write
s2u
s2u_compute
s2u_lookup
sa2u
set_errno
size2index
size2index_compute
size2index_lookup
size2index_tab
stats_cactive
stats_cactive_add
stats_cactive_get
stats_cactive_sub
stats_print
tcache_alloc_easy
tcache_alloc_large
tcache_alloc_small
tcache_alloc_small_hard
tcache_arena_associate
tcache_arena_dissociate
tcache_arena_reassociate
tcache_bin_flush_large
tcache_bin_flush_small
tcache_bin_info
tcache_boot
tcache_cleanup
tcache_create
tcache_dalloc_large
tcache_dalloc_small
tcache_enabled_cleanup
tcache_enabled_get
tcache_enabled_set
tcache_event
tcache_event_hard
tcache_flush
tcache_get
tcache_get_hard
tcache_maxclass
tcaches
tcache_salloc
tcaches_create
tcaches_destroy
tcaches_flush
tcaches_get
tcache_stats_merge
thread_allocated_cleanup
thread_deallocated_cleanup
tsd_arena_get
tsd_arena_set
tsd_boot
tsd_boot0
tsd_boot1
tsd_booted
tsd_cleanup
tsd_cleanup_wrapper
tsd_fetch
tsd_get
tsd_wrapper_get
tsd_wrapper_set
tsd_initialized
tsd_init_check_recursion
tsd_init_finish
tsd_init_head
tsd_nominal
tsd_quarantine_get
tsd_quarantine_set
tsd_set
tsd_tcache_enabled_get
tsd_tcache_enabled_set
tsd_tcache_get
tsd_tcache_set
tsd_tls
tsd_tsd
tsd_prof_tdata_get
tsd_prof_tdata_set
tsd_thread_allocated_get
tsd_thread_allocated_set
tsd_thread_deallocated_get
tsd_thread_deallocated_set
u2rz
valgrind_freelike_block
valgrind_make_mem_defined
valgrind_make_mem_noaccess
valgrind_make_mem_undefined
deps/jemalloc/include/jemalloc/internal/private_unnamespace.sh (deleted, mode 100755 → 0)
#!/bin/sh
for symbol in `cat $1` ; do
  echo "#undef ${symbol}"
done
deps/jemalloc/include/jemalloc/internal/prng.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifndef JEMALLOC_INTERNAL_PRNG_H
#define JEMALLOC_INTERNAL_PRNG_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
/*
* Simple linear congruential pseudo-random number generator:
...
...
@@ -18,43 +21,165 @@
* proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*
* Macro parameters:
* uint32_t r : Result.
* unsigned lg_range : (0..32], number of least significant bits to return.
* uint32_t state : Seed value.
* const uint32_t a, c : See above discussion.
*/
#define prng32(r, lg_range, state, a, c) do { \
assert((lg_range) > 0); \
assert((lg_range) <= 32); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (32 - (lg_range)); \
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
assert((lg_range) > 0); \
assert((lg_range) <= 64); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (64 - (lg_range)); \
} while (false)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/* INTERNAL DEFINITIONS -- IGNORE */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#define PRNG_A_32 UINT32_C(1103515241)
#define PRNG_C_32 UINT32_C(12347)
#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)
JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state) {
    return (state * PRNG_A_32) + PRNG_C_32;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state) {
    return (state * PRNG_A_64) + PRNG_C_64;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state) {
#if LG_SIZEOF_PTR == 2
    return (state * PRNG_A_32) + PRNG_C_32;
#elif LG_SIZEOF_PTR == 3
    return (state * PRNG_A_64) + PRNG_C_64;
#else
#error Unsupported pointer size
#endif
}
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
/* BEGIN PUBLIC API */
/******************************************************************************/
/*
* The prng_lg_range functions give a uniform int in the half-open range [0,
* 2**lg_range). If atomic is true, they do so safely from multiple threads.
* Multithreaded 64-bit prngs aren't supported.
*/
JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
    uint32_t ret, state0, state1;

    assert(lg_range > 0);
    assert(lg_range <= 32);

    state0 = atomic_load_u32(state, ATOMIC_RELAXED);

    if (atomic) {
        do {
            state1 = prng_state_next_u32(state0);
        } while (!atomic_compare_exchange_weak_u32(state, &state0,
            state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
    } else {
        state1 = prng_state_next_u32(state0);
        atomic_store_u32(state, state1, ATOMIC_RELAXED);
    }
    ret = state1 >> (32 - lg_range);

    return ret;
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
    uint64_t ret, state1;

    assert(lg_range > 0);
    assert(lg_range <= 64);

    state1 = prng_state_next_u64(*state);
    *state = state1;
    ret = state1 >> (64 - lg_range);

    return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
    size_t ret, state0, state1;

    assert(lg_range > 0);
    assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));

    state0 = atomic_load_zu(state, ATOMIC_RELAXED);

    if (atomic) {
        do {
            state1 = prng_state_next_zu(state0);
        } while (atomic_compare_exchange_weak_zu(state, &state0,
            state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
    } else {
        state1 = prng_state_next_zu(state0);
        atomic_store_zu(state, state1, ATOMIC_RELAXED);
    }
    ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);

    return ret;
}
/*
* The prng_range functions behave like the prng_lg_range, but return a result
* in [0, range) instead of [0, 2**lg_range).
*/
JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
    uint32_t ret;
    unsigned lg_range;

    assert(range > 1);

    /* Compute the ceiling of lg(range). */
    lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;

    /* Generate a result in [0..range) via repeated trial. */
    do {
        ret = prng_lg_range_u32(state, lg_range, atomic);
    } while (ret >= range);

    return ret;
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range) {
    uint64_t ret;
    unsigned lg_range;

    assert(range > 1);

    /* Compute the ceiling of lg(range). */
    lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

    /* Generate a result in [0..range) via repeated trial. */
    do {
        ret = prng_lg_range_u64(state, lg_range);
    } while (ret >= range);

    return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
    size_t ret;
    unsigned lg_range;

    assert(range > 1);

    /* Compute the ceiling of lg(range). */
    lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;

    /* Generate a result in [0..range) via repeated trial. */
    do {
        ret = prng_lg_range_zu(state, lg_range, atomic);
    } while (ret >= range);

    return ret;
}
#endif /* JEMALLOC_INTERNAL_PRNG_H */
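As a quick orientation aid (not part of the commit): the sketch below shows how the public prng API above is typically consumed. The seed of 42, the state variable, and example_pick_slot() are hypothetical names invented for illustration; only prng_range_u32() and atomic_u32_t come from the header, and the include preamble assumes jemalloc's usual internal include setup.

/*
 * Hypothetical usage sketch of prng_range_u32(); everything prefixed with
 * "example_" is made up for the example.
 */
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

static atomic_u32_t example_prng_state = ATOMIC_INIT(42);

/* Pick an index in [0, nslots), nslots > 1, without modulo bias. */
static unsigned
example_pick_slot(unsigned nslots) {
    /* atomic == false: caller promises single-threaded access to the state. */
    return (unsigned)prng_range_u32(&example_prng_state, (uint32_t)nslots,
        false);
}

prng_range_u32() rounds the requested range up to a power of two and rejects out-of-range draws, which is why the assertion requires nslots > 1.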
deps/jemalloc/include/jemalloc/internal/prof_externs.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
#define JEMALLOC_INTERNAL_PROF_EXTERNS_H

#include "jemalloc/internal/mutex.h"

extern malloc_mutex_t bt2gctx_mtx;

extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample;    /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump;          /* High-water memory dumping. */
extern bool opt_prof_final;          /* Final profile dumping. */
extern bool opt_prof_leak;           /* Dump leak summary at exit. */
extern bool opt_prof_accum;          /* Report cumulative bytes. */
extern char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;

/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;

/*
 * Profile dump interval, measured in bytes allocated.  Each arena triggers a
 * profile dump when it reaches this threshold.  The effect is that the
 * interval between profile dumps averages prof_interval, though the actual
 * interval between dumps will tend to be sporadic, and the interval will be a
 * maximum of approximately (prof_interval * narenas).
 */
extern uint64_t prof_interval;

/*
 * Initialized as opt_lg_prof_sample, and potentially modified during profiling
 * resets.
 */
extern size_t lg_prof_sample;

void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
#endif
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *JET_MUTABLE prof_dump_open;

typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *JET_MUTABLE prof_dump_header;
#ifdef JEMALLOC_JET
void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
    uint64_t *accumbytes);
#endif
bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum);
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);

#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
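To make the prof_interval comment above concrete, here is a small standalone back-of-the-envelope sketch; the values 30 and 4 are hypothetical inputs, not defaults taken from this commit.

#include <stdint.h>
#include <stdio.h>

int
main(void) {
    /* Hypothetical settings: opt_lg_prof_interval = 30, four arenas. */
    uint64_t interval = UINT64_C(1) << 30;
    unsigned narenas = 4;

    /* Average bytes of allocation between dumps, per the comment above. */
    printf("average interval:      %llu bytes\n", (unsigned long long)interval);
    /*
     * Each arena accumulates independently, so the gap between dumps can
     * approach interval * narenas in the worst case.
     */
    printf("approx. max interval:  %llu bytes\n",
        (unsigned long long)(interval * narenas));
    return 0;
}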
deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H

#include "jemalloc/internal/mutex.h"

static inline bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
    cassert(config_prof);

    bool overflow;
    uint64_t a0, a1;

    /*
     * If the application allocates fast enough (and/or if idump is slow
     * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
     * idump trigger coalescing.  This is an intentional mechanism that
     * avoids rate-limiting allocation.
     */
#ifdef JEMALLOC_ATOMIC_U64
    a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
    do {
        a1 = a0 + accumbytes;
        assert(a1 >= a0);
        overflow = (a1 >= prof_interval);
        if (overflow) {
            a1 %= prof_interval;
        }
    } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
        a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
    malloc_mutex_lock(tsdn, &prof_accum->mtx);
    a0 = prof_accum->accumbytes;
    a1 = a0 + accumbytes;
    overflow = (a1 >= prof_interval);
    if (overflow) {
        a1 %= prof_interval;
    }
    prof_accum->accumbytes = a1;
    malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
    return overflow;
}

static inline void
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
    cassert(config_prof);

    /*
     * Cancel out as much of the excessive prof_accumbytes increase as
     * possible without underflowing.  Interval-triggered dumps occur
     * slightly more often than intended as a result of incomplete
     * canceling.
     */
    uint64_t a0, a1;
#ifdef JEMALLOC_ATOMIC_U64
    a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
    do {
        a1 = (a0 >= LARGE_MINCLASS - usize) ?
            a0 - (LARGE_MINCLASS - usize) : 0;
    } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
        a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
    malloc_mutex_lock(tsdn, &prof_accum->mtx);
    a0 = prof_accum->accumbytes;
    a1 = (a0 >= LARGE_MINCLASS - usize) ?
        a0 - (LARGE_MINCLASS - usize) : 0;
    prof_accum->accumbytes = a1;
    malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
}

JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
    /*
     * Even if opt_prof is true, sampling can be temporarily disabled by
     * setting prof_active to false.  No locking is used when reading
     * prof_active in the fast path, so there are no guarantees regarding
     * how long it will take for all threads to notice state changes.
     */
    return prof_active;
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
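A minimal standalone sketch of the same accumulate-and-wrap pattern that prof_accum_add() uses on its lock-free branch, written against C11 <stdatomic.h> rather than jemalloc's atomic wrappers; example_interval and the function name are invented for the example.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative threshold standing in for prof_interval. */
static const uint64_t example_interval = UINT64_C(1) << 30;

/*
 * Add nbytes to *acc and report whether the running total crossed the
 * threshold, wrapping the remainder back into [0, example_interval).
 */
static bool
example_accum_add(_Atomic uint64_t *acc, uint64_t nbytes) {
    uint64_t a0 = atomic_load_explicit(acc, memory_order_relaxed);
    uint64_t a1;
    bool overflow;

    do {
        a1 = a0 + nbytes;
        overflow = (a1 >= example_interval);
        if (overflow) {
            a1 %= example_interval;
        }
    } while (!atomic_compare_exchange_weak_explicit(acc, &a0, a1,
        memory_order_relaxed, memory_order_relaxed));
    return overflow;
}

On CAS failure a0 is reloaded with the current value, so the retry recomputes the sum against whatever another thread just published, which is exactly why the loop tolerates concurrent adders without a lock.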
deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
#define JEMALLOC_INTERNAL_PROF_INLINES_B_H

#include "jemalloc/internal/sz.h"

JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void) {
    /*
     * No locking is used when reading prof_gdump_val in the fast path, so
     * there are no guarantees regarding how long it will take for all
     * threads to notice state changes.
     */
    return prof_gdump_val;
}

JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create) {
    prof_tdata_t *tdata;

    cassert(config_prof);

    tdata = tsd_prof_tdata_get(tsd);
    if (create) {
        if (unlikely(tdata == NULL)) {
            if (tsd_nominal(tsd)) {
                tdata = prof_tdata_init(tsd);
                tsd_prof_tdata_set(tsd, tdata);
            }
        } else if (unlikely(tdata->expired)) {
            tdata = prof_tdata_reinit(tsd, tdata);
            tsd_prof_tdata_set(tsd, tdata);
        }
        assert(tdata == NULL || tdata->attached);
    }

    return tdata;
}

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
    cassert(config_prof);
    assert(ptr != NULL);

    return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_reset(tsdn, ptr, tctx);
}

JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    prof_tdata_t **tdata_out) {
    prof_tdata_t *tdata;

    cassert(config_prof);

    tdata = prof_tdata_get(tsd, true);
    if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
        tdata = NULL;
    }

    if (tdata_out != NULL) {
        *tdata_out = tdata;
    }

    if (unlikely(tdata == NULL)) {
        return true;
    }

    if (likely(tdata->bytes_until_sample >= usize)) {
        if (update) {
            tdata->bytes_until_sample -= usize;
        }
        return true;
    } else {
        if (tsd_reentrancy_level_get(tsd) > 0) {
            return true;
        }
        /* Compute new sample threshold. */
        if (update) {
            prof_sample_threshold_update(tdata);
        }
        return !tdata->active;
    }
}

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
    prof_tctx_t *ret;
    prof_tdata_t *tdata;
    prof_bt_t bt;

    assert(usize == sz_s2u(usize));

    if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
        &tdata))) {
        ret = (prof_tctx_t *)(uintptr_t)1U;
    } else {
        bt_init(&bt, tdata->vec);
        prof_backtrace(&bt);
        ret = prof_lookup(tsd, &bt);
    }

    return ret;
}

JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
    cassert(config_prof);
    assert(ptr != NULL);
    assert(usize == isalloc(tsdn, ptr));

    if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
        prof_malloc_sample_object(tsdn, ptr, usize, tctx);
    } else {
        prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
            (prof_tctx_t *)(uintptr_t)1U);
    }
}

JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
    prof_tctx_t *old_tctx) {
    bool sampled, old_sampled, moved;

    cassert(config_prof);
    assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

    if (prof_active && !updated && ptr != NULL) {
        assert(usize == isalloc(tsd_tsdn(tsd), ptr));
        if (prof_sample_accum_update(tsd, usize, true, NULL)) {
            /*
             * Don't sample.  The usize passed to prof_alloc_prep()
             * was larger than what actually got allocated, so a
             * backtrace was captured for this allocation, even
             * though its actual usize was insufficient to cross the
             * sample threshold.
             */
            prof_alloc_rollback(tsd, tctx, true);
            tctx = (prof_tctx_t *)(uintptr_t)1U;
        }
    }

    sampled = ((uintptr_t)tctx > (uintptr_t)1U);
    old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
    moved = (ptr != old_ptr);

    if (unlikely(sampled)) {
        prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
    } else if (moved) {
        prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
            (prof_tctx_t *)(uintptr_t)1U);
    } else if (unlikely(old_sampled)) {
        /*
         * prof_tctx_set() would work for the !moved case as well, but
         * prof_tctx_reset() is slightly cheaper, and the proper thing
         * to do here in the presence of explicit knowledge re: moved
         * state.
         */
        prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
    } else {
        assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
            (uintptr_t)1U);
    }

    /*
     * The prof_free_sampled_object() call must come after the
     * prof_malloc_sample_object() call, because tctx and old_tctx may be
     * the same, in which case reversing the call order could cause the tctx
     * to be prematurely destroyed as a side effect of momentarily zeroed
     * counters.
     */
    if (unlikely(old_sampled)) {
        prof_free_sampled_object(tsd, old_usize, old_tctx);
    }
}

JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
    prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);

    cassert(config_prof);
    assert(usize == isalloc(tsd_tsdn(tsd), ptr));

    if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
        prof_free_sampled_object(tsd, usize, tctx);
    }
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */
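For intuition about the sampling countdown inside prof_sample_accum_update(), the fragment below reproduces just that logic in plain C: a per-thread byte budget is consumed by allocations, and only once it runs out does an allocation become a sampling candidate. The fixed reset value stands in for prof_sample_threshold_update(), which draws a fresh threshold; all names here are illustrative, not part of the commit.

#include <stdbool.h>
#include <stddef.h>

/* Illustrative countdown standing in for tdata->bytes_until_sample. */
static size_t example_bytes_until_sample = (size_t)1 << 19;

/*
 * Returns true when the allocation should NOT be sampled: small allocations
 * just consume budget; once the budget is exhausted the caller would record
 * a backtrace and a new threshold would be drawn.
 */
static bool
example_skip_sample(size_t usize) {
    if (example_bytes_until_sample >= usize) {
        example_bytes_until_sample -= usize;
        return true;
    }
    /* Stand-in for prof_sample_threshold_update()'s randomized draw. */
    example_bytes_until_sample = (size_t)1 << 19;
    return false;
}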
deps/jemalloc/include/jemalloc/internal/prof.h
→
deps/jemalloc/include/jemalloc/internal/prof_structs.h
View file @
fb1f4f4e
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
/*
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
* is based on __builtin_return_address() necessarily has a hard-coded number
* of backtrace frame handlers, and should be kept in sync with this setting.
*/
#define PROF_BT_MAX 128
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
#define JEMALLOC_INTERNAL_PROF_STRUCTS_H
/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/rb.h"
struct prof_bt_s {
    /* Backtrace, stored as len program counters. */
...
...
@@ -70,6 +20,15 @@ typedef struct {
} prof_unwind_data_t;
#endif

struct prof_accum_s {
#ifndef JEMALLOC_ATOMIC_U64
    malloc_mutex_t mtx;
    uint64_t accumbytes;
#else
    atomic_u64_t accumbytes;
#endif
};

struct prof_cnt_s {
    /* Profiling counters. */
    uint64_t curobjs;
...
...
@@ -239,307 +198,4 @@ struct prof_tdata_s {
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample;    /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump;          /* High-water memory dumping. */
extern bool opt_prof_final;          /* Final profile dumping. */
extern bool opt_prof_leak;           /* Dump leak summary at exit. */
extern bool opt_prof_accum;          /* Report cumulative bytes. */
extern char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;

/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;

/*
 * Profile dump interval, measured in bytes allocated.  Each arena triggers a
 * profile dump when it reaches this threshold.  The effect is that the
 * interval between profile dumps averages prof_interval, though the actual
 * interval between dumps will tend to be sporadic, and the interval will be a
 * maximum of approximately (prof_interval * narenas).
 */
extern uint64_t prof_interval;

/*
 * Initialized as opt_lg_prof_sample, and potentially modified during profiling
 * resets.
 */
extern size_t lg_prof_sample;

void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
const char *prof_thread_name_get(void);
bool prof_active_get(void);
bool prof_active_set(bool active);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(void);
bool prof_thread_active_set(bool active);
bool prof_thread_active_init_get(void);
bool prof_thread_active_init_set(bool active_init);
bool prof_gdump_get(void);
bool prof_gdump_set(bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);
void prof_prefork(void);
void prof_postfork_parent(void);
void prof_postfork_child(void);
void prof_sample_threshold_update(prof_tdata_t *tdata);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
    prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
    bool update);
prof_tctx_t *prof_tctx_get(const void *ptr);
void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
    prof_tctx_t *tctx);
void prof_malloc_sample_object(const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
    prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
    size_t old_usize, prof_tctx_t *old_tctx);
void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void)
{

    /*
     * Even if opt_prof is true, sampling can be temporarily disabled by
     * setting prof_active to false.  No locking is used when reading
     * prof_active in the fast path, so there are no guarantees regarding
     * how long it will take for all threads to notice state changes.
     */
    return (prof_active);
}

JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void)
{

    /*
     * No locking is used when reading prof_gdump_val in the fast path, so
     * there are no guarantees regarding how long it will take for all
     * threads to notice state changes.
     */
    return (prof_gdump_val);
}

JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create)
{
    prof_tdata_t *tdata;

    cassert(config_prof);

    tdata = tsd_prof_tdata_get(tsd);
    if (create) {
        if (unlikely(tdata == NULL)) {
            if (tsd_nominal(tsd)) {
                tdata = prof_tdata_init(tsd);
                tsd_prof_tdata_set(tsd, tdata);
            }
        } else if (unlikely(tdata->expired)) {
            tdata = prof_tdata_reinit(tsd, tdata);
            tsd_prof_tdata_set(tsd, tdata);
        }
        assert(tdata == NULL || tdata->attached);
    }

    return (tdata);
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(const void *ptr)
{

    cassert(config_prof);
    assert(ptr != NULL);

    return (arena_prof_tctx_get(ptr));
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_set(ptr, usize, tctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
    prof_tctx_t *old_tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
}
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    prof_tdata_t **tdata_out)
{
    prof_tdata_t *tdata;

    cassert(config_prof);

    tdata = prof_tdata_get(tsd, true);
    if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
        tdata = NULL;

    if (tdata_out != NULL)
        *tdata_out = tdata;

    if (tdata == NULL)
        return (true);

    if (tdata->bytes_until_sample >= usize) {
        if (update)
            tdata->bytes_until_sample -= usize;
        return (true);
    } else {
        /* Compute new sample threshold. */
        if (update)
            prof_sample_threshold_update(tdata);
        return (!tdata->active);
    }
}

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
{
    prof_tctx_t *ret;
    prof_tdata_t *tdata;
    prof_bt_t bt;

    assert(usize == s2u(usize));

    if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
        &tdata)))
        ret = (prof_tctx_t *)(uintptr_t)1U;
    else {
        bt_init(&bt, tdata->vec);
        prof_backtrace(&bt);
        ret = prof_lookup(tsd, &bt);
    }

    return (ret);
}
JEMALLOC_ALWAYS_INLINE void
prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);
    assert(usize == isalloc(ptr, true));

    if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
        prof_malloc_sample_object(ptr, usize, tctx);
    else
        prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}

JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
    prof_tctx_t *old_tctx)
{
    bool sampled, old_sampled;

    cassert(config_prof);
    assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

    if (prof_active && !updated && ptr != NULL) {
        assert(usize == isalloc(ptr, true));
        if (prof_sample_accum_update(tsd, usize, true, NULL)) {
            /*
             * Don't sample.  The usize passed to prof_alloc_prep()
             * was larger than what actually got allocated, so a
             * backtrace was captured for this allocation, even
             * though its actual usize was insufficient to cross the
             * sample threshold.
             */
            tctx = (prof_tctx_t *)(uintptr_t)1U;
        }
    }

    sampled = ((uintptr_t)tctx > (uintptr_t)1U);
    old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);

    if (unlikely(sampled))
        prof_malloc_sample_object(ptr, usize, tctx);
    else
        prof_tctx_reset(ptr, usize, old_ptr, old_tctx);

    if (unlikely(old_sampled))
        prof_free_sampled_object(tsd, old_usize, old_tctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
    prof_tctx_t *tctx = prof_tctx_get(ptr);

    cassert(config_prof);
    assert(usize == isalloc(ptr, true));

    if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
        prof_free_sampled_object(tsd, usize, tctx);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
deps/jemalloc/include/jemalloc/internal/prof_types.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
#define JEMALLOC_INTERNAL_PROF_TYPES_H
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_accum_s prof_accum_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
/*
* Hard limit on stack backtrace depth. The version of prof_backtrace() that
* is based on __builtin_return_address() necessarily has a hard-coded number
* of backtrace frame handlers, and should be kept in sync with this setting.
*/
#define PROF_BT_MAX 128
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
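The PROF_TDATA_STATE_* macros above rely on the fact that no real prof_tdata_t ever lives at address 1 or 2, so near-NULL values can double as state markers during thread shutdown. Below is a hedged standalone sketch of the comparison idiom the inline code uses to filter them out; the type and macro names are shortened for the example and are not part of the commit.

#include <stdbool.h>
#include <stdint.h>

typedef struct example_tdata_s example_tdata_t;

#define EXAMPLE_STATE_REINCARNATED ((example_tdata_t *)(uintptr_t)1)
#define EXAMPLE_STATE_PURGATORY    ((example_tdata_t *)(uintptr_t)2)
#define EXAMPLE_STATE_MAX          EXAMPLE_STATE_PURGATORY

/* True when the pointer is NULL or one of the near-NULL state sentinels. */
static bool
example_is_sentinel(const example_tdata_t *tdata) {
    return (uintptr_t)tdata <= (uintptr_t)EXAMPLE_STATE_MAX;
}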
deps/jemalloc/include/jemalloc/internal/public_namespace.sh
View file @
fb1f4f4e