ruanhaishen/redis — commit d4439bd4
Authored May 15, 2023 by Oran Agra

    Merge remote-tracking branch 'origin/unstable' into 7.2

Parents: e26a769d 2ffde15a
Changes: 200 files
Too many changes to show. To preserve performance, only 200 of 200+ files are displayed.
deps/jemalloc/include/jemalloc/internal/large_externs.h

@@ -6,27 +6,19 @@
 void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
 void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     bool zero);
-bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
     size_t usize_max, bool zero);
 void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
     size_t alignment, bool zero, tcache_t *tcache,
     hook_ralloc_args_t *hook_args);

-typedef void (large_dalloc_junk_t)(void *, size_t);
-extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;
-
-typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
-extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;
-
-void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
-void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
-void large_dalloc(tsdn_t *tsdn, extent_t *extent);
-size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
-prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
-void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
-void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);
-
-nstime_t large_prof_alloc_time_get(const extent_t *extent);
-void large_prof_alloc_time_set(extent_t *extent, nstime_t time);
+void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata);
+void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
+void large_dalloc(tsdn_t *tsdn, edata_t *edata);
+size_t large_salloc(tsdn_t *tsdn, const edata_t *edata);
+void large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
+    bool reset_recent);
+void large_prof_tctx_reset(edata_t *edata);
+void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size);

 #endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */

deps/jemalloc/include/jemalloc/internal/lockedint.h
new file mode 100644

#ifndef JEMALLOC_INTERNAL_LOCKEDINT_H
#define JEMALLOC_INTERNAL_LOCKEDINT_H

/*
 * In those architectures that support 64-bit atomics, we use atomic updates for
 * our 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
 * externally.
 */

typedef struct locked_u64_s locked_u64_t;
#ifdef JEMALLOC_ATOMIC_U64
struct locked_u64_s {
    atomic_u64_t val;
};
#else
/* Must hold the associated mutex. */
struct locked_u64_s {
    uint64_t val;
};
#endif

typedef struct locked_zu_s locked_zu_t;
struct locked_zu_s {
    atomic_zu_t val;
};

#ifndef JEMALLOC_ATOMIC_U64
# define LOCKEDINT_MTX_DECLARE(name) malloc_mutex_t name;
# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) \
    malloc_mutex_init(&(mu), name, rank, rank_mode)
# define LOCKEDINT_MTX(mtx) (&(mtx))
# define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu))
# define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu))
# define LOCKEDINT_MTX_PREFORK(tsdn, mu) malloc_mutex_prefork(tsdn, &(mu))
# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) \
    malloc_mutex_postfork_parent(tsdn, &(mu))
# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) \
    malloc_mutex_postfork_child(tsdn, &(mu))
#else
# define LOCKEDINT_MTX_DECLARE(name)
# define LOCKEDINT_MTX(mtx) NULL
# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) false
# define LOCKEDINT_MTX_LOCK(tsdn, mu)
# define LOCKEDINT_MTX_UNLOCK(tsdn, mu)
# define LOCKEDINT_MTX_PREFORK(tsdn, mu)
# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu)
# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu)
#endif

#ifdef JEMALLOC_ATOMIC_U64
# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) assert((mtx) == NULL)
#else
# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) \
    malloc_mutex_assert_owner(tsdn, (mtx))
#endif

static inline uint64_t
locked_read_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p) {
    LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
    return atomic_load_u64(&p->val, ATOMIC_RELAXED);
#else
    return p->val;
#endif
}

static inline void
locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
    uint64_t x) {
    LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
    atomic_fetch_add_u64(&p->val, x, ATOMIC_RELAXED);
#else
    p->val += x;
#endif
}

static inline void
locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
    uint64_t x) {
    LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
    uint64_t r = atomic_fetch_sub_u64(&p->val, x, ATOMIC_RELAXED);
    assert(r - x <= r);
#else
    p->val -= x;
    assert(p->val + x >= p->val);
#endif
}

/* Increment and take modulus.  Returns whether the modulo made any change. */
static inline bool
locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
    const uint64_t x, const uint64_t modulus) {
    LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
    uint64_t before, after;
    bool overflow;
#ifdef JEMALLOC_ATOMIC_U64
    before = atomic_load_u64(&p->val, ATOMIC_RELAXED);
    do {
        after = before + x;
        assert(after >= before);
        overflow = (after >= modulus);
        if (overflow) {
            after %= modulus;
        }
    } while (!atomic_compare_exchange_weak_u64(&p->val, &before, after,
        ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
    before = p->val;
    after = before + x;
    overflow = (after >= modulus);
    if (overflow) {
        after %= modulus;
    }
    p->val = after;
#endif
    return overflow;
}

/*
 * Non-atomically sets *dst += src.  *dst needs external synchronization.
 * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
 * the types here are atomic).
 */
static inline void
locked_inc_u64_unsynchronized(locked_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
    uint64_t cur_dst = atomic_load_u64(&dst->val, ATOMIC_RELAXED);
    atomic_store_u64(&dst->val, src + cur_dst, ATOMIC_RELAXED);
#else
    dst->val += src;
#endif
}

static inline uint64_t
locked_read_u64_unsynchronized(locked_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
    return atomic_load_u64(&p->val, ATOMIC_RELAXED);
#else
    return p->val;
#endif
}

static inline void
locked_init_u64_unsynchronized(locked_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
    atomic_store_u64(&p->val, x, ATOMIC_RELAXED);
#else
    p->val = x;
#endif
}

static inline size_t
locked_read_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p) {
    LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
    return atomic_load_zu(&p->val, ATOMIC_RELAXED);
#else
    return atomic_load_zu(&p->val, ATOMIC_RELAXED);
#endif
}

static inline void
locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
    size_t x) {
    LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
    atomic_fetch_add_zu(&p->val, x, ATOMIC_RELAXED);
#else
    size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
    atomic_store_zu(&p->val, cur + x, ATOMIC_RELAXED);
#endif
}

static inline void
locked_dec_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
    size_t x) {
    LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
#ifdef JEMALLOC_ATOMIC_U64
    size_t r = atomic_fetch_sub_zu(&p->val, x, ATOMIC_RELAXED);
    assert(r - x <= r);
#else
    size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
    atomic_store_zu(&p->val, cur - x, ATOMIC_RELAXED);
#endif
}

/* Like the _u64 variant, needs an externally synchronized *dst. */
static inline void
locked_inc_zu_unsynchronized(locked_zu_t *dst, size_t src) {
    size_t cur_dst = atomic_load_zu(&dst->val, ATOMIC_RELAXED);
    atomic_store_zu(&dst->val, src + cur_dst, ATOMIC_RELAXED);
}

/*
 * Unlike the _u64 variant, this is safe to call unconditionally.
 */
static inline size_t
locked_read_atomic_zu(locked_zu_t *p) {
    return atomic_load_zu(&p->val, ATOMIC_RELAXED);
}

#endif /* JEMALLOC_INTERNAL_LOCKEDINT_H */

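The header above picks, at compile time, between a real 64-bit atomic and a plain integer guarded by an external mutex. Below is a minimal standalone sketch of the same pattern using C11 atomics and pthreads rather than jemalloc's wrappers; the names (counter_t, counter_inc, counter_mtx) are illustrative only and not part of jemalloc's API:

/* counter_demo.c -- illustrative only. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#if ATOMIC_LLONG_LOCK_FREE == 2   /* Stand-in for JEMALLOC_ATOMIC_U64. */
typedef struct { _Atomic uint64_t val; } counter_t;
static pthread_mutex_t *counter_mtx = NULL;   /* No mutex needed. */
static void counter_inc(counter_t *c, uint64_t x) {
    /* Relaxed ordering: the value is a statistic, not a synchronizer. */
    atomic_fetch_add_explicit(&c->val, x, memory_order_relaxed);
}
static uint64_t counter_read(counter_t *c) {
    return atomic_load_explicit(&c->val, memory_order_relaxed);
}
#else
typedef struct { uint64_t val; } counter_t;
static pthread_mutex_t counter_mtx_storage = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t *counter_mtx = &counter_mtx_storage;
static void counter_inc(counter_t *c, uint64_t x) { c->val += x; }    /* Mutex held. */
static uint64_t counter_read(counter_t *c) { return c->val; }         /* Mutex held. */
#endif

int main(void) {
    counter_t c = {0};
    if (counter_mtx != NULL) pthread_mutex_lock(counter_mtx);
    counter_inc(&c, 42);
    printf("%llu\n", (unsigned long long)counter_read(&c));
    if (counter_mtx != NULL) pthread_mutex_unlock(counter_mtx);
    return 0;
}

As in lockedint.h, the fast configuration compiles the mutex away entirely and relies on relaxed atomic read-modify-write operations.
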
deps/jemalloc/include/jemalloc/internal/malloc_io.h

 #ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
 #define JEMALLOC_INTERNAL_MALLOC_IO_H

+#include "jemalloc/internal/jemalloc_internal_types.h"
+
 #ifdef _WIN32
 #  ifdef _WIN64
 #    define FMT64_PREFIX "ll"
...
@@ -40,6 +42,7 @@
  */
 #define MALLOC_PRINTF_BUFSIZE 4096

+write_cb_t wrtmessage;
 int buferror(int err, char *buf, size_t buflen);
 uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
     int base);
...
@@ -57,10 +60,10 @@ size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
 * The caller can set write_cb to null to choose to print with the
 * je_malloc_message hook.
 */
-void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
-    const char *format, va_list ap);
-void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
-    const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
+void malloc_vcprintf(write_cb_t *write_cb, void *cbopaque,
+    const char *format, va_list ap);
+void malloc_cprintf(write_cb_t *write_cb, void *cbopaque,
+    const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
 void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);

 static inline ssize_t
...

deps/jemalloc/include/jemalloc/internal/mpsc_queue.h
new file mode 100644

#ifndef JEMALLOC_INTERNAL_MPSC_QUEUE_H
#define JEMALLOC_INTERNAL_MPSC_QUEUE_H

#include "jemalloc/internal/atomic.h"

/*
 * A concurrent implementation of a multi-producer, single-consumer queue.  It
 * supports three concurrent operations:
 * - Push
 * - Push batch
 * - Pop batch
 *
 * These operations are all lock-free.
 *
 * The implementation is the simple two-stack queue built on a Treiber stack.
 * It's not terribly efficient, but this isn't expected to go into anywhere with
 * hot code.  In fact, we don't really even need queue semantics in any
 * anticipated use cases; we could get away with just the stack.  But this way
 * lets us frame the API in terms of the existing list types, which is a nice
 * convenience.  We can save on cache misses by introducing our own (parallel)
 * single-linked list type here, and dropping FIFO semantics, if we need this to
 * get faster.  Since we're currently providing queue semantics though, we use
 * the prev field in the link rather than the next field for Treiber-stack
 * linkage, so that we can preserve order for batch-pushed lists (recall that
 * the two-stack trick reverses order in the lock-free first stack).
 */

#define mpsc_queue(a_type) \
struct { \
    atomic_p_t tail; \
}

#define mpsc_queue_proto(a_attr, a_prefix, a_queue_type, a_type, \
    a_list_type) \
/* Initialize a queue. */ \
a_attr void \
a_prefix##new(a_queue_type *queue); \
/* Insert all items in src into the queue, clearing src. */ \
a_attr void \
a_prefix##push_batch(a_queue_type *queue, a_list_type *src); \
/* Insert node into the queue. */ \
a_attr void \
a_prefix##push(a_queue_type *queue, a_type *node); \
/* \
 * Pop all items in the queue into the list at dst.  dst should already \
 * be initialized (and may contain existing items, which then remain \
 * in dst). \
 */ \
a_attr void \
a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst);

#define mpsc_queue_gen(a_attr, a_prefix, a_queue_type, a_type, \
    a_list_type, a_link) \
a_attr void \
a_prefix##new(a_queue_type *queue) { \
    atomic_store_p(&queue->tail, NULL, ATOMIC_RELAXED); \
} \
a_attr void \
a_prefix##push_batch(a_queue_type *queue, a_list_type *src) { \
    /* \
     * Reuse the ql list next field as the Treiber stack next \
     * field. \
     */ \
    a_type *first = ql_first(src); \
    a_type *last = ql_last(src, a_link); \
    void *cur_tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \
    do { \
        /* \
         * Note that this breaks the queue ring structure; \
         * it's not a ring any more! \
         */ \
        first->a_link.qre_prev = cur_tail; \
        /* \
         * Note: the upcoming CAS doesn't need an atomic; every \
         * push only needs to synchronize with the next pop, \
         * which we get from the release sequence rules. \
         */ \
    } while (!atomic_compare_exchange_weak_p(&queue->tail, \
        &cur_tail, last, ATOMIC_RELEASE, ATOMIC_RELAXED)); \
    ql_new(src); \
} \
a_attr void \
a_prefix##push(a_queue_type *queue, a_type *node) { \
    ql_elm_new(node, a_link); \
    a_list_type list; \
    ql_new(&list); \
    ql_head_insert(&list, node, a_link); \
    a_prefix##push_batch(queue, &list); \
} \
a_attr void \
a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst) { \
    a_type *tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \
    if (tail == NULL) { \
        /* \
         * In the common special case where there are no \
         * pending elements, bail early without a costly RMW. \
         */ \
        return; \
    } \
    tail = atomic_exchange_p(&queue->tail, NULL, ATOMIC_ACQUIRE); \
    /* \
     * It's a single-consumer queue, so if cur started non-NULL, \
     * it'd better stay non-NULL. \
     */ \
    assert(tail != NULL); \
    /* \
     * We iterate through the stack and both fix up the link \
     * structure (stack insertion broke the list requirement that \
     * the list be circularly linked).  It's just as efficient at \
     * this point to make the queue a "real" queue, so do that as \
     * well. \
     * If this ever gets to be a hot spot, we can omit this fixup \
     * and make the queue a bag (i.e. not necessarily ordered), but \
     * that would mean jettisoning the existing list API as the \
     * batch pushing/popping interface. \
     */ \
    a_list_type reversed; \
    ql_new(&reversed); \
    while (tail != NULL) { \
        /* \
         * Pop an item off the stack, prepend it onto the list \
         * (reversing the order).  Recall that we use the \
         * list prev field as the Treiber stack next field to \
         * preserve order of batch-pushed items when reversed. \
         */ \
        a_type *next = tail->a_link.qre_prev; \
        ql_elm_new(tail, a_link); \
        ql_head_insert(&reversed, tail, a_link); \
        tail = next; \
    } \
    ql_concat(dst, &reversed, a_link); \
}

#endif /* JEMALLOC_INTERNAL_MPSC_QUEUE_H */

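The macros above generate the queue over jemalloc's internal ql_* list type, so they are not directly reusable elsewhere. The following standalone sketch shows the same two-step idea with C11 atomics: producers push onto a Treiber stack with compare-and-swap, and the single consumer exchanges the whole stack out and reverses it to recover FIFO order. All names here (node_t, mpsc_t, mpsc_push, mpsc_pop_batch) are hypothetical:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

typedef struct node_s {
    struct node_s *next;   /* Treiber-stack linkage. */
    int value;
} node_t;

typedef struct {
    _Atomic(node_t *) tail;
} mpsc_t;

static void mpsc_push(mpsc_t *q, node_t *n) {
    node_t *cur = atomic_load_explicit(&q->tail, memory_order_relaxed);
    do {
        n->next = cur;
    } while (!atomic_compare_exchange_weak_explicit(&q->tail, &cur, n,
        memory_order_release, memory_order_relaxed));
}

/* Single consumer: grab the whole stack, then reverse it into FIFO order. */
static node_t *mpsc_pop_batch(mpsc_t *q) {
    node_t *stack = atomic_exchange_explicit(&q->tail, NULL,
        memory_order_acquire);
    node_t *fifo = NULL;
    while (stack != NULL) {
        node_t *next = stack->next;
        stack->next = fifo;
        fifo = stack;
        stack = next;
    }
    return fifo;   /* Oldest pushed element comes first. */
}

int main(void) {
    mpsc_t q = { NULL };
    node_t a = { NULL, 1 }, b = { NULL, 2 }, c = { NULL, 3 };
    mpsc_push(&q, &a); mpsc_push(&q, &b); mpsc_push(&q, &c);
    for (node_t *n = mpsc_pop_batch(&q); n != NULL; n = n->next) {
        printf("%d\n", n->value);   /* Prints 1 2 3. */
    }
    return 0;
}
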
deps/jemalloc/include/jemalloc/internal/mutex.h

@@ -6,6 +6,8 @@
 #include "jemalloc/internal/tsd.h"
 #include "jemalloc/internal/witness.h"

+extern int64_t opt_mutex_max_spin;
+
 typedef enum {
     /* Can only acquire one mutex of a given witness rank at a time. */
     malloc_mutex_rank_exclusive,
...
@@ -67,12 +69,6 @@ struct malloc_mutex_s {
 #endif
 };

-/*
- * Based on benchmark results, a fixed spin with this amount of retries works
- * well for our critical sections.
- */
-#define MALLOC_MUTEX_MAX_SPIN 250
-
 #ifdef _WIN32
 #  if _WIN32_WINNT >= 0x0600
 #    define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
...
@@ -245,22 +241,25 @@ malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
     witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
 }

-/* Copy the prof data from mutex for processing. */
 static inline void
-malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
-    malloc_mutex_t *mutex) {
-    mutex_prof_data_t *source = &mutex->prof_data;
-    /* Can only read holding the mutex. */
-    malloc_mutex_assert_owner(tsdn, mutex);
-
+malloc_mutex_prof_copy(mutex_prof_data_t *dst, mutex_prof_data_t *source) {
     /*
      * Not *really* allowed (we shouldn't be doing non-atomic loads of
      * atomic data), but the mutex protection makes this safe, and writing
      * a member-for-member copy is tedious for this situation.
      */
-    *data = *source;
+    *dst = *source;
     /* n_wait_thds is not reported (modified w/o locking). */
-    atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
+    atomic_store_u32(&dst->n_waiting_thds, 0, ATOMIC_RELAXED);
+}
+
+/* Copy the prof data from mutex for processing. */
+static inline void
+malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
+    malloc_mutex_t *mutex) {
+    /* Can only read holding the mutex. */
+    malloc_mutex_assert_owner(tsdn, mutex);
+    malloc_mutex_prof_copy(data, &mutex->prof_data);
 }

 static inline void
...
@@ -285,4 +284,36 @@ malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
     data->n_lock_ops += source->n_lock_ops;
 }

+/* Compare the prof data and update to the maximum. */
+static inline void
+malloc_mutex_prof_max_update(tsdn_t *tsdn, mutex_prof_data_t *data,
+    malloc_mutex_t *mutex) {
+    mutex_prof_data_t *source = &mutex->prof_data;
+    /* Can only read holding the mutex. */
+    malloc_mutex_assert_owner(tsdn, mutex);
+
+    if (nstime_compare(&source->tot_wait_time, &data->tot_wait_time) > 0) {
+        nstime_copy(&data->tot_wait_time, &source->tot_wait_time);
+    }
+    if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
+        nstime_copy(&data->max_wait_time, &source->max_wait_time);
+    }
+    if (source->n_wait_times > data->n_wait_times) {
+        data->n_wait_times = source->n_wait_times;
+    }
+    if (source->n_spin_acquired > data->n_spin_acquired) {
+        data->n_spin_acquired = source->n_spin_acquired;
+    }
+    if (source->max_n_thds > data->max_n_thds) {
+        data->max_n_thds = source->max_n_thds;
+    }
+    if (source->n_owner_switches > data->n_owner_switches) {
+        data->n_owner_switches = source->n_owner_switches;
+    }
+    if (source->n_lock_ops > data->n_lock_ops) {
+        data->n_lock_ops = source->n_lock_ops;
+    }
+    /* n_wait_thds is not reported. */
+}
+
 #endif /* JEMALLOC_INTERNAL_MUTEX_H */

deps/jemalloc/include/jemalloc/internal/mutex_pool.h
deleted file mode 100644

#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H
#define JEMALLOC_INTERNAL_MUTEX_POOL_H

#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/witness.h"

/* We do mod reductions by this value, so it should be kept a power of 2. */
#define MUTEX_POOL_SIZE 256

typedef struct mutex_pool_s mutex_pool_t;
struct mutex_pool_s {
    malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
};

bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank);

/* Internal helper - not meant to be called outside this module. */
static inline malloc_mutex_t *
mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) {
    size_t hash_result[2];
    hash(&key, sizeof(key), 0xd50dcc1b, hash_result);
    return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE];
}

static inline void
mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) {
    for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
        malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]);
    }
}

/*
 * Note that a mutex pool doesn't work exactly the way an embedded mutex would.
 * You're not allowed to acquire mutexes in the pool one at a time.  You have to
 * acquire all the mutexes you'll need in a single function call, and then
 * release them all in a single function call.
 */

static inline void
mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    mutex_pool_assert_not_held(tsdn, pool);

    malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
    malloc_mutex_lock(tsdn, mutex);
}

static inline void
mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
    malloc_mutex_unlock(tsdn, mutex);

    mutex_pool_assert_not_held(tsdn, pool);
}

static inline void
mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
    mutex_pool_assert_not_held(tsdn, pool);

    malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
    malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
    if ((uintptr_t)mutex1 < (uintptr_t)mutex2) {
        malloc_mutex_lock(tsdn, mutex1);
        malloc_mutex_lock(tsdn, mutex2);
    } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) {
        malloc_mutex_lock(tsdn, mutex1);
    } else {
        malloc_mutex_lock(tsdn, mutex2);
        malloc_mutex_lock(tsdn, mutex1);
    }
}

static inline void
mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
    malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
    malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
    if (mutex1 == mutex2) {
        malloc_mutex_unlock(tsdn, mutex1);
    } else {
        malloc_mutex_unlock(tsdn, mutex1);
        malloc_mutex_unlock(tsdn, mutex2);
    }

    mutex_pool_assert_not_held(tsdn, pool);
}

static inline void
mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key));
}

#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */

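The removed mutex_pool_lock2 above avoids deadlock when two keys hash to different pool slots by always acquiring the lower-addressed mutex first. A minimal standalone sketch of that ordering rule with plain pthreads (the lock_pair/unlock_pair names are illustrative, not jemalloc API):

#include <pthread.h>
#include <stdint.h>

/* Lock two mutexes without deadlocking: order acquisitions by address. */
static void lock_pair(pthread_mutex_t *m1, pthread_mutex_t *m2) {
    if (m1 == m2) {
        pthread_mutex_lock(m1);               /* Same mutex: lock it once. */
    } else if ((uintptr_t)m1 < (uintptr_t)m2) {
        pthread_mutex_lock(m1);
        pthread_mutex_lock(m2);
    } else {
        pthread_mutex_lock(m2);
        pthread_mutex_lock(m1);
    }
}

static void unlock_pair(pthread_mutex_t *m1, pthread_mutex_t *m2) {
    pthread_mutex_unlock(m1);
    if (m1 != m2) {
        pthread_mutex_unlock(m2);
    }
}

Because every thread acquires the pair in the same global (address) order, two threads locking overlapping pairs can never wait on each other in a cycle.
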
deps/jemalloc/include/jemalloc/internal/mutex_prof.h

@@ -7,8 +7,14 @@
 #define MUTEX_PROF_GLOBAL_MUTEXES \
     OP(background_thread) \
+    OP(max_per_bg_thd) \
     OP(ctl) \
-    OP(prof)
+    OP(prof) \
+    OP(prof_thds_data) \
+    OP(prof_dump) \
+    OP(prof_recent_alloc) \
+    OP(prof_recent_dump) \
+    OP(prof_stats)

 typedef enum {
 #define OP(mtx) global_prof_mutex_##mtx,
...
@@ -26,7 +32,10 @@ typedef enum {
     OP(decay_dirty) \
     OP(decay_muzzy) \
     OP(base) \
-    OP(tcache_list)
+    OP(tcache_list) \
+    OP(hpa_shard) \
+    OP(hpa_shard_grow) \
+    OP(hpa_sec)

 typedef enum {
 #define OP(mtx) arena_prof_mutex_##mtx,
...

deps/jemalloc/include/jemalloc/internal/nstime.h

@@ -3,12 +3,23 @@
 /* Maximum supported number of seconds (~584 years). */
 #define NSTIME_SEC_MAX KQU(18446744072)
-#define NSTIME_ZERO_INITIALIZER {0}
+
+#define NSTIME_MAGIC ((uint32_t)0xb8a9ce37)
+#ifdef JEMALLOC_DEBUG
+#  define NSTIME_ZERO_INITIALIZER {0, NSTIME_MAGIC}
+#else
+#  define NSTIME_ZERO_INITIALIZER {0}
+#endif

 typedef struct {
     uint64_t ns;
+#ifdef JEMALLOC_DEBUG
+    uint32_t magic; /* Tracks if initialized. */
+#endif
 } nstime_t;

+static const nstime_t nstime_zero = NSTIME_ZERO_INITIALIZER;
+
 void nstime_init(nstime_t *time, uint64_t ns);
 void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
 uint64_t nstime_ns(const nstime_t *time);
...
@@ -24,11 +35,39 @@ void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
 void nstime_imultiply(nstime_t *time, uint64_t multiplier);
 void nstime_idivide(nstime_t *time, uint64_t divisor);
 uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);

+uint64_t nstime_ns_since(const nstime_t *past);
+
 typedef bool (nstime_monotonic_t)(void);
 extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;

-typedef bool (nstime_update_t)(nstime_t *);
+typedef void (nstime_update_t)(nstime_t *);
 extern nstime_update_t *JET_MUTABLE nstime_update;

+typedef void (nstime_prof_update_t)(nstime_t *);
+extern nstime_prof_update_t *JET_MUTABLE nstime_prof_update;
+
+void nstime_init_update(nstime_t *time);
+void nstime_prof_init_update(nstime_t *time);
+
+enum prof_time_res_e {
+    prof_time_res_default = 0,
+    prof_time_res_high = 1
+};
+typedef enum prof_time_res_e prof_time_res_t;
+
+extern prof_time_res_t opt_prof_time_res;
+extern const char *prof_time_res_mode_names[];
+
+JEMALLOC_ALWAYS_INLINE void
+nstime_init_zero(nstime_t *time) {
+    nstime_copy(time, &nstime_zero);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+nstime_equals_zero(nstime_t *time) {
+    int diff = nstime_compare(time, &nstime_zero);
+    assert(diff >= 0);
+    return diff == 0;
+}
+
 #endif /* JEMALLOC_INTERNAL_NSTIME_H */

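The diff above adds a debug-only magic field to nstime_t that "tracks if initialized", so debug builds can assert that a timestamp was set up before being read. A standalone sketch of the same pattern (my_time_t and TIME_MAGIC are hypothetical names, gated here on NDEBUG rather than JEMALLOC_DEBUG):

#include <assert.h>
#include <stdint.h>

#define TIME_MAGIC ((uint32_t)0xb8a9ce37)   /* Same idea as NSTIME_MAGIC. */

typedef struct {
    uint64_t ns;
#ifndef NDEBUG
    uint32_t magic;   /* Set on init; checked before use in debug builds. */
#endif
} my_time_t;

static void my_time_init(my_time_t *t, uint64_t ns) {
    t->ns = ns;
#ifndef NDEBUG
    t->magic = TIME_MAGIC;
#endif
}

static uint64_t my_time_ns(const my_time_t *t) {
#ifndef NDEBUG
    assert(t->magic == TIME_MAGIC);   /* Catches use of an uninitialized time. */
#endif
    return t->ns;
}

In release builds the field and the check compile away, so the struct layout and cost match the non-debug configuration.
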
deps/jemalloc/include/jemalloc/internal/pa.h
new file mode 100644

#ifndef JEMALLOC_INTERNAL_PA_H
#define JEMALLOC_INTERNAL_PA_H

#include "jemalloc/internal/base.h"
#include "jemalloc/internal/decay.h"
#include "jemalloc/internal/ecache.h"
#include "jemalloc/internal/edata_cache.h"
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/hpa.h"
#include "jemalloc/internal/lockedint.h"
#include "jemalloc/internal/pac.h"
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/sec.h"

/*
 * The page allocator; responsible for acquiring pages of memory for
 * allocations.  It picks the implementation of the page allocator interface
 * (i.e. a pai_t) to handle a given page-level allocation request.  For now, the
 * only such implementation is the PAC code ("page allocator classic"), but
 * others will be coming soon.
 */

typedef struct pa_central_s pa_central_t;
struct pa_central_s {
    hpa_central_t hpa;
};

/*
 * The stats for a particular pa_shard.  Because of the way the ctl module
 * handles stats epoch data collection (it has its own arena_stats, and merges
 * the stats from each arena into it), this needs to live in the arena_stats_t;
 * hence we define it here and let the pa_shard have a pointer (rather than the
 * more natural approach of just embedding it in the pa_shard itself).
 *
 * We follow the arena_stats_t approach of marking the derived fields.  These
 * are the ones that are not maintained on their own; instead, their values are
 * derived during those stats merges.
 */
typedef struct pa_shard_stats_s pa_shard_stats_t;
struct pa_shard_stats_s {
    /* Number of edata_t structs allocated by base, but not being used. */
    size_t edata_avail; /* Derived. */
    /*
     * Stats specific to the PAC.  For now, these are the only stats that
     * exist, but there will eventually be other page allocators.  Things
     * like edata_avail make sense in a cross-PA sense, but things like
     * npurges don't.
     */
    pac_stats_t pac_stats;
};

/*
 * The local allocator handle.  Keeps the state necessary to satisfy page-sized
 * allocations.
 *
 * The contents are mostly internal to the PA module.  The key exception is that
 * arena decay code is allowed to grab pointers to the dirty and muzzy ecaches
 * decay_ts, for a couple of queries, passing them back to a PA function, or
 * acquiring decay.mtx and looking at decay.purging.  The reasoning is that,
 * while PA decides what and how to purge, the arena code decides when and where
 * (e.g. on what thread).  It's allowed to use the presence of another purger to
 * decide.
 * (The background thread code also touches some other decay internals, but
 * that's not fundamental; it's just an artifact of a partial refactoring, and
 * its accesses could be straightforwardly moved inside the decay module).
 */
typedef struct pa_shard_s pa_shard_t;
struct pa_shard_s {
    /* The central PA this shard is associated with. */
    pa_central_t *central;

    /*
     * Number of pages in active extents.
     *
     * Synchronization: atomic.
     */
    atomic_zu_t nactive;

    /*
     * Whether or not we should prefer the hugepage allocator.  Atomic since
     * it may be concurrently modified by a thread setting extent hooks.
     * Note that we still may do HPA operations in this arena; if use_hpa is
     * changed from true to false, we'll free back to the hugepage allocator
     * for those allocations.
     */
    atomic_b_t use_hpa;

    /*
     * If we never used the HPA to begin with, it wasn't initialized, and so
     * we shouldn't try to e.g. acquire its mutexes during fork.  This
     * tracks that knowledge.
     */
    bool ever_used_hpa;

    /* Allocates from a PAC. */
    pac_t pac;

    /*
     * We place a small extent cache in front of the HPA, since we intend
     * these configurations to use many fewer arenas, and therefore have a
     * higher risk of hot locks.
     */
    sec_t hpa_sec;
    hpa_shard_t hpa_shard;

    /* The source of edata_t objects. */
    edata_cache_t edata_cache;

    unsigned ind;

    malloc_mutex_t *stats_mtx;
    pa_shard_stats_t *stats;

    /* The emap this shard is tied to. */
    emap_t *emap;

    /* The base from which we get the ehooks and allocate metadata. */
    base_t *base;
};

static inline bool
pa_shard_dont_decay_muzzy(pa_shard_t *shard) {
    return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 &&
        pac_decay_ms_get(&shard->pac, extent_state_muzzy) <= 0;
}

static inline ehooks_t *
pa_shard_ehooks_get(pa_shard_t *shard) {
    return base_ehooks_get(shard->base);
}

/* Returns true on error. */
bool pa_central_init(pa_central_t *central, base_t *base, bool hpa,
    hpa_hooks_t *hpa_hooks);

/* Returns true on error. */
bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
    emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
    malloc_mutex_t *stats_mtx, nstime_t *cur_time, size_t oversize_threshold,
    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms);

/*
 * This isn't exposed to users; we allow late enablement of the HPA shard so
 * that we can boot without worrying about the HPA, then turn it on in a0.
 */
bool pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
    const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts);

/*
 * We stop using the HPA when custom extent hooks are installed, but still
 * redirect deallocations to it.
 */
void pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard);

/*
 * This does the PA-specific parts of arena reset (i.e. freeing all active
 * allocations).
 */
void pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard);

/*
 * Destroy all the remaining retained extents.  Should only be called after
 * decaying all active, dirty, and muzzy extents to the retained state, as the
 * last step in destroying the shard.
 */
void pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard);

/* Gets an edata for the given allocation. */
edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
    size_t alignment, bool slab, szind_t szind, bool zero, bool guarded,
    bool *deferred_work_generated);
/* Returns true on error, in which case nothing changed. */
bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
    size_t old_size, size_t new_size, szind_t szind, bool zero,
    bool *deferred_work_generated);
/*
 * The same.  Sets *generated_dirty to true if we produced new dirty pages, and
 * false otherwise.
 */
bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
    size_t old_size, size_t new_size, szind_t szind,
    bool *deferred_work_generated);
/*
 * Frees the given edata back to the pa.  Sets *generated_dirty if we produced
 * new dirty pages (well, we always set it for now; but this need not be the
 * case).
 * (We could make generated_dirty the return value of course, but this is more
 * consistent with the shrink pathway and our error codes here).
 */
void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
    bool *deferred_work_generated);

bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
    ssize_t decay_ms, pac_purge_eagerness_t eagerness);
ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state);

/*
 * Do deferred work on this PA shard.
 *
 * Morally, this should do both PAC decay and the HPA deferred work.  For now,
 * though, the arena, background thread, and PAC modules are tightly interwoven
 * in a way that's tricky to extricate, so we only do the HPA-specific parts.
 */
void pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
    bool deferral_allowed);
void pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_try_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
uint64_t pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);

/******************************************************************************/
/*
 * Various bits of "boring" functionality that are still part of this module,
 * but that we relegate to pa_extra.c, to keep the core logic in pa.c as
 * readable as possible.
 */

/*
 * These fork phases are synchronized with the arena fork phase numbering to
 * make it easy to keep straight.  That's why there's no prefork1.
 */
void pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard);

void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive,
    size_t *ndirty, size_t *nmuzzy);

void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
    pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
    hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out,
    size_t *resident);

/*
 * Reads the PA-owned mutex stats into the output stats array, at the
 * appropriate positions.  Morally, these stats should really live in
 * pa_shard_stats_t, but the indices are sort of baked into the various mutex
 * prof macros.  This would be a good thing to do at some point.
 */
void pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
    mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]);

#endif /* JEMALLOC_INTERNAL_PA_H */

deps/jemalloc/include/jemalloc/internal/pac.h
new file mode 100644

#ifndef JEMALLOC_INTERNAL_PAC_H
#define JEMALLOC_INTERNAL_PAC_H

#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/pai.h"
#include "san_bump.h"

/*
 * Page allocator classic; an implementation of the PAI interface that:
 * - Can be used for arenas with custom extent hooks.
 * - Can always satisfy any allocation request (including highly-fragmentary
 *   ones).
 * - Can use efficient OS-level zeroing primitives for demand-filled pages.
 */

/* How "eager" decay/purging should be. */
enum pac_purge_eagerness_e {
    PAC_PURGE_ALWAYS,
    PAC_PURGE_NEVER,
    PAC_PURGE_ON_EPOCH_ADVANCE
};
typedef enum pac_purge_eagerness_e pac_purge_eagerness_t;

typedef struct pac_decay_stats_s pac_decay_stats_t;
struct pac_decay_stats_s {
    /* Total number of purge sweeps. */
    locked_u64_t npurge;
    /* Total number of madvise calls made. */
    locked_u64_t nmadvise;
    /* Total number of pages purged. */
    locked_u64_t purged;
};

typedef struct pac_estats_s pac_estats_t;
struct pac_estats_s {
    /*
     * Stats for a given index in the range [0, SC_NPSIZES] in the various
     * ecache_ts.
     * We track both bytes and # of extents: two extents in the same bucket
     * may have different sizes if adjacent size classes differ by more than
     * a page, so bytes cannot always be derived from # of extents.
     */
    size_t ndirty;
    size_t dirty_bytes;
    size_t nmuzzy;
    size_t muzzy_bytes;
    size_t nretained;
    size_t retained_bytes;
};

typedef struct pac_stats_s pac_stats_t;
struct pac_stats_s {
    pac_decay_stats_t decay_dirty;
    pac_decay_stats_t decay_muzzy;

    /*
     * Number of unused virtual memory bytes currently retained.  Retained
     * bytes are technically mapped (though always decommitted or purged),
     * but they are excluded from the mapped statistic (above).
     */
    size_t retained; /* Derived. */

    /*
     * Number of bytes currently mapped, excluding retained memory (and any
     * base-allocated memory, which is tracked by the arena stats).
     *
     * We name this "pac_mapped" to avoid confusion with the arena_stats
     * "mapped".
     */
    atomic_zu_t pac_mapped;

    /* VM space had to be leaked (undocumented).  Normally 0. */
    atomic_zu_t abandoned_vm;
};

typedef struct pac_s pac_t;
struct pac_s {
    /*
     * Must be the first member (we convert it to a PAC given only a
     * pointer).  The handle to the allocation interface.
     */
    pai_t pai;
    /*
     * Collections of extents that were previously allocated.  These are
     * used when allocating extents, in an attempt to re-use address space.
     *
     * Synchronization: internal.
     */
    ecache_t ecache_dirty;
    ecache_t ecache_muzzy;
    ecache_t ecache_retained;

    base_t *base;
    emap_t *emap;
    edata_cache_t *edata_cache;

    /* The grow info for the retained ecache. */
    exp_grow_t exp_grow;
    malloc_mutex_t grow_mtx;

    /* Special allocator for guarded frequently reused extents. */
    san_bump_alloc_t sba;

    /* How large extents should be before getting auto-purged. */
    atomic_zu_t oversize_threshold;

    /*
     * Decay-based purging state, responsible for scheduling extent state
     * transitions.
     *
     * Synchronization: via the internal mutex.
     */
    decay_t decay_dirty; /* dirty --> muzzy */
    decay_t decay_muzzy; /* muzzy --> retained */

    malloc_mutex_t *stats_mtx;
    pac_stats_t *stats;

    /* Extent serial number generator state. */
    atomic_zu_t extent_sn_next;
};

bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
    edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold,
    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
    malloc_mutex_t *stats_mtx);

static inline size_t
pac_mapped(pac_t *pac) {
    return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED);
}

static inline ehooks_t *
pac_ehooks_get(pac_t *pac) {
    return base_ehooks_get(pac->base);
}

/*
 * All purging functions require holding decay->mtx.  This is one of the few
 * places external modules are allowed to peek inside pa_shard_t internals.
 */

/*
 * Decays the number of pages currently in the ecache.  This might not leave the
 * ecache empty if other threads are inserting dirty objects into it
 * concurrently with the call.
 */
void pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay);
/*
 * Updates decay settings for the current time, and conditionally purges in
 * response (depending on decay_purge_setting).  Returns whether or not the
 * epoch advanced.
 */
bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
    pac_decay_stats_t *decay_stats, ecache_t *ecache,
    pac_purge_eagerness_t eagerness);

/*
 * Gets / sets the maximum amount that we'll grow an arena down the
 * grow-retained pathways (unless forced to by an allocation request).
 *
 * Set new_limit to NULL if it's just a query, or old_limit to NULL if you don't
 * care about the previous value.
 *
 * Returns true on error (if the new limit is not valid).
 */
bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
    size_t *new_limit);

bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
    ssize_t decay_ms, pac_purge_eagerness_t eagerness);
ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state);

void pac_reset(tsdn_t *tsdn, pac_t *pac);
void pac_destroy(tsdn_t *tsdn, pac_t *pac);

#endif /* JEMALLOC_INTERNAL_PAC_H */

deps/jemalloc/include/jemalloc/internal/pages.h

@@ -13,10 +13,27 @@
 /* Return the smallest pagesize multiple that is >= s. */
 #define PAGE_CEILING(s) \
     (((s) + PAGE_MASK) & ~PAGE_MASK)
+/* Return the largest pagesize multiple that is <= s. */
+#define PAGE_FLOOR(s) \
+    ((s) & ~PAGE_MASK)

 /* Huge page size.  LG_HUGEPAGE is determined by the configure script. */
 #define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
 #define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
+
+#if LG_HUGEPAGE != 0
+#  define HUGEPAGE_PAGES (HUGEPAGE / PAGE)
+#else
+/*
+ * It's convenient to define arrays (or bitmaps) of HUGEPAGE_PAGES lengths.  If
+ * we can't autodetect the hugepage size, it gets treated as 0, in which case
+ * we'll trigger a compiler error in those arrays.  Avoid this case by ensuring
+ * that this value is at least 1.  (We won't ever run in this degraded state;
+ * hpa_supported() returns false in this case.)
+ */
+#  define HUGEPAGE_PAGES 1
+#endif
+
 /* Return the huge page base address for the huge page containing address a. */
 #define HUGEPAGE_ADDR2BASE(a) \
     ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
...
@@ -58,6 +75,18 @@ static const bool pages_can_purge_forced =
 #endif
     ;

+#if defined(JEMALLOC_HAVE_MADVISE_HUGE) || defined(JEMALLOC_HAVE_MEMCNTL)
+#  define PAGES_CAN_HUGIFY
+#endif
+
+static const bool pages_can_hugify =
+#ifdef PAGES_CAN_HUGIFY
+    true
+#else
+    false
+#endif
+    ;
+
 typedef enum {
     thp_mode_default = 0, /* Do not change hugepage settings. */
     thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */
...
@@ -84,5 +113,7 @@ bool pages_dontdump(void *addr, size_t size);
 bool pages_dodump(void *addr, size_t size);
 bool pages_boot(void);
 void pages_set_thp_state(void *ptr, size_t size);
+void pages_mark_guards(void *head, void *tail);
+void pages_unmark_guards(void *head, void *tail);

 #endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */

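The PAGE_CEILING / PAGE_FLOOR macros in this hunk round a size up or down to a page boundary with mask arithmetic. A tiny standalone sketch of the same arithmetic, assuming a 4 KiB page (the MY_* names are illustrative, not jemalloc's):

#include <assert.h>
#include <stddef.h>

#define MY_LG_PAGE   12                        /* Assume 4 KiB pages. */
#define MY_PAGE      ((size_t)1 << MY_LG_PAGE)
#define MY_PAGE_MASK (MY_PAGE - 1)

/* Same round-up / round-down arithmetic as PAGE_CEILING / PAGE_FLOOR. */
#define MY_PAGE_CEILING(s) (((s) + MY_PAGE_MASK) & ~MY_PAGE_MASK)
#define MY_PAGE_FLOOR(s)   ((s) & ~MY_PAGE_MASK)

int main(void) {
    assert(MY_PAGE_CEILING(1) == 4096);
    assert(MY_PAGE_CEILING(4096) == 4096);   /* Already aligned: unchanged. */
    assert(MY_PAGE_CEILING(4097) == 8192);
    assert(MY_PAGE_FLOOR(4097) == 4096);
    return 0;
}

The trick only works because the page size is a power of two, so ~MY_PAGE_MASK clears exactly the low LG_PAGE bits.
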
deps/jemalloc/include/jemalloc/internal/pai.h
new file mode 100644

#ifndef JEMALLOC_INTERNAL_PAI_H
#define JEMALLOC_INTERNAL_PAI_H

/* An interface for page allocation. */

typedef struct pai_s pai_t;
struct pai_s {
    /* Returns NULL on failure. */
    edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
        size_t alignment, bool zero, bool guarded, bool frequent_reuse,
        bool *deferred_work_generated);
    /*
     * Returns the number of extents added to the list (which may be fewer
     * than requested, in case of OOM).  The list should already be
     * initialized.  The only alignment guarantee is page-alignment, and
     * the results are not necessarily zeroed.
     */
    size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size,
        size_t nallocs, edata_list_active_t *results,
        bool *deferred_work_generated);
    bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
        size_t old_size, size_t new_size, bool zero,
        bool *deferred_work_generated);
    bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
        size_t old_size, size_t new_size, bool *deferred_work_generated);
    void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
        bool *deferred_work_generated);
    /* This function empties out list as a side-effect of being called. */
    void (*dalloc_batch)(tsdn_t *tsdn, pai_t *self,
        edata_list_active_t *list, bool *deferred_work_generated);
    uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self);
};

/*
 * These are just simple convenience functions to avoid having to reference the
 * same pai_t twice on every invocation.
 */

static inline edata_t *
pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
    bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
    return self->alloc(tsdn, self, size, alignment, zero, guarded,
        frequent_reuse, deferred_work_generated);
}

static inline size_t
pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
    edata_list_active_t *results, bool *deferred_work_generated) {
    return self->alloc_batch(tsdn, self, size, nallocs, results,
        deferred_work_generated);
}

static inline bool
pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
    size_t new_size, bool zero, bool *deferred_work_generated) {
    return self->expand(tsdn, self, edata, old_size, new_size, zero,
        deferred_work_generated);
}

static inline bool
pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
    size_t new_size, bool *deferred_work_generated) {
    return self->shrink(tsdn, self, edata, old_size, new_size,
        deferred_work_generated);
}

static inline void
pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    bool *deferred_work_generated) {
    self->dalloc(tsdn, self, edata, deferred_work_generated);
}

static inline void
pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
    bool *deferred_work_generated) {
    self->dalloc_batch(tsdn, self, list, deferred_work_generated);
}

static inline uint64_t
pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
    return self->time_until_deferred_work(tsdn, self);
}

/*
 * An implementation of batch allocation that simply calls alloc once for
 * each item in the list.
 */
size_t pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t nallocs, edata_list_active_t *results,
    bool *deferred_work_generated);
/* Ditto, for dalloc. */
void pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
    edata_list_active_t *list, bool *deferred_work_generated);

#endif /* JEMALLOC_INTERNAL_PAI_H */

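pai_t is a function-pointer "vtable" plus thin convenience wrappers, and pac_t exploits the fact that the interface is its first member to recover the implementation from a pai_t pointer. A standalone sketch of that pattern with a much smaller, hypothetical interface (alloc_iface_t, counting_alloc_t, and the helper names are made up for illustration):

#include <stdio.h>
#include <stdlib.h>

/* A tiny vtable-style interface in the spirit of pai_t. */
typedef struct alloc_iface_s alloc_iface_t;
struct alloc_iface_s {
    void *(*alloc)(alloc_iface_t *self, size_t size);
    void (*dalloc)(alloc_iface_t *self, void *ptr);
};

/* Convenience wrappers, like pai_alloc() / pai_dalloc(). */
static inline void *iface_alloc(alloc_iface_t *self, size_t size) {
    return self->alloc(self, size);
}
static inline void iface_dalloc(alloc_iface_t *self, void *ptr) {
    self->dalloc(self, ptr);
}

/* One implementation; the interface is the first member, so a cast recovers it. */
typedef struct {
    alloc_iface_t iface;
    size_t nallocs;   /* Per-implementation state. */
} counting_alloc_t;

static void *counting_alloc(alloc_iface_t *self, size_t size) {
    counting_alloc_t *impl = (counting_alloc_t *)self;
    impl->nallocs++;
    return malloc(size);
}
static void counting_dalloc(alloc_iface_t *self, void *ptr) {
    (void)self;
    free(ptr);
}

int main(void) {
    counting_alloc_t ca = { { counting_alloc, counting_dalloc }, 0 };
    void *p = iface_alloc(&ca.iface, 64);
    iface_dalloc(&ca.iface, p);
    printf("allocations: %zu\n", ca.nallocs);
    return 0;
}
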
deps/jemalloc/include/jemalloc/internal/peak.h
new file mode 100644

#ifndef JEMALLOC_INTERNAL_PEAK_H
#define JEMALLOC_INTERNAL_PEAK_H

typedef struct peak_s peak_t;
struct peak_s {
    /* The highest recorded peak value, after adjustment (see below). */
    uint64_t cur_max;
    /*
     * The difference between alloc and dalloc at the last set_zero call;
     * this lets us cancel out the appropriate amount of excess.
     */
    uint64_t adjustment;
};

#define PEAK_INITIALIZER {0, 0}

static inline uint64_t
peak_max(peak_t *peak) {
    return peak->cur_max;
}

static inline void
peak_update(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
    int64_t candidate_max = (int64_t)(alloc - dalloc - peak->adjustment);
    if (candidate_max > (int64_t)peak->cur_max) {
        peak->cur_max = candidate_max;
    }
}

/* Resets the counter to zero; all peaks are now relative to this point. */
static inline void
peak_set_zero(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
    peak->cur_max = 0;
    peak->adjustment = alloc - dalloc;
}

#endif /* JEMALLOC_INTERNAL_PEAK_H */

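peak_t tracks the high-water mark of (allocated - deallocated) bytes, and peak_set_zero re-baselines that mark by recording the currently live amount as an adjustment. A small standalone sketch of the same arithmetic (peak_demo_t and the demo_* names are hypothetical; the header's own types are not used so the example compiles on its own):

#include <assert.h>
#include <stdint.h>

typedef struct {
    uint64_t cur_max;
    uint64_t adjustment;
} peak_demo_t;

static void demo_update(peak_demo_t *p, uint64_t alloc, uint64_t dalloc) {
    int64_t candidate = (int64_t)(alloc - dalloc - p->adjustment);
    if (candidate > (int64_t)p->cur_max) {
        p->cur_max = (uint64_t)candidate;
    }
}

static void demo_set_zero(peak_demo_t *p, uint64_t alloc, uint64_t dalloc) {
    p->cur_max = 0;
    p->adjustment = alloc - dalloc;   /* Current live bytes become the baseline. */
}

int main(void) {
    peak_demo_t p = {0, 0};
    demo_update(&p, 1000, 200);       /* 800 bytes live -> peak is 800. */
    assert(p.cur_max == 800);
    demo_set_zero(&p, 1000, 200);     /* Re-baseline at 800 live bytes. */
    demo_update(&p, 1500, 200);       /* 1300 live, 500 above the baseline. */
    assert(p.cur_max == 500);
    return 0;
}
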
deps/jemalloc/include/jemalloc/internal/peak_event.h
new file mode 100644

#ifndef JEMALLOC_INTERNAL_PEAK_EVENT_H
#define JEMALLOC_INTERNAL_PEAK_EVENT_H

/*
 * While peak.h contains the simple helper struct that tracks state, this
 * contains the allocator tie-ins (and knows about tsd, the event module, etc.).
 */

/* Update the peak with current tsd state. */
void peak_event_update(tsd_t *tsd);
/* Set current state to zero. */
void peak_event_zero(tsd_t *tsd);
uint64_t peak_event_max(tsd_t *tsd);

/* Manual hooks. */
/* The activity-triggered hooks. */
uint64_t peak_alloc_new_event_wait(tsd_t *tsd);
uint64_t peak_alloc_postponed_event_wait(tsd_t *tsd);
void peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed);
uint64_t peak_dalloc_new_event_wait(tsd_t *tsd);
uint64_t peak_dalloc_postponed_event_wait(tsd_t *tsd);
void peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);

#endif /* JEMALLOC_INTERNAL_PEAK_EVENT_H */

deps/jemalloc/include/jemalloc/internal/ph.h
View file @
d4439bd4
#ifndef JEMALLOC_INTERNAL_PH_H
#define JEMALLOC_INTERNAL_PH_H
/*
/*
* A Pairing Heap implementation.
* A Pairing Heap implementation.
*
*
...
@@ -10,382 +13,508 @@
...
@@ -10,382 +13,508 @@
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
*
*
*******************************************************************************
*******************************************************************************
*
* We include a non-obvious optimization:
* - First, we introduce a new pop-and-link operation; pop the two most
* recently-inserted items off the aux-list, link them, and push the resulting
* heap.
* - We maintain a count of the number of insertions since the last time we
* merged the aux-list (i.e. via first() or remove_first()). After N inserts,
* we do ffs(N) pop-and-link operations.
*
* One way to think of this is that we're progressively building up a tree in
* the aux-list, rather than a linked-list (think of the series of merges that
* will be performed as the aux-count grows).
*
* There's a couple reasons we benefit from this:
* - Ordinarily, after N insertions, the aux-list is of size N. With our
* strategy, it's of size O(log(N)). So we decrease the worst-case time of
* first() calls, and reduce the average cost of remove_min calls. Since
* these almost always occur while holding a lock, we practically reduce the
* frequency of unusually long hold times.
* - This moves the bulk of the work of merging the aux-list onto the threads
* that are inserting into the heap. In some common scenarios, insertions
* happen in bulk, from a single thread (think tcache flushing; we potentially
* move many slabs from slabs_full to slabs_nonfull). All the nodes in this
* case are in the inserting threads cache, and linking them is very cheap
* (cache misses dominate linking cost). Without this optimization, linking
* happens on the next call to remove_first. Since that remove_first call
* likely happens on a different thread (or at least, after the cache has
* gotten cold if done on the same thread), deferring linking trades cheap
* link operations now for expensive ones later.
*
* The ffs trick keeps amortized insert cost at constant time. Similar
* strategies based on periodically sorting the list after a batch of operations
* perform worse than this in practice, even with various fancy tricks; they
* all took amortized complexity of an insert from O(1) to O(log(n)).
*/
*/
#ifndef PH_H_
typedef
int
(
*
ph_cmp_t
)(
void
*
,
void
*
);
#define PH_H_
/* Node structure. */
/* Node structure. */
#define phn(a_type) \
typedef
struct
phn_link_s
phn_link_t
;
struct { \
struct
phn_link_s
{
a_type *phn_prev; \
void
*
prev
;
a_type *phn_next; \
void
*
next
;
a_type *phn_lchild; \
void
*
lchild
;
};
typedef
struct
ph_s
ph_t
;
struct
ph_s
{
void
*
root
;
/*
* Inserts done since the last aux-list merge. This is not necessarily
* the size of the aux-list, since it's possible that removals have
* happened since, and we don't track whether or not those removals are
* from the aux list.
*/
size_t
auxcount
;
};
JEMALLOC_ALWAYS_INLINE
phn_link_t
*
phn_link_get
(
void
*
phn
,
size_t
offset
)
{
return
(
phn_link_t
*
)(((
uintptr_t
)
phn
)
+
offset
);
}
}
/* Root structure. */
JEMALLOC_ALWAYS_INLINE
void
#define ph(a_type) \
phn_link_init
(
void
*
phn
,
size_t
offset
)
{
struct { \
phn_link_get
(
phn
,
offset
)
->
prev
=
NULL
;
a_type *ph_root; \
phn_link_get
(
phn
,
offset
)
->
next
=
NULL
;
phn_link_get
(
phn
,
offset
)
->
lchild
=
NULL
;
}
}
/* Internal utility macros. */
/* Internal utility helpers. */
#define phn_lchild_get(a_type, a_field, a_phn) \
JEMALLOC_ALWAYS_INLINE
void
*
(a_phn->a_field.phn_lchild)
phn_lchild_get
(
void
*
phn
,
size_t
offset
)
{
#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
return
phn_link_get
(
phn
,
offset
)
->
lchild
;
a_phn->a_field.phn_lchild = a_lchild; \
}
} while (0)
JEMALLOC_ALWAYS_INLINE
void
#define phn_next_get(a_type, a_field, a_phn) \
phn_lchild_set
(
void
*
phn
,
void
*
lchild
,
size_t
offset
)
{
(a_phn->a_field.phn_next)
phn_link_get
(
phn
,
offset
)
->
lchild
=
lchild
;
#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
}
a_phn->a_field.phn_prev = a_prev; \
} while (0)
JEMALLOC_ALWAYS_INLINE
void
*
phn_next_get
(
void
*
phn
,
size_t
offset
)
{
#define phn_prev_get(a_type, a_field, a_phn) \
return
phn_link_get
(
phn
,
offset
)
->
next
;
(a_phn->a_field.phn_prev)
}
#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
a_phn->a_field.phn_next = a_next; \
JEMALLOC_ALWAYS_INLINE
void
} while (0)
phn_next_set
(
void
*
phn
,
void
*
next
,
size_t
offset
)
{
phn_link_get
(
phn
,
offset
)
->
next
=
next
;
#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
}
a_type *phn0child; \
\
JEMALLOC_ALWAYS_INLINE
void
*
assert(a_phn0 != NULL); \
phn_prev_get
(
void
*
phn
,
size_t
offset
)
{
assert(a_phn1 != NULL); \
return
phn_link_get
(
phn
,
offset
)
->
prev
;
assert(a_cmp(a_phn0, a_phn1) <= 0); \
}
\
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
JEMALLOC_ALWAYS_INLINE
void
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
phn_prev_set
(
void
*
phn
,
void
*
prev
,
size_t
offset
)
{
phn_next_set(a_type, a_field, a_phn1, phn0child); \
phn_link_get
(
phn
,
offset
)
->
prev
=
prev
;
if (phn0child != NULL) { \
}
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
} \
JEMALLOC_ALWAYS_INLINE
void
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
phn_merge_ordered
(
void
*
phn0
,
void
*
phn1
,
size_t
offset
,
} while (0)
ph_cmp_t
cmp
)
{
void
*
phn0child
;
#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
if (a_phn0 == NULL) { \
assert
(
phn0
!=
NULL
);
r_phn = a_phn1; \
assert
(
phn1
!=
NULL
);
} else if (a_phn1 == NULL) { \
assert
(
cmp
(
phn0
,
phn1
)
<=
0
);
r_phn = a_phn0; \
} else if (a_cmp(a_phn0, a_phn1) < 0) { \
phn_prev_set
(
phn1
,
phn0
,
offset
);
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
phn0child
=
phn_lchild_get
(
phn0
,
offset
);
a_cmp); \
phn_next_set
(
phn1
,
phn0child
,
offset
);
r_phn = a_phn0; \
if
(
phn0child
!=
NULL
)
{
} else { \
phn_prev_set
(
phn0child
,
phn1
,
offset
);
phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
}
a_cmp); \
phn_lchild_set
(
phn0
,
phn1
,
offset
);
r_phn = a_phn1; \
}
} \
} while (0)
JEMALLOC_ALWAYS_INLINE
void
*
phn_merge
(
void
*
phn0
,
void
*
phn1
,
size_t
offset
,
ph_cmp_t
cmp
)
{
void
*
result
;
if
(
phn0
==
NULL
)
{
result
=
phn1
;
}
else
if
(
phn1
==
NULL
)
{
result
=
phn0
;
}
else
if
(
cmp
(
phn0
,
phn1
)
<
0
)
{
phn_merge_ordered
(
phn0
,
phn1
,
offset
,
cmp
);
result
=
phn0
;
}
else
{
phn_merge_ordered
(
phn1
,
phn0
,
offset
,
cmp
);
result
=
phn1
;
}
return
result
;
}
JEMALLOC_ALWAYS_INLINE void *
phn_merge_siblings(void *phn, size_t offset, ph_cmp_t cmp) {
    void *head = NULL;
    void *tail = NULL;
    void *phn0 = phn;
    void *phn1 = phn_next_get(phn0, offset);

    /*
     * Multipass merge, wherein the first two elements of a FIFO
     * are repeatedly merged, and each result is appended to the
     * singly linked FIFO, until the FIFO contains only a single
     * element.  We start with a sibling list but no reference to
     * its tail, so we do a single pass over the sibling list to
     * populate the FIFO.
     */
    if (phn1 != NULL) {
        void *phnrest = phn_next_get(phn1, offset);
        if (phnrest != NULL) {
            phn_prev_set(phnrest, NULL, offset);
        }
        phn_prev_set(phn0, NULL, offset);
        phn_next_set(phn0, NULL, offset);
        phn_prev_set(phn1, NULL, offset);
        phn_next_set(phn1, NULL, offset);
        phn0 = phn_merge(phn0, phn1, offset, cmp);
        head = tail = phn0;
        phn0 = phnrest;
        while (phn0 != NULL) {
            phn1 = phn_next_get(phn0, offset);
            if (phn1 != NULL) {
                phnrest = phn_next_get(phn1, offset);
                if (phnrest != NULL) {
                    phn_prev_set(phnrest, NULL, offset);
                }
                phn_prev_set(phn0, NULL, offset);
                phn_next_set(phn0, NULL, offset);
                phn_prev_set(phn1, NULL, offset);
                phn_next_set(phn1, NULL, offset);
                phn0 = phn_merge(phn0, phn1, offset, cmp);
                phn_next_set(tail, phn0, offset);
                tail = phn0;
                phn0 = phnrest;
            } else {
                phn_next_set(tail, phn0, offset);
                tail = phn0;
                phn0 = NULL;
            }
        }
        phn0 = head;
        phn1 = phn_next_get(phn0, offset);
        if (phn1 != NULL) {
            while (true) {
                head = phn_next_get(phn1, offset);
                assert(phn_prev_get(phn0, offset) == NULL);
                phn_next_set(phn0, NULL, offset);
                assert(phn_prev_get(phn1, offset) == NULL);
                phn_next_set(phn1, NULL, offset);
                phn0 = phn_merge(phn0, phn1, offset, cmp);
                if (head == NULL) {
                    break;
                }
                phn_next_set(tail, phn0, offset);
                tail = phn0;
                phn0 = head;
                phn1 = phn_next_get(phn0, offset);
            }
        }
    }
    return phn0;
}
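To make the FIFO-of-merges shape of the multipass scheme concrete, here is a small self-contained sketch that applies the same control flow to plain integers, with min() standing in for phn_merge(); the array-backed queue is purely illustrative and is not how the real code stores siblings:

#include <stdio.h>

/* Stand-in for phn_merge(): combine two "heaps" into one. */
static int merge(int a, int b) { return a < b ? a : b; }

int
main(void) {
    /* Sibling list, already loaded into a FIFO. */
    int fifo[16] = { 7, 3, 9, 1, 4 };
    int head = 0, tail = 5;

    /* Repeatedly merge the first two entries and append the result. */
    while (tail - head > 1) {
        int merged = merge(fifo[head], fifo[head + 1]);
        head += 2;
        fifo[tail++] = merged;
    }
    printf("single remaining root: %d\n", fifo[head]);   /* prints 1 */
    return 0;
}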
JEMALLOC_ALWAYS_INLINE void
ph_merge_aux(ph_t *ph, size_t offset, ph_cmp_t cmp) {
    ph->auxcount = 0;
    void *phn = phn_next_get(ph->root, offset);
    if (phn != NULL) {
        phn_prev_set(ph->root, NULL, offset);
        phn_next_set(ph->root, NULL, offset);
        phn_prev_set(phn, NULL, offset);
        phn = phn_merge_siblings(phn, offset, cmp);
        assert(phn_next_get(phn, offset) == NULL);
        ph->root = phn_merge(ph->root, phn, offset, cmp);
    }
}
JEMALLOC_ALWAYS_INLINE void *
ph_merge_children(void *phn, size_t offset, ph_cmp_t cmp) {
    void *result;
    void *lchild = phn_lchild_get(phn, offset);
    if (lchild == NULL) {
        result = NULL;
    } else {
        result = phn_merge_siblings(lchild, offset, cmp);
    }
    return result;
}

JEMALLOC_ALWAYS_INLINE void
ph_new(ph_t *ph) {
    ph->root = NULL;
    ph->auxcount = 0;
}

JEMALLOC_ALWAYS_INLINE bool
ph_empty(ph_t *ph) {
    return ph->root == NULL;
}

JEMALLOC_ALWAYS_INLINE void *
ph_first(ph_t *ph, size_t offset, ph_cmp_t cmp) {
    if (ph->root == NULL) {
        return NULL;
    }
    ph_merge_aux(ph, offset, cmp);
    return ph->root;
}

JEMALLOC_ALWAYS_INLINE void *
ph_any(ph_t *ph, size_t offset) {
    if (ph->root == NULL) {
        return NULL;
    }
    void *aux = phn_next_get(ph->root, offset);
    if (aux != NULL) {
        return aux;
    }
    return ph->root;
}
/* Returns true if we should stop trying to merge. */
JEMALLOC_ALWAYS_INLINE bool
ph_try_aux_merge_pair(ph_t *ph, size_t offset, ph_cmp_t cmp) {
    assert(ph->root != NULL);
    void *phn0 = phn_next_get(ph->root, offset);
    if (phn0 == NULL) {
        return true;
    }
    void *phn1 = phn_next_get(phn0, offset);
    if (phn1 == NULL) {
        return true;
    }
    void *next_phn1 = phn_next_get(phn1, offset);
    phn_next_set(phn0, NULL, offset);
    phn_prev_set(phn0, NULL, offset);
    phn_next_set(phn1, NULL, offset);
    phn_prev_set(phn1, NULL, offset);
    phn0 = phn_merge(phn0, phn1, offset, cmp);
    phn_next_set(phn0, next_phn1, offset);
    if (next_phn1 != NULL) {
        phn_prev_set(next_phn1, phn0, offset);
    }
    phn_next_set(ph->root, phn0, offset);
    phn_prev_set(phn0, ph->root, offset);
    return next_phn1 == NULL;
}
JEMALLOC_ALWAYS_INLINE void
ph_insert(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) {
    phn_link_init(phn, offset);

    /*
     * Treat the root as an aux list during insertion, and lazily merge
     * during a_prefix##remove_first().  For elements that are inserted,
     * then removed via a_prefix##remove() before the aux list is ever
     * processed, this makes insert/remove constant-time, whereas eager
     * merging would make insert O(log n).
     */
    if (ph->root == NULL) {
        ph->root = phn;
    } else {
        /*
         * As a special case, check to see if we can replace the root.
         * This is practically common in some important cases, and lets
         * us defer some insertions (hopefully, until the point where
         * some of the items in the aux list have been removed, saving
         * us from linking them at all).
         */
        if (cmp(phn, ph->root) < 0) {
            phn_lchild_set(phn, ph->root, offset);
            phn_prev_set(ph->root, phn, offset);
            ph->root = phn;
            ph->auxcount = 0;
            return;
        }
        ph->auxcount++;
        phn_next_set(phn, phn_next_get(ph->root, offset), offset);
        if (phn_next_get(ph->root, offset) != NULL) {
            phn_prev_set(phn_next_get(ph->root, offset), phn, offset);
        }
        phn_prev_set(phn, ph->root, offset);
        phn_next_set(ph->root, phn, offset);
    }
    if (ph->auxcount > 1) {
        unsigned nmerges = ffs_zu(ph->auxcount - 1);
        bool done = false;
        for (unsigned i = 0; i < nmerges && !done; i++) {
            done = ph_try_aux_merge_pair(ph, offset, cmp);
        }
    }
}
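The number of pair merges attempted per insert is derived from ffs_zu(auxcount - 1), jemalloc's find-first-set helper, so the merging work grows only with the bit position of the lowest set bit of auxcount - 1 and stays amortized cheap. Whether ffs_zu is 0- or 1-based determines the exact count; the sketch below only shows the shape of the heuristic, using GCC/Clang's 1-based __builtin_ffsll as a portable stand-in:

#include <stdio.h>

int
main(void) {
    /*
     * Mirror of the ph_insert() heuristic: after growing the aux list to
     * auxcount entries, attempt roughly ffs(auxcount - 1) pair merges.
     */
    for (unsigned long long auxcount = 2; auxcount <= 9; auxcount++) {
        int nmerges = __builtin_ffsll((long long)(auxcount - 1));
        printf("auxcount=%llu -> try up to %d pair merges\n",
            auxcount, nmerges);
    }
    return 0;
}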
JEMALLOC_ALWAYS_INLINE void *
ph_remove_first(ph_t *ph, size_t offset, ph_cmp_t cmp) {
    void *ret;

    if (ph->root == NULL) {
        return NULL;
    }
    ph_merge_aux(ph, offset, cmp);
    ret = ph->root;
    ph->root = ph_merge_children(ph->root, offset, cmp);

    return ret;
}
JEMALLOC_ALWAYS_INLINE void
ph_remove(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) {
    void *replace;
    void *parent;

    if (ph->root == phn) {
        /*
         * We can delete from aux list without merging it, but we need
         * to merge if we are dealing with the root node and it has
         * children.
         */
        if (phn_lchild_get(phn, offset) == NULL) {
            ph->root = phn_next_get(phn, offset);
            if (ph->root != NULL) {
                phn_prev_set(ph->root, NULL, offset);
            }
            return;
        }
        ph_merge_aux(ph, offset, cmp);
        if (ph->root == phn) {
            ph->root = ph_merge_children(ph->root, offset, cmp);
            return;
        }
    }

    /* Get parent (if phn is leftmost child) before mutating. */
    if ((parent = phn_prev_get(phn, offset)) != NULL) {
        if (phn_lchild_get(parent, offset) != phn) {
            parent = NULL;
        }
    }
    /* Find a possible replacement node, and link to parent. */
    replace = ph_merge_children(phn, offset, cmp);
    /* Set next/prev for sibling linked list. */
    if (replace != NULL) {
        if (parent != NULL) {
            phn_prev_set(replace, parent, offset);
            phn_lchild_set(parent, replace, offset);
        } else {
            phn_prev_set(replace, phn_prev_get(phn, offset), offset);
            if (phn_prev_get(phn, offset) != NULL) {
                phn_next_set(phn_prev_get(phn, offset), replace, offset);
            }
        }
        phn_next_set(replace, phn_next_get(phn, offset), offset);
        if (phn_next_get(phn, offset) != NULL) {
            phn_prev_set(phn_next_get(phn, offset), replace, offset);
        }
    } else {
        if (parent != NULL) {
            void *next = phn_next_get(phn, offset);
            phn_lchild_set(parent, next, offset);
            if (next != NULL) {
                phn_prev_set(next, parent, offset);
            }
        } else {
            assert(phn_prev_get(phn, offset) != NULL);
            phn_next_set(phn_prev_get(phn, offset),
                phn_next_get(phn, offset), offset);
        }
        if (phn_next_get(phn, offset) != NULL) {
            phn_prev_set(phn_next_get(phn, offset),
                phn_prev_get(phn, offset), offset);
        }
    }
}
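Putting the pieces together, here is a hedged sketch of a caller using the untyped ph_t API directly (normally a consumer goes through ph_gen() below). The node type and comparator are illustrative, and the fragment assumes it is compiled inside the jemalloc tree so that ph.h and its prerequisites resolve:

#include <stddef.h>
#include <stdint.h>
#include "jemalloc/internal/ph.h"   /* the header changed by this commit */

typedef struct demo_s {
    uint64_t key;
    phn_link_t link;
} demo_t;

static int
demo_cmp(void *a, void *b) {
    uint64_t ka = ((demo_t *)a)->key, kb = ((demo_t *)b)->key;
    return (ka > kb) - (ka < kb);
}

/* Pop the minimum-key node, driving the heap through the raw helpers. */
static demo_t *
demo_pop_min(ph_t *heap) {
    return (demo_t *)ph_remove_first(heap, offsetof(demo_t, link), demo_cmp);
}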
#define ph_structs(a_prefix, a_type)					\
typedef struct {							\
	phn_link_t link;						\
} a_prefix##_link_t;							\
									\
typedef struct {							\
	ph_t ph;							\
} a_prefix##_t;

(The former cpp implementations of ph_merge_siblings(), ph_merge_aux(), and ph_merge_children() -- parameterized on a_type/a_field/a_cmp and using the same multipass merge described above -- are deleted by this commit; their logic now lives in the offset-based inline functions above.)
/*
 * The ph_proto() macro generates function prototypes that correspond to the
 * functions generated by an equivalently parameterized call to ph_gen().
 * (Previously ph_proto(a_attr, a_prefix, a_ph_type, a_type) declared
 * a_prefix##new(), a_prefix##first(), etc.; the generated names now gain an
 * underscore and operate on a_prefix##_t.)
 */
#define ph_proto(a_attr, a_prefix, a_type)				\
									\
a_attr void a_prefix##_new(a_prefix##_t *ph);				\
a_attr bool a_prefix##_empty(a_prefix##_t *ph);				\
a_attr a_type *a_prefix##_first(a_prefix##_t *ph);			\
a_attr a_type *a_prefix##_any(a_prefix##_t *ph);			\
a_attr void a_prefix##_insert(a_prefix##_t *ph, a_type *phn);		\
a_attr a_type *a_prefix##_remove_first(a_prefix##_t *ph);		\
a_attr void a_prefix##_remove(a_prefix##_t *ph, a_type *phn);		\
a_attr a_type *a_prefix##_remove_any(a_prefix##_t *ph);
/* The ph_gen() macro generates a type-specific pairing heap implementation. */
#define ph_gen(a_attr, a_prefix, a_type, a_field, a_cmp)		\
JEMALLOC_ALWAYS_INLINE int						\
a_prefix##_ph_cmp(void *a, void *b) {					\
	return a_cmp((a_type *)a, (a_type *)b);				\
}									\
									\
a_attr void								\
a_prefix##_new(a_prefix##_t *ph) {					\
	ph_new(&ph->ph);						\
}									\
									\
a_attr bool								\
a_prefix##_empty(a_prefix##_t *ph) {					\
	return ph_empty(&ph->ph);					\
}									\
									\
a_attr a_type *								\
a_prefix##_first(a_prefix##_t *ph) {					\
	return ph_first(&ph->ph, offsetof(a_type, a_field),		\
	    &a_prefix##_ph_cmp);					\
}									\
									\
a_attr a_type *								\
a_prefix##_any(a_prefix##_t *ph) {					\
	return ph_any(&ph->ph, offsetof(a_type, a_field));		\
}									\
									\
a_attr void								\
a_prefix##_insert(a_prefix##_t *ph, a_type *phn) {			\
	ph_insert(&ph->ph, phn, offsetof(a_type, a_field),		\
	    a_prefix##_ph_cmp);						\
}									\
									\
a_attr a_type *								\
a_prefix##_remove_first(a_prefix##_t *ph) {				\
	return ph_remove_first(&ph->ph, offsetof(a_type, a_field),	\
	    a_prefix##_ph_cmp);						\
}									\
									\
a_attr void								\
a_prefix##_remove(a_prefix##_t *ph, a_type *phn) {			\
	ph_remove(&ph->ph, phn, offsetof(a_type, a_field),		\
	    a_prefix##_ph_cmp);						\
}									\
									\
a_attr a_type *								\
a_prefix##_remove_any(a_prefix##_t *ph) {				\
	a_type *ret = a_prefix##_any(ph);				\
	if (ret != NULL) {						\
		a_prefix##_remove(ph, ret);				\
	}								\
	return ret;							\
}

(The old ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) expanded the macro-based bodies inline -- memset-based initialization, explicit aux-list manipulation in insert, and a remove_any() that removed the most recently inserted aux-list element, giving LIFO behavior and constant-time insertion/removal when first()/remove_first() were never called.  That expansion is deleted; the generated functions now delegate to the ph_t helpers above.)

#endif /* JEMALLOC_INTERNAL_PH_H */
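A hedged sketch of how a consumer instantiates a typed heap from these macros. The prefix, node type, and comparator are hypothetical, and the fragment assumes it is compiled inside the jemalloc tree so that ph.h and its prerequisites resolve:

#include <stddef.h>
#include <stdint.h>
#include "jemalloc/internal/ph.h"

typedef struct mynode_s mynode_t;
ph_structs(myheap, mynode_t);   /* defines myheap_link_t and myheap_t */

struct mynode_s {
    uint64_t prio;
    myheap_link_t link;
};

static inline int
mynode_cmp(mynode_t *a, mynode_t *b) {
    return (a->prio > b->prio) - (a->prio < b->prio);
}

ph_proto(static, myheap, mynode_t)
ph_gen(static, myheap, mynode_t, link, mynode_cmp)

/* Usage: myheap_t h; myheap_new(&h); myheap_insert(&h, &node);
 * mynode_t *min = myheap_remove_first(&h); */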
deps/jemalloc/include/jemalloc/internal/prng.h
View file @ d4439bd4

#ifndef JEMALLOC_INTERNAL_PRNG_H
#define JEMALLOC_INTERNAL_PRNG_H

#include "jemalloc/internal/bit_util.h"
(The "jemalloc/internal/atomic.h" include is dropped.)

...
@@ -59,66 +58,38 @@ prng_state_next_zu(size_t state) {
/*
 * The prng_lg_range functions give a uniform int in the half-open range [0,
 * 2**lg_range).
 */
(The old comment's additional note -- "If atomic is true, they do so safely from multiple threads.  Multithreaded 64-bit prngs aren't supported." -- is removed along with the atomic variants below.)
JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(uint32_t *state, unsigned lg_range) {
    assert(lg_range > 0);
    assert(lg_range <= 32);

    *state = prng_state_next_u32(*state);
    uint32_t ret = *state >> (32 - lg_range);

    return ret;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
    assert(lg_range > 0);
    assert(lg_range <= 64);

    *state = prng_state_next_u64(*state);
    uint64_t ret = *state >> (64 - lg_range);

    return ret;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(size_t *state, unsigned lg_range) {
    assert(lg_range > 0);
    assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));

    *state = prng_state_next_zu(*state);
    size_t ret = *state >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);

    return ret;
}

(The previous u32/zu variants took an atomic_u32_t */atomic_zu_t * state plus a bool atomic flag and looped on atomic_compare_exchange_weak_*; those versions are removed in favor of the plain caller-owned state above.)
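These helpers advance the caller-owned state and keep only the top lg_range bits of the new state, which is why the result is taken as state >> (width - lg_range). A standalone sketch of that usage pattern follows; the LCG constants here are Knuth's MMIX parameters used purely as a stand-in and are not asserted to be the ones inside prng_state_next_u64():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for prng_state_next_u64(): any full-period 64-bit LCG step. */
static uint64_t
demo_state_next_u64(uint64_t state) {
    return state * 6364136223846793005ULL + 1442695040888963407ULL;
}

/* Mirrors prng_lg_range_u64(): advance the state, keep the top lg_range bits. */
static uint64_t
demo_lg_range_u64(uint64_t *state, unsigned lg_range) {
    *state = demo_state_next_u64(*state);
    return *state >> (64 - lg_range);
}

int
main(void) {
    uint64_t state = 42;   /* caller-owned, non-atomic state */
    for (int i = 0; i < 4; i++) {
        printf("%llu\n",
            (unsigned long long)demo_lg_range_u64(&state, 8));   /* [0, 256) */
    }
    return 0;
}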
...
@@ -129,18 +100,24 @@ prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
 */
JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(uint32_t *state, uint32_t range) {
    assert(range != 0);
    /*
     * If range were 1, lg_range would be 0, so the shift in
     * prng_lg_range_u32 would be a shift of a 32-bit variable by 32 bits,
     * which is UB.  Just handle this case as a one-off.
     */
    if (range == 1) {
        return 0;
    }

    /* Compute the ceiling of lg(range). */
    unsigned lg_range = ffs_u32(pow2_ceil_u32(range));

    /* Generate a result in [0..range) via repeated trial. */
    uint32_t ret;
    do {
        ret = prng_lg_range_u32(state, lg_range);
    } while (ret >= range);

    return ret;
...
@@ -148,15 +125,18 @@ prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range) {
    assert(range != 0);
    /* See the note in prng_range_u32. */
    if (range == 1) {
        return 0;
    }

    /* Compute the ceiling of lg(range). */
    unsigned lg_range = ffs_u64(pow2_ceil_u64(range));

    /* Generate a result in [0..range) via repeated trial. */
    uint64_t ret;
    do {
        ret = prng_lg_range_u64(state, lg_range);
    } while (ret >= range);
...
@@ -165,18 +145,21 @@ prng_range_u64(uint64_t *state, uint64_t range) {
}

JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(size_t *state, size_t range) {
    assert(range != 0);
    /* See the note in prng_range_u32. */
    if (range == 1) {
        return 0;
    }

    /* Compute the ceiling of lg(range). */
    unsigned lg_range = ffs_u64(pow2_ceil_u64(range));

    /* Generate a result in [0..range) via repeated trial. */
    size_t ret;
    do {
        ret = prng_lg_range_zu(state, lg_range);
    } while (ret >= range);

    return ret;
...
(The previous prng_range_* versions took an atomic state pointer plus a bool atomic flag, asserted range > 1, and computed lg_range with an extra "- 1"; they are replaced by the plain-state versions above.)
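The rejection loop draws ceil(lg(range)) bits and retries on overshoot, so for range = 5 it draws 3-bit values and discards 5, 6, and 7. A standalone illustration of that loop, where rand() is only a stand-in for prng_lg_range_*():

#include <stdio.h>
#include <stdlib.h>

int
main(void) {
    const unsigned range = 5;
    const unsigned lg_range = 3;   /* ceil(lg(5)) */
    unsigned counts[5] = {0};

    srand(1);
    for (int i = 0; i < 100000; i++) {
        unsigned ret;
        do {
            ret = (unsigned)rand() & ((1u << lg_range) - 1);
        } while (ret >= range);   /* reject 5, 6, 7 and redraw */
        counts[ret]++;
    }
    for (unsigned v = 0; v < range; v++) {
        printf("%u: %u\n", v, counts[v]);
    }
    return 0;
}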
deps/jemalloc/include/jemalloc/internal/prof_data.h
0 → 100644
View file @ d4439bd4

#ifndef JEMALLOC_INTERNAL_PROF_DATA_H
#define JEMALLOC_INTERNAL_PROF_DATA_H

#include "jemalloc/internal/mutex.h"

extern malloc_mutex_t bt2gctx_mtx;
extern malloc_mutex_t tdatas_mtx;
extern malloc_mutex_t prof_dump_mtx;

extern malloc_mutex_t *gctx_locks;
extern malloc_mutex_t *tdata_locks;

extern size_t prof_unbiased_sz[PROF_SC_NSIZES];
extern size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES];

void prof_bt_hash(const void *key, size_t r_hash[2]);
bool prof_bt_keycomp(const void *k1, const void *k2);

bool prof_data_init(tsd_t *tsd);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
int prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name);
void prof_unbias_map_init();
void prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
    prof_tdata_t *tdata, bool leakcheck);
prof_tdata_t *prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid,
    uint64_t thr_discrim, char *thread_name, bool active);
void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx);

/* Used in unit tests. */
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
void prof_cnt_all(prof_cnt_t *cnt_all);

#endif /* JEMALLOC_INTERNAL_PROF_DATA_H */
deps/jemalloc/include/jemalloc/internal/prof_externs.h
View file @ d4439bd4

...
@@ -2,8 +2,7 @@
#define JEMALLOC_INTERNAL_PROF_EXTERNS_H

#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prof_hook.h"

(extern malloc_mutex_t bt2gctx_mtx; is removed here -- it moves to prof_data.h.)

extern bool opt_prof;
extern bool opt_prof_active;
...
@@ -13,6 +12,7 @@ extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump;        /* High-water memory dumping. */
extern bool opt_prof_final;        /* Final profile dumping. */
extern bool opt_prof_leak;         /* Dump leak summary at exit. */
extern bool opt_prof_leak_error;   /* Exit with error code if memory leaked */
extern bool opt_prof_accum;        /* Report cumulative bytes. */
extern bool opt_prof_log;          /* Turn logging on at boot. */
extern char opt_prof_prefix[
...
@@ -21,20 +21,24 @@ extern char opt_prof_prefix[
    PATH_MAX +
#endif
    1];

extern bool opt_prof_unbias;

/* For recording recent allocations */
extern ssize_t opt_prof_recent_alloc_max;

/* Whether to use thread name provided by the system or by mallctl. */
extern bool opt_prof_sys_thread_name;

/* Whether to record per size class counts and request size totals. */
extern bool opt_prof_stats;

/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active_state;     /* renamed from prof_active */

/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;

/* Profile dump interval, measured in bytes allocated. */
extern uint64_t prof_interval;
(The previous multi-line comment explained that each arena triggers a dump at this threshold, so the interval between dumps averages prof_interval but can be sporadic, up to approximately prof_interval * narenas; the comment is shortened by this commit.)

...
@@ -43,34 +47,27 @@ extern uint64_t prof_interval;
 */
extern size_t lg_prof_sample;

extern bool prof_booted;

void prof_backtrace_hook_set(prof_backtrace_hook_t hook);
prof_backtrace_hook_t prof_backtrace_hook_get();

void prof_dump_hook_set(prof_dump_hook_t hook);
prof_dump_hook_t prof_dump_hook_get();

/* Functions only accessed in prof_inlines.h */
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);

void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx);
void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
    size_t usize, prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info);
prof_tctx_t *prof_tctx_create(tsd_t *tsd);
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);

void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);

(Removed by this hunk: the old prof_alloc_rollback(tsd, tctx, bool updated), the tsdn-based prof_malloc_sample_object() and ptr-based prof_free_sampled_object(), bt_init(), prof_backtrace(), prof_lookup(), the JEMALLOC_JET prof_tdata_count()/prof_bt_count() block, the prof_dump_open_t/prof_dump_header_t JET_MUTABLE hooks, prof_cnt_all(), prof_accum_init(), and prof_reset(); several of these now live in prof_data.h or prof_hook.h.)

...
@@ -84,22 +81,15 @@ bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd, base_t *base);   /* previously prof_boot2(tsd_t *tsd) */
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);

/* Only accessed by thread event. */
uint64_t prof_sample_new_event_wait(tsd_t *tsd);
uint64_t prof_sample_postponed_event_wait(tsd_t *tsd);
void prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed);

(Removed by this hunk: prof_sample_threshold_update(), prof_log_start(), prof_log_stop(), and the JEMALLOC_JET prof_log_* test helpers.)

#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
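prof_active_state is what the fast-path inlines read; from outside the allocator it is normally flipped through the documented "prof.active" mallctl rather than touched directly. A hedged usage sketch, assuming a jemalloc built with profiling enabled and no symbol prefix, with error handling kept minimal:

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
    bool active = false;
    size_t sz = sizeof(active);

    /* Read the current state, then enable profiling activity. */
    if (mallctl("prof.active", &active, &sz, NULL, 0) == 0) {
        printf("prof.active was %d\n", (int)active);
    }
    active = true;
    mallctl("prof.active", NULL, NULL, &active, sizeof(active));
    return 0;
}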
deps/jemalloc/include/jemalloc/internal/prof_hook.h
0 → 100644
View file @ d4439bd4

#ifndef JEMALLOC_INTERNAL_PROF_HOOK_H
#define JEMALLOC_INTERNAL_PROF_HOOK_H

/*
 * The hook types declared in this file are experimental and undocumented,
 * thus the typedefs are located in an 'internal' header.
 */

/*
 * A hook to mock out backtrace functionality.  This can be handy, since it's
 * otherwise difficult to guarantee that two allocations are reported as coming
 * from the exact same stack trace in the presence of an optimizing compiler.
 */
typedef void (*prof_backtrace_hook_t)(void **, unsigned *, unsigned);

/*
 * A callback hook that notifies about recently dumped heap profile.
 */
typedef void (*prof_dump_hook_t)(const char *filename);

#endif /* JEMALLOC_INTERNAL_PROF_HOOK_H */
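A hedged sketch of a hook matching prof_backtrace_hook_t, as a test might install via prof_backtrace_hook_set() (declared in prof_externs.h above). The interpretation of the three parameters as (frame vector, out length, capacity) is inferred from the typedef, and the fixed fake frames are purely illustrative:

/* Illustrative only: a deterministic "backtrace" for tests. */
static void
fake_backtrace_hook(void **vec, unsigned *len, unsigned max_len) {
    /* Pretend every sampled allocation came from the same two frames. */
    static int frame0, frame1;
    unsigned n = 0;
    if (n < max_len) vec[n++] = &frame0;
    if (n < max_len) vec[n++] = &frame1;
    *len = n;
}

/* Somewhere during test setup (internal API, assumed available in-tree): */
/*   prof_backtrace_hook_set(&fake_backtrace_hook); */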
deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h → deps/jemalloc/include/jemalloc/internal/prof_inlines.h
View file @ d4439bd4

#ifndef JEMALLOC_INTERNAL_PROF_INLINES_H
#define JEMALLOC_INTERNAL_PROF_INLINES_H

#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/thread_event.h"

JEMALLOC_ALWAYS_INLINE void
prof_active_assert() {
    cassert(config_prof);
    /*
     * If opt_prof is off, then prof_active must always be off, regardless
     * of whether prof_active_mtx is in effect or not.
     */
    assert(opt_prof || !prof_active_state);
}

JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
    prof_active_assert();
    /*
     * Even if opt_prof is true, sampling can be temporarily disabled by
     * setting prof_active to false.  No locking is used when reading
     * prof_active in the fast path, so there are no guarantees regarding
     * how long it will take for all threads to notice state changes.
     */
    return prof_active_state;
}

JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void) {
...
@@ -22,6 +45,7 @@ prof_tdata_get(tsd_t *tsd, bool create) {
    tdata = tsd_prof_tdata_get(tsd);
    if (create) {
        assert(tsd_reentrancy_level_get(tsd) == 0);
        if (unlikely(tdata == NULL)) {
            if (tsd_nominal(tsd)) {
                tdata = prof_tdata_init(tsd);
...
@@ -37,158 +61,115 @@ prof_tdata_get(tsd_t *tsd, bool create) {
    return tdata;
}
JEMALLOC_ALWAYS_INLINE void
prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
    prof_info_t *prof_info) {
    cassert(config_prof);
    assert(ptr != NULL);
    assert(prof_info != NULL);

    arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, false);
}

JEMALLOC_ALWAYS_INLINE void
prof_info_get_and_reset_recent(tsd_t *tsd, const void *ptr,
    emap_alloc_ctx_t *alloc_ctx, prof_info_t *prof_info) {
    cassert(config_prof);
    assert(ptr != NULL);
    assert(prof_info != NULL);

    arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, true);
}

(These replace the old prof_tctx_get(tsdn, ptr, alloc_ctx), which returned a prof_tctx_t * via arena_prof_tctx_get(), and prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx), which called arena_prof_tctx_set().)
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx) {
    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_reset(tsd, ptr, alloc_ctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_reset_sampled(tsd, ptr);
}

(Previously prof_tctx_reset() took (tsdn, ptr, tctx), and a tsdn-based prof_alloc_time_get() helper existed here; the latter is removed.)
JEMALLOC_ALWAYS_INLINE void
prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, size_t size) {
    cassert(config_prof);
    assert(edata != NULL);
    assert((uintptr_t)tctx > (uintptr_t)1U);

    arena_prof_info_set(tsd, edata, tctx, size);
}

(The old prof_alloc_time_set() and the prof_sample_check() helper, which maintained tsd_bytes_until_sample and compared it against the allocation size, are removed; sampling decisions now come from the thread event module.)
JEMALLOC_ALWAYS_INLINE bool
prof_sample_should_skip(tsd_t *tsd, bool sample_event) {
    cassert(config_prof);

    /* Fastpath: no need to load tdata */
    if (likely(!sample_event)) {
        return true;
    }

    /*
     * sample_event is always obtained from the thread event module, and
     * whenever it's true, it means that the thread event module has
     * already checked the reentrancy level.
     */
    assert(tsd_reentrancy_level_get(tsd) == 0);

    prof_tdata_t *tdata = prof_tdata_get(tsd, true);
    if (unlikely(tdata == NULL)) {
        return true;
    }

    return !tdata->active;
}

(This replaces prof_sample_accum_update(tsd, usize, update, tdata_out), which re-checked the byte threshold, recomputed the sample threshold on update, and filtered out tdata values at or below PROF_TDATA_STATE_MAX.)
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, bool prof_active, bool sample_event) {
    prof_tctx_t *ret;

    if (!prof_active ||
        likely(prof_sample_should_skip(tsd, sample_event))) {
        ret = (prof_tctx_t *)(uintptr_t)1U;
    } else {
        ret = prof_tctx_create(tsd);
    }

    return ret;
}

(The old version took (tsd, usize, prof_active, update), asserted usize == sz_s2u(usize), and built the tctx itself via bt_init(), prof_backtrace(), and prof_lookup().)
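Throughout these inlines, a tctx value of (uintptr_t)1 is the "allocated but not sampled" sentinel, and anything numerically greater is treated as a real prof_tctx_t pointer. A tiny standalone sketch of that convention (the names are illustrative, not jemalloc's):

#include <stdint.h>
#include <stdio.h>

typedef struct demo_tctx_s { int dummy; } demo_tctx_t;

#define DEMO_TCTX_SENTINEL  ((demo_tctx_t *)(uintptr_t)1U)

static int
demo_is_sampled(const demo_tctx_t *tctx) {
    return (uintptr_t)tctx > (uintptr_t)1U;
}

int
main(void) {
    demo_tctx_t real;
    printf("%d %d\n", demo_is_sampled(DEMO_TCTX_SENTINEL),   /* 0 */
        demo_is_sampled(&real));                             /* 1 */
    return 0;
}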
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
    emap_alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
    cassert(config_prof);
    assert(ptr != NULL);
    assert(usize == isalloc(tsd_tsdn(tsd), ptr));

    if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
        prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
    } else {
        prof_tctx_reset(tsd, ptr, alloc_ctx);
    }
}
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
    prof_tctx_t *tctx, bool prof_active, const void *old_ptr, size_t old_usize,
    prof_info_t *old_prof_info, bool sample_event) {
    bool sampled, old_sampled, moved;

    cassert(config_prof);
    assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

    if (prof_active && ptr != NULL) {
        assert(usize == isalloc(tsd_tsdn(tsd), ptr));
        if (prof_sample_should_skip(tsd, sample_event)) {
            /*
             * Don't sample.  The usize passed to prof_alloc_prep()
             * was larger than what actually got allocated, so a
...
@@ -196,31 +177,31 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
             * though its actual usize was insufficient to cross the
             * sample threshold.
             */
            prof_alloc_rollback(tsd, tctx);
            tctx = (prof_tctx_t *)(uintptr_t)1U;
        }
    }

    sampled = ((uintptr_t)tctx > (uintptr_t)1U);
    old_sampled = ((uintptr_t)old_prof_info->alloc_tctx > (uintptr_t)1U);
    moved = (ptr != old_ptr);

    if (unlikely(sampled)) {
        prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
    } else if (moved) {
        prof_tctx_reset(tsd, ptr, NULL);
    } else if (unlikely(old_sampled)) {
        /*
         * prof_tctx_reset() would work for the !moved case as well,
         * but prof_tctx_reset_sampled() is slightly cheaper, and the
         * proper thing to do here in the presence of explicit
         * knowledge re: moved state.
         */
        prof_tctx_reset_sampled(tsd, ptr);
    } else {
        prof_info_t prof_info;
        prof_info_get(tsd, ptr, NULL, &prof_info);
        assert((uintptr_t)prof_info.alloc_tctx == (uintptr_t)1U);
    }

    /*
...
@@ -231,20 +212,50 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
     * counters.
     */
    if (unlikely(old_sampled)) {
        prof_free_sampled_object(tsd, old_usize, old_prof_info);
    }
}

(The old prof_realloc() signature was (tsd, ptr, usize, tctx, prof_active, updated, old_ptr, old_usize, old_tctx) and used prof_sample_accum_update(), prof_tctx_set(), and the ptr-based prof_free_sampled_object().)
JEMALLOC_ALWAYS_INLINE size_t
prof_sample_align(size_t orig_align) {
    /*
     * Enforce page alignment, so that sampled allocations can be identified
     * w/o metadata lookup.
     */
    assert(opt_prof);
    return (opt_cache_oblivious && orig_align < PAGE) ? PAGE : orig_align;
}

JEMALLOC_ALWAYS_INLINE bool
prof_sample_aligned(const void *ptr) {
    return ((uintptr_t)ptr & PAGE_MASK) == 0;
}

JEMALLOC_ALWAYS_INLINE bool
prof_sampled(tsd_t *tsd, const void *ptr) {
    prof_info_t prof_info;
    prof_info_get(tsd, ptr, NULL, &prof_info);
    bool sampled = (uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U;
    if (sampled) {
        assert(prof_sample_aligned(ptr));
    }
    return sampled;
}
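prof_sample_aligned() is just a mask test against the page size, which is what lets a sampled allocation be recognized without a metadata lookup. A standalone illustration of that test, with a 4096-byte page assumed purely for the example:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE       ((uintptr_t)4096)   /* illustrative page size */
#define DEMO_PAGE_MASK  (DEMO_PAGE - 1)

static int
demo_page_aligned(const void *ptr) {
    return ((uintptr_t)ptr & DEMO_PAGE_MASK) == 0;
}

int
main(void) {
    printf("%d %d\n",
        demo_page_aligned((void *)(uintptr_t)0x20000),    /* 1: multiple of 4096 */
        demo_page_aligned((void *)(uintptr_t)0x20010));   /* 0 */
    return 0;
}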
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize,
    emap_alloc_ctx_t *alloc_ctx) {
    prof_info_t prof_info;
    prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);

    cassert(config_prof);
    assert(usize == isalloc(tsd_tsdn(tsd), ptr));

    if (unlikely((uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U)) {
        assert(prof_sample_aligned(ptr));
        prof_free_sampled_object(tsd, usize, &prof_info);
    }
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_H */