Commit b8beda3c authored May 01, 2023 by Oran Agra

Merge commit jemalloc 5.3.0

Parents: d659c734, 6d23d3ac
Changes: 195 files (only 195 of 195+ changed files are shown in this view)
deps/jemalloc/include/jemalloc/internal/mutex_pool.h (deleted, 100644 → 0) @ d659c734

#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H
#define JEMALLOC_INTERNAL_MUTEX_POOL_H

#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/witness.h"

/* We do mod reductions by this value, so it should be kept a power of 2. */
#define MUTEX_POOL_SIZE 256

typedef struct mutex_pool_s mutex_pool_t;
struct mutex_pool_s {
    malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
};

bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank);

/* Internal helper - not meant to be called outside this module. */
static inline malloc_mutex_t *
mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) {
    size_t hash_result[2];
    hash(&key, sizeof(key), 0xd50dcc1b, hash_result);
    return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE];
}

static inline void
mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) {
    for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
        malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]);
    }
}

/*
 * Note that a mutex pool doesn't work exactly the way an embedded mutex would.
 * You're not allowed to acquire mutexes in the pool one at a time. You have to
 * acquire all the mutexes you'll need in a single function call, and then
 * release them all in a single function call.
 */

static inline void
mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    mutex_pool_assert_not_held(tsdn, pool);

    malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
    malloc_mutex_lock(tsdn, mutex);
}

static inline void
mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
    malloc_mutex_unlock(tsdn, mutex);

    mutex_pool_assert_not_held(tsdn, pool);
}

static inline void
mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
    mutex_pool_assert_not_held(tsdn, pool);

    malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
    malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
    if ((uintptr_t)mutex1 < (uintptr_t)mutex2) {
        malloc_mutex_lock(tsdn, mutex1);
        malloc_mutex_lock(tsdn, mutex2);
    } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) {
        malloc_mutex_lock(tsdn, mutex1);
    } else {
        malloc_mutex_lock(tsdn, mutex2);
        malloc_mutex_lock(tsdn, mutex1);
    }
}

static inline void
mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
    malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
    malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
    if (mutex1 == mutex2) {
        malloc_mutex_unlock(tsdn, mutex1);
    } else {
        malloc_mutex_unlock(tsdn, mutex1);
        malloc_mutex_unlock(tsdn, mutex2);
    }

    mutex_pool_assert_not_held(tsdn, pool);
}

static inline void
mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
    malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key));
}

#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */
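The locking discipline the deleted header enforced (hash a key to one lock in a fixed-size pool, and take any pair of pooled locks in a fixed order) is generic. A rough standalone sketch of the same idea using pthreads instead of jemalloc's malloc_mutex_t; all names here are illustrative, not part of jemalloc:

#include <pthread.h>
#include <stdint.h>
#include <stddef.h>

#define POOL_SIZE 256 /* power of two, mirroring MUTEX_POOL_SIZE */

static pthread_mutex_t pool[POOL_SIZE];

static void
pool_init(void) {
    for (size_t i = 0; i < POOL_SIZE; i++) {
        pthread_mutex_init(&pool[i], NULL);
    }
}

/* Map a key to one pooled lock (jemalloc hashes the key first; modulo is enough here). */
static pthread_mutex_t *
pool_mutex(uintptr_t key) {
    return &pool[key % POOL_SIZE];
}

/* Lock two keys without deadlocking: always acquire in address order. */
static void
pool_lock2(uintptr_t k1, uintptr_t k2) {
    pthread_mutex_t *m1 = pool_mutex(k1);
    pthread_mutex_t *m2 = pool_mutex(k2);
    if (m1 == m2) {
        pthread_mutex_lock(m1);
    } else if ((uintptr_t)m1 < (uintptr_t)m2) {
        pthread_mutex_lock(m1);
        pthread_mutex_lock(m2);
    } else {
        pthread_mutex_lock(m2);
        pthread_mutex_lock(m1);
    }
}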
deps/jemalloc/include/jemalloc/internal/mutex_prof.h @ b8beda3c

@@ -7,8 +7,14 @@
 #define MUTEX_PROF_GLOBAL_MUTEXES \
     OP(background_thread) \
+    OP(max_per_bg_thd) \
     OP(ctl) \
-    OP(prof)
+    OP(prof) \
+    OP(prof_thds_data) \
+    OP(prof_dump) \
+    OP(prof_recent_alloc) \
+    OP(prof_recent_dump) \
+    OP(prof_stats)
 
 typedef enum {
 #define OP(mtx) global_prof_mutex_##mtx,
@@ -26,7 +32,10 @@ typedef enum {
     OP(decay_dirty) \
     OP(decay_muzzy) \
     OP(base) \
-    OP(tcache_list)
+    OP(tcache_list) \
+    OP(hpa_shard) \
+    OP(hpa_shard_grow) \
+    OP(hpa_sec)
 
 typedef enum {
 #define OP(mtx) arena_prof_mutex_##mtx,
...
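The OP() lists above feed an X-macro that is expanded several times to generate enumerators, counters, and printable names. A minimal self-contained sketch of that pattern, with a made-up mutex list rather than the real jemalloc one:

#include <stdio.h>

/* A hypothetical mutex list in the same style as MUTEX_PROF_GLOBAL_MUTEXES. */
#define DEMO_MUTEXES \
    OP(background_thread) \
    OP(ctl) \
    OP(prof)

/* Expand once into enumerators... */
typedef enum {
#define OP(mtx) demo_mutex_##mtx,
    DEMO_MUTEXES
#undef OP
    demo_num_mutexes
} demo_mutex_ind_t;

/* ...and once more into printable names. */
static const char *demo_mutex_names[] = {
#define OP(mtx) #mtx,
    DEMO_MUTEXES
#undef OP
};

int
main(void) {
    for (int i = 0; i < demo_num_mutexes; i++) {
        printf("%d -> %s\n", i, demo_mutex_names[i]);
    }
    return 0;
}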
deps/jemalloc/include/jemalloc/internal/nstime.h @ b8beda3c

@@ -3,12 +3,23 @@
 /* Maximum supported number of seconds (~584 years). */
 #define NSTIME_SEC_MAX KQU(18446744072)
-#define NSTIME_ZERO_INITIALIZER {0}
+
+#define NSTIME_MAGIC ((uint32_t)0xb8a9ce37)
+#ifdef JEMALLOC_DEBUG
+#  define NSTIME_ZERO_INITIALIZER {0, NSTIME_MAGIC}
+#else
+#  define NSTIME_ZERO_INITIALIZER {0}
+#endif
 
 typedef struct {
     uint64_t ns;
+#ifdef JEMALLOC_DEBUG
+    uint32_t magic; /* Tracks if initialized. */
+#endif
 } nstime_t;
 
+static const nstime_t nstime_zero = NSTIME_ZERO_INITIALIZER;
+
 void nstime_init(nstime_t *time, uint64_t ns);
 void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
 uint64_t nstime_ns(const nstime_t *time);
@@ -24,11 +35,39 @@ void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
 void nstime_imultiply(nstime_t *time, uint64_t multiplier);
 void nstime_idivide(nstime_t *time, uint64_t divisor);
 uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
+uint64_t nstime_ns_since(const nstime_t *past);
 
 typedef bool (nstime_monotonic_t)(void);
 extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;
 
-typedef bool (nstime_update_t)(nstime_t *);
+typedef void (nstime_update_t)(nstime_t *);
 extern nstime_update_t *JET_MUTABLE nstime_update;
 
+typedef void (nstime_prof_update_t)(nstime_t *);
+extern nstime_prof_update_t *JET_MUTABLE nstime_prof_update;
+
+void nstime_init_update(nstime_t *time);
+void nstime_prof_init_update(nstime_t *time);
+
+enum prof_time_res_e {
+    prof_time_res_default = 0,
+    prof_time_res_high = 1
+};
+typedef enum prof_time_res_e prof_time_res_t;
+
+extern prof_time_res_t opt_prof_time_res;
+extern const char *prof_time_res_mode_names[];
+
+JEMALLOC_ALWAYS_INLINE void
+nstime_init_zero(nstime_t *time) {
+    nstime_copy(time, &nstime_zero);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+nstime_equals_zero(nstime_t *time) {
+    int diff = nstime_compare(time, &nstime_zero);
+    assert(diff >= 0);
+    return diff == 0;
+}
+
 #endif /* JEMALLOC_INTERNAL_NSTIME_H */
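The NSTIME_MAGIC field added above is a debug-build trick for catching use of uninitialized values: the field only exists when JEMALLOC_DEBUG is defined, and accessors assert on it. A stripped-down sketch of the same pattern, independent of jemalloc's types and using standard NDEBUG instead of JEMALLOC_DEBUG:

#include <assert.h>
#include <stdint.h>

#define STAMP_MAGIC ((uint32_t)0xb8a9ce37)

typedef struct {
    uint64_t ns;
#ifndef NDEBUG
    uint32_t magic; /* Set on init; checked before use in debug builds. */
#endif
} stamp_t;

static void
stamp_init(stamp_t *s, uint64_t ns) {
    s->ns = ns;
#ifndef NDEBUG
    s->magic = STAMP_MAGIC;
#endif
}

static uint64_t
stamp_ns(const stamp_t *s) {
    assert(s->magic == STAMP_MAGIC); /* Fails loudly on an uninitialized stamp. */
    return s->ns;
}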
deps/jemalloc/include/jemalloc/internal/pa.h (new file, 0 → 100644) @ b8beda3c

#ifndef JEMALLOC_INTERNAL_PA_H
#define JEMALLOC_INTERNAL_PA_H

#include "jemalloc/internal/base.h"
#include "jemalloc/internal/decay.h"
#include "jemalloc/internal/ecache.h"
#include "jemalloc/internal/edata_cache.h"
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/hpa.h"
#include "jemalloc/internal/lockedint.h"
#include "jemalloc/internal/pac.h"
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/sec.h"

/*
 * The page allocator; responsible for acquiring pages of memory for
 * allocations. It picks the implementation of the page allocator interface
 * (i.e. a pai_t) to handle a given page-level allocation request. For now, the
 * only such implementation is the PAC code ("page allocator classic"), but
 * others will be coming soon.
 */

typedef struct pa_central_s pa_central_t;
struct pa_central_s {
    hpa_central_t hpa;
};

/*
 * The stats for a particular pa_shard. Because of the way the ctl module
 * handles stats epoch data collection (it has its own arena_stats, and merges
 * the stats from each arena into it), this needs to live in the arena_stats_t;
 * hence we define it here and let the pa_shard have a pointer (rather than the
 * more natural approach of just embedding it in the pa_shard itself).
 *
 * We follow the arena_stats_t approach of marking the derived fields. These
 * are the ones that are not maintained on their own; instead, their values are
 * derived during those stats merges.
 */
typedef struct pa_shard_stats_s pa_shard_stats_t;
struct pa_shard_stats_s {
    /* Number of edata_t structs allocated by base, but not being used. */
    size_t edata_avail; /* Derived. */
    /*
     * Stats specific to the PAC. For now, these are the only stats that
     * exist, but there will eventually be other page allocators. Things
     * like edata_avail make sense in a cross-PA sense, but things like
     * npurges don't.
     */
    pac_stats_t pac_stats;
};

/*
 * The local allocator handle. Keeps the state necessary to satisfy page-sized
 * allocations.
 *
 * The contents are mostly internal to the PA module. The key exception is that
 * arena decay code is allowed to grab pointers to the dirty and muzzy ecaches
 * decay_ts, for a couple of queries, passing them back to a PA function, or
 * acquiring decay.mtx and looking at decay.purging. The reasoning is that,
 * while PA decides what and how to purge, the arena code decides when and where
 * (e.g. on what thread). It's allowed to use the presence of another purger to
 * decide.
 * (The background thread code also touches some other decay internals, but
 * that's not fundamental; it's just an artifact of a partial refactoring, and
 * its accesses could be straightforwardly moved inside the decay module).
 */
typedef struct pa_shard_s pa_shard_t;
struct pa_shard_s {
    /* The central PA this shard is associated with. */
    pa_central_t *central;

    /*
     * Number of pages in active extents.
     *
     * Synchronization: atomic.
     */
    atomic_zu_t nactive;

    /*
     * Whether or not we should prefer the hugepage allocator. Atomic since
     * it may be concurrently modified by a thread setting extent hooks.
     * Note that we still may do HPA operations in this arena; if use_hpa is
     * changed from true to false, we'll free back to the hugepage allocator
     * for those allocations.
     */
    atomic_b_t use_hpa;

    /*
     * If we never used the HPA to begin with, it wasn't initialized, and so
     * we shouldn't try to e.g. acquire its mutexes during fork. This
     * tracks that knowledge.
     */
    bool ever_used_hpa;

    /* Allocates from a PAC. */
    pac_t pac;

    /*
     * We place a small extent cache in front of the HPA, since we intend
     * these configurations to use many fewer arenas, and therefore have a
     * higher risk of hot locks.
     */
    sec_t hpa_sec;
    hpa_shard_t hpa_shard;

    /* The source of edata_t objects. */
    edata_cache_t edata_cache;

    unsigned ind;

    malloc_mutex_t *stats_mtx;
    pa_shard_stats_t *stats;

    /* The emap this shard is tied to. */
    emap_t *emap;

    /* The base from which we get the ehooks and allocate metadata. */
    base_t *base;
};

static inline bool
pa_shard_dont_decay_muzzy(pa_shard_t *shard) {
    return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 &&
        pac_decay_ms_get(&shard->pac, extent_state_muzzy) <= 0;
}

static inline ehooks_t *
pa_shard_ehooks_get(pa_shard_t *shard) {
    return base_ehooks_get(shard->base);
}

/* Returns true on error. */
bool pa_central_init(pa_central_t *central, base_t *base, bool hpa,
    hpa_hooks_t *hpa_hooks);

/* Returns true on error. */
bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
    emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
    malloc_mutex_t *stats_mtx, nstime_t *cur_time, size_t oversize_threshold,
    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms);

/*
 * This isn't exposed to users; we allow late enablement of the HPA shard so
 * that we can boot without worrying about the HPA, then turn it on in a0.
 */
bool pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
    const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts);

/*
 * We stop using the HPA when custom extent hooks are installed, but still
 * redirect deallocations to it.
 */
void pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard);

/*
 * This does the PA-specific parts of arena reset (i.e. freeing all active
 * allocations).
 */
void pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard);

/*
 * Destroy all the remaining retained extents. Should only be called after
 * decaying all active, dirty, and muzzy extents to the retained state, as the
 * last step in destroying the shard.
 */
void pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard);

/* Gets an edata for the given allocation. */
edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
    size_t alignment, bool slab, szind_t szind, bool zero, bool guarded,
    bool *deferred_work_generated);

/* Returns true on error, in which case nothing changed. */
bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
    size_t old_size, size_t new_size, szind_t szind, bool zero,
    bool *deferred_work_generated);

/*
 * The same. Sets *generated_dirty to true if we produced new dirty pages, and
 * false otherwise.
 */
bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
    size_t old_size, size_t new_size, szind_t szind,
    bool *deferred_work_generated);

/*
 * Frees the given edata back to the pa. Sets *generated_dirty if we produced
 * new dirty pages (well, we always set it for now; but this need not be the
 * case).
 * (We could make generated_dirty the return value of course, but this is more
 * consistent with the shrink pathway and our error codes here).
 */
void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
    bool *deferred_work_generated);

bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
    ssize_t decay_ms, pac_purge_eagerness_t eagerness);
ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state);

/*
 * Do deferred work on this PA shard.
 *
 * Morally, this should do both PAC decay and the HPA deferred work. For now,
 * though, the arena, background thread, and PAC modules are tightly interwoven
 * in a way that's tricky to extricate, so we only do the HPA-specific parts.
 */
void pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
    bool deferral_allowed);
void pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_try_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
uint64_t pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);

/******************************************************************************/
/*
 * Various bits of "boring" functionality that are still part of this module,
 * but that we relegate to pa_extra.c, to keep the core logic in pa.c as
 * readable as possible.
 */

/*
 * These fork phases are synchronized with the arena fork phase numbering to
 * make it easy to keep straight. That's why there's no prefork1.
 */
void pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard);

void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive,
    size_t *ndirty, size_t *nmuzzy);

void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
    pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
    hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out,
    size_t *resident);

/*
 * Reads the PA-owned mutex stats into the output stats array, at the
 * appropriate positions. Morally, these stats should really live in
 * pa_shard_stats_t, but the indices are sort of baked into the various mutex
 * prof macros. This would be a good thing to do at some point.
 */
void pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
    mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]);

#endif /* JEMALLOC_INTERNAL_PA_H */
deps/jemalloc/include/jemalloc/internal/pac.h (new file, 0 → 100644) @ b8beda3c

#ifndef JEMALLOC_INTERNAL_PAC_H
#define JEMALLOC_INTERNAL_PAC_H

#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/pai.h"
#include "san_bump.h"

/*
 * Page allocator classic; an implementation of the PAI interface that:
 * - Can be used for arenas with custom extent hooks.
 * - Can always satisfy any allocation request (including highly-fragmentary
 *   ones).
 * - Can use efficient OS-level zeroing primitives for demand-filled pages.
 */

/* How "eager" decay/purging should be. */
enum pac_purge_eagerness_e {
    PAC_PURGE_ALWAYS,
    PAC_PURGE_NEVER,
    PAC_PURGE_ON_EPOCH_ADVANCE
};
typedef enum pac_purge_eagerness_e pac_purge_eagerness_t;

typedef struct pac_decay_stats_s pac_decay_stats_t;
struct pac_decay_stats_s {
    /* Total number of purge sweeps. */
    locked_u64_t npurge;
    /* Total number of madvise calls made. */
    locked_u64_t nmadvise;
    /* Total number of pages purged. */
    locked_u64_t purged;
};

typedef struct pac_estats_s pac_estats_t;
struct pac_estats_s {
    /*
     * Stats for a given index in the range [0, SC_NPSIZES] in the various
     * ecache_ts.
     * We track both bytes and # of extents: two extents in the same bucket
     * may have different sizes if adjacent size classes differ by more than
     * a page, so bytes cannot always be derived from # of extents.
     */
    size_t ndirty;
    size_t dirty_bytes;
    size_t nmuzzy;
    size_t muzzy_bytes;
    size_t nretained;
    size_t retained_bytes;
};

typedef struct pac_stats_s pac_stats_t;
struct pac_stats_s {
    pac_decay_stats_t decay_dirty;
    pac_decay_stats_t decay_muzzy;

    /*
     * Number of unused virtual memory bytes currently retained. Retained
     * bytes are technically mapped (though always decommitted or purged),
     * but they are excluded from the mapped statistic (above).
     */
    size_t retained; /* Derived. */

    /*
     * Number of bytes currently mapped, excluding retained memory (and any
     * base-allocated memory, which is tracked by the arena stats).
     *
     * We name this "pac_mapped" to avoid confusion with the arena_stats
     * "mapped".
     */
    atomic_zu_t pac_mapped;

    /* VM space had to be leaked (undocumented). Normally 0. */
    atomic_zu_t abandoned_vm;
};

typedef struct pac_s pac_t;
struct pac_s {
    /*
     * Must be the first member (we convert it to a PAC given only a
     * pointer). The handle to the allocation interface.
     */
    pai_t pai;
    /*
     * Collections of extents that were previously allocated. These are
     * used when allocating extents, in an attempt to re-use address space.
     *
     * Synchronization: internal.
     */
    ecache_t ecache_dirty;
    ecache_t ecache_muzzy;
    ecache_t ecache_retained;

    base_t *base;
    emap_t *emap;
    edata_cache_t *edata_cache;

    /* The grow info for the retained ecache. */
    exp_grow_t exp_grow;
    malloc_mutex_t grow_mtx;

    /* Special allocator for guarded frequently reused extents. */
    san_bump_alloc_t sba;

    /* How large extents should be before getting auto-purged. */
    atomic_zu_t oversize_threshold;

    /*
     * Decay-based purging state, responsible for scheduling extent state
     * transitions.
     *
     * Synchronization: via the internal mutex.
     */
    decay_t decay_dirty; /* dirty --> muzzy */
    decay_t decay_muzzy; /* muzzy --> retained */

    malloc_mutex_t *stats_mtx;
    pac_stats_t *stats;

    /* Extent serial number generator state. */
    atomic_zu_t extent_sn_next;
};

bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
    edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold,
    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
    malloc_mutex_t *stats_mtx);

static inline size_t
pac_mapped(pac_t *pac) {
    return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED);
}

static inline ehooks_t *
pac_ehooks_get(pac_t *pac) {
    return base_ehooks_get(pac->base);
}

/*
 * All purging functions require holding decay->mtx. This is one of the few
 * places external modules are allowed to peek inside pa_shard_t internals.
 */

/*
 * Decays the number of pages currently in the ecache. This might not leave the
 * ecache empty if other threads are inserting dirty objects into it
 * concurrently with the call.
 */
void pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay);
/*
 * Updates decay settings for the current time, and conditionally purges in
 * response (depending on decay_purge_setting). Returns whether or not the
 * epoch advanced.
 */
bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
    pac_decay_stats_t *decay_stats, ecache_t *ecache,
    pac_purge_eagerness_t eagerness);

/*
 * Gets / sets the maximum amount that we'll grow an arena down the
 * grow-retained pathways (unless forced to by an allocation request).
 *
 * Set new_limit to NULL if it's just a query, or old_limit to NULL if you
 * don't care about the previous value.
 *
 * Returns true on error (if the new limit is not valid).
 */
bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
    size_t *new_limit);

bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
    ssize_t decay_ms, pac_purge_eagerness_t eagerness);
ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state);

void pac_reset(tsdn_t *tsdn, pac_t *pac);
void pac_destroy(tsdn_t *tsdn, pac_t *pac);

#endif /* JEMALLOC_INTERNAL_PAC_H */
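The comment on pac_s's first member relies on the C guarantee that a pointer to a struct and a pointer to its first member are interconvertible, which is how the PAC recovers itself from the pai_t handle it hands out. A tiny standalone illustration of that downcast, with hypothetical names:

#include <assert.h>

typedef struct iface_s iface_t;
struct iface_s {
    int (*op)(iface_t *self);
};

typedef struct impl_s {
    iface_t iface; /* Must be first: we recover impl_t * from an iface_t *. */
    int value;
} impl_t;

static int
impl_op(iface_t *self) {
    impl_t *impl = (impl_t *)self; /* Valid because iface is the first member. */
    return impl->value;
}

int
main(void) {
    impl_t x = { .iface = { .op = impl_op }, .value = 42 };
    iface_t *handle = &x.iface;
    assert(handle->op(handle) == 42);
    return 0;
}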
deps/jemalloc/include/jemalloc/internal/pages.h @ b8beda3c

@@ -13,10 +13,27 @@
 /* Return the smallest pagesize multiple that is >= s. */
 #define PAGE_CEILING(s) \
     (((s) + PAGE_MASK) & ~PAGE_MASK)
+/* Return the largest pagesize multiple that is <= s. */
+#define PAGE_FLOOR(s) \
+    ((s) & ~PAGE_MASK)
 
 /* Huge page size. LG_HUGEPAGE is determined by the configure script. */
 #define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
 #define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
+
+#if LG_HUGEPAGE != 0
+#  define HUGEPAGE_PAGES (HUGEPAGE / PAGE)
+#else
+/*
+ * It's convenient to define arrays (or bitmaps) of HUGEPAGE_PAGES lengths. If
+ * we can't autodetect the hugepage size, it gets treated as 0, in which case
+ * we'll trigger a compiler error in those arrays. Avoid this case by ensuring
+ * that this value is at least 1. (We won't ever run in this degraded state;
+ * hpa_supported() returns false in this case.)
+ */
+#  define HUGEPAGE_PAGES 1
+#endif
 
 /* Return the huge page base address for the huge page containing address a. */
 #define HUGEPAGE_ADDR2BASE(a) \
     ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
@@ -58,6 +75,18 @@ static const bool pages_can_purge_forced =
 #endif
     ;
 
+#if defined(JEMALLOC_HAVE_MADVISE_HUGE) || defined(JEMALLOC_HAVE_MEMCNTL)
+#  define PAGES_CAN_HUGIFY
+#endif
+static const bool pages_can_hugify =
+#ifdef PAGES_CAN_HUGIFY
+    true
+#else
+    false
+#endif
+    ;
+
 typedef enum {
     thp_mode_default = 0, /* Do not change hugepage settings. */
     thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */
@@ -84,5 +113,7 @@ bool pages_dontdump(void *addr, size_t size);
 bool pages_dodump(void *addr, size_t size);
 bool pages_boot(void);
 void pages_set_thp_state(void *ptr, size_t size);
+void pages_mark_guards(void *head, void *tail);
+void pages_unmark_guards(void *head, void *tail);
 
 #endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/pai.h (new file, 0 → 100644) @ b8beda3c

#ifndef JEMALLOC_INTERNAL_PAI_H
#define JEMALLOC_INTERNAL_PAI_H

/* An interface for page allocation. */

typedef struct pai_s pai_t;
struct pai_s {
    /* Returns NULL on failure. */
    edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
        size_t alignment, bool zero, bool guarded, bool frequent_reuse,
        bool *deferred_work_generated);
    /*
     * Returns the number of extents added to the list (which may be fewer
     * than requested, in case of OOM). The list should already be
     * initialized. The only alignment guarantee is page-alignment, and
     * the results are not necessarily zeroed.
     */
    size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size,
        size_t nallocs, edata_list_active_t *results,
        bool *deferred_work_generated);
    bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
        size_t old_size, size_t new_size, bool zero,
        bool *deferred_work_generated);
    bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
        size_t old_size, size_t new_size, bool *deferred_work_generated);
    void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
        bool *deferred_work_generated);
    /* This function empties out list as a side-effect of being called. */
    void (*dalloc_batch)(tsdn_t *tsdn, pai_t *self,
        edata_list_active_t *list, bool *deferred_work_generated);
    uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self);
};

/*
 * These are just simple convenience functions to avoid having to reference the
 * same pai_t twice on every invocation.
 */

static inline edata_t *
pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
    bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
    return self->alloc(tsdn, self, size, alignment, zero, guarded,
        frequent_reuse, deferred_work_generated);
}

static inline size_t
pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
    edata_list_active_t *results, bool *deferred_work_generated) {
    return self->alloc_batch(tsdn, self, size, nallocs, results,
        deferred_work_generated);
}

static inline bool
pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
    size_t new_size, bool zero, bool *deferred_work_generated) {
    return self->expand(tsdn, self, edata, old_size, new_size, zero,
        deferred_work_generated);
}

static inline bool
pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
    size_t new_size, bool *deferred_work_generated) {
    return self->shrink(tsdn, self, edata, old_size, new_size,
        deferred_work_generated);
}

static inline void
pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    bool *deferred_work_generated) {
    self->dalloc(tsdn, self, edata, deferred_work_generated);
}

static inline void
pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
    bool *deferred_work_generated) {
    self->dalloc_batch(tsdn, self, list, deferred_work_generated);
}

static inline uint64_t
pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
    return self->time_until_deferred_work(tsdn, self);
}

/*
 * An implementation of batch allocation that simply calls alloc once for
 * each item in the list.
 */
size_t pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t nallocs, edata_list_active_t *results,
    bool *deferred_work_generated);
/* Ditto, for dalloc. */
void pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
    edata_list_active_t *list, bool *deferred_work_generated);

#endif /* JEMALLOC_INTERNAL_PAI_H */
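pai_alloc_batch_default is declared here and defined elsewhere in jemalloc; the general idea (fall back to the one-at-a-time hook when a backend has no native batch path, and report how many allocations succeeded) can be sketched roughly as follows, with simplified signatures that are not the real jemalloc ones:

#include <stddef.h>

typedef struct pai_demo_s pai_demo_t;
struct pai_demo_s {
    void *(*alloc)(pai_demo_t *self, size_t size);
    /* Optional; when NULL, callers use the default loop below. */
    size_t (*alloc_batch)(pai_demo_t *self, size_t size, size_t n, void **out);
};

/* Default batch allocation: call the single-object hook n times, stop on OOM. */
static size_t
pai_demo_alloc_batch_default(pai_demo_t *self, size_t size, size_t n,
    void **out) {
    size_t i;
    for (i = 0; i < n; i++) {
        out[i] = self->alloc(self, size);
        if (out[i] == NULL) {
            break; /* Report how many succeeded, as the real API does. */
        }
    }
    return i;
}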
deps/jemalloc/include/jemalloc/internal/peak.h (new file, 0 → 100644) @ b8beda3c

#ifndef JEMALLOC_INTERNAL_PEAK_H
#define JEMALLOC_INTERNAL_PEAK_H

typedef struct peak_s peak_t;
struct peak_s {
    /* The highest recorded peak value, after adjustment (see below). */
    uint64_t cur_max;
    /*
     * The difference between alloc and dalloc at the last set_zero call;
     * this lets us cancel out the appropriate amount of excess.
     */
    uint64_t adjustment;
};

#define PEAK_INITIALIZER {0, 0}

static inline uint64_t
peak_max(peak_t *peak) {
    return peak->cur_max;
}

static inline void
peak_update(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
    int64_t candidate_max = (int64_t)(alloc - dalloc - peak->adjustment);
    if (candidate_max > (int64_t)peak->cur_max) {
        peak->cur_max = candidate_max;
    }
}

/* Resets the counter to zero; all peaks are now relative to this point. */
static inline void
peak_set_zero(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
    peak->cur_max = 0;
    peak->adjustment = alloc - dalloc;
}

#endif /* JEMALLOC_INTERNAL_PEAK_H */
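peak.h is simple enough to exercise directly; a small standalone sketch of how the adjustment cancels history at set_zero time (the counters and values below are made up, and the struct mirrors peak.h rather than including jemalloc headers):

#include <assert.h>
#include <stdint.h>

typedef struct { uint64_t cur_max, adjustment; } demo_peak_t;
#define DEMO_PEAK_INITIALIZER {0, 0}

static void
demo_peak_update(demo_peak_t *p, uint64_t alloc, uint64_t dalloc) {
    int64_t cand = (int64_t)(alloc - dalloc - p->adjustment);
    if (cand > (int64_t)p->cur_max) {
        p->cur_max = (uint64_t)cand;
    }
}

int
main(void) {
    demo_peak_t p = DEMO_PEAK_INITIALIZER;
    demo_peak_update(&p, 1000, 200);  /* live = 800, new peak */
    demo_peak_update(&p, 1500, 1400); /* live = 100; peak stays 800 */
    assert(p.cur_max == 800);
    /* set_zero: future peaks are measured relative to the current live size. */
    p.cur_max = 0;
    p.adjustment = 1500 - 1400;
    demo_peak_update(&p, 1600, 1400); /* live grew by 100 since the reset */
    assert(p.cur_max == 100);
    return 0;
}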
deps/jemalloc/include/jemalloc/internal/peak_event.h (new file, 0 → 100644) @ b8beda3c

#ifndef JEMALLOC_INTERNAL_PEAK_EVENT_H
#define JEMALLOC_INTERNAL_PEAK_EVENT_H

/*
 * While peak.h contains the simple helper struct that tracks state, this
 * contains the allocator tie-ins (and knows about tsd, the event module, etc.).
 */

/* Update the peak with current tsd state. */
void peak_event_update(tsd_t *tsd);
/* Set current state to zero. */
void peak_event_zero(tsd_t *tsd);
uint64_t peak_event_max(tsd_t *tsd);

/* Manual hooks. */
/* The activity-triggered hooks. */
uint64_t peak_alloc_new_event_wait(tsd_t *tsd);
uint64_t peak_alloc_postponed_event_wait(tsd_t *tsd);
void peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed);
uint64_t peak_dalloc_new_event_wait(tsd_t *tsd);
uint64_t peak_dalloc_postponed_event_wait(tsd_t *tsd);
void peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);

#endif /* JEMALLOC_INTERNAL_PEAK_EVENT_H */
deps/jemalloc/include/jemalloc/internal/ph.h @ b8beda3c

#ifndef JEMALLOC_INTERNAL_PH_H
#define JEMALLOC_INTERNAL_PH_H

/*
 * A Pairing Heap implementation.
 *
...
 * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
 *
 *******************************************************************************
 *
 * We include a non-obvious optimization:
 * - First, we introduce a new pop-and-link operation; pop the two most
 *   recently-inserted items off the aux-list, link them, and push the resulting
 *   heap.
 * - We maintain a count of the number of insertions since the last time we
 *   merged the aux-list (i.e. via first() or remove_first()). After N inserts,
 *   we do ffs(N) pop-and-link operations.
 *
 * One way to think of this is that we're progressively building up a tree in
 * the aux-list, rather than a linked-list (think of the series of merges that
 * will be performed as the aux-count grows).
 *
 * There are a couple of reasons we benefit from this:
 * - Ordinarily, after N insertions, the aux-list is of size N. With our
 *   strategy, it's of size O(log(N)). So we decrease the worst-case time of
 *   first() calls, and reduce the average cost of remove_min calls. Since
 *   these almost always occur while holding a lock, we practically reduce the
 *   frequency of unusually long hold times.
 * - This moves the bulk of the work of merging the aux-list onto the threads
 *   that are inserting into the heap. In some common scenarios, insertions
 *   happen in bulk, from a single thread (think tcache flushing; we potentially
 *   move many slabs from slabs_full to slabs_nonfull). All the nodes in this
 *   case are in the inserting thread's cache, and linking them is very cheap
 *   (cache misses dominate linking cost). Without this optimization, linking
 *   happens on the next call to remove_first. Since that remove_first call
 *   likely happens on a different thread (or at least, after the cache has
 *   gotten cold if done on the same thread), deferring linking trades cheap
 *   link operations now for expensive ones later.
 *
 * The ffs trick keeps amortized insert cost at constant time. Similar
 * strategies based on periodically sorting the list after a batch of operations
 * perform worse than this in practice, even with various fancy tricks; they
 * all took amortized complexity of an insert from O(1) to O(log(n)).
 */

typedef int (*ph_cmp_t)(void *, void *);

/* Node structure. */
typedef struct phn_link_s phn_link_t;
struct phn_link_s {
    void *prev;
    void *next;
    void *lchild;
};

typedef struct ph_s ph_t;
struct ph_s {
    void *root;
    /*
     * Inserts done since the last aux-list merge. This is not necessarily
     * the size of the aux-list, since it's possible that removals have
     * happened since, and we don't track whether or not those removals are
     * from the aux list.
     */
    size_t auxcount;
};

JEMALLOC_ALWAYS_INLINE phn_link_t *
phn_link_get(void *phn, size_t offset) {
    return (phn_link_t *)(((uintptr_t)phn) + offset);
}

JEMALLOC_ALWAYS_INLINE void
phn_link_init(void *phn, size_t offset) {
    phn_link_get(phn, offset)->prev = NULL;
    phn_link_get(phn, offset)->next = NULL;
    phn_link_get(phn, offset)->lchild = NULL;
}

/* Internal utility helpers: phn_{lchild,next,prev}_{get,set}(). */
...

JEMALLOC_ALWAYS_INLINE void
phn_merge_ordered(void *phn0, void *phn1, size_t offset, ph_cmp_t cmp) {
    void *phn0child;

    assert(phn0 != NULL);
    assert(phn1 != NULL);
    assert(cmp(phn0, phn1) <= 0);

    phn_prev_set(phn1, phn0, offset);
    phn0child = phn_lchild_get(phn0, offset);
    phn_next_set(phn1, phn0child, offset);
    if (phn0child != NULL) {
        phn_prev_set(phn0child, phn1, offset);
    }
    phn_lchild_set(phn0, phn1, offset);
}

JEMALLOC_ALWAYS_INLINE void *
phn_merge(void *phn0, void *phn1, size_t offset, ph_cmp_t cmp) {
    void *result;
    if (phn0 == NULL) {
        result = phn1;
    } else if (phn1 == NULL) {
        result = phn0;
    } else if (cmp(phn0, phn1) < 0) {
        phn_merge_ordered(phn0, phn1, offset, cmp);
        result = phn0;
    } else {
        phn_merge_ordered(phn1, phn0, offset, cmp);
        result = phn1;
    }
    return result;
}

/*
 * phn_merge_siblings(): multipass merge, wherein the first two elements of a
 * FIFO are repeatedly merged, and each result is appended to the singly linked
 * FIFO, until the FIFO contains only a single element. We start with a sibling
 * list but no reference to its tail, so we do a single pass over the sibling
 * list to populate the FIFO.
 */
...

JEMALLOC_ALWAYS_INLINE void
ph_merge_aux(ph_t *ph, size_t offset, ph_cmp_t cmp) {
    ph->auxcount = 0;
    void *phn = phn_next_get(ph->root, offset);
    if (phn != NULL) {
        phn_prev_set(ph->root, NULL, offset);
        phn_next_set(ph->root, NULL, offset);
        phn_prev_set(phn, NULL, offset);
        phn = phn_merge_siblings(phn, offset, cmp);
        assert(phn_next_get(phn, offset) == NULL);
        ph->root = phn_merge(ph->root, phn, offset, cmp);
    }
}

JEMALLOC_ALWAYS_INLINE void *
ph_merge_children(void *phn, size_t offset, ph_cmp_t cmp) {
    void *result;
    void *lchild = phn_lchild_get(phn, offset);
    if (lchild == NULL) {
        result = NULL;
    } else {
        result = phn_merge_siblings(lchild, offset, cmp);
    }
    return result;
}

JEMALLOC_ALWAYS_INLINE void
ph_new(ph_t *ph) {
    ph->root = NULL;
    ph->auxcount = 0;
}

JEMALLOC_ALWAYS_INLINE bool
ph_empty(ph_t *ph) {
    return ph->root == NULL;
}

JEMALLOC_ALWAYS_INLINE void *
ph_first(ph_t *ph, size_t offset, ph_cmp_t cmp) {
    if (ph->root == NULL) {
        return NULL;
    }
    ph_merge_aux(ph, offset, cmp);
    return ph->root;
}

JEMALLOC_ALWAYS_INLINE void *
ph_any(ph_t *ph, size_t offset) {
    if (ph->root == NULL) {
        return NULL;
    }
    void *aux = phn_next_get(ph->root, offset);
    if (aux != NULL) {
        return aux;
    }
    return ph->root;
}

...

JEMALLOC_ALWAYS_INLINE void
ph_insert(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) {
    phn_link_init(phn, offset);

    /*
     * Treat the root as an aux list during insertion, and lazily merge
     * during a_prefix##remove_first(). For elements that are inserted,
     * then removed via a_prefix##remove() before the aux list is ever
     * processed, this makes insert/remove constant-time, whereas eager
     * merging would make insert O(log n).
     */
    if (ph->root == NULL) {
        ph->root = phn;
    } else {
        /*
         * As a special case, check to see if we can replace the root.
         * This is practically common in some important cases, and lets
         * us defer some insertions (hopefully, until the point where
         * some of the items in the aux list have been removed, saving
         * us from linking them at all).
         */
        if (cmp(phn, ph->root) < 0) {
            phn_lchild_set(phn, ph->root, offset);
            phn_prev_set(ph->root, phn, offset);
            ph->root = phn;
            ph->auxcount = 0;
            return;
        }
        ph->auxcount++;
        phn_next_set(phn, phn_next_get(ph->root, offset), offset);
        if (phn_next_get(ph->root, offset) != NULL) {
            phn_prev_set(phn_next_get(ph->root, offset), phn, offset);
        }
        phn_prev_set(phn, ph->root, offset);
        phn_next_set(ph->root, phn, offset);
    }
    if (ph->auxcount > 1) {
        unsigned nmerges = ffs_zu(ph->auxcount - 1);
        bool done = false;
        for (unsigned i = 0; i < nmerges && !done; i++) {
            done = ph_try_aux_merge_pair(ph, offset, cmp);
        }
    }
}

JEMALLOC_ALWAYS_INLINE void *
ph_remove_first(ph_t *ph, size_t offset, ph_cmp_t cmp) {
    void *ret;

    if (ph->root == NULL) {
        return NULL;
    }
    ph_merge_aux(ph, offset, cmp);
    ret = ph->root;
    ph->root = ph_merge_children(ph->root, offset, cmp);

    return ret;
}

...

#define ph_structs(a_prefix, a_type) \
typedef struct { \
    phn_link_t link; \
} a_prefix##_link_t; \
 \
typedef struct { \
    ph_t ph; \
} a_prefix##_t;

/*
 * The ph_proto() macro generates function prototypes that correspond to the
 * functions generated by an equivalently parameterized call to ph_gen().
 */
#define ph_proto(a_attr, a_prefix, a_type) \
a_attr void a_prefix##_new(a_prefix##_t *ph); \
a_attr bool a_prefix##_empty(a_prefix##_t *ph); \
a_attr a_type *a_prefix##_first(a_prefix##_t *ph); \
a_attr a_type *a_prefix##_any(a_prefix##_t *ph); \
a_attr void a_prefix##_insert(a_prefix##_t *ph, a_type *phn); \
a_attr a_type *a_prefix##_remove_first(a_prefix##_t *ph); \
a_attr void a_prefix##_remove(a_prefix##_t *ph, a_type *phn); \
a_attr a_type *a_prefix##_remove_any(a_prefix##_t *ph);

/* The ph_gen() macro generates a type-specific pairing heap implementation. */
#define ph_gen(a_attr, a_prefix, a_type, a_field, a_cmp) \
JEMALLOC_ALWAYS_INLINE int \
a_prefix##_ph_cmp(void *a, void *b) { \
    return a_cmp((a_type *)a, (a_type *)b); \
} \
 \
a_attr void \
a_prefix##_new(a_prefix##_t *ph) { \
    ph_new(&ph->ph); \
} \
 \
a_attr bool \
a_prefix##_empty(a_prefix##_t *ph) { \
    return ph_empty(&ph->ph); \
} \
 \
a_attr a_type * \
a_prefix##_first(a_prefix##_t *ph) { \
    return ph_first(&ph->ph, offsetof(a_type, a_field), \
        &a_prefix##_ph_cmp); \
} \
 \
a_attr a_type * \
a_prefix##_any(a_prefix##_t *ph) { \
    return ph_any(&ph->ph, offsetof(a_type, a_field)); \
} \
 \
a_attr void \
a_prefix##_insert(a_prefix##_t *ph, a_type *phn) { \
    ph_insert(&ph->ph, phn, offsetof(a_type, a_field), \
        a_prefix##_ph_cmp); \
} \
 \
a_attr a_type * \
a_prefix##_remove_first(a_prefix##_t *ph) { \
    return ph_remove_first(&ph->ph, offsetof(a_type, a_field), \
        a_prefix##_ph_cmp); \
} \
 \
a_attr void \
a_prefix##_remove(a_prefix##_t *ph, a_type *phn) { \
    ph_remove(&ph->ph, phn, offsetof(a_type, a_field), \
        a_prefix##_ph_cmp); \
} \
 \
a_attr a_type * \
a_prefix##_remove_any(a_prefix##_t *ph) { \
    a_type *ret = a_prefix##_any(ph); \
    if (ret != NULL) { \
        a_prefix##_remove(ph, ret); \
    } \
    return ret; \
}

#endif /* JEMALLOC_INTERNAL_PH_H */
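The rewrite above replaces the old type-parameterized cpp macros with void*-plus-offset helpers; the core trick is recovering the embedded link from a node pointer via offsetof. A standalone toy (not the jemalloc API; names are invented) showing that access pattern:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct { void *prev, *next, *lchild; } link_demo_t;

typedef struct {
    int key;
    link_demo_t link; /* Intrusive link embedded at an arbitrary offset. */
} node_demo_t;

/* Same idea as phn_link_get(): node pointer + field offset -> link pointer. */
static link_demo_t *
link_get(void *node, size_t offset) {
    return (link_demo_t *)((uintptr_t)node + offset);
}

int
main(void) {
    node_demo_t a = { .key = 7 };
    node_demo_t b = { .key = 9 };
    size_t off = offsetof(node_demo_t, link);
    link_get(&a, off)->next = &b;          /* Generic code sees only void *. */
    node_demo_t *n = link_get(&a, off)->next;
    assert(n->key == 9);
    return 0;
}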
deps/jemalloc/include/jemalloc/internal/prng.h
View file @
b8beda3c
#ifndef JEMALLOC_INTERNAL_PRNG_H
#define JEMALLOC_INTERNAL_PRNG_H

#include "jemalloc/internal/bit_util.h"

...
@@ -59,66 +58,38 @@ prng_state_next_zu(size_t state) {
/*
 * The prng_lg_range functions give a uniform int in the half-open range [0,
 * 2**lg_range).
 */
JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(uint32_t *state, unsigned lg_range) {
    assert(lg_range > 0);
    assert(lg_range <= 32);

    *state = prng_state_next_u32(*state);
    uint32_t ret = *state >> (32 - lg_range);

    return ret;
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
    assert(lg_range > 0);
    assert(lg_range <= 64);

    *state = prng_state_next_u64(*state);
    uint64_t ret = *state >> (64 - lg_range);

    return ret;
}

JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(size_t *state, unsigned lg_range) {
    assert(lg_range > 0);
    assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));

    *state = prng_state_next_zu(*state);
    size_t ret = *state >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);

    return ret;
}
...
@@ -129,18 +100,24 @@ prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
 */
JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(uint32_t *state, uint32_t range) {
    assert(range != 0);
    /*
     * If range were 1, lg_range would be 0, so the shift in
     * prng_lg_range_u32 would be a shift of a 32-bit variable by 32 bits,
     * which is UB.  Just handle this case as a one-off.
     */
    if (range == 1) {
        return 0;
    }

    /* Compute the ceiling of lg(range). */
    unsigned lg_range = ffs_u32(pow2_ceil_u32(range));

    /* Generate a result in [0..range) via repeated trial. */
    uint32_t ret;
    do {
        ret = prng_lg_range_u32(state, lg_range);
    } while (ret >= range);

    return ret;
...
@@ -148,15 +125,18 @@ prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range) {
    assert(range != 0);
    /* See the note in prng_range_u32. */
    if (range == 1) {
        return 0;
    }

    /* Compute the ceiling of lg(range). */
    unsigned lg_range = ffs_u64(pow2_ceil_u64(range));

    /* Generate a result in [0..range) via repeated trial. */
    uint64_t ret;
    do {
        ret = prng_lg_range_u64(state, lg_range);
    } while (ret >= range);
...
@@ -165,18 +145,21 @@ prng_range_u64(uint64_t *state, uint64_t range) {
}

JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(size_t *state, size_t range) {
    assert(range != 0);
    /* See the note in prng_range_u32. */
    if (range == 1) {
        return 0;
    }

    /* Compute the ceiling of lg(range). */
    unsigned lg_range = ffs_u64(pow2_ceil_u64(range));

    /* Generate a result in [0..range) via repeated trial. */
    size_t ret;
    do {
        ret = prng_lg_range_zu(state, lg_range);
    } while (ret >= range);

    return ret;
...
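The prng_range_* helpers above all use the same rejection-sampling idea: draw the ceiling-of-lg(range) bits from the generator and retry until the draw falls inside [0, range), with range == 1 handled separately to avoid an undefined 32-bit shift by 32. A minimal standalone sketch of that loop follows; the xorshift-style state update and the lg-ceiling helper are simplified stand-ins for prng_state_next_u32, pow2_ceil_u32, and ffs_u32, assumed here only for illustration.

```c
#include <assert.h>
#include <stdint.h>

/* Stand-in state update (seed must be nonzero); not jemalloc's prng_state_next_u32. */
static inline uint32_t
toy_state_next_u32(uint32_t state) {
    state ^= state << 13;
    state ^= state >> 17;
    state ^= state << 5;
    return state;
}

/* Smallest lg such that (1 << lg) >= range, for range >= 2. */
static inline unsigned
toy_lg_ceil_u32(uint32_t range) {
    unsigned lg = 0;
    while (lg < 32 && ((uint32_t)1 << lg) < range) {
        lg++;
    }
    return lg;
}

/* Uniform draw in [0, range) via repeated trial, mirroring prng_range_u32. */
static inline uint32_t
toy_range_u32(uint32_t *state, uint32_t range) {
    assert(range != 0);
    if (range == 1) {
        /* A shift by 32 would be UB, so handle this case directly. */
        return 0;
    }
    unsigned lg_range = toy_lg_ceil_u32(range);
    uint32_t ret;
    do {
        *state = toy_state_next_u32(*state);
        ret = *state >> (32 - lg_range);
    } while (ret >= range);
    return ret;
}
```

Because the candidate is drawn from the next power of two above range, each trial succeeds with probability at least one half, so the expected number of iterations is below two.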
deps/jemalloc/include/jemalloc/internal/prof_data.h
0 → 100644
View file @
b8beda3c
#ifndef JEMALLOC_INTERNAL_PROF_DATA_H
#define JEMALLOC_INTERNAL_PROF_DATA_H

#include "jemalloc/internal/mutex.h"

extern malloc_mutex_t bt2gctx_mtx;
extern malloc_mutex_t tdatas_mtx;
extern malloc_mutex_t prof_dump_mtx;

extern malloc_mutex_t *gctx_locks;
extern malloc_mutex_t *tdata_locks;

extern size_t prof_unbiased_sz[PROF_SC_NSIZES];
extern size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES];

void prof_bt_hash(const void *key, size_t r_hash[2]);
bool prof_bt_keycomp(const void *k1, const void *k2);

bool prof_data_init(tsd_t *tsd);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
int prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name);
void prof_unbias_map_init();
void prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
    prof_tdata_t *tdata, bool leakcheck);
prof_tdata_t *prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid,
    uint64_t thr_discrim, char *thread_name, bool active);
void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx);

/* Used in unit tests. */
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
void prof_cnt_all(prof_cnt_t *cnt_all);

#endif /* JEMALLOC_INTERNAL_PROF_DATA_H */
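prof_bt_hash() and prof_bt_keycomp() exist so that a captured backtrace (a vector of return addresses) can be used as a hash-table key when looking up the context associated with a stack trace (see prof_lookup above). The sketch below only illustrates the general shape of hashing and comparing such a key; the bt_t type and the FNV-1a hash are assumptions for illustration, not jemalloc's implementation.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical backtrace key: a vector of program counters. */
typedef struct {
    void **vec;
    unsigned len;
} bt_t;

/* FNV-1a over the raw PC array; a stand-in for the real hash function. */
static uint64_t
bt_hash(const bt_t *bt) {
    uint64_t h = 1469598103934665603ULL;
    const unsigned char *p = (const unsigned char *)bt->vec;
    size_t nbytes = (size_t)bt->len * sizeof(void *);
    for (size_t i = 0; i < nbytes; i++) {
        h ^= p[i];
        h *= 1099511628211ULL;
    }
    return h;
}

/* Two backtraces are equal iff they have the same length and the same PCs. */
static bool
bt_keycomp(const bt_t *k1, const bt_t *k2) {
    if (k1->len != k2->len) {
        return false;
    }
    return memcmp(k1->vec, k2->vec, (size_t)k1->len * sizeof(void *)) == 0;
}
```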
deps/jemalloc/include/jemalloc/internal/prof_externs.h
View file @
b8beda3c
...
@@ -2,75 +2,72 @@
#define JEMALLOC_INTERNAL_PROF_EXTERNS_H

#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prof_hook.h"

extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample;    /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump;          /* High-water memory dumping. */
extern bool opt_prof_final;          /* Final profile dumping. */
extern bool opt_prof_leak;           /* Dump leak summary at exit. */
extern bool opt_prof_leak_error;     /* Exit with error code if memory leaked */
extern bool opt_prof_accum;          /* Report cumulative bytes. */
extern bool opt_prof_log;            /* Turn logging on at boot. */
extern char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];
extern bool opt_prof_unbias;

/* For recording recent allocations */
extern ssize_t opt_prof_recent_alloc_max;

/* Whether to use thread name provided by the system or by mallctl. */
extern bool opt_prof_sys_thread_name;

/* Whether to record per size class counts and request size totals. */
extern bool opt_prof_stats;

/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active_state;

/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;

/* Profile dump interval, measured in bytes allocated. */
extern uint64_t prof_interval;

/*
 * Initialized as opt_lg_prof_sample, and potentially modified during profiling
 * resets.
 */
extern size_t lg_prof_sample;

extern bool prof_booted;

void prof_backtrace_hook_set(prof_backtrace_hook_t hook);
prof_backtrace_hook_t prof_backtrace_hook_get();

void prof_dump_hook_set(prof_dump_hook_t hook);
prof_dump_hook_t prof_dump_hook_get();

/* Functions only accessed in prof_inlines.h */
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);

void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx);
void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
    size_t usize, prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize,
    prof_info_t *prof_info);
prof_tctx_t *prof_tctx_create(tsd_t *tsd);
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);

void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
...
@@ -84,22 +81,15 @@ bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd, base_t *base);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);

/* Only accessed by thread event. */
uint64_t prof_sample_new_event_wait(tsd_t *tsd);
uint64_t prof_sample_postponed_event_wait(tsd_t *tsd);
void prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed);

#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/prof_hook.h
0 → 100644
View file @
b8beda3c
#ifndef JEMALLOC_INTERNAL_PROF_HOOK_H
#define JEMALLOC_INTERNAL_PROF_HOOK_H
/*
* The hook types declared in this file are experimental and undocumented,
* which is why the typedefs are located in an 'internal' header.
*/
/*
* A hook to mock out backtrace functionality. This can be handy, since it's
* otherwise difficult to guarantee that two allocations are reported as coming
* from the exact same stack trace in the presence of an optimizing compiler.
*/
typedef void (*prof_backtrace_hook_t)(void **, unsigned *, unsigned);

/*
 * A callback hook that notifies about recently dumped heap profile.
 */
typedef void (*prof_dump_hook_t)(const char *filename);

#endif /* JEMALLOC_INTERNAL_PROF_HOOK_H */
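Because prof_backtrace_hook_t receives the destination vector, a length out-parameter, and a maximum frame count, a test can install a hook that always reports the same synthetic stack, which is what makes two allocations comparable regardless of compiler optimizations. A hypothetical hook of that shape might look like the sketch below; the fake frame addresses are assumptions, and wiring it up would presumably go through prof_backtrace_hook_set() declared in prof_externs.h above.

```c
#include <stdint.h>

/* Matches: typedef void (*prof_backtrace_hook_t)(void **, unsigned *, unsigned); */
static void
fixed_backtrace_hook(void **vec, unsigned *len, unsigned max) {
    /* Pretend every sampled allocation came from the same two frames. */
    static const uintptr_t fake_frames[] = {0x1000, 0x2000};
    unsigned n = (unsigned)(sizeof(fake_frames) / sizeof(fake_frames[0]));
    if (n > max) {
        n = max;
    }
    for (unsigned i = 0; i < n; i++) {
        vec[i] = (void *)fake_frames[i];
    }
    *len = n;
}

/*
 * Installation (assumed, per the setter declared in prof_externs.h):
 *     prof_backtrace_hook_set(fixed_backtrace_hook);
 */
```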
deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h → deps/jemalloc/include/jemalloc/internal/prof_inlines.h
View file @
b8beda3c
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_H
#define JEMALLOC_INTERNAL_PROF_INLINES_H

#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/thread_event.h"

JEMALLOC_ALWAYS_INLINE void
prof_active_assert() {
    cassert(config_prof);
    /*
     * If opt_prof is off, then prof_active must always be off, regardless
     * of whether prof_active_mtx is in effect or not.
     */
    assert(opt_prof || !prof_active_state);
}

JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
    prof_active_assert();
    /*
     * Even if opt_prof is true, sampling can be temporarily disabled by
     * setting prof_active to false.  No locking is used when reading
     * prof_active in the fast path, so there are no guarantees regarding
     * how long it will take for all threads to notice state changes.
     */
    return prof_active_state;
}

JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void) {
...
@@ -22,6 +45,7 @@ prof_tdata_get(tsd_t *tsd, bool create) {
    tdata = tsd_prof_tdata_get(tsd);
    if (create) {
        assert(tsd_reentrancy_level_get(tsd) == 0);
        if (unlikely(tdata == NULL)) {
            if (tsd_nominal(tsd)) {
                tdata = prof_tdata_init(tsd);
...
@@ -37,158 +61,115 @@ prof_tdata_get(tsd_t *tsd, bool create) {
    return tdata;
}

JEMALLOC_ALWAYS_INLINE void
prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
    prof_info_t *prof_info) {
    cassert(config_prof);
    assert(ptr != NULL);
    assert(prof_info != NULL);

    arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, false);
}

JEMALLOC_ALWAYS_INLINE void
prof_info_get_and_reset_recent(tsd_t *tsd, const void *ptr,
    emap_alloc_ctx_t *alloc_ctx, prof_info_t *prof_info) {
    cassert(config_prof);
    assert(ptr != NULL);
    assert(prof_info != NULL);

    arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, true);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx) {
    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_reset(tsd, ptr, alloc_ctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_reset_sampled(tsd, ptr);
}

JEMALLOC_ALWAYS_INLINE void
prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, size_t size) {
    cassert(config_prof);
    assert(edata != NULL);
    assert((uintptr_t)tctx > (uintptr_t)1U);

    arena_prof_info_set(tsd, edata, tctx, size);
}

JEMALLOC_ALWAYS_INLINE bool
prof_sample_should_skip(tsd_t *tsd, bool sample_event) {
    cassert(config_prof);

    /* Fastpath: no need to load tdata */
    if (likely(!sample_event)) {
        return true;
    }

    /*
     * sample_event is always obtained from the thread event module, and
     * whenever it's true, it means that the thread event module has
     * already checked the reentrancy level.
     */
    assert(tsd_reentrancy_level_get(tsd) == 0);

    prof_tdata_t *tdata = prof_tdata_get(tsd, true);
    if (unlikely(tdata == NULL)) {
        return true;
    }

    return !tdata->active;
}

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, bool prof_active, bool sample_event) {
    prof_tctx_t *ret;

    if (!prof_active ||
        likely(prof_sample_should_skip(tsd, sample_event))) {
        ret = (prof_tctx_t *)(uintptr_t)1U;
    } else {
        ret = prof_tctx_create(tsd);
    }

    return ret;
}

JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
    emap_alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
    cassert(config_prof);
    assert(ptr != NULL);
    assert(usize == isalloc(tsd_tsdn(tsd), ptr));

    if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
        prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
    } else {
        prof_tctx_reset(tsd, ptr, alloc_ctx);
    }
}

JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
    prof_tctx_t *tctx, bool prof_active, const void *old_ptr, size_t old_usize,
    prof_info_t *old_prof_info, bool sample_event) {
    bool sampled, old_sampled, moved;

    cassert(config_prof);
    assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

    if (prof_active && ptr != NULL) {
        assert(usize == isalloc(tsd_tsdn(tsd), ptr));
        if (prof_sample_should_skip(tsd, sample_event)) {
            /*
             * Don't sample.  The usize passed to prof_alloc_prep()
             * was larger than what actually got allocated, so a
...
@@ -196,31 +177,31 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
             * though its actual usize was insufficient to cross the
             * sample threshold.
             */
            prof_alloc_rollback(tsd, tctx);
            tctx = (prof_tctx_t *)(uintptr_t)1U;
        }
    }

    sampled = ((uintptr_t)tctx > (uintptr_t)1U);
    old_sampled = ((uintptr_t)old_prof_info->alloc_tctx > (uintptr_t)1U);
    moved = (ptr != old_ptr);

    if (unlikely(sampled)) {
        prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
    } else if (moved) {
        prof_tctx_reset(tsd, ptr, NULL);
    } else if (unlikely(old_sampled)) {
        /*
         * prof_tctx_reset() would work for the !moved case as well,
         * but prof_tctx_reset_sampled() is slightly cheaper, and the
         * proper thing to do here in the presence of explicit
         * knowledge re: moved state.
         */
        prof_tctx_reset_sampled(tsd, ptr);
    } else {
        prof_info_t prof_info;
        prof_info_get(tsd, ptr, NULL, &prof_info);
        assert((uintptr_t)prof_info.alloc_tctx == (uintptr_t)1U);
    }

    /*
...
@@ -231,20 +212,50 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
     * counters.
     */
    if (unlikely(old_sampled)) {
        prof_free_sampled_object(tsd, old_usize, old_prof_info);
    }
}

JEMALLOC_ALWAYS_INLINE size_t
prof_sample_align(size_t orig_align) {
    /*
     * Enforce page alignment, so that sampled allocations can be identified
     * w/o metadata lookup.
     */
    assert(opt_prof);
    return (opt_cache_oblivious && orig_align < PAGE) ? PAGE : orig_align;
}

JEMALLOC_ALWAYS_INLINE bool
prof_sample_aligned(const void *ptr) {
    return ((uintptr_t)ptr & PAGE_MASK) == 0;
}

JEMALLOC_ALWAYS_INLINE bool
prof_sampled(tsd_t *tsd, const void *ptr) {
    prof_info_t prof_info;
    prof_info_get(tsd, ptr, NULL, &prof_info);
    bool sampled = (uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U;
    if (sampled) {
        assert(prof_sample_aligned(ptr));
    }
    return sampled;
}

JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize,
    emap_alloc_ctx_t *alloc_ctx) {
    prof_info_t prof_info;
    prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);

    cassert(config_prof);
    assert(usize == isalloc(tsd_tsdn(tsd), ptr));

    if (unlikely((uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U)) {
        assert(prof_sample_aligned(ptr));
        prof_free_sampled_object(tsd, usize, &prof_info);
    }
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_H */
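prof_sample_align() and prof_sample_aligned() above encode a useful invariant: when cache-oblivious layout is enabled, sampled allocations are forced to at least page alignment, so a pointer whose low PAGE_MASK bits are nonzero can be ruled out as sampled without touching any metadata. A standalone sketch of that fast filter follows, with a hypothetical 4 KiB page size standing in for jemalloc's PAGE/PAGE_MASK.

```c
#include <stdbool.h>
#include <stdint.h>

#define TOY_PAGE      ((uintptr_t)4096) /* assumed page size */
#define TOY_PAGE_MASK (TOY_PAGE - 1)

/* Mirror of prof_sample_aligned(): true iff ptr is page aligned. */
static inline bool
toy_sample_aligned(const void *ptr) {
    return ((uintptr_t)ptr & TOY_PAGE_MASK) == 0;
}

/*
 * Fast-path check on free: only page-aligned pointers can possibly be
 * sampled, so unaligned pointers skip the expensive profiling lookup.
 */
static inline bool
toy_maybe_sampled(const void *ptr) {
    if (!toy_sample_aligned(ptr)) {
        return false;
    }
    /* A real implementation would now consult allocation metadata. */
    return true;
}
```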
deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h
deleted
100644 → 0
View file @
d659c734
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H
#include "jemalloc/internal/mutex.h"
static inline bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
    cassert(config_prof);

    bool overflow;
    uint64_t a0, a1;

    /*
     * If the application allocates fast enough (and/or if idump is slow
     * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
     * idump trigger coalescing.  This is an intentional mechanism that
     * avoids rate-limiting allocation.
     */
#ifdef JEMALLOC_ATOMIC_U64
    a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
    do {
        a1 = a0 + accumbytes;
        assert(a1 >= a0);
        overflow = (a1 >= prof_interval);
        if (overflow) {
            a1 %= prof_interval;
        }
    } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
        a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
    malloc_mutex_lock(tsdn, &prof_accum->mtx);
    a0 = prof_accum->accumbytes;
    a1 = a0 + accumbytes;
    overflow = (a1 >= prof_interval);
    if (overflow) {
        a1 %= prof_interval;
    }
    prof_accum->accumbytes = a1;
    malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
    return overflow;
}

static inline void
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
    cassert(config_prof);

    /*
     * Cancel out as much of the excessive prof_accumbytes increase as
     * possible without underflowing.  Interval-triggered dumps occur
     * slightly more often than intended as a result of incomplete
     * canceling.
     */
    uint64_t a0, a1;
#ifdef JEMALLOC_ATOMIC_U64
    a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
    do {
        a1 = (a0 >= SC_LARGE_MINCLASS - usize)
            ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
    } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
        a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
    malloc_mutex_lock(tsdn, &prof_accum->mtx);
    a0 = prof_accum->accumbytes;
    a1 = (a0 >= SC_LARGE_MINCLASS - usize)
        ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
    prof_accum->accumbytes = a1;
    malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
}

JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
    /*
     * Even if opt_prof is true, sampling can be temporarily disabled by
     * setting prof_active to false.  No locking is used when reading
     * prof_active in the fast path, so there are no guarantees regarding
     * how long it will take for all threads to notice state changes.
     */
    return prof_active;
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
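The deleted prof_accum_add() shows a lock-free pattern for interval-triggered dumps: accumulate bytes into a shared counter with a compare-exchange loop, and when the counter crosses the interval, wrap it modulo the interval and report an overflow so the caller can trigger a dump. Below is a minimal C11 stdatomic version of the same pattern; the plain `_Atomic uint64_t` counter is a stand-in for jemalloc's atomic_u64_t wrapper, used here only as an assumption for illustration.

```c
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Add accumbytes to *accum; return true when the running total crosses
 * interval (the counter is wrapped modulo interval in that case).
 */
static bool
accum_add(_Atomic uint64_t *accum, uint64_t interval, uint64_t accumbytes) {
    uint64_t a0 = atomic_load_explicit(accum, memory_order_relaxed);
    uint64_t a1;
    bool overflow;
    do {
        a1 = a0 + accumbytes;
        assert(a1 >= a0); /* no 64-bit wraparound expected */
        overflow = (a1 >= interval);
        if (overflow) {
            a1 %= interval;
        }
    } while (!atomic_compare_exchange_weak_explicit(accum, &a0, a1,
        memory_order_relaxed, memory_order_relaxed));
    return overflow;
}
```

Because the counter is wrapped rather than reset to zero, several threads racing past the threshold coalesce into a single trigger instead of rate-limiting allocation, which is exactly the behavior the original comment describes.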
deps/jemalloc/include/jemalloc/internal/prof_log.h
0 → 100644
View file @
b8beda3c
#ifndef JEMALLOC_INTERNAL_PROF_LOG_H
#define JEMALLOC_INTERNAL_PROF_LOG_H
#include "jemalloc/internal/mutex.h"
extern malloc_mutex_t log_mtx;

void prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info);
bool prof_log_init(tsd_t *tsdn);

/* Used in unit tests. */
size_t prof_log_bt_count(void);
size_t prof_log_alloc_count(void);
size_t prof_log_thr_count(void);
bool prof_log_is_logging(void);
bool prof_log_rep_check(void);
void prof_log_dummy_set(bool new_value);

bool prof_log_start(tsdn_t *tsdn, const char *filename);
bool prof_log_stop(tsdn_t *tsdn);

#endif /* JEMALLOC_INTERNAL_PROF_LOG_H */
deps/jemalloc/include/jemalloc/internal/prof_recent.h
0 → 100644
View file @
b8beda3c
#ifndef JEMALLOC_INTERNAL_PROF_RECENT_H
#define JEMALLOC_INTERNAL_PROF_RECENT_H
extern malloc_mutex_t prof_recent_alloc_mtx;
extern malloc_mutex_t prof_recent_dump_mtx;

bool prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx);
void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize);
void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata);
bool prof_recent_init();
void edata_prof_recent_alloc_init(edata_t *edata);

/* Used in unit tests. */
typedef ql_head(prof_recent_t) prof_recent_list_t;
extern prof_recent_list_t prof_recent_alloc_list;
edata_t *prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *node);
prof_recent_t *edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata);

ssize_t prof_recent_alloc_max_ctl_read();
ssize_t prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max);
void prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque);

#endif /* JEMALLOC_INTERNAL_PROF_RECENT_H */
deps/jemalloc/include/jemalloc/internal/prof_stats.h
0 → 100644
View file @
b8beda3c
#ifndef JEMALLOC_INTERNAL_PROF_STATS_H
#define JEMALLOC_INTERNAL_PROF_STATS_H
typedef struct prof_stats_s prof_stats_t;
struct prof_stats_s {
    uint64_t req_sum;
    uint64_t count;
};

extern malloc_mutex_t prof_stats_mtx;

void prof_stats_inc(tsd_t *tsd, szind_t ind, size_t size);
void prof_stats_dec(tsd_t *tsd, szind_t ind, size_t size);
void prof_stats_get_live(tsd_t *tsd, szind_t ind, prof_stats_t *stats);
void prof_stats_get_accum(tsd_t *tsd, szind_t ind, prof_stats_t *stats);

#endif /* JEMALLOC_INTERNAL_PROF_STATS_H */
deps/jemalloc/include/jemalloc/internal/prof_structs.h
View file @
b8beda3c
...
@@ -2,6 +2,7 @@
#define JEMALLOC_INTERNAL_PROF_STRUCTS_H

#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/rb.h"
...
@@ -15,26 +16,22 @@ struct prof_bt_s {
#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
    void **vec;
    unsigned *len;
    unsigned max;
} prof_unwind_data_t;
#endif

struct prof_cnt_s {
    /* Profiling counters. */
    uint64_t curobjs;
    uint64_t curobjs_shifted_unbiased;
    uint64_t curbytes;
    uint64_t curbytes_unbiased;
    uint64_t accumobjs;
    uint64_t accumobjs_shifted_unbiased;
    uint64_t accumbytes;
    uint64_t accumbytes_unbiased;
};

typedef enum {
...
@@ -55,6 +52,12 @@ struct prof_tctx_s {
    uint64_t thr_uid;
    uint64_t thr_discrim;

    /*
     * Reference count of how many times this tctx object is referenced in
     * recent allocation / deallocation records, protected by tdata->lock.
     */
    uint64_t recent_count;

    /* Profiling counters, protected by tdata->lock. */
    prof_cnt_t cnts;
...
@@ -96,6 +99,15 @@ struct prof_tctx_s {
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;

struct prof_info_s {
    /* Time when the allocation was made. */
    nstime_t alloc_time;
    /* Points to the prof_tctx_t corresponding to the allocation. */
    prof_tctx_t *alloc_tctx;
    /* Allocation request size. */
    size_t alloc_size;
};

struct prof_gctx_s {
    /* Protects nlimbo, cnt_summed, and tctxs. */
    malloc_mutex_t *lock;
...
@@ -167,9 +179,6 @@ struct prof_tdata_s {
     */
    ckh_t bt2tctx;

    /* State used to avoid dumping while operating on prof internals. */
    bool enq;
    bool enq_idump;
...
@@ -197,4 +206,16 @@ struct prof_tdata_s {
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;

struct prof_recent_s {
    nstime_t alloc_time;
    nstime_t dalloc_time;

    ql_elm(prof_recent_t) link;
    size_t size;
    size_t usize;
    atomic_p_t alloc_edata; /* NULL means allocation has been freed. */
    prof_tctx_t *alloc_tctx;
    prof_tctx_t *dalloc_tctx;
};

#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
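The prof_cnt_s counters split naturally into "current" values (live objects/bytes, decremented on free) and "accum" values (monotonic totals). A small sketch of how such a counter pair is typically maintained on sampled alloc/free is shown below; the struct only mirrors the curobjs/curbytes and accumobjs/accumbytes fields and deliberately ignores the unbiased variants, so it is an illustrative assumption rather than jemalloc's bookkeeping code.

```c
#include <stdint.h>

/* Simplified mirror of the curobjs/curbytes and accumobjs/accumbytes fields. */
typedef struct {
    uint64_t curobjs;
    uint64_t curbytes;
    uint64_t accumobjs;
    uint64_t accumbytes;
} toy_cnt_t;

/* On a sampled allocation: both the live and the cumulative counters grow. */
static void
toy_cnt_alloc(toy_cnt_t *cnt, uint64_t usize) {
    cnt->curobjs++;
    cnt->curbytes += usize;
    cnt->accumobjs++;
    cnt->accumbytes += usize;
}

/* On free of a sampled allocation: only the live counters shrink. */
static void
toy_cnt_free(toy_cnt_t *cnt, uint64_t usize) {
    cnt->curobjs--;
    cnt->curbytes -= usize;
}
```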