ruanhaishen / redis · Commits · 71a8df6a

Commit 71a8df6a authored Mar 02, 2017 by Guy Benoish

    Merge branch 'unstable' of https://github.com/antirez/redis into unstable

Parents: 56c01c95, 9cc83d2a
Changes: 182
deps/jemalloc/include/jemalloc/internal/arena.h
...
...
@@ -23,14 +23,29 @@
  */
 #define LG_DIRTY_MULT_DEFAULT 3

+typedef enum {
+	purge_mode_ratio = 0,
+	purge_mode_decay = 1,
+	purge_mode_limit = 2
+} purge_mode_t;
+#define PURGE_DEFAULT		purge_mode_ratio
+/* Default decay time in seconds. */
+#define DECAY_TIME_DEFAULT	10
+/* Number of event ticks between time checks. */
+#define DECAY_NTICKS_PER_UPDATE	1000
+
 typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
+typedef struct arena_avail_links_s arena_avail_links_t;
 typedef struct arena_run_s arena_run_t;
 typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
 typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
 typedef struct arena_chunk_s arena_chunk_t;
 typedef struct arena_bin_info_s arena_bin_info_t;
+typedef struct arena_decay_s arena_decay_t;
 typedef struct arena_bin_s arena_bin_t;
 typedef struct arena_s arena_t;
 typedef struct arena_tdata_s arena_tdata_t;

 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
...
...
@@ -140,13 +155,13 @@ struct arena_runs_dirty_link_s {
 */
struct arena_chunk_map_misc_s {
	/*
-	 * Linkage for run trees.  There are two disjoint uses:
+	 * Linkage for run heaps.  There are two disjoint uses:
	 *
-	 *   1) arena_t's runs_avail tree.
+	 *   1) arena_t's runs_avail heaps.
	 *   2) arena_run_t conceptually uses this linkage for in-use non-full
	 *      runs, rather than directly embedding linkage.
	 */
-	rb_node(arena_chunk_map_misc_t)		rb_link;
+	phn(arena_chunk_map_misc_t)		ph_link;

	union {
		/* Linkage for list of dirty runs. */
...
...
@@ -154,16 +169,15 @@ struct arena_chunk_map_misc_s {
		/* Profile counters, used for large object runs. */
		union {
-			void				*prof_tctx_pun;
-			prof_tctx_t			*prof_tctx;
+			void			*prof_tctx_pun;
+			prof_tctx_t		*prof_tctx;
		};

		/* Small region run metadata. */
		arena_run_t			run;
	};
};
-typedef rb_tree(arena_chunk_map_misc_t) arena_avail_tree_t;
-typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
+typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
#endif /* JEMALLOC_ARENA_STRUCTS_A */

#ifdef JEMALLOC_ARENA_STRUCTS_B
...
...
@@ -176,6 +190,14 @@ struct arena_chunk_s {
	 */
	extent_node_t		node;

+	/*
+	 * True if memory could be backed by transparent huge pages.  This is
+	 * only directly relevant to Linux, since it is the only supported
+	 * platform on which jemalloc interacts with explicit transparent huge
+	 * page controls.
+	 */
+	bool			hugepage;
+
	/*
	 * Map of pages within chunk that keeps track of free/large/small.  The
	 * first map_bias entries are omitted, since the chunk header does not
...
...
@@ -220,28 +242,71 @@ struct arena_chunk_s {
 */
struct arena_bin_info_s {
	/* Size of regions in a run for this bin's size class. */
-	size_t		reg_size;
+	size_t			reg_size;

	/* Redzone size. */
-	size_t		redzone_size;
+	size_t			redzone_size;

	/* Interval between regions (reg_size + (redzone_size << 1)). */
-	size_t		reg_interval;
+	size_t			reg_interval;

	/* Total size of a run for this bin's size class. */
-	size_t		run_size;
+	size_t			run_size;

	/* Total number of regions in a run for this bin's size class. */
-	uint32_t	nregs;
+	uint32_t		nregs;

	/*
	 * Metadata used to manipulate bitmaps for runs associated with this
	 * bin.
	 */
-	bitmap_info_t	bitmap_info;
+	bitmap_info_t		bitmap_info;

	/* Offset of first region in a run for this bin's size class. */
-	uint32_t	reg0_offset;
+	uint32_t		reg0_offset;
};

+struct arena_decay_s {
+	/*
+	 * Approximate time in seconds from the creation of a set of unused
+	 * dirty pages until an equivalent set of unused dirty pages is purged
+	 * and/or reused.
+	 */
+	ssize_t			time;
+	/* time / SMOOTHSTEP_NSTEPS. */
+	nstime_t		interval;
+	/*
+	 * Time at which the current decay interval logically started.  We do
+	 * not actually advance to a new epoch until sometime after it starts
+	 * because of scheduling and computation delays, and it is even possible
+	 * to completely skip epochs.  In all cases, during epoch advancement we
+	 * merge all relevant activity into the most recently recorded epoch.
+	 */
+	nstime_t		epoch;
+	/* Deadline randomness generator. */
+	uint64_t		jitter_state;
+	/*
+	 * Deadline for current epoch.  This is the sum of interval and per
+	 * epoch jitter which is a uniform random variable in [0..interval).
+	 * Epochs always advance by precise multiples of interval, but we
+	 * randomize the deadline to reduce the likelihood of arenas purging in
+	 * lockstep.
+	 */
+	nstime_t		deadline;
+	/*
+	 * Number of dirty pages at beginning of current epoch.  During epoch
+	 * advancement we use the delta between arena->decay.ndirty and
+	 * arena->ndirty to determine how many dirty pages, if any, were
+	 * generated.
+	 */
+	size_t			ndirty;
+	/*
+	 * Trailing log of how many unused dirty pages were generated during
+	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
+	 * element is the most recent epoch.  Corresponding epoch times are
+	 * relative to epoch.
+	 */
+	size_t			backlog[SMOOTHSTEP_NSTEPS];
+};
+
struct arena_bin_s {
...
...
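Editorial note on the arena_decay_s state added in the hunk above: each epoch's dirty-page production is recorded in backlog[], and the number of dirty pages allowed to remain is a weighted sum over that log, with older epochs weighted toward zero. The snippet below is a minimal sketch of that idea only, not jemalloc's purging code; the SKETCH_NSTEPS value and the weight table are assumptions.

#include <stddef.h>

#define SKETCH_NSTEPS 200	/* assumed stand-in for SMOOTHSTEP_NSTEPS */

/* Weighted sum of recent dirty-page production; pages above this may be purged. */
static size_t
decay_npages_limit_sketch(const size_t backlog[SKETCH_NSTEPS],
    const double weight[SKETCH_NSTEPS])
{
	size_t limit = 0;

	for (size_t i = 0; i < SKETCH_NSTEPS; i++) {
		/* weight[] shrinks for older epochs, so their pages decay toward 0. */
		limit += (size_t)((double)backlog[i] * weight[i]);
	}
	return (limit);
}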
@@ -251,25 +316,25 @@ struct arena_bin_s {
	 * which may be acquired while holding one or more bin locks, but not
	 * vise versa.
	 */
-	malloc_mutex_t	lock;
+	malloc_mutex_t		lock;

	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
-	arena_run_t	*runcur;
+	arena_run_t		*runcur;

	/*
-	 * Tree of non-full runs.  This tree is used when looking for an
+	 * Heap of non-full runs.  This heap is used when looking for an
	 * existing run when runcur is no longer usable.  We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
-	arena_run_tree_t runs;
+	arena_run_heap_t	runs;

	/* Bin statistics. */
-	malloc_bin_stats_t stats;
+	malloc_bin_stats_t	stats;
};

struct arena_s {
...
...
@@ -277,15 +342,23 @@ struct arena_s {
	unsigned		ind;

	/*
-	 * Number of threads currently assigned to this arena.  This field is
-	 * protected by arenas_lock.
+	 * Number of threads currently assigned to this arena, synchronized via
+	 * atomic operations.  Each thread has two distinct assignments, one for
+	 * application-serving allocation, and the other for internal metadata
+	 * allocation.  Internal metadata must not be allocated from arenas
+	 * created via the arenas.extend mallctl, because the arena.<i>.reset
+	 * mallctl indiscriminately discards all allocations for the affected
+	 * arena.
+	 *
+	 *   0: Application allocation.
+	 *   1: Internal metadata allocation.
	 */
-	unsigned		nthreads;
+	unsigned		nthreads[2];

	/*
	 * There are three classes of arena operations from a locking
	 * perspective:
-	 * 1) Thread assignment (modifies nthreads) is protected by arenas_lock.
+	 * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
	 * 2) Bin-related operations are protected by bin locks.
	 * 3) Chunk- and run-related operations are protected by this mutex.
	 */
...
...
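The nthreads[2] field described in the hunk above is read and written only with atomic operations. A hedged illustration follows, using GCC/Clang __atomic builtins rather than jemalloc's own atomic wrappers; the struct and function names here are invented for the sketch.

#include <stdbool.h>

typedef struct {
	unsigned nthreads[2];	/* [0]: application, [1]: internal metadata */
} arena_counts_t;

static void
arena_nthreads_inc_sketch(arena_counts_t *a, bool internal)
{
	__atomic_fetch_add(&a->nthreads[internal ? 1 : 0], 1, __ATOMIC_RELAXED);
}

static unsigned
arena_nthreads_get_sketch(const arena_counts_t *a, bool internal)
{
	return __atomic_load_n(&a->nthreads[internal ? 1 : 0], __ATOMIC_RELAXED);
}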
@@ -305,10 +378,16 @@ struct arena_s {
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 */
-	uint64_t		offset_state;
+	size_t			offset_state;

	dss_prec_t		dss_prec;

+	/* Extant arena chunks. */
+	ql_head(extent_node_t)	achunks;
+
+	/* Extent serial number generator state. */
+	size_t			extent_sn_next;
+
	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
...
...
@@ -324,7 +403,7 @@ struct arena_s {
	/* Minimum ratio (log base 2) of nactive:ndirty. */
	ssize_t			lg_dirty_mult;

-	/* True if a thread is currently executing arena_purge(). */
+	/* True if a thread is currently executing arena_purge_to_limit(). */
	bool			purging;

	/* Number of pages in active runs and huge regions. */
...
...
@@ -338,12 +417,6 @@ struct arena_s {
	 */
	size_t			ndirty;

-	/*
-	 * Size/address-ordered tree of this arena's available runs.  The tree
-	 * is used for first-best-fit run allocation.
-	 */
-	arena_avail_tree_t	runs_avail;
-
	/*
	 * Unused dirty memory this arena manages.  Dirty memory is conceptually
	 * tracked as an arbitrarily interleaved LRU of dirty runs and cached
...
...
@@ -375,6 +448,9 @@ struct arena_s {
	arena_runs_dirty_link_t	runs_dirty;
	extent_node_t		chunks_cache;

+	/* Decay-based purging state. */
+	arena_decay_t		decay;
+
	/* Extant huge allocations. */
	ql_head(extent_node_t)	huge;
	/* Synchronizes all huge allocation/update/deallocation. */
...
...
@@ -387,9 +463,9 @@ struct arena_s {
	 * orderings are needed, which is why there are two trees with the same
	 * contents.
	 */
-	extent_tree_t		chunks_szad_cached;
+	extent_tree_t		chunks_szsnad_cached;
	extent_tree_t		chunks_ad_cached;
-	extent_tree_t		chunks_szad_retained;
+	extent_tree_t		chunks_szsnad_retained;
	extent_tree_t		chunks_ad_retained;

	malloc_mutex_t		chunks_mtx;
...
...
@@ -402,6 +478,19 @@ struct arena_s {
	/* bins is used to store trees of free regions. */
	arena_bin_t		bins[NBINS];

+	/*
+	 * Size-segregated address-ordered heaps of this arena's available runs,
+	 * used for first-best-fit run allocation.  Runs are quantized, i.e.
+	 * they reside in the last heap which corresponds to a size class less
+	 * than or equal to the run size.
+	 */
+	arena_run_heap_t	runs_avail[NPSIZES];
};

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
	ticker_t		decay_ticker;
};
#endif /* JEMALLOC_ARENA_STRUCTS_B */
...
...
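To illustrate the quantized runs_avail[NPSIZES] layout introduced in the hunk above: a run lookup starts at the heap for the smallest size class that can satisfy the request and walks upward until it finds a non-empty heap. The sketch below uses assumed names and a trivial heap stand-in; it is not jemalloc's first-best-fit implementation.

#define NPSIZES_SKETCH 64

typedef struct run_heap_sketch_s {
	int nonempty;	/* stand-in for "heap has at least one run" */
} run_heap_sketch_t;

/* Return the index of the first non-empty heap at or above ind_for_size, or -1. */
static int
runs_avail_find_sketch(const run_heap_sketch_t runs_avail[NPSIZES_SKETCH],
    int ind_for_size)
{
	for (int i = ind_for_size; i < NPSIZES_SKETCH; i++) {
		if (runs_avail[i].nonempty)
			return (i);
	}
	return (-1);	/* No suitable cached run; a new chunk would be needed. */
}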
@@ -417,7 +506,10 @@ static const size_t large_pad =
#endif
    ;

+extern purge_mode_t	opt_purge;
+extern const char	*purge_mode_names[];
extern ssize_t		opt_lg_dirty_mult;
+extern ssize_t		opt_decay_time;

extern arena_bin_info_t	arena_bin_info[NBINS];
...
...
@@ -428,27 +520,37 @@ extern size_t large_maxclass; /* Max large size class. */
extern unsigned		nlclasses; /* Number of large size classes. */
extern unsigned		nhclasses; /* Number of huge size classes. */
+#ifdef JEMALLOC_JET
+typedef size_t (run_quantize_t)(size_t);
+extern run_quantize_t *run_quantize_floor;
+extern run_quantize_t *run_quantize_ceil;
+#endif
void	arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache);
void	arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool cache);
-extent_node_t	*arena_node_alloc(arena_t *arena);
-void	arena_node_dalloc(arena_t *arena, extent_node_t *node);
-void	*arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, bool *zero);
-void	arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
-void	arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, size_t usize);
-void	arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, size_t usize);
-bool	arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, size_t usize, bool *zero);
-ssize_t	arena_lg_dirty_mult_get(arena_t *arena);
-bool	arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
-void	arena_maybe_purge(arena_t *arena);
-void	arena_purge_all(arena_t *arena);
-void	arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
+extent_node_t	*arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
+void	arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
+void	*arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, size_t *sn, bool *zero);
+void	arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize, size_t sn);
+void	arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t oldsize, size_t usize);
+void	arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t oldsize, size_t usize, size_t sn);
+bool	arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t oldsize, size_t usize, bool *zero);
+ssize_t	arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
+bool	arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult);
+ssize_t	arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
+bool	arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
+void	arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
+void	arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
+void	arena_reset(tsd_t *tsd, arena_t *arena);
+void	arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero);
#ifdef JEMALLOC_JET
...
...
@@ -461,75 +563,100 @@ extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void	arena_quarantine_junk_small(void *ptr, size_t usize);
-void	*arena_malloc_small(arena_t *arena, size_t size, bool zero);
-void	*arena_malloc_large(arena_t *arena, size_t size, bool zero);
-void	*arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
+void	*arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind, bool zero);
+void	*arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero);
+void	*arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache);
-void	arena_prof_promoted(const void *ptr, size_t size);
-void	arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
-void	arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
-void	arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind);
+void	arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
+void	arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
+void	arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
+void	arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#else
void	arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
-void	arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
+void	arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr);
+void	arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
-bool	arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
+bool	arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
void	*arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache);
-dss_prec_t	arena_dss_prec_get(arena_t *arena);
-bool	arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+dss_prec_t	arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
+bool	arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
ssize_t	arena_lg_dirty_mult_default_get(void);
bool	arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
-void	arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult, size_t *nactive, size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
-arena_t	*arena_new(unsigned ind);
-bool	arena_boot(void);
-void	arena_prefork(arena_t *arena);
-void	arena_postfork_parent(arena_t *arena);
-void	arena_postfork_child(arena_t *arena);
+ssize_t	arena_decay_time_default_get(void);
+bool	arena_decay_time_default_set(ssize_t decay_time);
+void	arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, size_t *ndirty);
+void	arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
+unsigned	arena_nthreads_get(arena_t *arena, bool internal);
+void	arena_nthreads_inc(arena_t *arena, bool internal);
+void	arena_nthreads_dec(arena_t *arena, bool internal);
+size_t	arena_extent_sn_next(arena_t *arena);
+arena_t	*arena_new(tsdn_t *tsdn, unsigned ind);
+void	arena_boot(void);
+void	arena_prefork0(tsdn_t *tsdn, arena_t *arena);
+void	arena_prefork1(tsdn_t *tsdn, arena_t *arena);
+void	arena_prefork2(tsdn_t *tsdn, arena_t *arena);
+void	arena_prefork3(tsdn_t *tsdn, arena_t *arena);
+void	arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
+void	arena_postfork_child(tsdn_t *tsdn, arena_t *arena);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
-arena_chunk_map_bits_t	*arena_bitselm_get(arena_chunk_t *chunk, size_t pageind);
-arena_chunk_map_misc_t	*arena_miscelm_get(arena_chunk_t *chunk, size_t pageind);
-size_t	arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm);
-void	*arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
+arena_chunk_map_bits_t	*arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind);
+const arena_chunk_map_bits_t	*arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind);
+arena_chunk_map_misc_t	*arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind);
+const arena_chunk_map_misc_t	*arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind);
+size_t	arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
+void	*arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
arena_chunk_map_misc_t	*arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
arena_chunk_map_misc_t	*arena_run_to_miscelm(arena_run_t *run);
-size_t	*arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
-size_t	arena_mapbitsp_read(size_t *mapbitsp);
-size_t	arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
+size_t	*arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
+const size_t	*arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbitsp_read(const size_t *mapbitsp);
+size_t	arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_size_decode(size_t mapbits);
-size_t	arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind);
-size_t	arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
-size_t	arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
-szind_t	arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
-size_t	arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
-size_t	arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
-size_t	arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
-size_t	arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
-size_t	arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind);
+szind_t	arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
void	arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
size_t	arena_mapbits_size_encode(size_t size);
void	arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
...
...
@@ -549,27 +676,31 @@ void arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t	arena_metadata_allocated_get(arena_t *arena);
bool	arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool	arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
-bool	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
+bool	arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
szind_t	arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
szind_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
-unsigned	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
+size_t	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
    const void *ptr);
-prof_tctx_t	*arena_prof_tctx_get(const void *ptr);
-void	arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
-void	arena_prof_tctx_reset(const void *ptr, size_t usize,
+prof_tctx_t	*arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
+void	arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx);
+void	arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *old_tctx);
-void	*arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
-    tcache_t *tcache);
+void	arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
+void	arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
+void	*arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
+    bool zero, tcache_t *tcache, bool slow_path);
arena_t	*arena_aalloc(const void *ptr);
-size_t	arena_salloc(const void *ptr, bool demote);
-void	arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
-void	arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+size_t	arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
+void	arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
+void	arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, bool slow_path);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A

JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
-arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
+arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
{

	assert(pageind >= map_bias);
...
...
@@ -578,8 +709,15 @@ arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
	return (&chunk->map_bits[pageind-map_bias]);
}

+JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
+arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+	return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
+}
+
JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
-arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
+arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
{

	assert(pageind >= map_bias);
...
...
@@ -589,8 +727,15 @@ arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
	    (uintptr_t)map_misc_offset) + pageind-map_bias);
}

+JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
+arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+	return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
+}
+
JEMALLOC_ALWAYS_INLINE size_t
-arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm)
+arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
...
...
@@ -603,7 +748,7 @@ arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm)
}

JEMALLOC_ALWAYS_INLINE void *
-arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
+arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	size_t pageind = arena_miscelm_to_pageind(miscelm);
...
...
@@ -636,24 +781,31 @@ arena_run_to_miscelm(arena_run_t *run)
}

JEMALLOC_ALWAYS_INLINE size_t *
-arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
{

-	return (&arena_bitselm_get(chunk, pageind)->bits);
+	return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
}

+JEMALLOC_ALWAYS_INLINE const size_t *
+arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+	return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
+}
+
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbitsp_read(size_t *mapbitsp)
+arena_mapbitsp_read(const size_t *mapbitsp)
{

	return (*mapbitsp);
}

JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
{

-	return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
+	return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
}

JEMALLOC_ALWAYS_INLINE size_t
...
...
@@ -673,7 +825,7 @@ arena_mapbits_size_decode(size_t mapbits)
}

JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;
...
...
@@ -683,7 +835,7 @@ arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
}

JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;
...
...
@@ -694,7 +846,7 @@ arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
}

JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;
...
...
@@ -705,7 +857,7 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
}

JEMALLOC_ALWAYS_INLINE szind_t
-arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;
	szind_t binind;
...
...
@@ -717,7 +869,7 @@ arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
}

JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;
...
...
@@ -728,7 +880,7 @@ arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
}

JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;
...
...
@@ -739,7 +891,7 @@ arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
}

JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;
...
...
@@ -750,7 +902,7 @@ arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
}

JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;
...
...
@@ -759,7 +911,7 @@ arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
}

JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;
...
...
@@ -795,7 +947,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
-	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert((size & PAGE_MASK) == 0);
	assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
...
...
@@ -809,7 +961,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size)
{
-	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert((size & PAGE_MASK) == 0);
...
...
@@ -821,7 +973,7 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
{
-	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert((flags & CHUNK_MAP_UNZEROED) == flags);
	arena_mapbitsp_write(mapbitsp, flags);
...
...
@@ -831,7 +983,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
-	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert((size & PAGE_MASK) == 0);
	assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
...
...
@@ -846,7 +998,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    szind_t binind)
{
-	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert(binind <= BININD_INVALID);
...
...
@@ -860,7 +1012,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
    szind_t binind, size_t flags)
{
-	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert(binind < BININD_INVALID);
	assert(pageind - runind >= map_bias);
...
...
@@ -917,7 +1069,7 @@ arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
}

JEMALLOC_INLINE bool
-arena_prof_accum(arena_t *arena, uint64_t accumbytes)
+arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);
...
...
@@ -928,9 +1080,9 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
	{
		bool ret;

-		malloc_mutex_lock(&arena->lock);
+		malloc_mutex_lock(tsdn, &arena->lock);
		ret = arena_prof_accum_impl(arena, accumbytes);
-		malloc_mutex_unlock(&arena->lock);
+		malloc_mutex_unlock(tsdn, &arena->lock);
		return (ret);
	}
}
...
...
@@ -948,12 +1100,12 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
		size_t pageind;
		size_t actual_mapbits;
		size_t rpages_ind;
-		arena_run_t *run;
+		const arena_run_t *run;
		arena_bin_t *bin;
		szind_t run_binind, actual_binind;
		arena_bin_info_t *bin_info;
-		arena_chunk_map_misc_t *miscelm;
-		void *rpages;
+		const arena_chunk_map_misc_t *miscelm;
+		const void *rpages;

		assert(binind != BININD_INVALID);
		assert(binind < NBINS);
...
...
@@ -966,11 +1118,11 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
		    pageind);
-		miscelm = arena_miscelm_get(chunk, rpages_ind);
+		miscelm = arena_miscelm_get_const(chunk, rpages_ind);
		run = &miscelm->run;
		run_binind = run->binind;
		bin = &arena->bins[run_binind];
-		actual_binind = bin - arena->bins;
+		actual_binind = (szind_t)(bin - arena->bins);
		assert(run_binind == actual_binind);
		bin_info = &arena_bin_info[actual_binind];
		rpages = arena_miscelm_to_rpages(miscelm);
...
...
@@ -987,16 +1139,15 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
-	szind_t binind = bin - arena->bins;
+	szind_t binind = (szind_t)(bin - arena->bins);
	assert(binind < NBINS);
	return (binind);
}

-JEMALLOC_INLINE unsigned
+JEMALLOC_INLINE size_t
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
-	unsigned shift, diff, regind;
-	size_t interval;
+	size_t diff, interval, shift, regind;
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
	void *rpages = arena_miscelm_to_rpages(miscelm);
...
...
@@ -1011,12 +1162,12 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
	 * Avoid doing division with a variable divisor if possible.  Using
	 * actual division here can reduce allocator throughput by over 20%!
	 */
-	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)rpages -
+	diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
	    bin_info->reg0_offset);
	/* Rescale (factor powers of 2 out of the numerator and denominator). */
	interval = bin_info->reg_interval;
-	shift = jemalloc_ffs(interval) - 1;
+	shift = ffs_zu(interval) - 1;
	diff >>= shift;
	interval >>= shift;
...
...
@@ -1038,9 +1189,9 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
		 * divide by 0, and 1 and 2 are both powers of two, which are
		 * handled above.
		 */
-#define	SIZE_INV_SHIFT	((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
-#define	SIZE_INV(s)	(((1U << SIZE_INV_SHIFT) / (s)) + 1)
-		static const unsigned interval_invs[] = {
+#define	SIZE_INV_SHIFT	((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
+#define	SIZE_INV(s)	(((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
+		static const size_t interval_invs[] = {
		    SIZE_INV(3),
		    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
		    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
...
...
@@ -1051,8 +1202,8 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
		};

-		if (likely(interval <= ((sizeof(interval_invs) /
-		    sizeof(unsigned)) + 2))) {
+		if (likely(interval <= ((sizeof(interval_invs) /
+		    sizeof(size_t)) + 2))) {
			regind = (diff * interval_invs[interval - 3]) >>
			    SIZE_INV_SHIFT;
		} else
...
...
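The SIZE_INV machinery changed in the hunks above implements "division by a small variable divisor" as a multiply plus shift with a precomputed fixed-point reciprocal. Below is a stand-alone sketch of the trick under the assumption LG_RUN_MAXREGS == 11 and a hypothetical divisor of 48; it is an illustration, not the jemalloc code, and only claims exactness for offsets that are multiples of the divisor (which is all the allocator needs).

#include <assert.h>
#include <stddef.h>

#define SHIFT	((sizeof(size_t) << 3) - 11)	/* assumes LG_RUN_MAXREGS == 11 */
#define INV(s)	((((size_t)1 << SHIFT) / (s)) + 1)

int
main(void)
{
	const size_t interval = 48;		/* example region interval */
	const size_t inv = INV(interval);	/* reciprocal computed once (jemalloc uses a table) */

	for (size_t diff = 0; diff < (interval << 11); diff += interval) {
		/* Multiply-and-shift reproduces diff / interval for these inputs. */
		assert(((diff * inv) >> SHIFT) == diff / interval);
	}
	return 0;
}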
@@ -1067,7 +1218,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
}

JEMALLOC_INLINE prof_tctx_t *
-arena_prof_tctx_get(const void *ptr)
+arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
	prof_tctx_t *ret;
	arena_chunk_t *chunk;
...
...
@@ -1083,18 +1234,19 @@ arena_prof_tctx_get(const void *ptr)
		if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
			ret = (prof_tctx_t *)(uintptr_t)1U;
		else {
-			arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk, pageind);
+			arena_chunk_map_misc_t *elm = arena_miscelm_get_mutable(chunk, pageind);
			ret = atomic_read_p(&elm->prof_tctx_pun);
		}
	} else
-		ret = huge_prof_tctx_get(ptr);
+		ret = huge_prof_tctx_get(tsdn, ptr);

	return (ret);
}

JEMALLOC_INLINE void
-arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
+arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
	arena_chunk_t *chunk;
...
...
@@ -1113,7 +1265,7 @@ arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
			assert(arena_mapbits_large_get(chunk, pageind) != 0);

-			elm = arena_miscelm_get(chunk, pageind);
+			elm = arena_miscelm_get_mutable(chunk, pageind);
			atomic_write_p(&elm->prof_tctx_pun, tctx);
		} else {
			/*
...
...
@@ -1125,12 +1277,12 @@ arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
		}
	} else
-		huge_prof_tctx_set(ptr, tctx);
+		huge_prof_tctx_set(tsdn, ptr, tctx);
}

JEMALLOC_INLINE void
-arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
-    prof_tctx_t *old_tctx)
+arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
+    const void *old_ptr, prof_tctx_t *old_tctx)
{

	cassert(config_prof);
...
...
@@ -1149,43 +1301,59 @@ arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) != 0);

-			elm = arena_miscelm_get(chunk, pageind);
+			elm = arena_miscelm_get_mutable(chunk, pageind);
			atomic_write_p(&elm->prof_tctx_pun,
			    (prof_tctx_t *)(uintptr_t)1U);
		}
	} else
-		huge_prof_tctx_reset(ptr);
+		huge_prof_tctx_reset(tsdn, ptr);
	}
}

+JEMALLOC_ALWAYS_INLINE void
+arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
+{
+	tsd_t *tsd;
+	ticker_t *decay_ticker;
+
+	if (unlikely(tsdn_null(tsdn)))
+		return;
+	tsd = tsdn_tsd(tsdn);
+	decay_ticker = decay_ticker_get(tsd, arena->ind);
+	if (unlikely(decay_ticker == NULL))
+		return;
+	if (unlikely(ticker_ticks(decay_ticker, nticks)))
+		arena_purge(tsdn, arena, false);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
+{
+
+	arena_decay_ticks(tsdn, arena, 1);
+}
+
JEMALLOC_ALWAYS_INLINE void *
-arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
-    tcache_t *tcache)
+arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
+    tcache_t *tcache, bool slow_path)
{
+	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(size != 0);

-	arena = arena_choose(tsd, arena);
-	if (unlikely(arena == NULL))
-		return (NULL);
+	if (likely(tcache != NULL)) {
+		if (likely(size <= SMALL_MAXCLASS)) {
+			return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
+			    tcache, size, ind, zero, slow_path));
+		}
+		if (likely(size <= tcache_maxclass)) {
+			return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
+			    tcache, size, ind, zero, slow_path));
+		}
+		/* (size > tcache_maxclass) case falls through. */
+		assert(size > tcache_maxclass);
+	}

-	if (likely(size <= SMALL_MAXCLASS)) {
-		if (likely(tcache != NULL)) {
-			return (tcache_alloc_small(tsd, arena, tcache, size,
-			    zero));
-		} else
-			return (arena_malloc_small(arena, size, zero));
-	} else if (likely(size <= large_maxclass)) {
-		/*
-		 * Initialize tcache after checking size in order to avoid
-		 * infinite recursion during tcache initialization.
-		 */
-		if (likely(tcache != NULL) && size <= tcache_maxclass) {
-			return (tcache_alloc_large(tsd, arena, tcache, size,
-			    zero));
-		} else
-			return (arena_malloc_large(arena, size, zero));
-	} else
-		return (huge_malloc(tsd, arena, size, zero, tcache));
+	return (arena_malloc_hard(tsdn, arena, size, ind, zero));
}

JEMALLOC_ALWAYS_INLINE arena_t *
...
...
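arena_decay_ticks(), added in the hunk above, leans on a per-thread ticker so that decay deadlines are only examined once every DECAY_NTICKS_PER_UPDATE allocator events rather than on every call. A minimal sketch of that ticker pattern, with invented names and a simplified interface compared to jemalloc's ticker_t, is:

typedef struct {
	int tick;	/* events remaining before the next check */
	int nticks;	/* reload value, e.g. DECAY_NTICKS_PER_UPDATE */
} sketch_ticker_t;

/* Returns nonzero roughly once every nticks events. */
static int
sketch_ticker_ticks(sketch_ticker_t *t, int nticks)
{
	t->tick -= nticks;
	if (t->tick < 0) {
		t->tick += t->nticks;	/* rearm the ticker */
		return 1;		/* caller should check decay deadlines now */
	}
	return 0;
}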
@@ -1202,7 +1370,7 @@ arena_aalloc(const void *ptr)
/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(const void *ptr, bool demote)
+arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;
...
...
@@ -1245,17 +1413,18 @@ arena_salloc(const void *ptr, bool demote)
			ret = index2size(binind);
		}
	} else
-		ret = huge_salloc(ptr);
+		ret = huge_salloc(tsdn, ptr);

	return (ret);
}

JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
+arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

+	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
...
...
@@ -1268,10 +1437,12 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
		if (likely(tcache != NULL)) {
			szind_t binind = arena_ptr_small_binind_get(ptr,
			    mapbits);
-			tcache_dalloc_small(tsd, tcache, ptr, binind);
+			tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+			    binind, slow_path);
		} else {
-			arena_dalloc_small(extent_node_arena_get(
-			    &chunk->node), chunk, ptr, pageind);
+			arena_dalloc_small(tsdn,
+			    extent_node_arena_get(&chunk->node), chunk, ptr,
+			    pageind);
		}
	} else {
		size_t size = arena_mapbits_large_size_get(chunk,
...
...
@@ -1282,28 +1453,33 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
			if (likely(tcache != NULL) && size - large_pad <=
			    tcache_maxclass) {
-				tcache_dalloc_large(tsd, tcache, ptr, size -
-				    large_pad);
+				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+				    size - large_pad, slow_path);
			} else {
-				arena_dalloc_large(extent_node_arena_get(
-				    &chunk->node), chunk, ptr);
+				arena_dalloc_large(tsdn,
+				    extent_node_arena_get(&chunk->node), chunk,
+				    ptr);
			}
		}
	} else
-		huge_dalloc(tsd, ptr, tcache);
+		huge_dalloc(tsdn, ptr);
}

JEMALLOC_ALWAYS_INLINE void
-arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
+arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+    bool slow_path)
{
	arena_chunk_t *chunk;

+	assert(!tsdn_null(tsdn) || tcache == NULL);
+
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr)) {
		if (config_prof && opt_prof) {
			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
			    LG_PAGE;
			assert(arena_mapbits_allocated_get(chunk, pageind) !=
			    0);
			if (arena_mapbits_large_get(chunk, pageind) != 0) {
				/*
				 * Make sure to use promoted size, not request
...
...
@@ -1313,32 +1489,36 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
				    pageind) - large_pad;
			}
		}
-		assert(s2u(size) == s2u(arena_salloc(ptr, false)));
+		assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));

		if (likely(size <= SMALL_MAXCLASS)) {
			/* Small allocation. */
			if (likely(tcache != NULL)) {
				szind_t binind = size2index(size);
-				tcache_dalloc_small(tsd, tcache, ptr, binind);
+				tcache_dalloc_small(tsdn_tsd(tsdn), tcache,
+				    ptr, binind, slow_path);
			} else {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
-				arena_dalloc_small(extent_node_arena_get(
-				    &chunk->node), chunk, ptr, pageind);
+				arena_dalloc_small(tsdn,
+				    extent_node_arena_get(&chunk->node), chunk,
+				    ptr, pageind);
			}
		} else {
			assert(config_cache_oblivious || ((uintptr_t)ptr &
			    PAGE_MASK) == 0);

-			if (likely(tcache != NULL) && size <= tcache_maxclass)
-				tcache_dalloc_large(tsd, tcache, ptr, size);
-			else {
-				arena_dalloc_large(extent_node_arena_get(
-				    &chunk->node), chunk, ptr);
+			if (likely(tcache != NULL) && size <= tcache_maxclass) {
+				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+				    size, slow_path);
+			} else {
+				arena_dalloc_large(tsdn,
+				    extent_node_arena_get(&chunk->node), chunk,
+				    ptr);
			}
		}
	} else
-		huge_dalloc(tsd, ptr, tcache);
+		huge_dalloc(tsdn, ptr);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif
...
...
deps/jemalloc/include/jemalloc/internal/assert.h
new file mode 100644
/*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
#define assert(e) do { \
if (unlikely(config_debug && !(e))) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#endif
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} \
unreachable(); \
} while (0)
#endif
#ifndef not_implemented
#define not_implemented() do { \
if (config_debug) { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (unlikely(config_debug && !(e))) \
not_implemented(); \
} while (0)
#endif
deps/jemalloc/include/jemalloc/internal/atomic.h
...
...
@@ -28,8 +28,8 @@
 * callers.
 *
 *   <t> atomic_read_<t>(<t> *p) { return (*p); }
- *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p + x); }
- *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p - x); }
+ *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
+ *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
 *   bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
 *   {
 *     if (*p != c)
...
...
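The corrected comment above documents that atomic_add_<t>() and atomic_sub_<t>() return the updated value, i.e. they behave like *p += x and *p -= x. A hedged equivalent of that contract using standard C11 atomics rather than jemalloc's wrappers:

#include <stdatomic.h>
#include <stdint.h>

/* Behaves like "*p += x" and returns the new value, matching the documented contract. */
static uint64_t
example_atomic_add_u64(_Atomic uint64_t *p, uint64_t x)
{
	/* fetch_add returns the old value, so add x to report the updated one. */
	return atomic_fetch_add_explicit(p, x, memory_order_seq_cst) + x;
}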
deps/jemalloc/include/jemalloc/internal/base.h
...
...
@@ -9,12 +9,13 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

-void	*base_alloc(size_t size);
-void	base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
+void	*base_alloc(tsdn_t *tsdn, size_t size);
+void	base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
+    size_t *mapped);
bool	base_boot(void);
-void	base_prefork(void);
-void	base_postfork_parent(void);
-void	base_postfork_child(void);
+void	base_prefork(tsdn_t *tsdn);
+void	base_postfork_parent(tsdn_t *tsdn);
+void	base_postfork_child(tsdn_t *tsdn);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
...
...
deps/jemalloc/include/jemalloc/internal/bitmap.h
...
...
@@ -15,6 +15,15 @@ typedef unsigned long bitmap_t;
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
/*
* Do some analysis on how big the bitmap is before we use a tree. For a brute
* force linear search, if we would have to call ffs_lu() more than 2^3 times,
* use a tree instead.
*/
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
# define USE_TREE
#endif
/* Number of groups required to store a given number of bits. */
#define BITMAP_BITS2GROUPS(nbits) \
((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
...
...
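A worked example of the USE_TREE threshold defined in the hunk above, under the assumption of 64-bit groups (LG_BITMAP_GROUP_NBITS == 6): a flat ffs_lu() scan is kept only while the bitmap needs at most 2^3 = 8 groups, i.e. up to 512 bits; anything larger switches to the tree. The macro names below are invented for the example.

#define LG_GROUP_NBITS_EXAMPLE	6	/* 64-bit groups */
#define LG_MAXBITS_EXAMPLE	11	/* e.g. a 2048-bit bitmap */

#if LG_MAXBITS_EXAMPLE - LG_GROUP_NBITS_EXAMPLE > 3
#  define EXAMPLE_WOULD_USE_TREE 1	/* 2048 / 64 = 32 groups > 8: use the tree */
#else
#  define EXAMPLE_WOULD_USE_TREE 0	/* <= 8 groups: linear scan is cheap enough */
#endif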
@@ -48,6 +57,8 @@ typedef unsigned long bitmap_t;
/*
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
*/
#ifdef USE_TREE
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
...
...
@@ -65,6 +76,12 @@ typedef unsigned long bitmap_t;
(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
+ !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
#else /* USE_TREE */
#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
#endif /* USE_TREE */

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
...
...
@@ -78,6 +95,7 @@ struct bitmap_info_s {
	/* Logical number of bits in bitmap (stored at bottom level). */
	size_t nbits;

+#ifdef USE_TREE
	/* Number of levels necessary for nbits. */
	unsigned nlevels;
...
...
@@ -86,6 +104,10 @@ struct bitmap_info_s {
	 * bottom to top (e.g. the bottom level is stored in levels[0]).
	 */
	bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
+#else /* USE_TREE */
+	/* Number of groups necessary for nbits. */
+	size_t ngroups;
+#endif /* USE_TREE */
};

#endif /* JEMALLOC_H_STRUCTS */
...
...
@@ -93,9 +115,8 @@ struct bitmap_info_s {
#ifdef JEMALLOC_H_EXTERNS

void	bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
-size_t	bitmap_info_ngroups(const bitmap_info_t *binfo);
-size_t	bitmap_size(size_t nbits);
void	bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
+size_t	bitmap_size(const bitmap_info_t *binfo);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
...
...
@@ -113,10 +134,20 @@ void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
JEMALLOC_INLINE bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
-	unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
+#ifdef USE_TREE
+	size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
	bitmap_t rg = bitmap[rgoff];
	/* The bitmap is full iff the root group is 0. */
	return (rg == 0);
+#else
+	size_t i;
+
+	for (i = 0; i < binfo->ngroups; i++) {
+		if (bitmap[i] != 0)
+			return (false);
+	}
+	return (true);
+#endif
}

JEMALLOC_INLINE bool
...
...
@@ -128,7 +159,7 @@ bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
	assert(bit < binfo->nbits);
	goff = bit >> LG_BITMAP_GROUP_NBITS;
	g = bitmap[goff];
-	return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
+	return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))));
}

JEMALLOC_INLINE void
...
...
@@ -143,10 +174,11 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
	goff = bit >> LG_BITMAP_GROUP_NBITS;
	gp = &bitmap[goff];
	g = *gp;
-	assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
-	g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+	assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
+	g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
	*gp = g;
	assert(bitmap_get(bitmap, binfo, bit));
+#ifdef USE_TREE
	/* Propagate group state transitions up the tree. */
	if (g == 0) {
		unsigned i;
...
...
@@ -155,13 +187,14 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
			goff = bit >> LG_BITMAP_GROUP_NBITS;
			gp = &bitmap[binfo->levels[i].group_offset + goff];
			g = *gp;
-			assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
-			g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+			assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
+			g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
			*gp = g;
			if (g != 0)
				break;
		}
	}
+#endif
}

/* sfu: set first unset. */
...
...
@@ -174,15 +207,24 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
	assert(!bitmap_full(bitmap, binfo));

+#ifdef USE_TREE
	i = binfo->nlevels - 1;
	g = bitmap[binfo->levels[i].group_offset];
-	bit = jemalloc_ffsl(g) - 1;
+	bit = ffs_lu(g) - 1;
	while (i > 0) {
		i--;
		g = bitmap[binfo->levels[i].group_offset + bit];
-		bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1);
+		bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
	}
+#else
+	i = 0;
+	g = bitmap[0];
+	while ((bit = ffs_lu(g)) == 0) {
+		i++;
+		g = bitmap[i];
+	}
+	bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
+#endif
	bitmap_set(bitmap, binfo, bit);
	return (bit);
}
...
...
@@ -193,7 +235,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
	size_t goff;
	bitmap_t *gp;
	bitmap_t g;
-	bool propagate;
+	UNUSED bool propagate;

	assert(bit < binfo->nbits);
	assert(bitmap_get(bitmap, binfo, bit));
...
...
@@ -201,10 +243,11 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
	gp = &bitmap[goff];
	g = *gp;
	propagate = (g == 0);
-	assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
-	g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+	assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
+	g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
	*gp = g;
	assert(!bitmap_get(bitmap, binfo, bit));
+#ifdef USE_TREE
	/* Propagate group state transitions up the tree. */
	if (propagate) {
		unsigned i;
...
...
@@ -214,14 +257,15 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
			gp = &bitmap[binfo->levels[i].group_offset + goff];
			g = *gp;
			propagate = (g == 0);
-			assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
+			assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
			    == 0);
-			g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+			g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
			*gp = g;
			if (!propagate)
				break;
		}
	}
+#endif /* USE_TREE */
}

#endif
...
...
deps/jemalloc/include/jemalloc/internal/chunk.h
...
...
@@ -48,32 +48,30 @@ extern size_t chunk_npages;
extern const chunk_hooks_t	chunk_hooks_default;

-chunk_hooks_t	chunk_hooks_get(arena_t *arena);
-chunk_hooks_t	chunk_hooks_set(arena_t *arena,
+chunk_hooks_t	chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
+chunk_hooks_t	chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
    const chunk_hooks_t *chunk_hooks);

-bool	chunk_register(const void *chunk, const extent_node_t *node);
+bool	chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node);
void	chunk_deregister(const void *chunk, const extent_node_t *node);
void	*chunk_alloc_base(size_t size);
-void	*chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node);
-void	*chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
-void	chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
-void	chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed, bool committed);
-void	chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
-bool	chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length);
-bool	chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset, size_t length);
+void	*chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, bool *commit, bool dalloc_node);
+void	*chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, bool *commit);
+void	chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, bool committed);
+void	chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, bool zeroed, bool committed);
+bool	chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset, size_t length);
bool	chunk_boot(void);
-void	chunk_prefork(void);
-void	chunk_postfork_parent(void);
-void	chunk_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
...
...
deps/jemalloc/include/jemalloc/internal/chunk_dss.h
...
...
@@ -23,13 +23,11 @@ extern const char *dss_prec_names[];
dss_prec_t	chunk_dss_prec_get(void);
bool	chunk_dss_prec_set(dss_prec_t dss_prec);
-void	*chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
-    size_t alignment, bool *zero, bool *commit);
+void	*chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+    size_t size, size_t alignment, bool *zero, bool *commit);
bool	chunk_in_dss(void *chunk);
-bool	chunk_dss_boot(void);
-void	chunk_dss_prefork(void);
-void	chunk_dss_postfork_parent(void);
-void	chunk_dss_postfork_child(void);
+bool	chunk_dss_mergeable(void *chunk_a, void *chunk_b);
+void	chunk_dss_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
...
...
deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
...
...
@@ -9,8 +9,8 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

-void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero,
-    bool *commit);
+void	*chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment,
+    bool *zero, bool *commit);
bool	chunk_dalloc_mmap(void *chunk, size_t size);

#endif /* JEMALLOC_H_EXTERNS */
...
...
deps/jemalloc/include/jemalloc/internal/ckh.h
...
...
@@ -40,9 +40,7 @@ struct ckh_s {
#endif

	/* Used for pseudo-random number generation. */
-#define	CKH_A		1103515241
-#define	CKH_C		12347
-	uint32_t	prng_state;
+	uint64_t	prng_state;

	/* Total number of items. */
	size_t	count;
...
...
@@ -74,7 +72,7 @@ bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
bool	ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
bool	ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
    void **data);
bool	ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data);
bool	ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
void	ckh_string_hash(const void *key, size_t r_hash[2]);
bool	ckh_string_keycomp(const void *k1, const void *k2);
void	ckh_pointer_hash(const void *key, size_t r_hash[2]);
...
...
deps/jemalloc/include/jemalloc/internal/ctl.h
View file @
71a8df6a
...
...
@@ -21,13 +21,14 @@ struct ctl_named_node_s {
/* If (nchildren == 0), this is a terminal node. */
	unsigned	nchildren;
	const ctl_node_t	*children;
	int	(*ctl)(const size_t *, size_t, void *, size_t *, void *, size_t);
	int	(*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
	    size_t);
};

struct ctl_indexed_node_s {
	struct ctl_node_s	node;
	const ctl_named_node_t	*(*index)(const size_t *, size_t, size_t);
	const ctl_named_node_t	*(*index)(tsdn_t *, const size_t *, size_t,
	    size_t);
};

struct ctl_arena_stats_s {
...
...
@@ -35,8 +36,12 @@ struct ctl_arena_stats_s {
	unsigned	nthreads;
	const char	*dss;
	ssize_t		lg_dirty_mult;
	ssize_t		decay_time;
	size_t		pactive;
	size_t		pdirty;

	/* The remainder are only populated if config_stats is true. */

	arena_stats_t	astats;

	/* Aggregate stats for small size classes, based on bin stats. */
...
...
@@ -56,6 +61,7 @@ struct ctl_stats_s {
	size_t			metadata;
	size_t			resident;
	size_t			mapped;
	size_t			retained;

	unsigned		narenas;
	ctl_arena_stats_t	*arenas;	/* (narenas + 1) elements. */
};
...
...
@@ -64,16 +70,17 @@ struct ctl_stats_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int	ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen);
int	ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
int	ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
int	ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen);
int	ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
    size_t *miblenp);
int	ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen);
bool	ctl_boot(void);
void	ctl_prefork(void);
void	ctl_postfork_parent(void);
void	ctl_postfork_child(void);
void	ctl_prefork(tsdn_t *tsdn);
void	ctl_postfork_parent(tsdn_t *tsdn);
void	ctl_postfork_child(tsdn_t *tsdn);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
...
...
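The ctl machinery declared above backs the public mallctl() entry points, and the xmallctl() macro shown is just an abort-on-failure wrapper around them. As a hedged illustration of the public side only (not part of this diff), a caller could read allocator statistics like this; it assumes the usual je_ symbol prefix used by the bundled build and the standard mallctl() signature:

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Refresh the stats epoch, then read the total number of live allocated bytes. */
static void
print_allocated(void)
{
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	size_t allocated, len = sizeof(allocated);

	/* Writing "epoch" makes jemalloc refresh its cached statistics. */
	if (je_mallctl("epoch", &epoch, &esz, &epoch, esz) != 0)
		return;
	if (je_mallctl("stats.allocated", &allocated, &len, NULL, 0) == 0)
		printf("allocated: %zu bytes\n", allocated);
}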
deps/jemalloc/include/jemalloc/internal/extent.h
View file @
71a8df6a
...
...
@@ -18,6 +18,20 @@ struct extent_node_s {
/* Total region size. */
	size_t			en_size;
/*
* Serial number (potentially non-unique).
*
* In principle serial numbers can wrap around on 32-bit systems if
* JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
* back on address comparison for equal serial numbers, stable (if
* imperfect) ordering is maintained.
*
* Serial numbers may not be unique even in the absence of wrap-around,
* e.g. when splitting an extent and assigning the same serial number to
* both resulting adjacent extents.
*/
	size_t			en_sn;
/*
* The zeroed flag is used by chunk recycling code to track whether
* memory is zero-filled.
...
...
@@ -45,10 +59,10 @@ struct extent_node_s {
	qr(extent_node_t)	cc_link;

	union {
		/* Linkage for the size/address-ordered tree. */
		rb_node(extent_node_t)	szad_link;
		/* Linkage for the size/sn/address-ordered tree. */
		rb_node(extent_node_t)	szsnad_link;

		/* Linkage for arena's huge and node_cache lists. */
		/* Linkage for arena's achunks, huge, and node_cache lists. */
		ql_elm(extent_node_t)	ql_link;
	};
...
...
@@ -61,7 +75,7 @@ typedef rb_tree(extent_node_t) extent_tree_t;
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
...
...
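The en_sn comment above implies the ordering used by the new size/sn/address tree: compare by size first, then by serial number, and fall back on address so that equal (or wrapped) serial numbers still order deterministically. A minimal hedged sketch of such a comparator, written against the accessor prototypes declared in this header (the function name is hypothetical; the real comparators live in extent.c):

/* Sketch: order extents by size, then serial number, then address. */
static int
extent_szsnad_comp_sketch(const extent_node_t *a, const extent_node_t *b)
{
	size_t a_size = extent_node_size_get(a);
	size_t b_size = extent_node_size_get(b);
	size_t a_sn = extent_node_sn_get(a);
	size_t b_sn = extent_node_sn_get(b);
	uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
	uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);

	if (a_size != b_size)
		return ((a_size < b_size) ? -1 : 1);
	/* Equal sizes: older (smaller) serial numbers sort first. */
	if (a_sn != b_sn)
		return ((a_sn < b_sn) ? -1 : 1);
	/* Address comparison keeps the order stable when serial numbers tie. */
	return ((a_addr > b_addr) - (a_addr < b_addr));
}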
@@ -73,6 +87,7 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
arena_t	*extent_node_arena_get(const extent_node_t *node);
void	*extent_node_addr_get(const extent_node_t *node);
size_t	extent_node_size_get(const extent_node_t *node);
size_t	extent_node_sn_get(const extent_node_t *node);
bool	extent_node_zeroed_get(const extent_node_t *node);
bool	extent_node_committed_get(const extent_node_t *node);
bool	extent_node_achunk_get(const extent_node_t *node);
...
...
@@ -80,12 +95,13 @@ prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
void	extent_node_arena_set(extent_node_t *node, arena_t *arena);
void	extent_node_addr_set(extent_node_t *node, void *addr);
void	extent_node_size_set(extent_node_t *node, size_t size);
void	extent_node_sn_set(extent_node_t *node, size_t sn);
void	extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void	extent_node_committed_set(extent_node_t *node, bool committed);
void	extent_node_achunk_set(extent_node_t *node, bool achunk);
void	extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void	extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
    size_t size, bool zeroed, bool committed);
    size_t size, size_t sn, bool zeroed, bool committed);
void	extent_node_dirty_linkage_init(extent_node_t *node);
void	extent_node_dirty_insert(extent_node_t *node,
    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
...
...
@@ -114,6 +130,13 @@ extent_node_size_get(const extent_node_t *node)
	return (node->en_size);
}

JEMALLOC_INLINE size_t
extent_node_sn_get(const extent_node_t *node)
{

	return (node->en_sn);
}

JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
{
...
...
@@ -164,6 +187,13 @@ extent_node_size_set(extent_node_t *node, size_t size)
	node->en_size = size;
}

JEMALLOC_INLINE void
extent_node_sn_set(extent_node_t *node, size_t sn)
{

	node->en_sn = sn;
}

JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{
...
...
@@ -194,12 +224,13 @@ extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
    bool zeroed, bool committed)
    size_t sn, bool zeroed, bool committed)
{

	extent_node_arena_set(node, arena);
	extent_node_addr_set(node, addr);
	extent_node_size_set(node, size);
	extent_node_sn_set(node, sn);
	extent_node_zeroed_set(node, zeroed);
	extent_node_committed_set(node, committed);
	extent_node_achunk_set(node, false);
...
...
deps/jemalloc/include/jemalloc/internal/hash.h
View file @
71a8df6a
/*
 * The following hash function is based on MurmurHash3, placed into the public
 * domain by Austin Appleby.  See http://code.google.com/p/smhasher/ for
 * domain by Austin Appleby.  See https://github.com/aappleby/smhasher for
 * details.
 */
/******************************************************************************/
...
...
@@ -49,6 +49,14 @@ JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{

	/* Handle unaligned read. */
	if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
		uint32_t ret;

		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
		return (ret);
	}

	return (p[i]);
}
...
...
@@ -56,6 +64,14 @@ JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{

	/* Handle unaligned read. */
	if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
		uint64_t ret;

		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
		return (ret);
	}

	return (p[i]);
}
...
...
@@ -321,13 +337,18 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{

	assert(len <= INT_MAX); /* Unfortunate implementation limitation. */

#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
	hash_x64_128(key, len, seed, (uint64_t *)r_hash);
	hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
#else
	uint64_t hashes[2];
	hash_x86_128(key, len, seed, hashes);
	r_hash[0] = (size_t)hashes[0];
	r_hash[1] = (size_t)hashes[1];
	{
		uint64_t hashes[2];
		hash_x86_128(key, (int)len, seed, hashes);
		r_hash[0] = (size_t)hashes[0];
		r_hash[1] = (size_t)hashes[1];
	}
#endif
}
#endif
...
...
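Given the hash() wrapper above, producing a 128-bit MurmurHash3 digest is just a matter of filling a two-element size_t array. A hedged usage sketch, assuming the internal hash.h is in scope (the seed value is arbitrary):

#include <string.h>

/* Sketch: hash a NUL-terminated string into two size_t words. */
static void
hash_string_example(const char *key, size_t r_hash[2])
{
	/* Any fixed seed works; 0x1234 is just an example. */
	hash(key, strlen(key), 0x1234, r_hash);
}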
deps/jemalloc/include/jemalloc/internal/huge.h
View file @
71a8df6a
...
...
@@ -9,24 +9,23 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void	*huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
    tcache_t *tcache);
void	*huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
    bool zero, tcache_t *tcache);
bool	huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero);
void	*huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void	*huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero);
bool	huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t usize_min, size_t usize_max, bool zero);
void	*huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void	huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
void	huge_dalloc(tsdn_t *tsdn, void *ptr);
arena_t	*huge_aalloc(const void *ptr);
size_t	huge_salloc(const void *ptr);
prof_tctx_t	*huge_prof_tctx_get(const void *ptr);
void	huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
void	huge_prof_tctx_reset(const void *ptr);
size_t	huge_salloc(tsdn_t *tsdn, const void *ptr);
prof_tctx_t	*huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void	huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
void	huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
...
...
deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
View file @
71a8df6a
...
...
@@ -49,6 +49,7 @@ static const bool config_lazy_lock =
false
#endif
;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
...
...
@@ -160,7 +161,10 @@ static const bool config_cache_oblivious =
#include <malloc/malloc.h>
#endif
#include "jemalloc/internal/ph.h"
#ifndef __PGI
#define RB_COMPACT
#endif
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
...
...
@@ -183,6 +187,9 @@ static const bool config_cache_oblivious =
#include "jemalloc/internal/jemalloc_internal_macros.h"
/* Page size index type. */
typedef unsigned pszind_t;
/* Size class index type. */
typedef unsigned szind_t;
...
...
@@ -232,7 +239,7 @@ typedef unsigned szind_t;
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
# if (defined(__sparc64__) || defined(__sparcv9))
#  if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
...
...
@@ -256,6 +263,9 @@ typedef unsigned szind_t;
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
# ifdef __riscv__
# define LG_QUANTUM 4
# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
...
...
@@ -317,13 +327,17 @@ typedef unsigned szind_t;
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the page base address for the page containing address a. */
#define PAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~PAGE_MASK))
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
	((void *)((uintptr_t)(a) & (-(alignment))))
	((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
...
...
@@ -331,7 +345,7 @@ typedef unsigned szind_t;
/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
	(((s) + (alignment - 1)) & (-(alignment)))
	(((s) + (alignment - 1)) & ((~(alignment)) + 1))
/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
...
...
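The rewrite of ALIGNMENT_ADDR2BASE and ALIGNMENT_CEILING above swaps -(alignment) for ((~(alignment)) + 1); by two's complement the two expressions are the same bit mask, the new form just avoids negating an unsigned operand, which some compilers flag. A hedged, standalone check of the equivalence and of the ceiling arithmetic for a 64-byte alignment (not jemalloc code, the function name is made up):

#include <assert.h>
#include <stddef.h>

static void
alignment_macro_demo(void)
{
	size_t alignment = 64;

	/* Two's complement: -x == ~x + 1, so both forms yield ...11000000. */
	assert((size_t)(-(alignment)) == ((~alignment) + 1));

	/* ALIGNMENT_CEILING(100, 64): (100 + 63) & ~63 == 128. */
	assert(((100 + (alignment - 1)) & ((~alignment) + 1)) == 128);
}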
@@ -351,14 +365,19 @@ typedef unsigned szind_t;
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
...
...
@@ -379,14 +398,19 @@ typedef unsigned szind_t;
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
...
...
@@ -422,13 +446,27 @@ extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t	opt_narenas;
extern unsigned	opt_narenas;
extern bool in_valgrind;
/* Number of CPUs. */
extern unsigned ncpus;
extern unsigned ncpus;
/* Number of arenas used for automatic multiplexing of threads and arenas. */
extern unsigned narenas_auto;
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
*/
extern arena_t **arenas;
/*
* pind2sz_tab encodes the same information as could be computed by
* pind2sz_compute().
*/
extern size_t const pind2sz_tab[NPSIZES];
/*
* index2size_tab encodes the same information as could be computed (at
* unacceptable cost in some code paths) by index2size_compute().
...
...
@@ -447,31 +485,35 @@ void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
arena_t *arenas_extend(unsigned ind);
arena_t *arena_init(unsigned ind);
unsigned narenas_total_get(void);
arena_t *arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing);
arena_t *arena_choose_hard(tsd_t *tsd);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
unsigned arena_nbound(unsigned ind);
void thread_allocated_cleanup(tsd_t *tsd);
void thread_deallocated_cleanup(tsd_t *tsd);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void	arenas_cache_cleanup(tsd_t *tsd);
void	narenas_cache_cleanup(tsd_t *tsd);
void	arenas_cache_bypass_cleanup(tsd_t *tsd);
void	arenas_tdata_cleanup(tsd_t *tsd);
void	narenas_tdata_cleanup(tsd_t *tsd);
void	arenas_tdata_bypass_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
...
...
@@ -492,16 +534,21 @@ void jemalloc_postfork_child(void);
/******************************************************************************/
#define JEMALLOC_H_INLINES
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
...
...
@@ -511,6 +558,11 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
pszind_t psz2ind(size_t psz);
size_t pind2sz_compute(pszind_t pind);
size_t pind2sz_lookup(pszind_t pind);
size_t pind2sz(pszind_t pind);
size_t psz2u(size_t psz);
szind_t size2index_compute(size_t size);
szind_t size2index_lookup(size_t size);
szind_t size2index(size_t size);
...
...
@@ -521,39 +573,121 @@ size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena);
arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
bool refresh_if_missing);
arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE pszind_t
psz2ind(size_t psz)
{
if (unlikely(psz > HUGE_MAXCLASS))
return (NPSIZES);
{
pszind_t x = lg_floor((psz<<1)-1);
pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
(LG_SIZE_CLASS_GROUP + LG_PAGE);
pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta_inverse_mask = ZI(-1) << lg_delta;
pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
pszind_t ind = grp + mod;
return (ind);
}
}
JEMALLOC_INLINE size_t
pind2sz_compute(pszind_t pind)
{
{
size_t grp = pind >> LG_SIZE_CLASS_GROUP;
size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
size_t grp_size_mask = ~((!!grp)-1);
size_t grp_size = ((ZU(1) << (LG_PAGE +
(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
size_t shift = (grp == 0) ? 1 : grp;
size_t lg_delta = shift + (LG_PAGE-1);
size_t mod_size = (mod+1) << lg_delta;
size_t sz = grp_size + mod_size;
return (sz);
}
}
JEMALLOC_INLINE size_t
pind2sz_lookup(pszind_t pind)
{
size_t ret = (size_t)pind2sz_tab[pind];
assert(ret == pind2sz_compute(pind));
return (ret);
}
JEMALLOC_INLINE size_t
pind2sz(pszind_t pind)
{
assert(pind < NPSIZES);
return (pind2sz_lookup(pind));
}
JEMALLOC_INLINE size_t
psz2u(size_t psz)
{
if (unlikely(psz > HUGE_MAXCLASS))
return (0);
{
size_t x = lg_floor((psz<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (psz + delta_mask) & ~delta_mask;
return (usize);
}
}
JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{
if (unlikely(size > HUGE_MAXCLASS))
return (NSIZES);
#if (NTBINS != 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		size_t lg_ceil = lg_floor(pow2_ceil(size));
		szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
}
#endif
{
size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
(ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
: lg_floor((size<<1)-1);
size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
szind_t x = lg_floor((size<<1)-1);
szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
		size_t grp = shift << LG_SIZE_CLASS_GROUP;
		szind_t grp = shift << LG_SIZE_CLASS_GROUP;
		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
		size_t delta_inverse_mask = ZI(-1) << lg_delta;
		size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
		szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
		size_t index = NTBINS + grp + mod;
		szind_t index = NTBINS + grp + mod;
return (index);
}
}
...
...
@@ -564,8 +698,7 @@ size2index_lookup(size_t size)
assert(size <= LOOKUP_MAXCLASS);
{
size_t ret = ((size_t)(size2index_tab[(size-1) >>
LG_TINY_MIN]));
szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
assert(ret == size2index_compute(size));
return (ret);
}
...
...
@@ -628,18 +761,18 @@ JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{
if (unlikely(size > HUGE_MAXCLASS))
return (0);
#if (NTBINS > 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
size_t lg_ceil = lg_floor(pow2_ceil(size));
		size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
(ZU(1) << lg_ceil));
}
#endif
{
size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
(ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
: lg_floor((size<<1)-1);
size_t x = lg_floor((size<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta = ZU(1) << lg_delta;
...
...
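The arithmetic in size2index_compute() and s2u_compute() above rounds a request up to its size class by locating the class group from lg_floor((size<<1)-1) and then rounding to that group's spacing. A hedged worked example for a 100-byte request, with the constants LG_QUANTUM == 4 and LG_SIZE_CLASS_GROUP == 2 (the common configuration) folded in by hand rather than calling the inlines:

/* Illustration of the s2u_compute() rounding above; not jemalloc code. */
static size_t
s2u_worked_example(void)
{
	size_t size = 100;
	size_t x = 7;                 /* lg_floor((100 << 1) - 1) == lg_floor(199) */
	size_t lg_delta = x - 2 - 1;  /* x >= 2 + 4 + 1, so lg_delta == 4 */
	size_t delta = (size_t)1 << lg_delta;   /* 16-byte spacing in this group */
	size_t delta_mask = delta - 1;
	size_t usize = (size + delta_mask) & ~delta_mask;

	return (usize);               /* 112: the size class serving a 100-byte request */
}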
@@ -723,17 +856,16 @@ sa2u(size_t size, size_t alignment)
return (usize);
}
/* Huge size class. Beware of size_t overflow. */
/* Huge size class. Beware of overflow. */
if (unlikely(alignment > HUGE_MAXCLASS))
return (0);
/*
* We can't achieve subchunk alignment, so round up alignment to the
* minimum that can actually be supported.
*/
alignment = CHUNK_CEILING(alignment);
if (alignment == 0) {
/* size_t overflow. */
return (0);
}
/* Make sure result is a huge size class. */
if (size <= chunksize)
...
...
@@ -759,45 +891,84 @@ sa2u(size_t size, size_t alignment)
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
arena_choose(tsd_t *tsd, arena_t *arena)
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
{
arena_t *ret;
if (arena != NULL)
return (arena);
if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
ret = arena_choose_hard(tsd);
ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
if (unlikely(ret == NULL))
ret = arena_choose_hard(tsd, internal);
return (ret);
}
JEMALLOC_INLINE arena_t *
arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
bool refresh_if_missing)
arena_choose(tsd_t *tsd, arena_t *arena)
{
arena_t *arena;
arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
/* init_if_missing requires refresh_if_missing. */
assert(!init_if_missing || refresh_if_missing);
return (arena_choose_impl(tsd, arena, false));
}
JEMALLOC_INLINE arena_t *
arena_ichoose(tsd_t *tsd, arena_t *arena)
{
if (unlikely(arenas_cache == NULL)) {
/* arenas_cache hasn't been initialized yet. */
return (arena_get_hard(tsd, ind, init_if_missing));
return (arena_choose_impl(tsd, arena, true));
}
JEMALLOC_INLINE arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
{
arena_tdata_t *tdata;
arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
if (unlikely(arenas_tdata == NULL)) {
/* arenas_tdata hasn't been initialized yet. */
return (arena_tdata_get_hard(tsd, ind));
}
	if (unlikely(ind >= tsd_narenas_cache_get(tsd))) {
	if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
		/*
		 * ind is invalid, cache is old (too small), or arena to be
		 * ind is invalid, cache is old (too small), or tdata to be
		 * initialized.
		 */
		return (refresh_if_missing ? arena_get_hard(tsd, ind,
		    init_if_missing) : NULL);
		return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
		    NULL);
}
arena = arenas_cache[ind];
if (likely(arena != NULL) || !refresh_if_missing)
return (arena);
return (arena_get_hard(tsd, ind, init_if_missing));
tdata = &arenas_tdata[ind];
if (likely(tdata != NULL) || !refresh_if_missing)
return (tdata);
return (arena_tdata_get_hard(tsd, ind));
}
JEMALLOC_INLINE arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
{
arena_t *ret;
assert(ind <= MALLOCX_ARENA_MAX);
ret = arenas[ind];
if (unlikely(ret == NULL)) {
ret = atomic_read_p((void *)&arenas[ind]);
if (init_if_missing && unlikely(ret == NULL))
ret = arena_init(tsdn, ind);
}
return (ret);
}
JEMALLOC_INLINE ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind)
{
arena_tdata_t *tdata;
tdata = arena_tdata_get(tsd, ind, true);
if (unlikely(tdata == NULL))
return (NULL);
return (&tdata->decay_ticker);
}
#endif
...
...
@@ -818,27 +989,27 @@ arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *iaalloc(const void *ptr);
size_t isalloc(const void *ptr, bool demote);
void *iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache,
bool is_metadata, arena_t *arena);
void *imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
void *imalloc(tsd_t *tsd, size_t size);
void *icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
void *icalloc(tsd_t *tsd, size_t size);
void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
bool slow_path);
void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena);
void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
void	*ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t ivsalloc(const void *ptr, bool demote);
size_t	ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void	idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata);
void	idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
size_t	p2rz(tsdn_t *tsdn, const void *ptr);
void	idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
    bool slow_path);
void idalloc(tsd_t *tsd, void *ptr);
void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
bool slow_path);
void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache,
bool slow_path);
void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache,
arena_t *arena);
...
...
@@ -846,8 +1017,8 @@ void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero);
bool	ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero);
bool	ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
...
...
@@ -862,100 +1033,85 @@ iaalloc(const void *ptr)
/*
* Typical usage:
* tsdn_t *tsdn = [...]
* void *ptr = [...]
* size_t sz = isalloc(ptr, config_prof);
 *   size_t sz = isalloc(tsdn, ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
isalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || !demote);

	return (arena_salloc(ptr, demote));
	return (arena_salloc(tsdn, ptr, demote));
}
JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache,
    bool is_metadata, arena_t *arena)
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
    bool is_metadata, arena_t *arena, bool slow_path)
{
	void *ret;

	assert(size != 0);
	assert(!is_metadata || tcache == NULL);
	assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);

	ret = arena_malloc(tsd, arena, size, zero, tcache);
	ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
	if (config_stats && is_metadata && likely(ret != NULL)) {
		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
		arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret,
		    config_prof));
	}
	return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
{
return (iallocztm(tsd, size, false, tcache, false, arena));
return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
false, NULL, slow_path));
}
JEMALLOC_ALWAYS_INLINE void *
imalloc(tsd_t *tsd, size_t size)
{
return (iallocztm(tsd, size, false, tcache_get(tsd, true), false, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
{
return (iallocztm(tsd, size, true, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
icalloc(tsd_t *tsd, size_t size)
{
return (iallocztm(tsd, size, true, tcache_get(tsd, true), false, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena)
{
void *ret;
assert(usize != 0);
assert(usize == sa2u(usize, alignment));
assert(!is_metadata || tcache == NULL);
assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
	ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
		arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret,
		    config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena)
{

	return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
	return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{

	return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd,
	    NULL), false, NULL));
	return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
	    tcache_get(tsd, true), false, NULL));
}
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
extent_node_t *node;
...
...
@@ -967,7 +1123,7 @@ ivsalloc(const void *ptr, bool demote)
assert(extent_node_addr_get(node) == ptr ||
extent_node_achunk_get(node));
return (isalloc(ptr, demote));
	return (isalloc(tsdn, ptr, demote));
}
JEMALLOC_INLINE size_t
...
...
@@ -985,65 +1141,62 @@ u2rz(size_t usize)
}
JEMALLOC_INLINE size_t
p2rz(const void *ptr)
p2rz(tsdn_t *tsdn, const void *ptr)
{
size_t usize = isalloc(ptr, false);
	size_t usize = isalloc(tsdn, ptr, false);
return (u2rz(usize));
}
JEMALLOC_ALWAYS_INLINE void
idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata)
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
bool slow_path)
{
assert(ptr != NULL);
assert(!is_metadata || tcache == NULL);
assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto);
if (config_stats && is_metadata) {
arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
		arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr,
		    config_prof));
}
arena_dalloc(tsd, ptr, tcache);
}
JEMALLOC_ALWAYS_INLINE void
idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
idalloctm(tsd, ptr, tcache, false);
arena_dalloc(tsdn, ptr, tcache, slow_path);
}
JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr)
{
idalloctm(tsd, ptr, tcache_get(tsd, false), false);
	idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true);
}
JEMALLOC_ALWAYS_INLINE void
iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{

	if (config_fill && unlikely(opt_quarantine))
	if (slow_path && config_fill && unlikely(opt_quarantine))
		quarantine(tsd, ptr);
	else
		idalloctm(tsd, ptr, tcache, false);
		idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    bool slow_path)
{

	arena_sdalloc(tsd, ptr, size, tcache);
	arena_sdalloc(tsdn, ptr, size, tcache, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path)
{

	if (config_fill && unlikely(opt_quarantine))
	if (slow_path && config_fill && unlikely(opt_quarantine))
		quarantine(tsd, ptr);
	else
		isdalloct(tsd, ptr, size, tcache);
		isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path);
}
JEMALLOC_ALWAYS_INLINE void *
...
...
@@ -1054,17 +1207,18 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t usize, copysize;
usize = sa2u(size + extra, alignment);
	if (usize == 0)
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return (NULL);
p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
	p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena);
if (p == NULL) {
if (extra == 0)
return (NULL);
/* Try again, without extra this time. */
usize = sa2u(size, alignment);
		if (usize == 0)
		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return (NULL);
p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache,
arena);
if (p == NULL)
return (NULL);
}
...
...
@@ -1074,7 +1228,7 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache);
	isqalloc(tsd, ptr, oldsize, tcache, true);
return (p);
}
...
...
@@ -1110,8 +1264,8 @@ iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
}
JEMALLOC_ALWAYS_INLINE bool
ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
assert(ptr != NULL);
...
...
@@ -1123,7 +1277,7 @@ ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
return (true);
}
return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
	return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero));
}
#endif
...
...
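A recurring pattern in the rewritten inlines above is that fast-path helpers now take a tsdn_t * (a thread-state handle that may be NULL during early bootstrap), while callers holding a full tsd_t * adapt it with tsd_tsdn(). A hedged sketch of that calling convention, mirroring the idalloc() body shown in the diff (the wrapper name is hypothetical):

/* Sketch: a tsd-holding caller reaching a tsdn-taking helper. */
static void
dalloc_wrapper_sketch(tsd_t *tsd, void *ptr)
{
	/* tsd_tsdn() adapts tsd_t * to tsdn_t * for the fast path. */
	idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true);
}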
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
View file @
71a8df6a
...
...
@@ -17,7 +17,18 @@
# include <sys/uio.h>
# endif
# include <pthread.h>
# ifdef JEMALLOC_OS_UNFAIR_LOCK
# include <os/lock.h>
# endif
# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
# include <sched.h>
# endif
# include <errno.h>
# include <sys/time.h>
# include <time.h>
# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# include <mach/mach_time.h>
# endif
#endif
#include <sys/types.h>
...
...
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
View file @
71a8df6a
...
...
@@ -56,9 +56,9 @@
#undef JEMALLOC_HAVE_BUILTIN_CLZ
/*
 * Defined if madvise(2) is available.
 * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
 */
#undef JEMALLOC_HAVE_MADVISE
#undef JEMALLOC_OS_UNFAIR_LOCK
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
...
...
@@ -66,6 +66,9 @@
*/
#undef JEMALLOC_OSSPIN
/* Defined if syscall(2) is usable. */
#undef JEMALLOC_USE_SYSCALL
/*
* Defined if secure_getenv(3) is available.
*/
...
...
@@ -76,6 +79,24 @@
*/
#undef JEMALLOC_HAVE_ISSETUGID
/* Defined if pthread_atfork(3) is available. */
#undef JEMALLOC_HAVE_PTHREAD_ATFORK
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
/*
* Defined if mach_absolute_time() is available.
*/
#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
...
...
@@ -189,9 +210,16 @@
#undef JEMALLOC_TLS
/*
 * ffs()/ffsl() functions to use for bitmapping.  Don't use these directly;
 * instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h.
 * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
 * Don't use this directly; instead use unreachable() from util.h
*/
#undef JEMALLOC_INTERNAL_UNREACHABLE
/*
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
* use ffs_*() from util.h.
*/
#undef JEMALLOC_INTERNAL_FFSLL
#undef JEMALLOC_INTERNAL_FFSL
#undef JEMALLOC_INTERNAL_FFS
...
...
@@ -213,18 +241,35 @@
#undef JEMALLOC_ZONE
#undef JEMALLOC_ZONE_VERSION
/*
* Methods for determining whether the OS overcommits.
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
* /proc/sys/vm.overcommit_memory file.
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
*/
#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/* Defined if madvise(2) is available. */
#undef JEMALLOC_HAVE_MADVISE
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched.
* madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
* unused, such that they will be discarded rather
* than swapped out.
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
* will be discarded rather than swapped out.
* madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
* new pages will be demand-zeroed if the
* address region is later touched.
*/
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_FREE
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
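The comment above describes the two purge flavors selected by these defines. As a hedged, standalone illustration of the underlying system call only (not jemalloc's pages_purge() implementation), discarding a page-aligned range on a POSIX system could look like this; the helper name is made up:

#include <stddef.h>
#include <sys/mman.h>

/*
 * Advise the kernel that [addr, addr+len) is unused.  MADV_FREE (where
 * available) lets the kernel reclaim lazily; MADV_DONTNEED discards
 * immediately, so later touches see demand-zeroed pages.
 */
static int
purge_range(void *addr, size_t len)
{
#ifdef MADV_FREE
	if (madvise(addr, len, MADV_FREE) == 0)
		return (0);
#endif
	return (madvise(addr, len, MADV_DONTNEED));
}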
/*
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
* arguments to madvise(2).
*/
#undef JEMALLOC_THP
/* Define if operating system has alloca.h header. */
#undef JEMALLOC_HAS_ALLOCA_H
...
...
@@ -241,6 +286,9 @@
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#undef LG_SIZEOF_LONG_LONG
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
...
...
@@ -259,4 +307,7 @@
*/
#undef JEMALLOC_EXPORT
/* config.malloc_conf options string. */
#undef JEMALLOC_CONFIG_MALLOC_CONF
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
deps/jemalloc/include/jemalloc/internal/mb.h
View file @
71a8df6a
...
...
@@ -42,7 +42,7 @@ mb_write(void)
:
/* Inputs. */
:
"memory"
/* Clobbers. */
);
#else
#  else
/*
* This is hopefully enough to keep the compiler from reordering
* instructions around this one.
...
...
@@ -52,7 +52,7 @@ mb_write(void)
:
/* Inputs. */
:
"memory"
/* Clobbers. */
);
#endif
#  endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE
void
...
...
@@ -104,9 +104,9 @@ mb_write(void)
{
	malloc_mutex_t mtx;

	malloc_mutex_init(&mtx);
	malloc_mutex_lock(&mtx);
	malloc_mutex_unlock(&mtx);
	malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
	malloc_mutex_lock(TSDN_NULL, &mtx);
	malloc_mutex_unlock(TSDN_NULL, &mtx);
}
#endif
#endif
...
...
deps/jemalloc/include/jemalloc/internal/mutex.h
View file @
71a8df6a
...
...
@@ -5,18 +5,25 @@ typedef struct malloc_mutex_s malloc_mutex_t;
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# define MALLOC_MUTEX_INITIALIZER \
{OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER {0}
#  define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#else
# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# endif
#endif
...
...
@@ -31,6 +38,8 @@ struct malloc_mutex_s {
# else
	CRITICAL_SECTION	lock;
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
	os_unfair_lock		lock;
#elif (defined(JEMALLOC_OSSPIN))
	OSSpinLock		lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
...
...
@@ -39,6 +48,7 @@ struct malloc_mutex_s {
#else
	pthread_mutex_t		lock;
#endif
	witness_t		witness;
};
#endif
/* JEMALLOC_H_STRUCTS */
...
...
@@ -52,52 +62,62 @@ extern bool isthreaded;
# define isthreaded true
#endif
bool	malloc_mutex_init(malloc_mutex_t *mutex);
void	malloc_mutex_prefork(malloc_mutex_t *mutex);
void	malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
void	malloc_mutex_postfork_child(malloc_mutex_t *mutex);
bool	mutex_boot(void);
bool	malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank);
void	malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool	malloc_mutex_boot(void);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void	malloc_mutex_lock(malloc_mutex_t *mutex);
void	malloc_mutex_unlock(malloc_mutex_t *mutex);
void	malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex)
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{

	if (isthreaded) {
		witness_assert_not_owner(tsdn, &mutex->witness);
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
		AcquireSRWLockExclusive(&mutex->lock);
#  else
		EnterCriticalSection(&mutex->lock);
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
		os_unfair_lock_lock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
		OSSpinLockLock(&mutex->lock);
#else
		pthread_mutex_lock(&mutex->lock);
#endif
		witness_lock(tsdn, &mutex->witness);
	}
}
JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex)
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{

	if (isthreaded) {
		witness_unlock(tsdn, &mutex->witness);
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
		ReleaseSRWLockExclusive(&mutex->lock);
#  else
		LeaveCriticalSection(&mutex->lock);
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
		os_unfair_lock_unlock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
		OSSpinLockUnlock(&mutex->lock);
#else
...
...
@@ -105,6 +125,22 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
#endif
}
}
JEMALLOC_INLINE void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{

	if (isthreaded)
		witness_assert_owner(tsdn, &mutex->witness);
}

JEMALLOC_INLINE void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{

	if (isthreaded)
		witness_assert_not_owner(tsdn, &mutex->witness);
}
#endif
#endif
/* JEMALLOC_H_INLINES */
...
...
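The witness field added to malloc_mutex_s lets debug builds assert a global lock-rank ordering: each mutex is now initialized with a name and a rank, and lock/unlock take the tsdn handle so the per-thread witness list can be checked. A hedged usage sketch with a hypothetical module-local mutex (WITNESS_RANK_OMIT is taken from the diff above; real code would pick a meaningful rank from witness.h):

/* Hypothetical mutex guarded by the witness machinery. */
static malloc_mutex_t	example_mtx;

static bool
example_boot(void)
{
	return (malloc_mutex_init(&example_mtx, "example", WITNESS_RANK_OMIT));
}

static void
example_do_work(tsdn_t *tsdn)
{
	malloc_mutex_lock(tsdn, &example_mtx);    /* asserts the lock is not already owned */
	/* ... critical section ... */
	malloc_mutex_unlock(tsdn, &example_mtx);
}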
deps/jemalloc/include/jemalloc/internal/nstime.h
0 → 100644
View file @
71a8df6a
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct nstime_s nstime_t;
/* Maximum supported number of seconds (~584 years). */
#define NSTIME_SEC_MAX KQU(18446744072)
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct nstime_s {
	uint64_t	ns;
};
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void	nstime_init(nstime_t *time, uint64_t ns);
void	nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
uint64_t	nstime_ns(const nstime_t *time);
uint64_t	nstime_sec(const nstime_t *time);
uint64_t	nstime_nsec(const nstime_t *time);
void	nstime_copy(nstime_t *time, const nstime_t *source);
int	nstime_compare(const nstime_t *a, const nstime_t *b);
void	nstime_add(nstime_t *time, const nstime_t *addend);
void	nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
void	nstime_imultiply(nstime_t *time, uint64_t multiplier);
void	nstime_idivide(nstime_t *time, uint64_t divisor);
uint64_t	nstime_divide(const nstime_t *time, const nstime_t *divisor);
#ifdef JEMALLOC_JET
typedef bool (nstime_monotonic_t)(void);
extern nstime_monotonic_t *nstime_monotonic;
typedef bool (nstime_update_t)(nstime_t *);
extern nstime_update_t *nstime_update;
#else
bool	nstime_monotonic(void);
bool	nstime_update(nstime_t *time);
#endif
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
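The new nstime API above is a thin wrapper over a 64-bit nanosecond counter, with nstime_update() reading the platform clock. A hedged sketch of measuring an elapsed interval with it, assuming the internal header is in scope (the function name is made up):

/* Sketch: measure elapsed nanoseconds around some work. */
static uint64_t
elapsed_ns_example(void)
{
	nstime_t begin, end;

	nstime_init(&begin, 0);
	nstime_update(&begin);          /* read the clock */
	/* ... work ... */
	nstime_copy(&end, &begin);
	nstime_update(&end);            /* read the clock again (monotonic) */
	nstime_subtract(&end, &begin);  /* end -= begin */
	return (nstime_ns(&end));
}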
deps/jemalloc/include/jemalloc/internal/pages.h
View file @
71a8df6a
...
...
@@ -9,13 +9,16 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void	*pages_map(void *addr, size_t size);
void	*pages_map(void *addr, size_t size, bool *commit);
void	pages_unmap(void *addr, size_t size);
void	*pages_trim(void *addr, size_t alloc_size, size_t leadsize,
    size_t size);
    size_t size, bool *commit);
bool	pages_commit(void *addr, size_t size);
bool	pages_decommit(void *addr, size_t size);
bool	pages_purge(void *addr, size_t size);
bool	pages_huge(void *addr, size_t size);
bool	pages_nohuge(void *addr, size_t size);
void	pages_boot(void);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
...
...
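The pages_map()/pages_commit()/pages_decommit() prototypes above separate reserving address space from committing usable memory. As a hedged, standalone illustration of that idea on POSIX systems (not the implementation in pages.c; names are made up), one can reserve with PROT_NONE and commit or decommit by flipping protections:

#include <stddef.h>
#include <sys/mman.h>

/* Reserve address space without committing backing memory. */
static void *
reserve(size_t size)
{
	void *p = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	return ((p == MAP_FAILED) ? NULL : p);
}

/* Commit (make usable) a previously reserved range; returns 0 on success. */
static int
commit(void *addr, size_t size)
{
	return (mprotect(addr, size, PROT_READ | PROT_WRITE));
}

/* Decommit a range so stray accesses fault; returns 0 on success. */
static int
decommit(void *addr, size_t size)
{
	return (mprotect(addr, size, PROT_NONE));
}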