ruanhaishen / redis · Commits · d4439bd4

Commit d4439bd4, authored May 15, 2023 by Oran Agra
Merge remote-tracking branch 'origin/unstable' into 7.2
Parents: e26a769d, 2ffde15a

Changes: 200 files. Too many changes to show: to preserve performance, only 200 of 200+ files are displayed.
deps/jemalloc/include/jemalloc/internal/slab_data.h (new file, mode 100644)

#ifndef JEMALLOC_INTERNAL_SLAB_DATA_H
#define JEMALLOC_INTERNAL_SLAB_DATA_H

#include "jemalloc/internal/bitmap.h"

typedef struct slab_data_s slab_data_t;
struct slab_data_s {
	/* Per region allocated/deallocated bitmap. */
	bitmap_t bitmap[BITMAP_GROUPS_MAX];
};

#endif /* JEMALLOC_INTERNAL_SLAB_DATA_H */
deps/jemalloc/include/jemalloc/internal/stats.h

@@ -11,7 +11,8 @@
     OPTION('b', bins, true, false) \
     OPTION('l', large, true, false) \
     OPTION('x', mutex, true, false) \
-    OPTION('e', extents, true, false)
+    OPTION('e', extents, true, false) \
+    OPTION('h', hpa, config_stats, false)
 
 enum {
 #define OPTION(o, v, d, s) stats_print_option_num_##v,

@@ -24,8 +25,30 @@ enum {
 extern bool opt_stats_print;
 extern char opt_stats_print_opts[stats_print_tot_num_options+1];
 
+/* Utilities for stats_interval. */
+extern int64_t opt_stats_interval;
+extern char opt_stats_interval_opts[stats_print_tot_num_options+1];
+
+#define STATS_INTERVAL_DEFAULT -1
+/*
+ * Batch-increment the counter to reduce synchronization overhead.  Each thread
+ * merges after (interval >> LG_BATCH_SIZE) bytes of allocations; also limit the
+ * BATCH_MAX for accuracy when the interval is huge (which is expected).
+ */
+#define STATS_INTERVAL_ACCUM_LG_BATCH_SIZE 6
+#define STATS_INTERVAL_ACCUM_BATCH_MAX (4 << 20)
+
+/* Only accessed by thread event. */
+uint64_t stats_interval_new_event_wait(tsd_t *tsd);
+uint64_t stats_interval_postponed_event_wait(tsd_t *tsd);
+void stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed);
+
 /* Implements je_malloc_stats_print. */
-void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
-    const char *opts);
+void stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts);
 
 bool stats_boot(void);
 void stats_prefork(tsdn_t *tsdn);
 void stats_postfork_parent(tsdn_t *tsdn);
 void stats_postfork_child(tsdn_t *tsdn);
 
 #endif /* JEMALLOC_INTERNAL_STATS_H */
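The STATS_INTERVAL_ACCUM_* constants above describe a batch-merge scheme: each thread counts allocated bytes locally and only folds them into the shared interval counter every (interval >> 6) bytes, capped at 4 MiB. Below is a minimal standalone sketch of that idea, with invented names and a plain (non-atomic) shared counter; it is not jemalloc's implementation.

    #include <stdint.h>
    #include <stdio.h>

    #define LG_BATCH_SIZE 6
    #define BATCH_MAX ((uint64_t)4 << 20)

    static uint64_t shared_accum;          /* stand-in for the global counter */

    typedef struct {
    	uint64_t local;                /* bytes not yet merged */
    	uint64_t batch;                /* merge threshold */
    } thread_accum_t;

    static void thread_accum_init(thread_accum_t *a, uint64_t interval) {
    	uint64_t batch = interval >> LG_BATCH_SIZE;
    	a->local = 0;
    	a->batch = batch > BATCH_MAX ? BATCH_MAX : batch;
    }

    static void thread_accum_add(thread_accum_t *a, uint64_t bytes) {
    	a->local += bytes;
    	if (a->local >= a->batch) {    /* merge rarely, not on every alloc */
    		shared_accum += a->local;
    		a->local = 0;
    	}
    }

    int main(void) {
    	thread_accum_t a;
    	thread_accum_init(&a, (uint64_t)1 << 30);   /* 1 GiB interval -> 4 MiB batch */
    	for (int i = 0; i < 1200; i++) {
    		thread_accum_add(&a, 4096);
    	}
    	/* Exactly one merge has happened by now (at the 4 MiB mark). */
    	printf("merged so far: %llu bytes\n", (unsigned long long)shared_accum);
    	return 0;
    }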
deps/jemalloc/include/jemalloc/internal/sz.h

@@ -22,6 +22,12 @@
  * size that would result from such an allocation.
  */
 
+/* Page size index type. */
+typedef unsigned pszind_t;
+
+/* Size class index type. */
+typedef unsigned szind_t;
+
 /*
  * sz_pind2sz_tab encodes the same information as could be computed by
  * sz_pind2sz_compute().

@@ -39,34 +45,62 @@ extern size_t sz_index2size_tab[SC_NSIZES];
  */
 extern uint8_t sz_size2index_tab[];
 
-static const size_t sz_large_pad =
-#ifdef JEMALLOC_CACHE_OBLIVIOUS
-    PAGE
-#else
-    0
-#endif
-    ;
+/*
+ * Padding for large allocations: PAGE when opt_cache_oblivious == true (to
+ * enable cache index randomization); 0 otherwise.
+ */
+extern size_t sz_large_pad;
 
-extern void sz_boot(const sc_data_t *sc_data);
+extern void sz_boot(const sc_data_t *sc_data, bool cache_oblivious);
 
 JEMALLOC_ALWAYS_INLINE pszind_t
 sz_psz2ind(size_t psz) {
+	assert(psz > 0);
 	if (unlikely(psz > SC_LARGE_MAXCLASS)) {
 		return SC_NPSIZES;
 	}
-	pszind_t x = lg_floor((psz << 1) - 1);
-	pszind_t shift = (x < SC_LG_NGROUP + LG_PAGE) ?
+	/* x is the lg of the first base >= psz. */
+	pszind_t x = lg_ceil(psz);
+	/*
+	 * sc.h introduces a lot of size classes. These size classes are divided
+	 * into different size class groups. There is a very special size class
+	 * group, each size class in or after it is an integer multiple of PAGE.
+	 * We call it first_ps_rg. It means first page size regular group. The
+	 * range of first_ps_rg is (base, base * 2], and base == PAGE *
+	 * SC_NGROUP. off_to_first_ps_rg begins from 1, instead of 0. e.g.
+	 * off_to_first_ps_rg is 1 when psz is (PAGE * SC_NGROUP + 1).
+	 */
+	pszind_t off_to_first_ps_rg = (x < SC_LG_NGROUP + LG_PAGE) ?
 	    0 : x - (SC_LG_NGROUP + LG_PAGE);
-	pszind_t grp = shift << SC_LG_NGROUP;
 
-	pszind_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
-	    LG_PAGE : x - SC_LG_NGROUP - 1;
+	/*
+	 * Same as sc_s::lg_delta.
+	 * Delta for off_to_first_ps_rg == 1 is PAGE,
+	 * for each increase in offset, it's multiplied by two.
+	 * Therefore, lg_delta = LG_PAGE + (off_to_first_ps_rg - 1).
+	 */
+	pszind_t lg_delta = (off_to_first_ps_rg == 0) ?
	    LG_PAGE : LG_PAGE + (off_to_first_ps_rg - 1);
 
-	size_t delta_inverse_mask = ZU(-1) << lg_delta;
-	pszind_t mod = ((((psz - 1) & delta_inverse_mask) >> lg_delta)) &
-	    ((ZU(1) << SC_LG_NGROUP) - 1);
+	/*
+	 * Let's write psz in binary, e.g. 0011 for 0x3, 0111 for 0x7.
+	 * The leftmost bits whose len is lg_base decide the base of psz.
+	 * The rightmost bits whose len is lg_delta decide (pgz % PAGE).
+	 * The middle bits whose len is SC_LG_NGROUP decide ndelta.
+	 * ndelta is offset to the first size class in the size class group,
+	 * starts from 1.
+	 * If you don't know lg_base, ndelta or lg_delta, see sc.h.
+	 * |xxxxxxxxxxxxxxxxxxxx|------------------------|yyyyyyyyyyyyyyyyyyyyy|
+	 * |<-- len: lg_base -->|<-- len: SC_LG_NGROUP-->|<-- len: lg_delta -->|
	 *                      |<--      ndelta       -->|
 	 * rg_inner_off = ndelta - 1
 	 * Why use (psz - 1)?
 	 * To handle case: psz % (1 << lg_delta) == 0.
 	 */
+	pszind_t rg_inner_off = (((psz - 1)) >> lg_delta) & (SC_NGROUP - 1);
 
-	pszind_t ind = grp + mod;
+	pszind_t base_ind = off_to_first_ps_rg << SC_LG_NGROUP;
+	pszind_t ind = base_ind + rg_inner_off;
 	return ind;
 }
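To make the comments in the new sz_psz2ind() concrete, here is a self-contained sketch of the same arithmetic under assumed constants LG_PAGE = 12 and SC_LG_NGROUP = 2 (a common 4 KiB-page configuration). It is illustrative only, not jemalloc code; the expected index values are worked out for that assumed configuration.

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define LG_PAGE      12                  /* assumed: 4 KiB pages */
    #define SC_LG_NGROUP 2                   /* assumed: 4 classes per group */
    #define SC_NGROUP    (1 << SC_LG_NGROUP)

    static unsigned lg_ceil(size_t x) {      /* smallest n with 2^n >= x */
    	unsigned n = 0;
    	while (((size_t)1 << n) < x) {
    		n++;
    	}
    	return n;
    }

    static unsigned psz2ind_sketch(size_t psz) {
    	unsigned x = lg_ceil(psz);
    	unsigned off = (x < SC_LG_NGROUP + LG_PAGE) ?
    	    0 : x - (SC_LG_NGROUP + LG_PAGE);
    	unsigned lg_delta = (off == 0) ? LG_PAGE : LG_PAGE + (off - 1);
    	unsigned inner = (unsigned)(((psz - 1) >> lg_delta) & (SC_NGROUP - 1));
    	return (off << SC_LG_NGROUP) + inner;
    }

    int main(void) {
    	/* 20 KiB: one PAGE-sized delta past the 16 KiB group base -> index 4. */
    	assert(psz2ind_sketch(5 * 4096) == 4);
    	/* 40 KiB: first class of the next group (delta = 8 KiB) -> index 8. */
    	printf("%u\n", psz2ind_sketch(10 * 4096));
    	return 0;
    }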
@@ -152,10 +186,15 @@ sz_size2index_compute(size_t size) {
 }
 
 JEMALLOC_ALWAYS_INLINE szind_t
-sz_size2index_lookup(size_t size) {
+sz_size2index_lookup_impl(size_t size) {
 	assert(size <= SC_LOOKUP_MAXCLASS);
-	szind_t ret = (sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1)
-					 >> SC_LG_TINY_MIN]);
+	return sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1)
+	    >> SC_LG_TINY_MIN];
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+sz_size2index_lookup(size_t size) {
+	szind_t ret = sz_size2index_lookup_impl(size);
 	assert(ret == sz_size2index_compute(size));
 	return ret;
 }

@@ -194,9 +233,14 @@ sz_index2size_compute(szind_t index) {
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
+sz_index2size_lookup_impl(szind_t index) {
+	return sz_index2size_tab[index];
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
 sz_index2size_lookup(szind_t index) {
-	size_t ret = (size_t)sz_index2size_tab[index];
+	size_t ret = sz_index2size_lookup_impl(index);
 	assert(ret == sz_index2size_compute(index));
 	return ret;
 }

@@ -207,6 +251,12 @@ sz_index2size(szind_t index) {
 	return sz_index2size_lookup(index);
 }
 
+JEMALLOC_ALWAYS_INLINE void
+sz_size2index_usize_fastpath(size_t size, szind_t *ind, size_t *usize) {
+	*ind = sz_size2index_lookup_impl(size);
+	*usize = sz_index2size_lookup_impl(*ind);
+}
+
 JEMALLOC_ALWAYS_INLINE size_t
 sz_s2u_compute(size_t size) {
 	if (unlikely(size > SC_LARGE_MAXCLASS)) {

@@ -266,7 +316,7 @@ sz_sa2u(size_t size, size_t alignment) {
 	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
 
 	/* Try for a small size class. */
-	if (size <= SC_SMALL_MAXCLASS && alignment < PAGE) {
+	if (size <= SC_SMALL_MAXCLASS && alignment <= PAGE) {
 		/*
 		 * Round size up to the nearest multiple of alignment.
 		 *

@@ -315,4 +365,7 @@ sz_sa2u(size_t size, size_t alignment) {
 	return usize;
 }
 
+size_t sz_psz_quantize_floor(size_t size);
+size_t sz_psz_quantize_ceil(size_t size);
+
 #endif /* JEMALLOC_INTERNAL_SIZE_H */
deps/jemalloc/include/jemalloc/internal/tcache_externs.h

@@ -2,9 +2,16 @@
 #define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
 
 extern bool opt_tcache;
-extern ssize_t opt_lg_tcache_max;
-extern cache_bin_info_t *tcache_bin_info;
+extern size_t opt_tcache_max;
+extern ssize_t opt_lg_tcache_nslots_mul;
+extern unsigned opt_tcache_nslots_small_min;
+extern unsigned opt_tcache_nslots_small_max;
+extern unsigned opt_tcache_nslots_large;
+extern ssize_t opt_lg_tcache_shift;
+extern size_t opt_tcache_gc_incr_bytes;
+extern size_t opt_tcache_gc_delay_bytes;
+extern unsigned opt_lg_tcache_flush_small_div;
+extern unsigned opt_lg_tcache_flush_large_div;
 
 /*
  * Number of tcache bins.  There are SC_NBINS small-object bins, plus 0 or more

@@ -15,6 +22,8 @@ extern unsigned nhbins;
 /* Maximum cached size class. */
 extern size_t tcache_maxclass;
 
+extern cache_bin_info_t *tcache_bin_info;
+
 /*
  * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
  * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are

@@ -26,23 +35,26 @@ extern size_t tcache_maxclass;
 extern tcaches_t *tcaches;
 
 size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
-void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
 void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
     cache_bin_t *tbin, szind_t binind, bool *tcache_success);
+
 void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
     szind_t binind, unsigned rem);
-void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
-    unsigned rem, tcache_t *tcache);
-void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
+    szind_t binind, unsigned rem);
+void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *bin,
+    szind_t binind, bool is_small);
+void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
+    tcache_t *tcache, arena_t *arena);
 tcache_t *tcache_create_explicit(tsd_t *tsd);
 void tcache_cleanup(tsd_t *tsd);
 void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
-bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
+bool tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind);
 void tcaches_flush(tsd_t *tsd, unsigned ind);
 void tcaches_destroy(tsd_t *tsd, unsigned ind);
-bool tcache_boot(tsdn_t *tsdn);
-void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+bool tcache_boot(tsdn_t *tsdn, base_t *base);
+void tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
+    tcache_t *tcache, arena_t *arena);
 void tcache_prefork(tsdn_t *tsdn);
 void tcache_postfork_parent(tsdn_t *tsdn);
 void tcache_postfork_child(tsdn_t *tsdn);

@@ -50,4 +62,14 @@ void tcache_flush(tsd_t *tsd);
 bool tsd_tcache_data_init(tsd_t *tsd);
 bool tsd_tcache_enabled_data_init(tsd_t *tsd);
 
+void tcache_assert_initialized(tcache_t *tcache);
+
+/* Only accessed by thread event. */
+uint64_t tcache_gc_new_event_wait(tsd_t *tsd);
+uint64_t tcache_gc_postponed_event_wait(tsd_t *tsd);
+void tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed);
+uint64_t tcache_gc_dalloc_new_event_wait(tsd_t *tsd);
+uint64_t tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd);
+void tcache_gc_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);
+
 #endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/tcache_inlines.h

@@ -3,9 +3,9 @@
 #include "jemalloc/internal/bin.h"
 #include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/san.h"
 #include "jemalloc/internal/sc.h"
 #include "jemalloc/internal/sz.h"
 #include "jemalloc/internal/ticker.h"
 #include "jemalloc/internal/util.h"
 
 static inline bool

@@ -27,28 +27,29 @@ tcache_enabled_set(tsd_t *tsd, bool enabled) {
 	tsd_slow_update(tsd);
 }
 
-JEMALLOC_ALWAYS_INLINE void
-tcache_event(tsd_t *tsd, tcache_t *tcache) {
-	if (TCACHE_GC_INCR == 0) {
-		return;
+JEMALLOC_ALWAYS_INLINE bool
+tcache_small_bin_disabled(szind_t ind, cache_bin_t *bin) {
+	assert(ind < SC_NBINS);
+	bool ret = (cache_bin_info_ncached_max(&tcache_bin_info[ind]) == 0);
+	if (ret && bin != NULL) {
+		/* small size class but cache bin disabled. */
+		assert(ind >= nhbins);
+		assert((uintptr_t)(*bin->stack_head) ==
+		    cache_bin_preceding_junk);
 	}
 
-	if (unlikely(ticker_tick(&tcache->gc_ticker))) {
-		tcache_event_hard(tsd, tcache);
-	}
+	return ret;
 }
 
 JEMALLOC_ALWAYS_INLINE void *
 tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
     size_t size, szind_t binind, bool zero, bool slow_path) {
 	void *ret;
-	cache_bin_t *bin;
 	bool tcache_success;
-	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
 
 	assert(binind < SC_NBINS);
-	bin = tcache_small_bin_get(tcache, binind);
-	ret = cache_bin_alloc_easy(bin, &tcache_success);
+	cache_bin_t *bin = &tcache->bins[binind];
+	ret = cache_bin_alloc(bin, &tcache_success);
 	assert(tcache_success == (ret != NULL));
 	if (unlikely(!tcache_success)) {
 		bool tcache_hard_success;

@@ -56,6 +57,13 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
 		if (unlikely(arena == NULL)) {
 			return NULL;
 		}
+		if (unlikely(tcache_small_bin_disabled(binind, bin))) {
+			/* stats and zero are handled directly by the arena. */
+			return arena_malloc_hard(tsd_tsdn(tsd), arena, size,
+			    binind, zero);
+		}
+		tcache_bin_flush_stashed(tsd, tcache, bin, binind,
+		    /* is_small */ true);
 
 		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
 		    bin, binind, &tcache_hard_success);

@@ -65,38 +73,14 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
 	}
 
 	assert(ret);
-	/*
-	 * Only compute usize if required.  The checks in the following if
-	 * statement are all static.
-	 */
-	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
-		usize = sz_index2size(binind);
+	if (unlikely(zero)) {
+		size_t usize = sz_index2size(binind);
 		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
-	}
-
-	if (likely(!zero)) {
-		if (slow_path && config_fill) {
-			if (unlikely(opt_junk_alloc)) {
-				arena_alloc_junk_small(ret, &bin_infos[binind],
-				    false);
-			} else if (unlikely(opt_zero)) {
-				memset(ret, 0, usize);
-			}
-		}
-	} else {
-		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
-			arena_alloc_junk_small(ret, &bin_infos[binind], true);
-		}
 		memset(ret, 0, usize);
 	}
-
 	if (config_stats) {
 		bin->tstats.nrequests++;
 	}
-	if (config_prof) {
-		tcache->prof_accumbytes += usize;
-	}
-	tcache_event(tsd, tcache);
 	return ret;
 }

@@ -104,12 +88,11 @@ JEMALLOC_ALWAYS_INLINE void *
 tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
     szind_t binind, bool zero, bool slow_path) {
 	void *ret;
-	cache_bin_t *bin;
 	bool tcache_success;
 
 	assert(binind >= SC_NBINS && binind < nhbins);
-	bin = tcache_large_bin_get(tcache, binind);
-	ret = cache_bin_alloc_easy(bin, &tcache_success);
+	cache_bin_t *bin = &tcache->bins[binind];
+	ret = cache_bin_alloc(bin, &tcache_success);
 	assert(tcache_success == (ret != NULL));
 	if (unlikely(!tcache_success)) {
 		/*

@@ -120,96 +103,79 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 		if (unlikely(arena == NULL)) {
 			return NULL;
 		}
+		tcache_bin_flush_stashed(tsd, tcache, bin, binind,
+		    /* is_small */ false);
 
 		ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
 		if (ret == NULL) {
 			return NULL;
 		}
 	} else {
-		size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-
-		/* Only compute usize on demand */
-		if (config_prof || (slow_path && config_fill) ||
-		    unlikely(zero)) {
-			usize = sz_index2size(binind);
+		if (unlikely(zero)) {
+			size_t usize = sz_index2size(binind);
 			assert(usize <= tcache_maxclass);
-		}
-
-		if (likely(!zero)) {
-			if (slow_path && config_fill) {
-				if (unlikely(opt_junk_alloc)) {
-					memset(ret, JEMALLOC_ALLOC_JUNK,
-					    usize);
-				} else if (unlikely(opt_zero)) {
-					memset(ret, 0, usize);
-				}
-			}
-		} else {
 			memset(ret, 0, usize);
 		}
 
 		if (config_stats) {
 			bin->tstats.nrequests++;
 		}
-		if (config_prof) {
-			tcache->prof_accumbytes += usize;
-		}
 	}
 
-	tcache_event(tsd, tcache);
 	return ret;
 }
 
 JEMALLOC_ALWAYS_INLINE void
 tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
     bool slow_path) {
-	cache_bin_t *bin;
-	cache_bin_info_t *bin_info;
-
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS);
 
-	if (slow_path && config_fill && unlikely(opt_junk_free)) {
-		arena_dalloc_junk_small(ptr, &bin_infos[binind]);
-	}
-
-	bin = tcache_small_bin_get(tcache, binind);
-	bin_info = &tcache_bin_info[binind];
-	if (unlikely(!cache_bin_dalloc_easy(bin, bin_info, ptr))) {
-		tcache_bin_flush_small(tsd, tcache, bin, binind,
-		    (bin_info->ncached_max >> 1));
-		bool ret = cache_bin_dalloc_easy(bin, bin_info, ptr);
+	cache_bin_t *bin = &tcache->bins[binind];
+	/*
+	 * Not marking the branch unlikely because this is past free_fastpath()
+	 * (which handles the most common cases), i.e. at this point it's often
+	 * uncommon cases.
+	 */
+	if (cache_bin_nonfast_aligned(ptr)) {
+		/* Junk unconditionally, even if bin is full. */
+		san_junk_ptr(ptr, sz_index2size(binind));
+		if (cache_bin_stash(bin, ptr)) {
+			return;
+		}
+		assert(cache_bin_full(bin));
+		/* Bin full; fall through into the flush branch. */
+	}
+
+	if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
+		if (unlikely(tcache_small_bin_disabled(binind, bin))) {
+			arena_dalloc_small(tsd_tsdn(tsd), ptr);
+			return;
+		}
+		cache_bin_sz_t max = cache_bin_info_ncached_max(
+		    &tcache_bin_info[binind]);
+		unsigned remain = max >> opt_lg_tcache_flush_small_div;
+		tcache_bin_flush_small(tsd, tcache, bin, binind, remain);
+		bool ret = cache_bin_dalloc_easy(bin, ptr);
 		assert(ret);
 	}
-
-	tcache_event(tsd, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
 tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
     bool slow_path) {
-	cache_bin_t *bin;
-	cache_bin_info_t *bin_info;
-
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS);
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
 
 	if (slow_path && config_fill && unlikely(opt_junk_free)) {
 		large_dalloc_junk(ptr, sz_index2size(binind));
 	}
 
-	bin = tcache_large_bin_get(tcache, binind);
-	bin_info = &tcache_bin_info[binind];
-	if (unlikely(bin->ncached == bin_info->ncached_max)) {
-		tcache_bin_flush_large(tsd, bin, binind,
-		    (bin_info->ncached_max >> 1), tcache);
-	}
-	assert(bin->ncached < bin_info->ncached_max);
-	bin->ncached++;
-	*(bin->avail - bin->ncached) = ptr;
-
-	tcache_event(tsd, tcache);
+	cache_bin_t *bin = &tcache->bins[binind];
+	if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
+		unsigned remain = cache_bin_info_ncached_max(
+		    &tcache_bin_info[binind]) >> opt_lg_tcache_flush_large_div;
+		tcache_bin_flush_large(tsd, tcache, bin, binind, remain);
+		bool ret = cache_bin_dalloc_easy(bin, ptr);
+		assert(ret);
+	}
 }
 
 JEMALLOC_ALWAYS_INLINE tcache_t *
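The reworked tcache_dalloc_small()/tcache_dalloc_large() above share one pattern: try the cheap bin insert, and if the bin is full, flush a fraction of it (controlled by the opt_lg_tcache_flush_*_div options) and retry, which must then succeed. A toy, self-contained model of that pattern follows; the bin is a plain array, all names are invented, and the flush divisor is assumed to be 1 (flush half).

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define NCACHED_MAX 8
    #define LG_FLUSH_DIV 1

    typedef struct {
    	void *slots[NCACHED_MAX];
    	unsigned ncached;
    } toy_bin_t;

    static bool toy_dalloc_easy(toy_bin_t *bin, void *ptr) {
    	if (bin->ncached == NCACHED_MAX) {
    		return false;          /* full: caller must flush first */
    	}
    	bin->slots[bin->ncached++] = ptr;
    	return true;
    }

    static void toy_flush(toy_bin_t *bin, unsigned remain) {
    	/* Pretend the flushed pointers were returned to the arena. */
    	bin->ncached = remain;
    }

    static void toy_dalloc(toy_bin_t *bin, void *ptr) {
    	if (!toy_dalloc_easy(bin, ptr)) {
    		toy_flush(bin, NCACHED_MAX >> LG_FLUSH_DIV);
    		bool ok = toy_dalloc_easy(bin, ptr);
    		assert(ok);            /* mirrors the assert(ret) in the diff */
    	}
    }

    int main(void) {
    	toy_bin_t bin = {.ncached = 0};
    	int objs[20];
    	for (int i = 0; i < 20; i++) {
    		toy_dalloc(&bin, &objs[i]);
    	}
    	return 0;
    }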
deps/jemalloc/include/jemalloc/internal/tcache_structs.h

@@ -7,36 +7,19 @@
 #include "jemalloc/internal/ticker.h"
 #include "jemalloc/internal/tsd_types.h"
 
-/* Various uses of this struct need it to be a named type. */
-typedef ql_elm(tsd_t) tsd_link_t;
-
-struct tcache_s {
-	/*
-	 * To minimize our cache-footprint, we put the frequently accessed data
-	 * together at the start of this struct.
-	 */
-
-	/* Cleared after arena_prof_accum(). */
-	uint64_t prof_accumbytes;
-	/* Drives incremental GC. */
-	ticker_t gc_ticker;
-	/*
-	 * The pointer stacks associated with bins follow as a contiguous array.
-	 * During tcache initialization, the avail pointer in each element of
-	 * tbins is initialized to point to the proper offset within this array.
-	 */
-	cache_bin_t bins_small[SC_NBINS];
-
-	/*
-	 * This data is less hot; we can be a little less careful with our
-	 * footprint here.
-	 */
-
-	/* Lets us track all the tcaches in an arena. */
-	ql_elm(tcache_t) link;
-
-	/* Logically scoped to tsd, but put here for cache layout reasons. */
-	ql_elm(tsd_t) tsd_link;
-	bool in_hook;
+/*
+ * The tcache state is split into the slow and hot path data.  Each has a
+ * pointer to the other, and the data always comes in pairs.  The layout of each
+ * of them varies in practice; tcache_slow lives in the TSD for the automatic
+ * tcache, and as part of a dynamic allocation for manual allocations.  Keeping
+ * a pointer to tcache_slow lets us treat these cases uniformly, rather than
+ * splitting up the tcache [de]allocation code into those paths called with the
+ * TSD tcache and those called with a manual tcache.
+ */
+
+struct tcache_slow_s {
+	/* Lets us track all the tcaches in an arena. */
+	ql_elm(tcache_slow_t) link;
 
 	/*
 	 * The descriptor lets the arena find our cache bins without seeing the

@@ -51,12 +34,27 @@ struct tcache_s {
 	szind_t next_gc_bin;
 	/* For small bins, fill (ncached_max >> lg_fill_div). */
 	uint8_t lg_fill_div[SC_NBINS];
+	/* For small bins, whether has been refilled since last GC. */
+	bool bin_refilled[SC_NBINS];
+	/*
+	 * For small bins, the number of items we can pretend to flush before
+	 * actually flushing.
+	 */
+	uint8_t bin_flush_delay_items[SC_NBINS];
 	/*
-	 * We put the cache bins for large size classes at the end of the
-	 * struct, since some of them might not get used.  This might end up
-	 * letting us avoid touching an extra page if we don't have to.
+	 * The start of the allocation containing the dynamic allocation for
+	 * either the cache bins alone, or the cache bin memory as well as this
+	 * tcache_slow_t and its associated tcache_t.
 	 */
-	cache_bin_t bins_large[SC_NSIZES - SC_NBINS];
+	void *dyn_alloc;
+
+	/* The associated bins. */
+	tcache_t *tcache;
+};
+
+struct tcache_s {
+	tcache_slow_t *tcache_slow;
+	cache_bin_t bins[TCACHE_NBINS_MAX];
 };
 
 /* Linkage for list of available (previously used) explicit tcache IDs. */
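Below is a minimal sketch of the split described in the comment above: hot-path data and slow-path data kept as a pair, each holding a pointer to the other, so the same code works whether the pair is embedded in TSD or heap-allocated. Types and fields are illustrative only, not jemalloc's.

    #include <stdio.h>

    typedef struct hot_s hot_t;
    typedef struct cold_s cold_t;

    struct cold_s {
    	void *dyn_alloc;     /* owning allocation, like tcache_slow_t */
    	hot_t *hot;          /* the paired hot half */
    	unsigned next_gc_bin;
    };

    struct hot_s {
    	cold_t *cold;        /* back pointer, like tcache_t's tcache_slow */
    	int bins[8];         /* stand-in for the cache bins */
    };

    int main(void) {
    	cold_t cold = {0};
    	hot_t hot = {.cold = &cold};
    	cold.hot = &hot;
    	/* Code handed either half of the pair can reach the other. */
    	printf("%p %p\n", (void *)hot.cold, (void *)cold.hot);
    	return 0;
    }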
deps/jemalloc/include/jemalloc/internal/tcache_types.h

@@ -3,6 +3,7 @@
 #include "jemalloc/internal/sc.h"
 
+typedef struct tcache_slow_s tcache_slow_t;
 typedef struct tcache_s tcache_t;
 typedef struct tcaches_s tcaches_t;

@@ -16,39 +17,9 @@ typedef struct tcaches_s tcaches_t;
 #define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
 #define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
 
-/*
- * Absolute minimum number of cache slots for each small bin.
- */
-#define TCACHE_NSLOTS_SMALL_MIN 20
-
-/*
- * Absolute maximum number of cache slots for each small bin in the thread
- * cache.  This is an additional constraint beyond that imposed as: twice the
- * number of regions per slab for this size class.
- *
- * This constant must be an even number.
- */
-#define TCACHE_NSLOTS_SMALL_MAX 200
-
-/* Number of cache slots for large size classes. */
-#define TCACHE_NSLOTS_LARGE 20
-
-/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
-#define LG_TCACHE_MAXCLASS_DEFAULT 15
-
-/*
- * TCACHE_GC_SWEEP is the approximate number of allocation events between
- * full GC sweeps.  Integer rounding may cause the actual number to be
- * slightly higher, since GC is performed incrementally.
- */
-#define TCACHE_GC_SWEEP 8192
-
-/* Number of tcache allocation/deallocation events between incremental GCs. */
-#define TCACHE_GC_INCR \
-    ((TCACHE_GC_SWEEP / SC_NBINS) + ((TCACHE_GC_SWEEP / SC_NBINS == 0) ? 0 : 1))
-
-/* Used in TSD static initializer only.  Real init in tcache_data_init(). */
+/* Used in TSD static initializer only.  Real init in tsd_tcache_data_init(). */
 #define TCACHE_ZERO_INITIALIZER {0}
+#define TCACHE_SLOW_ZERO_INITIALIZER {0}
 
 /* Used in TSD static initializer only. Will be initialized to opt_tcache. */
 #define TCACHE_ENABLED_ZERO_INITIALIZER false

@@ -56,4 +27,9 @@ typedef struct tcaches_s tcaches_t;
 /* Used for explicit tcache only.  Means flushed but not destroyed. */
 #define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1)
 
+#define TCACHE_LG_MAXCLASS_LIMIT 23 /* tcache_maxclass = 8M */
+#define TCACHE_MAXCLASS_LIMIT ((size_t)1 << TCACHE_LG_MAXCLASS_LIMIT)
+#define TCACHE_NBINS_MAX (SC_NBINS + SC_NGROUP * \
+    (TCACHE_LG_MAXCLASS_LIMIT - SC_LG_LARGE_MINCLASS) + 1)
+
 #endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
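For a feel of the new TCACHE_NBINS_MAX bound, here is a back-of-the-envelope computation under one assumed configuration (64-bit build with 4 KiB pages, where SC_NBINS is 36, SC_NGROUP is 4, and the smallest large class is 2^14). Those constant values are assumptions for illustration, not read from this diff.

    #include <stdio.h>

    int main(void) {
    	/* Assumed values for a common 64-bit, 4 KiB-page configuration. */
    	int sc_nbins = 36;                  /* small-object bins */
    	int sc_ngroup = 4;                  /* size classes per doubling group */
    	int sc_lg_large_minclass = 14;      /* smallest large class: 16 KiB */
    	int tcache_lg_maxclass_limit = 23;  /* 8 MiB cap from this header */

    	int nbins_max = sc_nbins
    	    + sc_ngroup * (tcache_lg_maxclass_limit - sc_lg_large_minclass) + 1;
    	printf("TCACHE_NBINS_MAX would be %d\n", nbins_max);   /* prints 73 */
    	return 0;
    }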
deps/jemalloc/include/jemalloc/internal/test_hooks.h

@@ -4,16 +4,21 @@
 extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)();
 extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)();
 
-#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
+#if defined(JEMALLOC_JET) || defined(JEMALLOC_UNIT_TEST)
+# define JEMALLOC_TEST_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
 
-#define open JEMALLOC_HOOK(open, test_hooks_libc_hook)
-#define read JEMALLOC_HOOK(read, test_hooks_libc_hook)
-#define write JEMALLOC_HOOK(write, test_hooks_libc_hook)
-#define readlink JEMALLOC_HOOK(readlink, test_hooks_libc_hook)
-#define close JEMALLOC_HOOK(close, test_hooks_libc_hook)
-#define creat JEMALLOC_HOOK(creat, test_hooks_libc_hook)
-#define secure_getenv JEMALLOC_HOOK(secure_getenv, test_hooks_libc_hook)
+# define open JEMALLOC_TEST_HOOK(open, test_hooks_libc_hook)
+# define read JEMALLOC_TEST_HOOK(read, test_hooks_libc_hook)
+# define write JEMALLOC_TEST_HOOK(write, test_hooks_libc_hook)
+# define readlink JEMALLOC_TEST_HOOK(readlink, test_hooks_libc_hook)
+# define close JEMALLOC_TEST_HOOK(close, test_hooks_libc_hook)
+# define creat JEMALLOC_TEST_HOOK(creat, test_hooks_libc_hook)
+# define secure_getenv JEMALLOC_TEST_HOOK(secure_getenv, test_hooks_libc_hook)
 /* Note that this is undef'd and re-define'd in src/prof.c. */
-#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
+# define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
+#else
+# define JEMALLOC_TEST_HOOK(fn, hook) fn
+#endif
 
 #endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */
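The hook macro above relies on the comma operator: the left operand runs the hook (only when it is set), and the whole expression then evaluates to the real function, so the macro can wrap a call site without changing its type. A standalone model of the same pattern, with invented names, is shown below.

    #include <stdio.h>

    static void (*demo_hook)(void) = NULL;

    /* Same shape as the JEMALLOC_TEST_HOOK macro in the header above. */
    #define DEMO_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)

    static int add(int a, int b) {
    	return a + b;
    }

    static void announcing_hook(void) {
    	puts("hook fired before the wrapped call");
    }

    int main(void) {
    	int x = DEMO_HOOK(add, demo_hook)(1, 2);   /* hook unset: just add() */
    	demo_hook = announcing_hook;
    	int y = DEMO_HOOK(add, demo_hook)(3, 4);   /* prints, then add() */
    	printf("%d %d\n", x, y);
    	return 0;
    }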
deps/jemalloc/include/jemalloc/internal/thread_event.h (new file, mode 100644)

#ifndef JEMALLOC_INTERNAL_THREAD_EVENT_H
#define JEMALLOC_INTERNAL_THREAD_EVENT_H

#include "jemalloc/internal/tsd.h"

/* "te" is short for "thread_event" */

/*
 * TE_MIN_START_WAIT should not exceed the minimal allocation usize.
 */
#define TE_MIN_START_WAIT ((uint64_t)1U)
#define TE_MAX_START_WAIT UINT64_MAX

/*
 * Maximum threshold on thread_(de)allocated_next_event_fast, so that there is
 * no need to check overflow in malloc fast path. (The allocation size in malloc
 * fast path never exceeds SC_LOOKUP_MAXCLASS.)
 */
#define TE_NEXT_EVENT_FAST_MAX (UINT64_MAX - SC_LOOKUP_MAXCLASS + 1U)

/*
 * The max interval helps make sure that malloc stays on the fast path in the
 * common case, i.e. thread_allocated < thread_allocated_next_event_fast.  When
 * thread_allocated is within an event's distance to TE_NEXT_EVENT_FAST_MAX
 * above, thread_allocated_next_event_fast is wrapped around and we fall back to
 * the medium-fast path.  The max interval makes sure that we're not staying on
 * the fallback case for too long, even if there's no active event or if all
 * active events have long wait times.
 */
#define TE_MAX_INTERVAL ((uint64_t)(4U << 20))

/*
 * Invalid elapsed time, for situations where elapsed time is not needed.  See
 * comments in thread_event.c for more info.
 */
#define TE_INVALID_ELAPSED UINT64_MAX

typedef struct te_ctx_s {
	bool is_alloc;
	uint64_t *current;
	uint64_t *last_event;
	uint64_t *next_event;
	uint64_t *next_event_fast;
} te_ctx_t;

void te_assert_invariants_debug(tsd_t *tsd);
void te_event_trigger(tsd_t *tsd, te_ctx_t *ctx);
void te_recompute_fast_threshold(tsd_t *tsd);
void tsd_te_init(tsd_t *tsd);

/*
 * List of all events, in the following format:
 *   E(event, (condition), is_alloc_event)
 */
#define ITERATE_OVER_ALL_EVENTS \
    E(tcache_gc, (opt_tcache_gc_incr_bytes > 0), true) \
    E(prof_sample, (config_prof && opt_prof), true) \
    E(stats_interval, (opt_stats_interval >= 0), true) \
    E(tcache_gc_dalloc, (opt_tcache_gc_incr_bytes > 0), false) \
    E(peak_alloc, config_stats, true) \
    E(peak_dalloc, config_stats, false)

#define E(event, condition_unused, is_alloc_event_unused) \
    C(event##_event_wait)

/* List of all thread event counters. */
#define ITERATE_OVER_ALL_COUNTERS \
    C(thread_allocated) \
    C(thread_allocated_last_event) \
    ITERATE_OVER_ALL_EVENTS \
    C(prof_sample_last_event) \
    C(stats_interval_last_event)

/* Getters directly wrap TSD getters. */
#define C(counter) \
JEMALLOC_ALWAYS_INLINE uint64_t \
counter##_get(tsd_t *tsd) { \
	return tsd_##counter##_get(tsd); \
}

ITERATE_OVER_ALL_COUNTERS
#undef C

/*
 * Setters call the TSD pointer getters rather than the TSD setters, so that
 * the counters can be modified even when TSD state is reincarnated or
 * minimal_initialized: if an event is triggered in such cases, we will
 * temporarily delay the event and let it be immediately triggered at the next
 * allocation call.
 */
#define C(counter) \
JEMALLOC_ALWAYS_INLINE void \
counter##_set(tsd_t *tsd, uint64_t v) { \
	*tsd_##counter##p_get(tsd) = v; \
}

ITERATE_OVER_ALL_COUNTERS
#undef C

/*
 * For generating _event_wait getter / setter functions for each individual
 * event.
 */
#undef E

/*
 * The malloc and free fastpath getters -- use the unsafe getters since tsd may
 * be non-nominal, in which case the fast_threshold will be set to 0.  This
 * allows checking for events and tsd non-nominal in a single branch.
 *
 * Note that these can only be used on the fastpath.
 */
JEMALLOC_ALWAYS_INLINE void
te_malloc_fastpath_ctx(tsd_t *tsd, uint64_t *allocated, uint64_t *threshold) {
	*allocated = *tsd_thread_allocatedp_get_unsafe(tsd);
	*threshold = *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd);
	assert(*threshold <= TE_NEXT_EVENT_FAST_MAX);
}

JEMALLOC_ALWAYS_INLINE void
te_free_fastpath_ctx(tsd_t *tsd, uint64_t *deallocated, uint64_t *threshold) {
	/* Unsafe getters since this may happen before tsd_init. */
	*deallocated = *tsd_thread_deallocatedp_get_unsafe(tsd);
	*threshold = *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd);
	assert(*threshold <= TE_NEXT_EVENT_FAST_MAX);
}

JEMALLOC_ALWAYS_INLINE bool
te_ctx_is_alloc(te_ctx_t *ctx) {
	return ctx->is_alloc;
}

JEMALLOC_ALWAYS_INLINE uint64_t
te_ctx_current_bytes_get(te_ctx_t *ctx) {
	return *ctx->current;
}

JEMALLOC_ALWAYS_INLINE void
te_ctx_current_bytes_set(te_ctx_t *ctx, uint64_t v) {
	*ctx->current = v;
}

JEMALLOC_ALWAYS_INLINE uint64_t
te_ctx_last_event_get(te_ctx_t *ctx) {
	return *ctx->last_event;
}

JEMALLOC_ALWAYS_INLINE void
te_ctx_last_event_set(te_ctx_t *ctx, uint64_t v) {
	*ctx->last_event = v;
}

/* Below 3 for next_event_fast. */
JEMALLOC_ALWAYS_INLINE uint64_t
te_ctx_next_event_fast_get(te_ctx_t *ctx) {
	uint64_t v = *ctx->next_event_fast;
	assert(v <= TE_NEXT_EVENT_FAST_MAX);
	return v;
}

JEMALLOC_ALWAYS_INLINE void
te_ctx_next_event_fast_set(te_ctx_t *ctx, uint64_t v) {
	assert(v <= TE_NEXT_EVENT_FAST_MAX);
	*ctx->next_event_fast = v;
}

JEMALLOC_ALWAYS_INLINE void
te_next_event_fast_set_non_nominal(tsd_t *tsd) {
	/*
	 * Set the fast thresholds to zero when tsd is non-nominal.  Use the
	 * unsafe getter as this may get called during tsd init and clean up.
	 */
	*tsd_thread_allocated_next_event_fastp_get_unsafe(tsd) = 0;
	*tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) = 0;
}

/* For next_event.  Setter also updates the fast threshold. */
JEMALLOC_ALWAYS_INLINE uint64_t
te_ctx_next_event_get(te_ctx_t *ctx) {
	return *ctx->next_event;
}

JEMALLOC_ALWAYS_INLINE void
te_ctx_next_event_set(tsd_t *tsd, te_ctx_t *ctx, uint64_t v) {
	*ctx->next_event = v;
	te_recompute_fast_threshold(tsd);
}

/*
 * The function checks in debug mode whether the thread event counters are in
 * a consistent state, which forms the invariants before and after each round
 * of thread event handling that we can rely on and need to promise.
 * The invariants are only temporarily violated in the middle of
 * te_event_advance() if an event is triggered (the te_event_trigger() call at
 * the end will restore the invariants).
 */
JEMALLOC_ALWAYS_INLINE void
te_assert_invariants(tsd_t *tsd) {
	if (config_debug) {
		te_assert_invariants_debug(tsd);
	}
}

JEMALLOC_ALWAYS_INLINE void
te_ctx_get(tsd_t *tsd, te_ctx_t *ctx, bool is_alloc) {
	ctx->is_alloc = is_alloc;
	if (is_alloc) {
		ctx->current = tsd_thread_allocatedp_get(tsd);
		ctx->last_event = tsd_thread_allocated_last_eventp_get(tsd);
		ctx->next_event = tsd_thread_allocated_next_eventp_get(tsd);
		ctx->next_event_fast =
		    tsd_thread_allocated_next_event_fastp_get(tsd);
	} else {
		ctx->current = tsd_thread_deallocatedp_get(tsd);
		ctx->last_event = tsd_thread_deallocated_last_eventp_get(tsd);
		ctx->next_event = tsd_thread_deallocated_next_eventp_get(tsd);
		ctx->next_event_fast =
		    tsd_thread_deallocated_next_event_fastp_get(tsd);
	}
}

/*
 * The lookahead functionality facilitates events to be able to lookahead, i.e.
 * without touching the event counters, to determine whether an event would be
 * triggered.  The event counters are not advanced until the end of the
 * allocation / deallocation calls, so the lookahead can be useful if some
 * preparation work for some event must be done early in the allocation /
 * deallocation calls.
 *
 * Currently only the profiling sampling event needs the lookahead
 * functionality, so we don't yet define general purpose lookahead functions.
 *
 * Surplus is a terminology referring to the amount of bytes beyond what's
 * needed for triggering an event, which can be a useful quantity to have in
 * general when lookahead is being called.
 */

JEMALLOC_ALWAYS_INLINE bool
te_prof_sample_event_lookahead_surplus(tsd_t *tsd, size_t usize,
    size_t *surplus) {
	if (surplus != NULL) {
		/*
		 * This is a dead store: the surplus will be overwritten before
		 * any read.  The initialization suppresses compiler warnings.
		 * Meanwhile, using SIZE_MAX to initialize is good for
		 * debugging purpose, because a valid surplus value is strictly
		 * less than usize, which is at most SIZE_MAX.
		 */
		*surplus = SIZE_MAX;
	}
	if (unlikely(!tsd_nominal(tsd) || tsd_reentrancy_level_get(tsd) > 0)) {
		return false;
	}
	/* The subtraction is intentionally susceptible to underflow. */
	uint64_t accumbytes = tsd_thread_allocated_get(tsd) + usize -
	    tsd_thread_allocated_last_event_get(tsd);
	uint64_t sample_wait = tsd_prof_sample_event_wait_get(tsd);
	if (accumbytes < sample_wait) {
		return false;
	}
	assert(accumbytes - sample_wait < (uint64_t)usize);
	if (surplus != NULL) {
		*surplus = (size_t)(accumbytes - sample_wait);
	}
	return true;
}

JEMALLOC_ALWAYS_INLINE bool
te_prof_sample_event_lookahead(tsd_t *tsd, size_t usize) {
	return te_prof_sample_event_lookahead_surplus(tsd, usize, NULL);
}

JEMALLOC_ALWAYS_INLINE void
te_event_advance(tsd_t *tsd, size_t usize, bool is_alloc) {
	te_assert_invariants(tsd);

	te_ctx_t ctx;
	te_ctx_get(tsd, &ctx, is_alloc);

	uint64_t bytes_before = te_ctx_current_bytes_get(&ctx);
	te_ctx_current_bytes_set(&ctx, bytes_before + usize);

	/* The subtraction is intentionally susceptible to underflow. */
	if (likely(usize < te_ctx_next_event_get(&ctx) - bytes_before)) {
		te_assert_invariants(tsd);
	} else {
		te_event_trigger(tsd, &ctx);
	}
}

JEMALLOC_ALWAYS_INLINE void
thread_dalloc_event(tsd_t *tsd, size_t usize) {
	te_event_advance(tsd, usize, false);
}

JEMALLOC_ALWAYS_INLINE void
thread_alloc_event(tsd_t *tsd, size_t usize) {
	te_event_advance(tsd, usize, true);
}

#endif /* JEMALLOC_INTERNAL_THREAD_EVENT_H */
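The "subtraction is intentionally susceptible to underflow" comments above rely on modular uint64_t arithmetic: with free-running byte counters, the distance to the next event stays correct modulo 2^64 even across a counter wrap, so a single comparison works. A small standalone illustration (names invented, not jemalloc code):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool
    would_stay_on_fast_path(uint64_t current, uint64_t next_event, uint64_t usize) {
    	return usize < next_event - current;   /* modular distance */
    }

    int main(void) {
    	uint64_t current = UINT64_MAX - 10;    /* counter about to wrap */
    	uint64_t next_event = 5;               /* already wrapped past zero */
    	/* True distance is 16 bytes; the u64 subtraction yields exactly 16. */
    	assert(would_stay_on_fast_path(current, next_event, 8));
    	assert(!would_stay_on_fast_path(current, next_event, 20));
    	return 0;
    }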
deps/jemalloc/include/jemalloc/internal/ticker.h

 #ifndef JEMALLOC_INTERNAL_TICKER_H
 #define JEMALLOC_INTERNAL_TICKER_H
 
+#include "jemalloc/internal/prng.h"
 #include "jemalloc/internal/util.h"
 
 /**

@@ -10,11 +11,11 @@
  * have occurred with a call to ticker_ticks), which will return true (and reset
  * the counter) if the countdown hit zero.
  */
 
-typedef struct {
+typedef struct ticker_s ticker_t;
+struct ticker_s {
 	int32_t tick;
 	int32_t nticks;
-} ticker_t;
+};
 
 static inline void
 ticker_init(ticker_t *ticker, int32_t nticks) {

@@ -88,4 +89,87 @@ ticker_trytick(ticker_t *ticker) {
 	return false;
 }
 
+/*
+ * The ticker_geom_t is much like the ticker_t, except that instead of ticker
+ * having a constant countdown, it has an approximate one; each tick has
+ * approximately a 1/nticks chance of triggering the count.
+ *
+ * The motivation is in triggering arena decay.  With a naive strategy, each
+ * thread would maintain a ticker per arena, and check if decay is necessary
+ * each time that the arena's ticker fires.  This has two costs:
+ * - Since under reasonable assumptions both threads and arenas can scale
+ *   linearly with the number of CPUs, maintaining per-arena data in each thread
+ *   scales quadratically with the number of CPUs.
+ * - These tickers are often a cache miss down tcache flush pathways.
+ *
+ * By giving each tick a 1/nticks chance of firing, we still maintain the same
+ * average number of ticks-until-firing per arena, with only a single ticker's
+ * worth of metadata.
+ */
+
+/* See ticker.c for an explanation of these constants. */
+#define TICKER_GEOM_NBITS 6
+#define TICKER_GEOM_MUL 61
+extern const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS];
+
+/* Not actually any different from ticker_t; just for type safety. */
+typedef struct ticker_geom_s ticker_geom_t;
+struct ticker_geom_s {
+	int32_t tick;
+	int32_t nticks;
+};
+
+/*
+ * Just pick the average delay for the first counter.  We're more concerned with
+ * the behavior over long periods of time rather than the exact timing of the
+ * initial ticks.
+ */
+#define TICKER_GEOM_INIT(nticks) {nticks, nticks}
+
+static inline void
+ticker_geom_init(ticker_geom_t *ticker, int32_t nticks) {
+	/*
+	 * Make sure there's no overflow possible.  This shouldn't really be a
+	 * problem for reasonable nticks choices, which are all static and
+	 * relatively small.
+	 */
+	assert((uint64_t)nticks * (uint64_t)255 / (uint64_t)TICKER_GEOM_MUL
+	    <= (uint64_t)INT32_MAX);
+	ticker->tick = nticks;
+	ticker->nticks = nticks;
+}
+
+static inline int32_t
+ticker_geom_read(const ticker_geom_t *ticker) {
+	return ticker->tick;
+}
+
+/* Same deal as above. */
+#if defined(__GNUC__) && !defined(__clang__) \
+    && (defined(__x86_64__) || defined(__i386__))
+JEMALLOC_NOINLINE
+#endif
+static bool
+ticker_geom_fixup(ticker_geom_t *ticker, uint64_t *prng_state) {
+	uint64_t idx = prng_lg_range_u64(prng_state, TICKER_GEOM_NBITS);
+	ticker->tick = (uint32_t)(
+	    (uint64_t)ticker->nticks * (uint64_t)ticker_geom_table[idx]
+	    / (uint64_t)TICKER_GEOM_MUL);
+	return true;
+}
+
+static inline bool
+ticker_geom_ticks(ticker_geom_t *ticker, uint64_t *prng_state, int32_t nticks) {
+	ticker->tick -= nticks;
+	if (unlikely(ticker->tick < 0)) {
+		return ticker_geom_fixup(ticker, prng_state);
+	}
+	return false;
+}
+
+static inline bool
+ticker_geom_tick(ticker_geom_t *ticker, uint64_t *prng_state) {
+	return ticker_geom_ticks(ticker, prng_state, 1);
+}
+
 #endif /* JEMALLOC_INTERNAL_TICKER_H */
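The ticker_geom comment above claims that firing each tick with probability 1/nticks preserves the same average ticks-until-firing as a fixed countdown. A quick standalone Monte Carlo check of that claim, using plain rand() rather than jemalloc's PRNG and lookup table:

    #include <stdio.h>
    #include <stdlib.h>

    /* Each tick fires independently with probability 1/nticks. */
    static int ticks_until_fire(int nticks) {
    	int t = 1;
    	while (rand() % nticks != 0) {
    		t++;
    	}
    	return t;
    }

    int main(void) {
    	const int nticks = 1000, trials = 200000;
    	long long total = 0;
    	srand(12345);
    	for (int i = 0; i < trials; i++) {
    		total += ticks_until_fire(nticks);
    	}
    	/* Mean of the geometric distribution is nticks, so this prints ~1000. */
    	printf("average ticks until firing: %.1f\n", (double)total / trials);
    	return 0;
    }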
deps/jemalloc/include/jemalloc/internal/tsd.h

 #ifndef JEMALLOC_INTERNAL_TSD_H
 #define JEMALLOC_INTERNAL_TSD_H
 
+#include "jemalloc/internal/activity_callback.h"
 #include "jemalloc/internal/arena_types.h"
 #include "jemalloc/internal/assert.h"
 #include "jemalloc/internal/bin_types.h"
 #include "jemalloc/internal/jemalloc_internal_externs.h"
+#include "jemalloc/internal/peak.h"
 #include "jemalloc/internal/prof_types.h"
 #include "jemalloc/internal/ql.h"
 #include "jemalloc/internal/rtree_tsd.h"

@@ -15,39 +17,30 @@
 /*
  * Thread-Specific-Data layout
- * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
- * s: state
- * e: tcache_enabled
- * m: thread_allocated (config_stats)
- * f: thread_deallocated (config_stats)
- * p: prof_tdata (config_prof)
- * c: rtree_ctx (rtree cache accessed on deallocation)
- * t: tcache
- * --- data not accessed on tcache fast path: arena-related fields ---
- * d: arenas_tdata_bypass
- * r: reentrancy_level
- * x: narenas_tdata
- * i: iarena
- * a: arena
- * o: arenas_tdata
  *
- * Loading TSD data is on the critical path of basically all malloc operations.
- * In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
- * Use a compact layout to reduce cache footprint.
- * +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+
- * |---------------------------- 1st cacheline ----------------------------|
- * | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] |
- * |---------------------------- 2nd cacheline ----------------------------|
- * | [c * 64 ........ ........ ........ ........ ........ ........ .......] |
- * |---------------------------- 3nd cacheline ----------------------------|
- * | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
- * +-------------------------------------------------------------------------+
- * Note: the entire tcache is embedded into TSD and spans multiple cachelines.
- *
- * The last 3 members (i, a and o) before tcache isn't really needed on tcache
- * fast path.  However we have a number of unused tcache bins and witnesses
- * (never touched unless config_debug) at the end of tcache, so we place them
- * there to avoid breaking the cachelines and possibly paging in an extra page.
+ * At least some thread-local data gets touched on the fast-path of almost all
+ * malloc operations.  But much of it is only necessary down slow-paths, or
+ * testing.  We want to colocate the fast-path data so that it can live on the
+ * same cacheline if possible.  So we define three tiers of hotness:
+ * TSD_DATA_FAST: Touched on the alloc/dalloc fast paths.
+ * TSD_DATA_SLOW: Touched down slow paths.  "Slow" here is sort of general;
+ *     there are "semi-slow" paths like "not a sized deallocation, but can still
+ *     live in the tcache".  We'll want to keep these closer to the fast-path
+ *     data.
+ * TSD_DATA_SLOWER: Only touched in test or debug modes, or not touched at all.
+ *
+ * An additional concern is that the larger tcache bins won't be used (we have a
+ * bin per size class, but by default only cache relatively small objects).  So
+ * the earlier bins are in the TSD_DATA_FAST tier, but the later ones are in the
+ * TSD_DATA_SLOWER tier.
+ *
+ * As a result of all this, we put the slow data first, then the fast data, then
+ * the slower data, while keeping the tcache as the last element of the fast
+ * data (so that the fast -> slower transition happens midway through the
+ * tcache).  While we don't yet play alignment tricks to guarantee it, this
+ * increases our odds of getting some cache/page locality on fast paths.
 */
 
 #ifdef JEMALLOC_JET
 typedef void (*test_callback_t)(int *);
 #  define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10

@@ -60,50 +53,112 @@ typedef void (*test_callback_t)(int *);
 #  define MALLOC_TEST_TSD_INITIALIZER
 #endif
 
-/*  O(name, type, nullable type */
-#define MALLOC_TSD \
-    O(tcache_enabled, bool, bool) \
-    O(arenas_tdata_bypass, bool, bool) \
-    O(reentrancy_level, int8_t, int8_t) \
-    O(narenas_tdata, uint32_t, uint32_t) \
-    O(offset_state, uint64_t, uint64_t) \
-    O(thread_allocated, uint64_t, uint64_t) \
-    O(thread_deallocated, uint64_t, uint64_t) \
-    O(bytes_until_sample, int64_t, int64_t) \
-    O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
-    O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
-    O(iarena, arena_t *, arena_t *) \
-    O(arena, arena_t *, arena_t *) \
-    O(arenas_tdata, arena_tdata_t *, arena_tdata_t *) \
-    O(binshards, tsd_binshards_t, tsd_binshards_t) \
-    O(tcache, tcache_t, tcache_t) \
-    O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
-    MALLOC_TEST_TSD
+typedef ql_elm(tsd_t) tsd_link_t;
+
+/*  O(name, type, nullable type) */
+#define TSD_DATA_SLOW \
+    O(tcache_enabled, bool, bool) \
+    O(reentrancy_level, int8_t, int8_t) \
+    O(thread_allocated_last_event, uint64_t, uint64_t) \
+    O(thread_allocated_next_event, uint64_t, uint64_t) \
+    O(thread_deallocated_last_event, uint64_t, uint64_t) \
+    O(thread_deallocated_next_event, uint64_t, uint64_t) \
+    O(tcache_gc_event_wait, uint64_t, uint64_t) \
+    O(tcache_gc_dalloc_event_wait, uint64_t, uint64_t) \
+    O(prof_sample_event_wait, uint64_t, uint64_t) \
+    O(prof_sample_last_event, uint64_t, uint64_t) \
+    O(stats_interval_event_wait, uint64_t, uint64_t) \
+    O(stats_interval_last_event, uint64_t, uint64_t) \
+    O(peak_alloc_event_wait, uint64_t, uint64_t) \
+    O(peak_dalloc_event_wait, uint64_t, uint64_t) \
+    O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
+    O(prng_state, uint64_t, uint64_t) \
+    O(san_extents_until_guard_small, uint64_t, uint64_t) \
+    O(san_extents_until_guard_large, uint64_t, uint64_t) \
+    O(iarena, arena_t *, arena_t *) \
+    O(arena, arena_t *, arena_t *) \
+    O(arena_decay_ticker, ticker_geom_t, ticker_geom_t) \
+    O(sec_shard, uint8_t, uint8_t) \
+    O(binshards, tsd_binshards_t, tsd_binshards_t) \
+    O(tsd_link, tsd_link_t, tsd_link_t) \
+    O(in_hook, bool, bool) \
+    O(peak, peak_t, peak_t) \
+    O(activity_callback_thunk, activity_callback_thunk_t, \
+        activity_callback_thunk_t) \
+    O(tcache_slow, tcache_slow_t, tcache_slow_t) \
+    O(rtree_ctx, rtree_ctx_t, rtree_ctx_t)
+
+#define TSD_DATA_SLOW_INITIALIZER \
+    /* tcache_enabled */ TCACHE_ENABLED_ZERO_INITIALIZER, \
+    /* reentrancy_level */ 0, \
+    /* thread_allocated_last_event */ 0, \
+    /* thread_allocated_next_event */ 0, \
+    /* thread_deallocated_last_event */ 0, \
+    /* thread_deallocated_next_event */ 0, \
+    /* tcache_gc_event_wait */ 0, \
+    /* tcache_gc_dalloc_event_wait */ 0, \
+    /* prof_sample_event_wait */ 0, \
+    /* prof_sample_last_event */ 0, \
+    /* stats_interval_event_wait */ 0, \
+    /* stats_interval_last_event */ 0, \
+    /* peak_alloc_event_wait */ 0, \
+    /* peak_dalloc_event_wait */ 0, \
+    /* prof_tdata */ NULL, \
+    /* prng_state */ 0, \
+    /* san_extents_until_guard_small */ 0, \
+    /* san_extents_until_guard_large */ 0, \
+    /* iarena */ NULL, \
+    /* arena */ NULL, \
+    /* arena_decay_ticker */ \
+        TICKER_GEOM_INIT(ARENA_DECAY_NTICKS_PER_UPDATE), \
+    /* sec_shard */ (uint8_t)-1, \
+    /* binshards */ TSD_BINSHARDS_ZERO_INITIALIZER, \
+    /* tsd_link */ {NULL}, \
+    /* in_hook */ false, \
+    /* peak */ PEAK_INITIALIZER, \
+    /* activity_callback_thunk */ \
+        ACTIVITY_CALLBACK_THUNK_INITIALIZER, \
+    /* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER, \
+    /* rtree_ctx */ RTREE_CTX_INITIALIZER,
+
+/*  O(name, type, nullable type) */
+#define TSD_DATA_FAST \
+    O(thread_allocated, uint64_t, uint64_t) \
+    O(thread_allocated_next_event_fast, uint64_t, uint64_t) \
+    O(thread_deallocated, uint64_t, uint64_t) \
+    O(thread_deallocated_next_event_fast, uint64_t, uint64_t) \
+    O(tcache, tcache_t, tcache_t)
+
+#define TSD_DATA_FAST_INITIALIZER \
+    /* thread_allocated */ 0, \
+    /* thread_allocated_next_event_fast */ 0, \
+    /* thread_deallocated */ 0, \
+    /* thread_deallocated_next_event_fast */ 0, \
+    /* tcache */ TCACHE_ZERO_INITIALIZER,
+
+/*  O(name, type, nullable type) */
+#define TSD_DATA_SLOWER \
+    O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
+    MALLOC_TEST_TSD
+
+#define TSD_DATA_SLOWER_INITIALIZER \
+    /* witness */ WITNESS_TSD_INITIALIZER \
+    /* test data */ MALLOC_TEST_TSD_INITIALIZER
 
 #define TSD_INITIALIZER { \
-    ATOMIC_INIT(tsd_state_uninitialized), \
-    TCACHE_ENABLED_ZERO_INITIALIZER, \
-    false, \
-    0, \
-    0, \
-    0, \
-    0, \
-    0, \
-    0, \
-    NULL, \
-    RTREE_CTX_ZERO_INITIALIZER, \
-    NULL, \
-    NULL, \
-    NULL, \
-    TSD_BINSHARDS_ZERO_INITIALIZER, \
-    TCACHE_ZERO_INITIALIZER, \
-    WITNESS_TSD_INITIALIZER \
-    MALLOC_TEST_TSD_INITIALIZER \
+    TSD_DATA_SLOW_INITIALIZER \
+    /* state */ ATOMIC_INIT(tsd_state_uninitialized), \
+    TSD_DATA_FAST_INITIALIZER \
+    TSD_DATA_SLOWER_INITIALIZER \
 }
 
 #if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
+void _malloc_tsd_cleanup_register(bool (*f)(void));
+#endif
+
 void *malloc_tsd_malloc(size_t size);
 void malloc_tsd_dalloc(void *wrapper);
-void malloc_tsd_cleanup_register(bool (*f)(void));
 tsd_t *malloc_tsd_boot0(void);
 void malloc_tsd_boot1(void);
 void tsd_cleanup(void *arg);

@@ -189,14 +244,17 @@ struct tsd_s {
 	 * setters below.
 	 */
+#define O(n, t, nt) \
+	t TSD_MANGLE(n);
+
+	TSD_DATA_SLOW
 	/*
 	 * We manually limit the state to just a single byte.  Unless the 8-bit
 	 * atomics are unavailable (which is rare).
 	 */
 	tsd_state_t state;
-#define O(n, t, nt) \
-	t TSD_MANGLE(n);
-	MALLOC_TSD
+	TSD_DATA_FAST
+	TSD_DATA_SLOWER
 #undef O
 };

@@ -262,7 +320,9 @@ JEMALLOC_ALWAYS_INLINE t * \
 tsd_##n##p_get_unsafe(tsd_t *tsd) { \
 	return &tsd->TSD_MANGLE(n); \
 }
-MALLOC_TSD
+TSD_DATA_SLOW
+TSD_DATA_FAST
+TSD_DATA_SLOWER
 #undef O
 
 /* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */

@@ -281,7 +341,9 @@ tsd_##n##p_get(tsd_t *tsd) { \
 	    state == tsd_state_minimal_initialized); \
 	return tsd_##n##p_get_unsafe(tsd); \
 }
-MALLOC_TSD
+TSD_DATA_SLOW
+TSD_DATA_FAST
+TSD_DATA_SLOWER
 #undef O
 
 /*

@@ -297,7 +359,9 @@ tsdn_##n##p_get(tsdn_t *tsdn) { \
 	tsd_t *tsd = tsdn_tsd(tsdn); \
 	return (nt *)tsd_##n##p_get(tsd); \
 }
-MALLOC_TSD
+TSD_DATA_SLOW
+TSD_DATA_FAST
+TSD_DATA_SLOWER
 #undef O
 
 /* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */

@@ -306,7 +370,9 @@ JEMALLOC_ALWAYS_INLINE t \
 tsd_##n##_get(tsd_t *tsd) { \
 	return *tsd_##n##p_get(tsd); \
 }
-MALLOC_TSD
+TSD_DATA_SLOW
+TSD_DATA_FAST
+TSD_DATA_SLOWER
 #undef O
 
 /* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */

@@ -317,7 +383,9 @@ tsd_##n##_set(tsd_t *tsd, t val) { \
 	    tsd_state_get(tsd) != tsd_state_minimal_initialized); \
 	*tsd_##n##p_get(tsd) = val; \
 }
-MALLOC_TSD
+TSD_DATA_SLOW
+TSD_DATA_FAST
+TSD_DATA_SLOWER
 #undef O
 
 JEMALLOC_ALWAYS_INLINE void

@@ -382,7 +450,10 @@ tsd_fetch(void) {
 
 static inline bool
 tsd_nominal(tsd_t *tsd) {
-	return (tsd_state_get(tsd) <= tsd_state_nominal_max);
+	bool nominal = tsd_state_get(tsd) <= tsd_state_nominal_max;
+	assert(nominal || tsd_reentrancy_level_get(tsd) > 0);
+
+	return nominal;
 }
 
 JEMALLOC_ALWAYS_INLINE tsdn_t *

@@ -412,4 +483,36 @@ tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
 	return tsd_rtree_ctx(tsdn_tsd(tsdn));
 }
 
+static inline bool
+tsd_state_nocleanup(tsd_t *tsd) {
+	return tsd_state_get(tsd) == tsd_state_reincarnated ||
+	    tsd_state_get(tsd) == tsd_state_minimal_initialized;
+}
+
+/*
+ * These "raw" tsd reentrancy functions don't have any debug checking to make
+ * sure that we're not touching arena 0.  Better is to call pre_reentrancy and
+ * post_reentrancy if this is possible.
+ */
+static inline void
+tsd_pre_reentrancy_raw(tsd_t *tsd) {
+	bool fast = tsd_fast(tsd);
+	assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
+	++*tsd_reentrancy_levelp_get(tsd);
+	if (fast) {
+		/* Prepare slow path for reentrancy. */
+		tsd_slow_update(tsd);
+		assert(tsd_state_get(tsd) == tsd_state_nominal_slow);
+	}
+}
+
+static inline void
+tsd_post_reentrancy_raw(tsd_t *tsd) {
+	int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
+	assert(*reentrancy_level > 0);
+	if (--*reentrancy_level == 0) {
+		tsd_slow_update(tsd);
+	}
+}
+
 #endif /* JEMALLOC_INTERNAL_TSD_H */
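The TSD_DATA_* lists above are X-macros: a single field list is expanded once to lay out struct tsd_s and once per accessor family. A miniature, self-contained model of that technique (field names invented for the demo):

    #include <stdint.h>
    #include <stdio.h>

    /* One field list, reused for every expansion. */
    #define DEMO_TSD_FIELDS \
        O(thread_allocated, uint64_t) \
        O(reentrancy_level, int8_t)

    /* Expansion 1: lay out the struct. */
    typedef struct {
    #define O(name, type) type name;
    	DEMO_TSD_FIELDS
    #undef O
    } demo_tsd_t;

    /* Expansion 2: generate one getter per field. */
    #define O(name, type) \
    	static inline type demo_##name##_get(const demo_tsd_t *tsd) { \
    		return tsd->name; \
    	}
    DEMO_TSD_FIELDS
    #undef O

    int main(void) {
    	demo_tsd_t tsd = {.thread_allocated = 42, .reentrancy_level = 1};
    	printf("%llu %d\n",
    	    (unsigned long long)demo_thread_allocated_get(&tsd),
    	    (int)demo_reentrancy_level_get(&tsd));
    	return 0;
    }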
deps/jemalloc/include/jemalloc/internal/tsd_generic.h

@@ -52,6 +52,9 @@ tsd_cleanup_wrapper(void *arg) {
 
 JEMALLOC_ALWAYS_INLINE void
 tsd_wrapper_set(tsd_wrapper_t *wrapper) {
+	if (unlikely(!tsd_booted)) {
+		return;
+	}
 	if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
 		malloc_write("<jemalloc>: Error setting TSD\n");
 		abort();

@@ -60,7 +63,13 @@ tsd_wrapper_set(tsd_wrapper_t *wrapper) {
 
 JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
 tsd_wrapper_get(bool init) {
-	tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);
+	tsd_wrapper_t *wrapper;
+
+	if (unlikely(!tsd_booted)) {
+		return &tsd_boot_wrapper;
+	}
+
+	wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);
 
 	if (init && unlikely(wrapper == NULL)) {
 		tsd_init_block_t block;

@@ -91,11 +100,21 @@ tsd_wrapper_get(bool init) {
 
 JEMALLOC_ALWAYS_INLINE bool
 tsd_boot0(void) {
+	tsd_wrapper_t *wrapper;
+	tsd_init_block_t block;
+
+	wrapper = (tsd_wrapper_t *)
+	    tsd_init_check_recursion(&tsd_init_head, &block);
+	if (wrapper) {
+		return false;
+	}
+	block.data = &tsd_boot_wrapper;
 	if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) {
 		return true;
 	}
-	tsd_wrapper_set(&tsd_boot_wrapper);
 	tsd_booted = true;
+	tsd_wrapper_set(&tsd_boot_wrapper);
+	tsd_init_finish(&tsd_init_head, &block);
 	return false;
 }
deps/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h

@@ -21,7 +21,7 @@ tsd_cleanup_wrapper(void) {
 JEMALLOC_ALWAYS_INLINE bool
 tsd_boot0(void) {
-	malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
+	_malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
 	tsd_booted = true;
 	return false;
 }
deps/jemalloc/include/jemalloc/internal/tsd_types.h

 #ifndef JEMALLOC_INTERNAL_TSD_TYPES_H
 #define JEMALLOC_INTERNAL_TSD_TYPES_H
 
-#define MALLOC_TSD_CLEANUPS_MAX 2
+#define MALLOC_TSD_CLEANUPS_MAX 4
 
 typedef struct tsd_s tsd_t;
 typedef struct tsdn_s tsdn_t;
deps/jemalloc/include/jemalloc/internal/tsd_win.h

@@ -72,7 +72,7 @@ tsd_boot0(void) {
 	if (tsd_tsd == TLS_OUT_OF_INDEXES) {
 		return true;
 	}
-	malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
+	_malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
 	tsd_wrapper_set(&tsd_boot_wrapper);
 	tsd_booted = true;
 	return false;
deps/jemalloc/include/jemalloc/internal/typed_list.h (new file, mode 100644)

#ifndef JEMALLOC_INTERNAL_TYPED_LIST_H
#define JEMALLOC_INTERNAL_TYPED_LIST_H

/*
 * This wraps the ql module to implement a list class in a way that's a little
 * bit easier to use; it handles ql_elm_new calls and provides type safety.
 */

#define TYPED_LIST(list_type, el_type, linkage) \
typedef struct { \
	ql_head(el_type) head; \
} list_type##_t; \
static inline void \
list_type##_init(list_type##_t *list) { \
	ql_new(&list->head); \
} \
static inline el_type * \
list_type##_first(const list_type##_t *list) { \
	return ql_first(&list->head); \
} \
static inline el_type * \
list_type##_last(const list_type##_t *list) { \
	return ql_last(&list->head, linkage); \
} \
static inline void \
list_type##_append(list_type##_t *list, el_type *item) { \
	ql_elm_new(item, linkage); \
	ql_tail_insert(&list->head, item, linkage); \
} \
static inline void \
list_type##_prepend(list_type##_t *list, el_type *item) { \
	ql_elm_new(item, linkage); \
	ql_head_insert(&list->head, item, linkage); \
} \
static inline void \
list_type##_replace(list_type##_t *list, el_type *to_remove, \
    el_type *to_insert) { \
	ql_elm_new(to_insert, linkage); \
	ql_after_insert(to_remove, to_insert, linkage); \
	ql_remove(&list->head, to_remove, linkage); \
} \
static inline void \
list_type##_remove(list_type##_t *list, el_type *item) { \
	ql_remove(&list->head, item, linkage); \
} \
static inline bool \
list_type##_empty(list_type##_t *list) { \
	return ql_empty(&list->head); \
} \
static inline void \
list_type##_concat(list_type##_t *list_a, list_type##_t *list_b) { \
	ql_concat(&list_a->head, &list_b->head, linkage); \
}

#endif /* JEMALLOC_INTERNAL_TYPED_LIST_H */
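A hypothetical usage sketch of TYPED_LIST follows. The element type, field name, and list name are invented, and it assumes jemalloc's internal ql.h and typed_list.h are on the include path; the wrappers used are exactly the ones generated by the macro above.

    #include <assert.h>
    #include <stdbool.h>

    #include "jemalloc/internal/ql.h"
    #include "jemalloc/internal/typed_list.h"

    typedef struct node_s node_t;
    struct node_s {
    	int value;
    	ql_elm(node_t) link;   /* intrusive linkage consumed by the list */
    };

    /* Generates node_list_t plus node_list_init/_append/_first/... wrappers. */
    TYPED_LIST(node_list, node_t, link)

    int main(void) {
    	node_list_t list;
    	node_t a = {.value = 1}, b = {.value = 2};

    	node_list_init(&list);
    	node_list_append(&list, &a);
    	node_list_prepend(&list, &b);
    	assert(node_list_first(&list) == &b);
    	assert(node_list_last(&list) == &a);
    	node_list_remove(&list, &a);
    	assert(!node_list_empty(&list));
    	return 0;
    }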
deps/jemalloc/include/jemalloc/internal/util.h

@@ -62,6 +62,62 @@ get_errno(void) {
 #endif
 }
 
+JEMALLOC_ALWAYS_INLINE void
+util_assume(bool b) {
+	if (!b) {
+		unreachable();
+	}
+}
+
+/* ptr should be valid. */
+JEMALLOC_ALWAYS_INLINE void
+util_prefetch_read(void *ptr) {
+	/*
+	 * This should arguably be a config check; but any version of GCC so old
+	 * that it doesn't support __builtin_prefetch is also too old to build
+	 * jemalloc.
+	 */
+#ifdef __GNUC__
+	if (config_debug) {
+		/* Enforce the "valid ptr" requirement. */
+		*(volatile char *)ptr;
+	}
+	__builtin_prefetch(ptr, /* read or write */ 0, /* locality hint */ 3);
+#else
+	*(volatile char *)ptr;
+#endif
+}
+
+JEMALLOC_ALWAYS_INLINE void
+util_prefetch_write(void *ptr) {
+#ifdef __GNUC__
+	if (config_debug) {
+		*(volatile char *)ptr;
+	}
+	/*
+	 * The only difference from the read variant is that this has a 1 as the
+	 * second argument (the write hint).
+	 */
+	__builtin_prefetch(ptr, 1, 3);
+#else
+	*(volatile char *)ptr;
+#endif
+}
+
+JEMALLOC_ALWAYS_INLINE void
+util_prefetch_read_range(void *ptr, size_t sz) {
+	for (size_t i = 0; i < sz; i += CACHELINE) {
+		util_prefetch_read((void *)((uintptr_t)ptr + i));
+	}
+}
+
+JEMALLOC_ALWAYS_INLINE void
+util_prefetch_write_range(void *ptr, size_t sz) {
+	for (size_t i = 0; i < sz; i += CACHELINE) {
+		util_prefetch_write((void *)((uintptr_t)ptr + i));
+	}
+}
+
 #undef UTIL_INLINE
 
 #endif /* JEMALLOC_INTERNAL_UTIL_H */
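The util_prefetch_*_range helpers simply walk the buffer one cache line at a time. Below is a standalone sketch of the same idiom using only __builtin_prefetch, with CACHELINE assumed to be 64 bytes (a common x86-64 value); it is an illustration, not jemalloc code.

    #include <stddef.h>
    #include <stdint.h>

    #define DEMO_CACHELINE 64   /* assumed cache line size */

    /* Warm a range for reading before a pass over it. */
    static inline void
    prefetch_read_range(const void *ptr, size_t sz) {
    	for (size_t i = 0; i < sz; i += DEMO_CACHELINE) {
    #ifdef __GNUC__
    		__builtin_prefetch((const char *)ptr + i,
    		    0 /* read */, 3 /* high temporal locality */);
    #endif
    	}
    }

    /* Example: prefetch a buffer, then sum its bytes. */
    static uint64_t
    sum_bytes(const unsigned char *buf, size_t len) {
    	prefetch_read_range(buf, len);
    	uint64_t total = 0;
    	for (size_t i = 0; i < len; i++) {
    		total += buf[i];
    	}
    	return total;
    }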
deps/jemalloc/include/jemalloc/internal/witness.h
View file @
d4439bd4
...
...
@@ -7,60 +7,76 @@
/* LOCK RANKS */
/******************************************************************************/
/*
 * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness
 * machinery.
 */
#define WITNESS_RANK_OMIT 0U
#define WITNESS_RANK_MIN 1U
#define WITNESS_RANK_INIT 1U
#define WITNESS_RANK_CTL 1U
#define WITNESS_RANK_TCACHES 2U
#define WITNESS_RANK_ARENAS 3U
#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U
#define WITNESS_RANK_PROF_DUMP 5U
#define WITNESS_RANK_PROF_BT2GCTX 6U
#define WITNESS_RANK_PROF_TDATAS 7U
#define WITNESS_RANK_PROF_TDATA 8U
#define WITNESS_RANK_PROF_LOG 9U
#define WITNESS_RANK_PROF_GCTX 10U
#define WITNESS_RANK_BACKGROUND_THREAD 11U
/*
 * Used as an argument to witness_assert_depth_to_rank() in order to validate
 * depth excluding non-core locks with lower ranks. Since the rank argument to
 * witness_assert_depth_to_rank() is inclusive rather than exclusive, this
 * definition can have the same value as the minimally ranked core lock.
 */
#define WITNESS_RANK_CORE 12U
#define WITNESS_RANK_DECAY 12U
#define WITNESS_RANK_TCACHE_QL 13U
#define WITNESS_RANK_EXTENT_GROW 14U
#define WITNESS_RANK_EXTENTS 15U
#define WITNESS_RANK_EXTENT_AVAIL 16U
#define WITNESS_RANK_EXTENT_POOL 17U
#define WITNESS_RANK_RTREE 18U
#define WITNESS_RANK_BASE 19U
#define WITNESS_RANK_ARENA_LARGE 20U
#define WITNESS_RANK_HOOK 21U
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_BIN WITNESS_RANK_LEAF
#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF
#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
enum witness_rank_e {
    /*
     * Order matters within this enum listing -- higher valued locks can
     * only be acquired after lower-valued ones. We use the
     * auto-incrementing-ness of enum values to enforce this.
     */

    /*
     * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the
     * witness machinery.
     */
    WITNESS_RANK_OMIT,
    WITNESS_RANK_MIN,
    WITNESS_RANK_INIT = WITNESS_RANK_MIN,
    WITNESS_RANK_CTL,
    WITNESS_RANK_TCACHES,
    WITNESS_RANK_ARENAS,
    WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
    WITNESS_RANK_PROF_DUMP,
    WITNESS_RANK_PROF_BT2GCTX,
    WITNESS_RANK_PROF_TDATAS,
    WITNESS_RANK_PROF_TDATA,
    WITNESS_RANK_PROF_LOG,
    WITNESS_RANK_PROF_GCTX,
    WITNESS_RANK_PROF_RECENT_DUMP,
    WITNESS_RANK_BACKGROUND_THREAD,

    /*
     * Used as an argument to witness_assert_depth_to_rank() in order to
     * validate depth excluding non-core locks with lower ranks. Since the
     * rank argument to witness_assert_depth_to_rank() is inclusive rather
     * than exclusive, this definition can have the same value as the
     * minimally ranked core lock.
     */
    WITNESS_RANK_CORE,
    WITNESS_RANK_DECAY = WITNESS_RANK_CORE,
    WITNESS_RANK_TCACHE_QL,

    WITNESS_RANK_SEC_SHARD,

    WITNESS_RANK_EXTENT_GROW,
    WITNESS_RANK_HPA_SHARD_GROW = WITNESS_RANK_EXTENT_GROW,
    WITNESS_RANK_SAN_BUMP_ALLOC = WITNESS_RANK_EXTENT_GROW,

    WITNESS_RANK_EXTENTS,
    WITNESS_RANK_HPA_SHARD = WITNESS_RANK_EXTENTS,

    WITNESS_RANK_HPA_CENTRAL_GROW,
    WITNESS_RANK_HPA_CENTRAL,

    WITNESS_RANK_EDATA_CACHE,

    WITNESS_RANK_RTREE,
    WITNESS_RANK_BASE,
    WITNESS_RANK_ARENA_LARGE,
    WITNESS_RANK_HOOK,

    WITNESS_RANK_LEAF = 0x1000,
    WITNESS_RANK_BIN = WITNESS_RANK_LEAF,
    WITNESS_RANK_ARENA_STATS = WITNESS_RANK_LEAF,
    WITNESS_RANK_COUNTER_ACCUM = WITNESS_RANK_LEAF,
    WITNESS_RANK_DSS = WITNESS_RANK_LEAF,
    WITNESS_RANK_PROF_ACTIVE = WITNESS_RANK_LEAF,
    WITNESS_RANK_PROF_DUMP_FILENAME = WITNESS_RANK_LEAF,
    WITNESS_RANK_PROF_GDUMP = WITNESS_RANK_LEAF,
    WITNESS_RANK_PROF_NEXT_THR_UID = WITNESS_RANK_LEAF,
    WITNESS_RANK_PROF_RECENT_ALLOC = WITNESS_RANK_LEAF,
    WITNESS_RANK_PROF_STATS = WITNESS_RANK_LEAF,
    WITNESS_RANK_PROF_THREAD_ACTIVE_INIT = WITNESS_RANK_LEAF,
};
typedef enum witness_rank_e witness_rank_t;
/******************************************************************************/
/* PER-WITNESS DATA */
...
...
@@ -72,7 +88,6 @@
#endif
typedef struct witness_s witness_t;
typedef unsigned witness_rank_t;
typedef ql_head(witness_t) witness_list_t;
typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
    void *);
...
...
@@ -82,8 +97,8 @@ struct witness_s {
    const char *name;

    /*
     * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
     * must be acquired in order of increasing rank.
     */
    /*
     * Witness rank, where 0 is lowest and WITNESS_RANK_LEAF is highest.
     * Witnesses must be acquired in order of increasing rank.
     */
    witness_rank_t rank;
...
...
@@ -228,26 +243,13 @@ witness_assert_not_owner(witness_tsdn_t *witness_tsdn,
}
}
static inline void
witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
    witness_rank_t rank_inclusive, unsigned depth) {
    witness_tsd_t *witness_tsd;
    unsigned d;
    witness_list_t *witnesses;
    witness_t *w;

    if (!config_debug) {
        return;
    }

    if (witness_tsdn_null(witness_tsdn)) {
        return;
    }
    witness_tsd = witness_tsdn_tsd(witness_tsdn);

    d = 0;
    witnesses = &witness_tsd->witnesses;
    w = ql_last(witnesses, link);

/* Returns depth. Not intended for direct use. */
static inline unsigned
witness_depth_to_rank(witness_list_t *witnesses, witness_rank_t rank_inclusive) {
    unsigned d = 0;
    witness_t *w = ql_last(witnesses, link);

    if (w != NULL) {
        ql_reverse_foreach(w, witnesses, link) {
            if (w->rank < rank_inclusive) {
...
...
@@ -256,6 +258,20 @@ witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
                d++;
        }
    }

    return d;
}

static inline void
witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
    witness_rank_t rank_inclusive, unsigned depth) {
    if (!config_debug || witness_tsdn_null(witness_tsdn)) {
        return;
    }

    witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses;
    unsigned d = witness_depth_to_rank(witnesses, rank_inclusive);
    if (d != depth) {
        witness_depth_error(witnesses, rank_inclusive, depth);
    }
...
...
@@ -271,6 +287,21 @@ witness_assert_lockless(witness_tsdn_t *witness_tsdn) {
    witness_assert_depth(witness_tsdn, 0);
}

static inline void
witness_assert_positive_depth_to_rank(witness_tsdn_t *witness_tsdn,
    witness_rank_t rank_inclusive) {
    if (!config_debug || witness_tsdn_null(witness_tsdn)) {
        return;
    }

    witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses;
    unsigned d = witness_depth_to_rank(witnesses, rank_inclusive);
    if (d == 0) {
        witness_depth_error(witnesses, rank_inclusive, 1);
    }
}

static inline void
witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
    witness_tsd_t *witness_tsd;
...
...
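As a rough mental model of the rank machinery above (a simplified, hypothetical sketch, not jemalloc's witness API): each thread records the ranks of the locks it holds, acquisition asserts a non-decreasing rank order, and a depth-to-rank query counts held locks at or above a given rank, which is the quantity witness_assert_depth_to_rank() and witness_assert_positive_depth_to_rank() validate.

#include <assert.h>

typedef unsigned rank_t;

#define SKETCH_MAX_DEPTH 16

typedef struct {
    rank_t held[SKETCH_MAX_DEPTH];    /* ranks of currently held locks */
    unsigned depth;
} lock_tracker_t;

static void
tracker_acquire(lock_tracker_t *t, rank_t rank) {
    assert(t->depth < SKETCH_MAX_DEPTH);
    /* Higher-valued ranks may only be acquired after lower-valued ones. */
    assert(t->depth == 0 || rank >= t->held[t->depth - 1]);
    t->held[t->depth++] = rank;
}

/* Analogous to witness_depth_to_rank(): locks held at or above a rank. */
static unsigned
tracker_depth_to_rank(const lock_tracker_t *t, rank_t rank_inclusive) {
    unsigned d = 0;
    for (unsigned i = 0; i < t->depth; i++) {
        if (t->held[i] >= rank_inclusive) {
            d++;
        }
    }
    return d;
}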
deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
View file @
d4439bd4
...
...
@@ -13,6 +13,12 @@
/* Defined if format(printf, ...) attribute is supported. */
#undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
/* Defined if fallthrough attribute is supported. */
#undef JEMALLOC_HAVE_ATTR_FALLTHROUGH
/* Defined if cold attribute is supported. */
#undef JEMALLOC_HAVE_ATTR_COLD
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
...
...
deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
View file @
d4439bd4
...
...
@@ -71,6 +71,7 @@
# endif
# define JEMALLOC_FORMAT_ARG(i)
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_FALLTHROUGH
# define JEMALLOC_NOINLINE __declspec(noinline)
# ifdef __cplusplus
# define JEMALLOC_NOTHROW __declspec(nothrow)
...
...
@@ -84,6 +85,7 @@
# else
# define JEMALLOC_ALLOCATOR
# endif
# define JEMALLOC_COLD
#elif defined(JEMALLOC_HAVE_ATTR)
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
...
...
@@ -109,11 +111,21 @@
# else
# define JEMALLOC_FORMAT_PRINTF(s, i)
# endif
# ifdef JEMALLOC_HAVE_ATTR_FALLTHROUGH
# define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough)
# else
# define JEMALLOC_FALLTHROUGH
# endif
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
# ifdef JEMALLOC_HAVE_ATTR_COLD
# define JEMALLOC_COLD JEMALLOC_ATTR(__cold__)
# else
# define JEMALLOC_COLD
# endif
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s)
...
...
@@ -121,11 +133,19 @@
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# define JEMALLOC_EXPORT
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_FALLTHROUGH
# define JEMALLOC_NOINLINE
# define JEMALLOC_NOTHROW
# define JEMALLOC_SECTION(s)
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
# define JEMALLOC_COLD
#endif
#if (defined(__APPLE__) || defined(__FreeBSD__)) && !defined(JEMALLOC_NO_RENAME)
# define JEMALLOC_SYS_NOTHROW
#else
# define JEMALLOC_SYS_NOTHROW JEMALLOC_NOTHROW
#endif
/* This version of Jemalloc, modified for Redis, has the je_get_defrag_hint()
...
...
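The JEMALLOC_FALLTHROUGH/JEMALLOC_COLD plumbing above follows the usual pattern of probing for an attribute at configure time and expanding the macro to nothing when it is absent. A self-contained sketch of the same idea using __has_attribute; the EXAMPLE_* names and the classify() function are hypothetical, not part of jemalloc:

#if defined(__has_attribute)
#  if __has_attribute(fallthrough)
#    define EXAMPLE_FALLTHROUGH __attribute__((fallthrough))
#  endif
#  if __has_attribute(cold)
#    define EXAMPLE_COLD __attribute__((cold))
#  endif
#endif
#ifndef EXAMPLE_FALLTHROUGH
#  define EXAMPLE_FALLTHROUGH    /* expands to nothing when unsupported */
#endif
#ifndef EXAMPLE_COLD
#  define EXAMPLE_COLD
#endif

/* Cold: rarely taken error path, kept out of the hot instruction stream. */
EXAMPLE_COLD static void
report_unexpected(int c) {
    (void)c;
}

static int
classify(int c) {
    switch (c) {
    case 0:
        c += 1;
        /* Deliberate fall through; silences -Wimplicit-fallthrough. */
        EXAMPLE_FALLTHROUGH;
    case 1:
        return c;
    default:
        report_unexpected(c);
        return -1;
    }
}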