ruanhaishen / redis / Commits

Commit a51eb05b (unverified)
Release Redis 7.2 RC2

Authored May 15, 2023 by Oran Agra; committed by GitHub on May 15, 2023.
Parents: e26a769d 986dbf71

Changes: 200. Too many changes to show; to preserve performance only 200 of 200+ files are displayed.
deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h → deps/jemalloc/include/jemalloc/internal/prof_inlines.h
-#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
-#define JEMALLOC_INTERNAL_PROF_INLINES_B_H
+#ifndef JEMALLOC_INTERNAL_PROF_INLINES_H
+#define JEMALLOC_INTERNAL_PROF_INLINES_H
 
 #include "jemalloc/internal/safety_check.h"
 #include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/thread_event.h"
 
+JEMALLOC_ALWAYS_INLINE void
+prof_active_assert() {
+	cassert(config_prof);
+	/*
+	 * If opt_prof is off, then prof_active must always be off, regardless
+	 * of whether prof_active_mtx is in effect or not.
+	 */
+	assert(opt_prof || !prof_active_state);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_active_get_unlocked(void) {
+	prof_active_assert();
+	/*
+	 * Even if opt_prof is true, sampling can be temporarily disabled by
+	 * setting prof_active to false.  No locking is used when reading
+	 * prof_active in the fast path, so there are no guarantees regarding
+	 * how long it will take for all threads to notice state changes.
+	 */
+	return prof_active_state;
+}
+
 JEMALLOC_ALWAYS_INLINE bool
 prof_gdump_get_unlocked(void) {
...
...
@@ -22,6 +45,7 @@ prof_tdata_get(tsd_t *tsd, bool create) {
 	tdata = tsd_prof_tdata_get(tsd);
 	if (create) {
+		assert(tsd_reentrancy_level_get(tsd) == 0);
 		if (unlikely(tdata == NULL)) {
 			if (tsd_nominal(tsd)) {
 				tdata = prof_tdata_init(tsd);
...
...
@@ -37,158 +61,115 @@ prof_tdata_get(tsd_t *tsd, bool create) {
 	return tdata;
 }
 
-JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
+JEMALLOC_ALWAYS_INLINE void
+prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
+    prof_info_t *prof_info) {
 	cassert(config_prof);
 	assert(ptr != NULL);
+	assert(prof_info != NULL);
 
-	return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
+	arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, false);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
-    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
+prof_info_get_and_reset_recent(tsd_t *tsd, const void *ptr,
+    emap_alloc_ctx_t *alloc_ctx, prof_info_t *prof_info) {
 	cassert(config_prof);
 	assert(ptr != NULL);
+	assert(prof_info != NULL);
 
-	arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
+	arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, true);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
+prof_tctx_reset(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx) {
 	cassert(config_prof);
 	assert(ptr != NULL);
 
-	arena_prof_tctx_reset(tsdn, ptr, tctx);
+	arena_prof_tctx_reset(tsd, ptr, alloc_ctx);
 }
 
-JEMALLOC_ALWAYS_INLINE nstime_t
-prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
+JEMALLOC_ALWAYS_INLINE void
+prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
 	cassert(config_prof);
 	assert(ptr != NULL);
 
-	return arena_prof_alloc_time_get(tsdn, ptr, alloc_ctx);
+	arena_prof_tctx_reset_sampled(tsd, ptr);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
-    nstime_t t) {
+prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, size_t size) {
 	cassert(config_prof);
-	assert(ptr != NULL);
+	assert(edata != NULL);
+	assert((uintptr_t)tctx > (uintptr_t)1U);
 
-	arena_prof_alloc_time_set(tsdn, ptr, alloc_ctx, t);
+	arena_prof_info_set(tsd, edata, tctx, size);
 }
 
-JEMALLOC_ALWAYS_INLINE bool
-prof_sample_check(tsd_t *tsd, size_t usize, bool update) {
-	ssize_t check = update ? 0 : usize;
-
-	int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
-	if (update) {
-		bytes_until_sample -= usize;
-		if (tsd_nominal(tsd)) {
-			tsd_bytes_until_sample_set(tsd, bytes_until_sample);
-		}
-	}
-	if (likely(bytes_until_sample >= check)) {
-		return true;
-	}
-
-	return false;
-}
-
 JEMALLOC_ALWAYS_INLINE bool
-prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
-    prof_tdata_t **tdata_out) {
-	prof_tdata_t *tdata;
-
+prof_sample_should_skip(tsd_t *tsd, bool sample_event) {
 	cassert(config_prof);
 
 	/* Fastpath: no need to load tdata */
-	if (likely(prof_sample_check(tsd, usize, update))) {
-		return true;
-	}
-
-	bool booted = tsd_prof_tdata_get(tsd);
-	tdata = prof_tdata_get(tsd, true);
-	if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
-		tdata = NULL;
-	}
-
-	if (tdata_out != NULL) {
-		*tdata_out = tdata;
-	}
-
-	if (unlikely(tdata == NULL)) {
+	if (likely(!sample_event)) {
 		return true;
 	}
 
 	/*
-	 * If this was the first creation of tdata, then
-	 * prof_tdata_get() reset bytes_until_sample, so decrement and
-	 * check it again
+	 * sample_event is always obtained from the thread event module, and
+	 * whenever it's true, it means that the thread event module has
+	 * already checked the reentrancy level.
 	 */
-	if (!booted && prof_sample_check(tsd, usize, update)) {
-		return true;
-	}
+	assert(tsd_reentrancy_level_get(tsd) == 0);
 
-	if (tsd_reentrancy_level_get(tsd) > 0) {
+	prof_tdata_t *tdata = prof_tdata_get(tsd, true);
+	if (unlikely(tdata == NULL)) {
 		return true;
 	}
-	/* Compute new sample threshold. */
-	if (update) {
-		prof_sample_threshold_update(tdata);
-	}
+
 	return !tdata->active;
 }
 
 JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
+prof_alloc_prep(tsd_t *tsd, bool prof_active, bool sample_event) {
 	prof_tctx_t *ret;
-	prof_tdata_t *tdata;
-	prof_bt_t bt;
-
-	assert(usize == sz_s2u(usize));
 
 	if (!prof_active ||
-	    likely(prof_sample_accum_update(tsd, usize, update, &tdata))) {
+	    likely(prof_sample_should_skip(tsd, sample_event))) {
 		ret = (prof_tctx_t *)(uintptr_t)1U;
 	} else {
-		bt_init(&bt, tdata->vec);
-		prof_backtrace(&bt);
-		ret = prof_lookup(tsd, &bt);
+		ret = prof_tctx_create(tsd);
 	}
 
 	return ret;
 }
 
 JEMALLOC_ALWAYS_INLINE void
-prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
-    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
+prof_malloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
+    emap_alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
 	cassert(config_prof);
 	assert(ptr != NULL);
-	assert(usize == isalloc(tsdn, ptr));
+	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
 
 	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
-		prof_malloc_sample_object(tsdn, ptr, usize, tctx);
+		prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
 	} else {
-		prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
-		    (prof_tctx_t *)(uintptr_t)1U);
+		prof_tctx_reset(tsd, ptr, alloc_ctx);
 	}
 }
 
 JEMALLOC_ALWAYS_INLINE void
-prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
-    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
-    prof_tctx_t *old_tctx) {
+prof_realloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
+    prof_tctx_t *tctx, bool prof_active, const void *old_ptr,
+    size_t old_usize, prof_info_t *old_prof_info, bool sample_event) {
 	bool sampled, old_sampled, moved;
 
 	cassert(config_prof);
 	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
 
-	if (prof_active && !updated && ptr != NULL) {
+	if (prof_active && ptr != NULL) {
 		assert(usize == isalloc(tsd_tsdn(tsd), ptr));
-		if (prof_sample_accum_update(tsd, usize, true, NULL)) {
+		if (prof_sample_should_skip(tsd, sample_event)) {
 			/*
 			 * Don't sample.  The usize passed to prof_alloc_prep()
 			 * was larger than what actually got allocated, so a
...
...
@@ -196,31 +177,31 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
 			 * though its actual usize was insufficient to cross the
 			 * sample threshold.
 			 */
-			prof_alloc_rollback(tsd, tctx, true);
+			prof_alloc_rollback(tsd, tctx);
 			tctx = (prof_tctx_t *)(uintptr_t)1U;
 		}
 	}
 
 	sampled = ((uintptr_t)tctx > (uintptr_t)1U);
-	old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
+	old_sampled = ((uintptr_t)old_prof_info->alloc_tctx > (uintptr_t)1U);
 	moved = (ptr != old_ptr);
 
 	if (unlikely(sampled)) {
-		prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
+		prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
 	} else if (moved) {
-		prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
-		    (prof_tctx_t *)(uintptr_t)1U);
+		prof_tctx_reset(tsd, ptr, NULL);
 	} else if (unlikely(old_sampled)) {
 		/*
-		 * prof_tctx_set() would work for the !moved case as well, but
-		 * prof_tctx_reset() is slightly cheaper, and the proper thing
-		 * to do here in the presence of explicit knowledge re: moved
-		 * state.
+		 * prof_tctx_reset() would work for the !moved case as well,
+		 * but prof_tctx_reset_sampled() is slightly cheaper, and the
+		 * proper thing to do here in the presence of explicit
+		 * knowledge re: moved state.
 		 */
-		prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
+		prof_tctx_reset_sampled(tsd, ptr);
 	} else {
-		assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
-		    (uintptr_t)1U);
+		prof_info_t prof_info;
+		prof_info_get(tsd, ptr, NULL, &prof_info);
+		assert((uintptr_t)prof_info.alloc_tctx == (uintptr_t)1U);
 	}
 
 	/*
...
...
@@ -231,20 +212,50 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
 	 * counters.
 	 */
 	if (unlikely(old_sampled)) {
-		prof_free_sampled_object(tsd, ptr, old_usize, old_tctx);
+		prof_free_sampled_object(tsd, old_usize, old_prof_info);
 	}
 }
 
+JEMALLOC_ALWAYS_INLINE size_t
+prof_sample_align(size_t orig_align) {
+	/*
+	 * Enforce page alignment, so that sampled allocations can be identified
+	 * w/o metadata lookup.
+	 */
+	assert(opt_prof);
+	return (opt_cache_oblivious && orig_align < PAGE) ? PAGE : orig_align;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_sample_aligned(const void *ptr) {
+	return ((uintptr_t)ptr & PAGE_MASK) == 0;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_sampled(tsd_t *tsd, const void *ptr) {
+	prof_info_t prof_info;
+	prof_info_get(tsd, ptr, NULL, &prof_info);
+	bool sampled = (uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U;
+	if (sampled) {
+		assert(prof_sample_aligned(ptr));
+	}
+	return sampled;
+}
+
 JEMALLOC_ALWAYS_INLINE void
-prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
-	prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
+prof_free(tsd_t *tsd, const void *ptr, size_t usize,
+    emap_alloc_ctx_t *alloc_ctx) {
+	prof_info_t prof_info;
+	prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);
 
 	cassert(config_prof);
 	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
 
-	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
-		prof_free_sampled_object(tsd, ptr, usize, tctx);
+	if (unlikely((uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U)) {
+		assert(prof_sample_aligned(ptr));
+		prof_free_sampled_object(tsd, usize, &prof_info);
 	}
 }
 
-#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */
+#endif /* JEMALLOC_INTERNAL_PROF_INLINES_H */
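Aside: the removed prof_sample_check() above shows the sampling heuristic in its simplest form: each thread keeps a byte budget, every allocation decrements it, and an allocation is sampled only once the budget is exhausted (in the new code this decision arrives pre-computed as sample_event from the thread-event module). A minimal standalone sketch of that pattern, with hypothetical names rather than jemalloc's API:

	#include <stdbool.h>
	#include <stddef.h>

	/*
	 * Hypothetical per-thread countdown; jemalloc re-arms the real one from a
	 * randomized threshold averaging 2^lg_prof_sample bytes so that sampling
	 * stays unbiased across allocation sizes.  Here we just use a constant.
	 */
	static _Thread_local ptrdiff_t bytes_until_sample = 512 * 1024;

	/* Return true if an allocation of usize bytes should be sampled. */
	static bool
	should_sample(size_t usize) {
		bytes_until_sample -= (ptrdiff_t)usize;
		if (bytes_until_sample >= 0) {
			return false;	/* budget not yet exhausted: skip sampling */
		}
		bytes_until_sample = 512 * 1024;	/* re-arm for the next sample */
		return true;
	}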
deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h
deleted
100644 → 0
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H

#include "jemalloc/internal/mutex.h"

static inline bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
	cassert(config_prof);

	bool overflow;
	uint64_t a0, a1;

	/*
	 * If the application allocates fast enough (and/or if idump is slow
	 * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
	 * idump trigger coalescing.  This is an intentional mechanism that
	 * avoids rate-limiting allocation.
	 */
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
	do {
		a1 = a0 + accumbytes;
		assert(a1 >= a0);
		overflow = (a1 >= prof_interval);
		if (overflow) {
			a1 %= prof_interval;
		}
	} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &prof_accum->mtx);
	a0 = prof_accum->accumbytes;
	a1 = a0 + accumbytes;
	overflow = (a1 >= prof_interval);
	if (overflow) {
		a1 %= prof_interval;
	}
	prof_accum->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
	return overflow;
}

static inline void
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
	cassert(config_prof);

	/*
	 * Cancel out as much of the excessive prof_accumbytes increase as
	 * possible without underflowing.  Interval-triggered dumps occur
	 * slightly more often than intended as a result of incomplete
	 * canceling.
	 */
	uint64_t a0, a1;
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
	do {
		a1 = (a0 >= SC_LARGE_MINCLASS - usize)
		    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
	} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &prof_accum->mtx);
	a0 = prof_accum->accumbytes;
	a1 = (a0 >= SC_LARGE_MINCLASS - usize)
	    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
	prof_accum->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
}

JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
	/*
	 * Even if opt_prof is true, sampling can be temporarily disabled by
	 * setting prof_active to false.  No locking is used when reading
	 * prof_active in the fast path, so there are no guarantees regarding
	 * how long it will take for all threads to notice state changes.
	 */
	return prof_active;
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
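Aside: the deleted prof_accum_add() above is a lock-free interval accumulator: it adds bytes in a CAS loop and reports when the running total crosses prof_interval, keeping only the remainder so the next interval starts immediately (in jemalloc 5.3 this logic moved into a generic counter module, which is not part of this excerpt). A minimal C11 sketch of the same technique, using standard atomics rather than jemalloc's wrappers:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Hypothetical interval counter mirroring the deleted prof_accum_add():
	 * accumulate bytes and report true when the running total crosses
	 * `interval`, wrapping the remainder for the next interval.
	 */
	static bool
	accum_add(_Atomic uint64_t *accumbytes, uint64_t add, uint64_t interval) {
		uint64_t a0 = atomic_load_explicit(accumbytes, memory_order_relaxed);
		uint64_t a1;
		bool overflow;
		do {
			a1 = a0 + add;
			overflow = (a1 >= interval);
			if (overflow) {
				a1 %= interval;	/* keep the remainder */
			}
		} while (!atomic_compare_exchange_weak_explicit(accumbytes, &a0, a1,
		    memory_order_relaxed, memory_order_relaxed));
		return overflow;	/* caller would trigger an interval dump */
	}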
deps/jemalloc/include/jemalloc/internal/prof_log.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_PROF_LOG_H
#define JEMALLOC_INTERNAL_PROF_LOG_H

#include "jemalloc/internal/mutex.h"

extern malloc_mutex_t log_mtx;

void prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info);
bool prof_log_init(tsd_t *tsdn);

/* Used in unit tests. */
size_t prof_log_bt_count(void);
size_t prof_log_alloc_count(void);
size_t prof_log_thr_count(void);
bool prof_log_is_logging(void);
bool prof_log_rep_check(void);
void prof_log_dummy_set(bool new_value);

bool prof_log_start(tsdn_t *tsdn, const char *filename);
bool prof_log_stop(tsdn_t *tsdn);

#endif /* JEMALLOC_INTERNAL_PROF_LOG_H */
deps/jemalloc/include/jemalloc/internal/prof_recent.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_PROF_RECENT_H
#define JEMALLOC_INTERNAL_PROF_RECENT_H

extern malloc_mutex_t prof_recent_alloc_mtx;
extern malloc_mutex_t prof_recent_dump_mtx;

bool prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx);
void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize);
void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata);
bool prof_recent_init();
void edata_prof_recent_alloc_init(edata_t *edata);

/* Used in unit tests. */
typedef ql_head(prof_recent_t) prof_recent_list_t;
extern prof_recent_list_t prof_recent_alloc_list;
edata_t *prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *node);
prof_recent_t *edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata);

ssize_t prof_recent_alloc_max_ctl_read();
ssize_t prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max);
void prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque);

#endif /* JEMALLOC_INTERNAL_PROF_RECENT_H */
deps/jemalloc/include/jemalloc/internal/prof_stats.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_PROF_STATS_H
#define JEMALLOC_INTERNAL_PROF_STATS_H

typedef struct prof_stats_s prof_stats_t;
struct prof_stats_s {
	uint64_t req_sum;
	uint64_t count;
};

extern malloc_mutex_t prof_stats_mtx;

void prof_stats_inc(tsd_t *tsd, szind_t ind, size_t size);
void prof_stats_dec(tsd_t *tsd, szind_t ind, size_t size);
void prof_stats_get_live(tsd_t *tsd, szind_t ind, prof_stats_t *stats);
void prof_stats_get_accum(tsd_t *tsd, szind_t ind, prof_stats_t *stats);

#endif /* JEMALLOC_INTERNAL_PROF_STATS_H */
deps/jemalloc/include/jemalloc/internal/prof_structs.h
...
...
@@ -2,6 +2,7 @@
 #define JEMALLOC_INTERNAL_PROF_STRUCTS_H
 
 #include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/edata.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/prng.h"
 #include "jemalloc/internal/rb.h"
...
...
@@ -15,26 +16,22 @@ struct prof_bt_s {
 #ifdef JEMALLOC_PROF_LIBGCC
 /* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
 typedef struct {
-	prof_bt_t *bt;
+	void **vec;
+	unsigned *len;
 	unsigned max;
 } prof_unwind_data_t;
 #endif
 
-struct prof_accum_s {
-#ifndef JEMALLOC_ATOMIC_U64
-	malloc_mutex_t mtx;
-	uint64_t accumbytes;
-#else
-	atomic_u64_t accumbytes;
-#endif
-};
-
 struct prof_cnt_s {
 	/* Profiling counters. */
 	uint64_t curobjs;
+	uint64_t curobjs_shifted_unbiased;
 	uint64_t curbytes;
+	uint64_t curbytes_unbiased;
 	uint64_t accumobjs;
+	uint64_t accumobjs_shifted_unbiased;
 	uint64_t accumbytes;
+	uint64_t accumbytes_unbiased;
 };
 
 typedef enum {
...
...
@@ -55,6 +52,12 @@ struct prof_tctx_s {
 	uint64_t thr_uid;
 	uint64_t thr_discrim;
 
+	/*
+	 * Reference count of how many times this tctx object is referenced in
+	 * recent allocation / deallocation records, protected by tdata->lock.
+	 */
+	uint64_t recent_count;
+
 	/* Profiling counters, protected by tdata->lock. */
 	prof_cnt_t cnts;
...
...
@@ -96,6 +99,15 @@ struct prof_tctx_s {
 };
 typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
 
+struct prof_info_s {
+	/* Time when the allocation was made. */
+	nstime_t alloc_time;
+	/* Points to the prof_tctx_t corresponding to the allocation. */
+	prof_tctx_t *alloc_tctx;
+	/* Allocation request size. */
+	size_t alloc_size;
+};
+
 struct prof_gctx_s {
 	/* Protects nlimbo, cnt_summed, and tctxs. */
 	malloc_mutex_t *lock;
...
...
@@ -167,9 +179,6 @@ struct prof_tdata_s {
 	 */
 	ckh_t bt2tctx;
 
-	/* Sampling state. */
-	uint64_t prng_state;
-
 	/* State used to avoid dumping while operating on prof internals. */
 	bool enq;
 	bool enq_idump;
...
...
@@ -197,4 +206,16 @@ struct prof_tdata_s {
 };
 typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
 
+struct prof_recent_s {
+	nstime_t alloc_time;
+	nstime_t dalloc_time;
+
+	ql_elm(prof_recent_t) link;
+	size_t size;
+	size_t usize;
+	atomic_p_t alloc_edata;	/* NULL means allocation has been freed. */
+	prof_tctx_t *alloc_tctx;
+	prof_tctx_t *dalloc_tctx;
+};
+
 #endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
deps/jemalloc/include/jemalloc/internal/prof_sys.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_PROF_SYS_H
#define JEMALLOC_INTERNAL_PROF_SYS_H

extern malloc_mutex_t prof_dump_filename_mtx;
extern base_t *prof_base;

void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(tsd_t *tsd, prof_bt_t *bt);
void prof_hooks_init();
void prof_unwind_init();
void prof_sys_thread_name_fetch(tsd_t *tsd);
int prof_getpid(void);
void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind);
bool prof_prefix_set(tsdn_t *tsdn, const char *prefix);
void prof_fdump_impl(tsd_t *tsd);
void prof_idump_impl(tsd_t *tsd);
bool prof_mdump_impl(tsd_t *tsd, const char *filename);
void prof_gdump_impl(tsd_t *tsd);

/* Used in unit tests. */
typedef int (prof_sys_thread_name_read_t)(char *buf, size_t limit);
extern prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read;
typedef int (prof_dump_open_file_t)(const char *, int);
extern prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file;
typedef ssize_t (prof_dump_write_file_t)(int, const void *, size_t);
extern prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file;
typedef int (prof_dump_open_maps_t)();
extern prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps;

#endif /* JEMALLOC_INTERNAL_PROF_SYS_H */
deps/jemalloc/include/jemalloc/internal/prof_types.h
...
...
@@ -2,11 +2,12 @@
 #define JEMALLOC_INTERNAL_PROF_TYPES_H
 
 typedef struct prof_bt_s prof_bt_t;
-typedef struct prof_accum_s prof_accum_t;
 typedef struct prof_cnt_s prof_cnt_t;
 typedef struct prof_tctx_s prof_tctx_t;
+typedef struct prof_info_s prof_info_t;
 typedef struct prof_gctx_s prof_gctx_t;
 typedef struct prof_tdata_s prof_tdata_t;
+typedef struct prof_recent_s prof_recent_t;
 
 /* Option defaults. */
 #ifdef JEMALLOC_PROF
...
...
@@ -28,7 +29,23 @@ typedef struct prof_tdata_s prof_tdata_t;
 #define PROF_CKH_MINITEMS	64
 
 /* Size of memory buffer to use when writing dump files. */
-#define PROF_DUMP_BUFSIZE	65536
+#ifndef JEMALLOC_PROF
+/* Minimize memory bloat for non-prof builds. */
+#  define PROF_DUMP_BUFSIZE	1
+#elif defined(JEMALLOC_DEBUG)
+/* Use a small buffer size in debug build, mainly to facilitate testing. */
+#  define PROF_DUMP_BUFSIZE	16
+#else
+#  define PROF_DUMP_BUFSIZE	65536
+#endif
+
+/* Size of size class related tables */
+#ifdef JEMALLOC_PROF
+#  define PROF_SC_NSIZES	SC_NSIZES
+#else
+/* Minimize memory bloat for non-prof builds. */
+#  define PROF_SC_NSIZES	1
+#endif
 
 /* Size of stack-allocated buffer used by prof_printf(). */
 #define PROF_PRINTF_BUFSIZE	128
...
...
@@ -45,12 +62,14 @@ typedef struct prof_tdata_s prof_tdata_t;
 */
 #define PROF_NTDATA_LOCKS	256
 
-/*
- * prof_tdata pointers close to NULL are used to encode state information that
- * is used for cleaning up during thread shutdown.
- */
-#define PROF_TDATA_STATE_REINCARNATED	((prof_tdata_t *)(uintptr_t)1)
-#define PROF_TDATA_STATE_PURGATORY	((prof_tdata_t *)(uintptr_t)2)
-#define PROF_TDATA_STATE_MAX		PROF_TDATA_STATE_PURGATORY
+/* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+#define PROF_DUMP_FILENAME_LEN (PATH_MAX + 1)
+#else
+#define PROF_DUMP_FILENAME_LEN 1
+#endif
+
+/* Default number of recent allocations to record. */
+#define PROF_RECENT_ALLOC_MAX_DEFAULT 0
 
 #endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
deps/jemalloc/include/jemalloc/internal/psset.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_PSSET_H
#define JEMALLOC_INTERNAL_PSSET_H

#include "jemalloc/internal/hpdata.h"

/*
 * A page-slab set.  What the eset is to PAC, the psset is to HPA.  It maintains
 * a collection of page-slabs (the intent being that they are backed by
 * hugepages, or at least could be), and handles allocation and deallocation
 * requests.
 */

/*
 * One more than the maximum pszind_t we will serve out of the HPA.
 * Practically, we expect only the first few to be actually used.  This
 * corresponds to a maximum size of 512MB on systems with 4k pages and
 * SC_NGROUP == 4, which is already an unreasonably large maximum.  Morally, you
 * can think of this as being SC_NPSIZES, but there's no sense in wasting that
 * much space in the arena, making bitmaps that much larger, etc.
 */
#define PSSET_NPSIZES 64

/*
 * We keep two purge lists per page size class; one for hugified hpdatas (at
 * index 2*pszind), and one for the non-hugified hpdatas (at index 2*pszind +
 * 1).  This lets us implement a preference for purging non-hugified hpdatas
 * among similarly-dirty ones.
 * We reserve the last two indices for empty slabs, in that case purging
 * hugified ones (which are definitionally all waste) before non-hugified ones
 * (i.e. reversing the order).
 */
#define PSSET_NPURGE_LISTS (2 * PSSET_NPSIZES)

typedef struct psset_bin_stats_s psset_bin_stats_t;
struct psset_bin_stats_s {
	/* How many pageslabs are in this bin? */
	size_t npageslabs;
	/* Of them, how many pages are active? */
	size_t nactive;
	/* And how many are dirty? */
	size_t ndirty;
};

typedef struct psset_stats_s psset_stats_t;
struct psset_stats_s {
	/*
	 * The second index is huge stats; nonfull_slabs[pszind][0] contains
	 * stats for the non-huge slabs in bucket pszind, while
	 * nonfull_slabs[pszind][1] contains stats for the huge slabs.
	 */
	psset_bin_stats_t nonfull_slabs[PSSET_NPSIZES][2];

	/*
	 * Full slabs don't live in any edata heap, but we still track their
	 * stats.
	 */
	psset_bin_stats_t full_slabs[2];

	/* Empty slabs are similar. */
	psset_bin_stats_t empty_slabs[2];
};

typedef struct psset_s psset_t;
struct psset_s {
	/*
	 * The pageslabs, quantized by the size class of the largest contiguous
	 * free run of pages in a pageslab.
	 */
	hpdata_age_heap_t pageslabs[PSSET_NPSIZES];
	/* Bitmap for which set bits correspond to non-empty heaps. */
	fb_group_t pageslab_bitmap[FB_NGROUPS(PSSET_NPSIZES)];
	/*
	 * The sum of all bin stats in stats.  This lets us quickly answer
	 * queries for the number of dirty, active, and retained pages in the
	 * entire set.
	 */
	psset_bin_stats_t merged_stats;
	psset_stats_t stats;
	/*
	 * Slabs with no active allocations, but which are allowed to serve new
	 * allocations.
	 */
	hpdata_empty_list_t empty;
	/*
	 * Slabs which are available to be purged, ordered by how much we want
	 * to purge them (with later indices indicating slabs we want to purge
	 * more).
	 */
	hpdata_purge_list_t to_purge[PSSET_NPURGE_LISTS];
	/* Bitmap for which set bits correspond to non-empty purge lists. */
	fb_group_t purge_bitmap[FB_NGROUPS(PSSET_NPURGE_LISTS)];
	/* Slabs which are available to be hugified. */
	hpdata_hugify_list_t to_hugify;
};

void psset_init(psset_t *psset);
void psset_stats_accum(psset_stats_t *dst, psset_stats_t *src);

/*
 * Begin or end updating the given pageslab's metadata.  While the pageslab is
 * being updated, it won't be returned from psset_fit calls.
 */
void psset_update_begin(psset_t *psset, hpdata_t *ps);
void psset_update_end(psset_t *psset, hpdata_t *ps);

/* Analogous to the eset_fit; pick a hpdata to serve the request. */
hpdata_t *psset_pick_alloc(psset_t *psset, size_t size);
/* Pick one to purge. */
hpdata_t *psset_pick_purge(psset_t *psset);
/* Pick one to hugify. */
hpdata_t *psset_pick_hugify(psset_t *psset);

void psset_insert(psset_t *psset, hpdata_t *ps);
void psset_remove(psset_t *psset, hpdata_t *ps);

static inline size_t
psset_npageslabs(psset_t *psset) {
	return psset->merged_stats.npageslabs;
}

static inline size_t
psset_nactive(psset_t *psset) {
	return psset->merged_stats.nactive;
}

static inline size_t
psset_ndirty(psset_t *psset) {
	return psset->merged_stats.ndirty;
}

#endif /* JEMALLOC_INTERNAL_PSSET_H */
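Aside: the purge-list layout described in the comments above maps (size class, hugified, empty) onto a single list index, with later indices purged first. A hypothetical helper showing one plausible reading of that scheme (this is not psset.c's actual code; it assumes the header's PSSET_NPURGE_LISTS and jemalloc's pszind_t type):

	/*
	 * Non-empty slabs: hugified at 2*pszind, non-hugified at 2*pszind + 1,
	 * so non-hugified ones are preferred among similarly dirty slabs.
	 * Empty slabs take the last two indices, with hugified ones (pure
	 * waste) purged first.
	 */
	static inline size_t
	purge_list_index(pszind_t pszind, bool hugified, bool empty) {
		if (empty) {
			return hugified ? PSSET_NPURGE_LISTS - 1
			    : PSSET_NPURGE_LISTS - 2;
		}
		return 2 * pszind + (hugified ? 0 : 1);
	}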
deps/jemalloc/include/jemalloc/internal/ql.h
...
...
@@ -3,37 +3,85 @@
#include "jemalloc/internal/qr.h"
/*
* A linked-list implementation.
*
* This is built on top of the ring implementation, but that can be viewed as an
* implementation detail (i.e. trying to advance past the tail of the list
* doesn't wrap around).
*
* You define a struct like so:
* typedef struct my_s my_t;
* struct my_s {
* int data;
* ql_elm(my_t) my_link;
* };
*
* // We wobble between "list" and "head" for this type; we're now mostly
* // heading towards "list".
* typedef ql_head(my_t) my_list_t;
*
* You then pass a my_list_t * for a_head arguments, a my_t * for a_elm
* arguments, the token "my_link" for a_field arguments, and the token "my_t"
* for a_type arguments.
*/
/* List definitions. */
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
/* Static initializer for an empty list. */
#define ql_head_initializer(a_head) {NULL}
/* The field definition. */
#define ql_elm(a_type) qr(a_type)
/* List functions. */
/* A pointer to the first element in the list, or NULL if the list is empty. */
#define ql_first(a_head) ((a_head)->qlh_first)
/* Dynamically initializes a list. */
#define ql_new(a_head) do { \
	(a_head)->qlh_first = NULL; \
	ql_first(a_head) = NULL; \
} while (0)
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
/*
* Sets dest to be the contents of src (overwriting any elements there), leaving
* src empty.
*/
#define ql_move(a_head_dest, a_head_src) do { \
ql_first(a_head_dest) = ql_first(a_head_src); \
ql_new(a_head_src); \
} while (0)
#define ql_first(a_head) ((a_head)->qlh_first)
/* True if the list is empty, otherwise false. */
#define ql_empty(a_head) (ql_first(a_head) == NULL)
/*
* Initializes a ql_elm. Must be called even if the field is about to be
* overwritten.
*/
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
/*
* Obtains the last item in the list.
*/
#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
(ql_empty(a_head) ? NULL : qr_prev(ql_first(a_head), a_field))
/*
* Gets a pointer to the next/prev element in the list. Trying to advance past
* the end or retreat before the beginning of the list returns NULL.
*/
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
/* Inserts a_elm before a_qlelm in the list. */
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
...
...
@@ -41,23 +89,41 @@ struct { \
} \
} while (0)
/* Inserts a_elm after a_qlelm in the list. */
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
/* Inserts a_elm as the first item in the list. */
#define ql_head_insert(a_head, a_elm, a_field) do { \
	if (ql_first(a_head) != NULL) { \
	if (!ql_empty(a_head)) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
/* Inserts a_elm as the last item in the list. */
#define ql_tail_insert(a_head, a_elm, a_field) do { \
	if (ql_first(a_head) != NULL) { \
	if (!ql_empty(a_head)) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
/*
* Given lists a = [a_1, ..., a_n] and [b_1, ..., b_n], results in:
* a = [a1, ..., a_n, b_1, ..., b_n] and b = [].
*/
#define ql_concat(a_head_a, a_head_b, a_field) do { \
if (ql_empty(a_head_a)) { \
ql_move(a_head_a, a_head_b); \
} else if (!ql_empty(a_head_b)) { \
qr_meld(ql_first(a_head_a), ql_first(a_head_b), \
a_field); \
ql_new(a_head_b); \
} \
} while (0)
/* Removes a_elm from the list. */
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
...
...
@@ -65,20 +131,63 @@ struct { \
if (ql_first(a_head) != (a_elm)) { \
qr_remove((a_elm), a_field); \
} else { \
		ql_first(a_head) = NULL; \
		ql_new(a_head); \
} \
} while (0)
/* Removes the first item in the list. */
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
/* Removes the last item in the list. */
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
/*
* Given a = [a_1, a_2, ..., a_n-1, a_n, a_n+1, ...],
* ql_split(a, a_n, b, some_field) results in
* a = [a_1, a_2, ..., a_n-1]
* and replaces b's contents with:
* b = [a_n, a_n+1, ...]
*/
#define ql_split(a_head_a, a_elm, a_head_b, a_field) do { \
if (ql_first(a_head_a) == (a_elm)) { \
ql_move(a_head_b, a_head_a); \
} else { \
qr_split(ql_first(a_head_a), (a_elm), a_field); \
ql_first(a_head_b) = (a_elm); \
} \
} while (0)
/*
* An optimized version of:
* a_type *t = ql_first(a_head);
* ql_remove((a_head), t, a_field);
* ql_tail_insert((a_head), t, a_field);
*/
#define ql_rotate(a_head, a_field) do { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} while (0)
/*
* Helper macro to iterate over each element in a list in order, starting from
* the head (or in reverse order, starting from the tail). The usage is
* (assuming my_t and my_list_t defined as above).
*
* int sum(my_list_t *list) {
* int sum = 0;
* my_t *iter;
* ql_foreach(iter, list, link) {
* sum += iter->data;
* }
* return sum;
* }
*/
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
...
...
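Aside: a minimal usage sketch of the list macros documented above, assuming the jemalloc include directory is on the include path; the struct and field names mirror the header comment's example:

	#include <stdio.h>
	#include "jemalloc/internal/ql.h"

	typedef struct my_s my_t;
	struct my_s {
		int data;
		ql_elm(my_t) my_link;
	};
	typedef ql_head(my_t) my_list_t;

	int
	main(void) {
		my_t a = {1}, b = {2};
		my_list_t list = ql_head_initializer(list);

		ql_elm_new(&a, my_link);	/* links must be initialized first */
		ql_elm_new(&b, my_link);
		ql_tail_insert(&list, &a, my_link);
		ql_tail_insert(&list, &b, my_link);

		my_t *iter;
		ql_foreach(iter, &list, my_link) {
			printf("%d\n", iter->data);	/* prints 1 then 2 */
		}
		return 0;
	}

ql_tail_insert() keeps ql_first() pointing at the oldest element, so the loop visits the items in insertion order.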
deps/jemalloc/include/jemalloc/internal/qr.h
#ifndef JEMALLOC_INTERNAL_QR_H
#define JEMALLOC_INTERNAL_QR_H
/*
* A ring implementation based on an embedded circular doubly-linked list.
*
* You define your struct like so:
*
* typedef struct my_s my_t;
* struct my_s {
* int data;
* qr(my_t) my_link;
* };
*
* And then pass a my_t * into macros for a_qr arguments, and the token
* "my_link" into a_field fields.
*/
/* Ring definitions. */
#define qr(a_type) \
struct { \
...
...
@@ -8,61 +23,114 @@ struct { \
a_type *qre_prev; \
}
/* Ring functions. */
/*
* Initialize a qr link. Every link must be initialized before being used, even
* if that initialization is going to be immediately overwritten (say, by being
* passed into an insertion macro).
*/
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
/*
* Go forwards or backwards in the ring. Note that (the ring being circular), this
* always succeeds -- you just keep looping around and around the ring if you
* chase pointers without end.
*/
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
/*
* Given two rings:
* a -> a_1 -> ... -> a_n --
* ^ |
* |------------------------
*
* b -> b_1 -> ... -> b_n --
* ^ |
* |------------------------
*
* Results in the ring:
* a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n --
* ^ |
* |-------------------------------------------------|
*
* a_qr_a can directly be a qr_next() macro, but a_qr_b cannot.
*/
#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = \
(a_qr_a)->a_field.qre_prev; \
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
(a_qr_b)->a_field.qre_prev = \
(a_qr_b)->a_field.qre_prev->a_field.qre_next; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
} while (0)
#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
/*
* Logically, this is just a meld. The intent, though, is that a_qrelm is a
* single-element ring, so that "before" has a more obvious interpretation than
* meld.
*/
#define qr_before_insert(a_qrelm, a_qr, a_field) \
qr_meld((a_qrelm), (a_qr), a_field)
#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
a_type *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
/* Ditto, but inserting after rather than before. */
#define qr_after_insert(a_qrelm, a_qr, a_field) \
qr_before_insert(qr_next(a_qrelm, a_field), (a_qr), a_field)
/*
* Inverts meld; given the ring:
* a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n --
* ^ |
* |-------------------------------------------------|
*
* Results in two rings:
* a -> a_1 -> ... -> a_n --
* ^ |
* |------------------------
*
* b -> b_1 -> ... -> b_n --
* ^ |
* |------------------------
*
* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code.
*/
#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
	qr_meld((a_qr_a), (a_qr_b), a_type, a_field)
#define qr_split(a_qr_a, a_qr_b, a_field) \
	qr_meld((a_qr_a), (a_qr_b), a_field)
#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
= (a_qr)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
/*
* Splits off a_qr from the rest of its ring, so that it becomes a
* single-element ring.
*/
#define qr_remove(a_qr, a_field) \
qr_split(qr_next(a_qr, a_field), (a_qr), a_field)
/*
* Helper macro to iterate over each element in a ring exactly once, starting
* with a_qr. The usage is (assuming my_t defined as above):
*
* int sum(my_t *item) {
* int sum = 0;
* my_t *iter;
* qr_foreach(iter, item, link) {
* sum += iter->data;
* }
* return sum;
* }
*/
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
/*
* The same (and with the same usage) as qr_foreach, but in the opposite order,
* ending with a_qr.
*/
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
...
...
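Aside: a small sketch of qr_meld() as diagrammed above, using the post-change three-argument form (so it assumes the updated qr.h is the one on the include path):

	#include <stdio.h>
	#include "jemalloc/internal/qr.h"

	typedef struct node_s node_t;
	struct node_s {
		int data;
		qr(node_t) link;
	};

	int
	main(void) {
		node_t a = {1}, a1 = {2}, b = {3}, b1 = {4};
		qr_new(&a, link);  qr_new(&a1, link);
		qr_new(&b, link);  qr_new(&b1, link);
		qr_after_insert(&a, &a1, link);	/* ring 1: a -> a1 */
		qr_after_insert(&b, &b1, link);	/* ring 2: b -> b1 */

		qr_meld(&a, &b, link);		/* one ring: a -> a1 -> b -> b1 */

		node_t *iter;
		qr_foreach(iter, &a, link) {
			printf("%d\n", iter->data);	/* prints 1 2 3 4 */
		}
		return 0;
	}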
deps/jemalloc/include/jemalloc/internal/quantum.h
...
...
@@ -30,11 +30,18 @@
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __loongarch__
# define LG_QUANTUM 4
# endif
# ifdef __m68k__
# define LG_QUANTUM 3
# endif
# ifdef __mips__
# define LG_QUANTUM 3
# if defined(__mips_n32) || defined(__mips_n64)
# define LG_QUANTUM 4
# else
# define LG_QUANTUM 3
# endif
# endif
# ifdef __nios2__
# define LG_QUANTUM 3
...
...
@@ -61,6 +68,9 @@
# ifdef __le32__
# define LG_QUANTUM 4
# endif
# ifdef __arc__
# define LG_QUANTUM 3
# endif
# ifndef LG_QUANTUM
# error "Unknown minimum alignment for architecture; specify via "
"--with-lg-quantum"
...
...
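Aside: quantum.h only selects LG_QUANTUM per architecture; the quantum size and the alignment-ceiling helper are derived from it further down in the header, outside the hunks shown here. A sketch of that derivation with the usual macro shapes (hedged, since those lines are not part of this excerpt):

	#define LG_QUANTUM	4	/* e.g. a 16-byte quantum */
	#define QUANTUM		((size_t)(1U << LG_QUANTUM))
	#define QUANTUM_MASK	(QUANTUM - 1)

	/* Smallest quantum multiple that is >= a. */
	#define QUANTUM_CEILING(a)	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

	/* QUANTUM_CEILING(13) == 16, QUANTUM_CEILING(17) == 32. */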
deps/jemalloc/include/jemalloc/internal/rb.h
#ifndef JEMALLOC_INTERNAL_RB_H
#define JEMALLOC_INTERNAL_RB_H
/*-
*******************************************************************************
*
...
...
@@ -19,13 +22,19 @@
*******************************************************************************
*/
#ifndef RB_H_
#define RB_H_
#ifndef __PGI
#define RB_COMPACT
#endif
/*
* Each node in the RB tree consumes at least 1 byte of space (for the linkage
* if nothing else), so there are a maximum of sizeof(void *) << 3 rb tree nodes
* in any process (and thus, at most sizeof(void *) << 3 nodes in any rb tree).
* The choice of algorithm bounds the depth of a tree to twice the binary log of
* the number of elements in the tree; the following bound follows.
*/
#define RB_MAX_DEPTH (sizeof(void *) << 4)
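Aside, spelling out the bound: at most 2^(8 * sizeof(void *)) = 2^(sizeof(void *) << 3) nodes can exist, so log2(n) <= sizeof(void *) << 3; a red-black tree's depth is at most 2 * log2(n), i.e. at most sizeof(void *) << 4, which is the RB_MAX_DEPTH defined above.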
#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \
...
...
@@ -159,12 +168,22 @@ struct { \
rbtn_right_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
#define rb_summarized_only_false(...)
#define rb_summarized_only_true(...) __VA_ARGS__
#define rb_empty_summarize(a_node, a_lchild, a_rchild) false
/*
* The rb_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to rb_gen().
* The rb_proto() and rb_summarized_proto() macros generate function prototypes
* that correspond to the functions generated by an equivalently parameterized
* call to rb_gen() or rb_summarized_gen(), respectively.
*/
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, false)
#define rb_summarized_proto(a_attr, a_prefix, a_rbt_type, a_type) \
rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, true)
#define rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, \
a_is_summarized) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
a_attr bool \
...
...
@@ -195,31 +214,94 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
a_attr void \
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
void *arg);
void *arg); \
/* Extended API */
\
rb_summarized_only_##a_is_summarized( \
a_attr void \
a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node); \
a_attr bool \
a_prefix##empty_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##first_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##last_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
a_attr a_type * \
a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx); \
)
/*
* The rb_gen() macro generates a type-specific red-black tree implementation,
* based on the above cpp macros.
*
* Arguments:
*
* a_attr : Function attribute for generated functions (ex: static).
* a_prefix : Prefix for generated functions (ex: ex_).
* a_rb_type : Type for red-black tree data structure (ex: ex_t).
* a_type : Type for red-black tree node data structure (ex: ex_node_t).
* a_field : Name of red-black tree node linkage (ex: ex_link).
* a_cmp : Node comparison function name, with the following prototype:
* int (a_cmp *)(a_type *a_node, a_type *a_other);
* ^^^^^^
* or a_key
* Interpretation of comparison function return values:
* -1 : a_node < a_other
* 0 : a_node == a_other
* 1 : a_node > a_other
* In all cases, the a_node or a_key macro argument is the first
* argument to the comparison function, which makes it possible
* to write comparison functions that treat the first argument
* specially.
* a_attr:
* Function attribute for generated functions (ex: static).
* a_prefix:
* Prefix for generated functions (ex: ex_).
* a_rb_type:
* Type for red-black tree data structure (ex: ex_t).
* a_type:
* Type for red-black tree node data structure (ex: ex_node_t).
* a_field:
* Name of red-black tree node linkage (ex: ex_link).
* a_cmp:
* Node comparison function name, with the following prototype:
*
* int a_cmp(a_type *a_node, a_type *a_other);
* ^^^^^^
* or a_key
* Interpretation of comparison function return values:
* -1 : a_node < a_other
* 0 : a_node == a_other
* 1 : a_node > a_other
* In all cases, the a_node or a_key macro argument is the first argument to
* the comparison function, which makes it possible to write comparison
* functions that treat the first argument specially. a_cmp must be a total
* order on values inserted into the tree -- duplicates are not allowed.
*
* Assuming the following setup:
*
...
...
@@ -338,8 +420,193 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* during iteration. There is no way to stop iteration once it
* has begun.
* arg : Opaque pointer passed to cb().
*
* The rb_summarized_gen() macro generates all the functions above, but has an
* expanded interface. It introduces the notion of summarizing subtrees, and of
* filtering searches in the tree according to the information contained in
* those summaries.
* The extra macro argument is:
* a_summarize:
* Tree summarization function name, with the following prototype:
*
* bool a_summarize(a_type *a_node, const a_type *a_left_child,
* const a_type *a_right_child);
*
* This function should update a_node with the summary of the subtree rooted
* there, using the data contained in it and the summaries in a_left_child
* and a_right_child. One or both of them may be NULL. When the tree
* changes due to an insertion or removal, it updates the summaries of all
* nodes whose subtrees have changed (always updating the summaries of
* children before their parents). If the user alters a node in the tree in
* a way that may change its summary, they can call the generated
* update_summaries function to bubble up the summary changes to the root.
* It should return true if the summary changed (or may have changed), and
* false if it didn't (which will allow the implementation to terminate
* "bubbling up" the summaries early).
* As the parameter names indicate, the children are ordered as they are in
* the tree, a_left_child, if it is not NULL, compares less than a_node,
* which in turn compares less than a_right_child (if a_right_child is not
* NULL).
*
* Using the same setup as above but replacing the macro with
* rb_summarized_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp,
* ex_summarize)
*
* Generates all the previous functions, but adds some more:
*
* static void
* ex_update_summaries(ex_t *tree, ex_node_t *node);
* Description: Recompute all summaries of ancestors of node.
* Args:
* tree: Pointer to an initialized red-black tree object.
* node: The element of the tree whose summary may have changed.
*
* For each of ex_empty, ex_first, ex_last, ex_next, ex_prev, ex_search,
* ex_nsearch, ex_psearch, ex_iter, and ex_reverse_iter, an additional function
* is generated as well, with the suffix _filtered (e.g. ex_empty_filtered,
* ex_first_filtered, etc.). These use the concept of a "filter"; a binary
* property some node either satisfies or does not satisfy. Clever use of the
* a_summary argument to rb_summarized_gen can allow efficient computation of
* these predicates across whole subtrees of the tree.
* The extended API functions accept three additional arguments after the
* arguments to the corresponding non-extended equivalent.
*
* ex_fn(..., bool (*filter_node)(void *, ex_node_t *),
* bool (*filter_subtree)(void *, ex_node_t *), void *filter_ctx);
* filter_node : Returns true if the node passes the filter.
* filter_subtree : Returns true if some node in the subtree rooted at
* node passes the filter.
* filter_ctx : A context argument passed to the filters.
*
* For a more concrete example of summarizing and filtering, suppose we're using
* the red-black tree to track a set of integers:
*
* struct ex_node_s {
* rb_node(ex_node_t) ex_link;
* unsigned data;
* };
*
* Suppose, for some application-specific reason, we want to be able to quickly
* find numbers in the set which are divisible by large powers of 2 (say, for
* aligned allocation purposes). We augment the node with a summary field:
*
* struct ex_node_s {
* rb_node(ex_node_t) ex_link;
* unsigned data;
* unsigned max_subtree_ffs;
* }
*
* and define our summarization function as follows:
*
* bool
* ex_summarize(ex_node_t *node, const ex_node_t *lchild,
* const ex_node_t *rchild) {
* unsigned new_max_subtree_ffs = ffs(node->data);
* if (lchild != NULL && lchild->max_subtree_ffs > new_max_subtree_ffs) {
* new_max_subtree_ffs = lchild->max_subtree_ffs;
* }
* if (rchild != NULL && rchild->max_subtree_ffs > new_max_subtree_ffs) {
* new_max_subtree_ffs = rchild->max_subtree_ffs;
* }
* bool changed = (node->max_subtree_ffs != new_max_subtree_ffs);
* node->max_subtree_ffs = new_max_subtree_ffs;
* // This could be "return true" without any correctness or big-O
* // performance changes; but practically, precisely reporting summary
* // changes reduces the amount of work that has to be done when "bubbling
* // up" summary changes.
* return changed;
* }
*
* We can now implement our filter functions as follows:
* bool
* ex_filter_node(void *filter_ctx, ex_node_t *node) {
* unsigned required_ffs = *(unsigned *)filter_ctx;
* return ffs(node->data) >= required_ffs;
* }
* bool
* ex_filter_subtree(void *filter_ctx, ex_node_t *node) {
* unsigned required_ffs = *(unsigned *)filter_ctx;
* return node->max_subtree_ffs >= required_ffs;
* }
*
* We can now easily search for, e.g., the smallest integer in the set that's
* divisible by 128:
* ex_node_t *
* find_div_128(ex_tree_t *tree) {
* unsigned min_ffs = 7;
* return ex_first_filtered(tree, &ex_filter_node, &ex_filter_subtree,
* &min_ffs);
* }
*
* We could with similar ease:
* - Find the next multiple of 128 in the set that's larger than 12345 (with
* ex_nsearch_filtered)
* - Iterate over just those multiples of 64 that are in the set (with
* ex_iter_filtered)
* - Determine if the set contains any multiples of 1024 (with
* ex_empty_filtered).
*
* Some possibly subtle API notes:
* - The node argument to ex_next_filtered and ex_prev_filtered need not pass
* the filter; it will find the next/prev node that passes the filter.
* - ex_search_filtered will fail even for a node in the tree, if that node does
* not pass the filter. ex_psearch_filtered and ex_nsearch_filtered behave
* similarly; they may return a node larger/smaller than the key, even if a
* node equivalent to the key is in the tree (but does not pass the filter).
* - Similarly, if the start argument to a filtered iteration function does not
* pass the filter, the callback won't be invoked on it.
*
* These should make sense after a moment's reflection; each post-condition is
* the same as with the unfiltered version, with the added constraint that the
* returned node must pass the filter.
*/
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \
rb_empty_summarize, false)
#define rb_summarized_gen(a_attr, a_prefix, a_rbt_type, a_type, \
a_field, a_cmp, a_summarize) \
rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \
a_summarize, true)
#define rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, \
a_field, a_cmp, a_summarize, a_is_summarized) \
typedef struct { \
a_type *node; \
int cmp; \
} a_prefix##path_entry_t; \
static inline void \
a_prefix##summarize_range(a_prefix##path_entry_t *rfirst, \
a_prefix##path_entry_t *rlast) { \
while ((uintptr_t)rlast >= (uintptr_t)rfirst) { \
a_type *node = rlast->node; \
/* Avoid a warning when a_summarize is rb_empty_summarize. */
\
(void)node; \
bool changed = a_summarize(node, rbtn_left_get(a_type, a_field, \
node), rbtn_right_get(a_type, a_field, node)); \
if (!changed) { \
break; \
} \
rlast--; \
} \
} \
/* On the remove pathways, we sometimes swap the node being removed */
\
/* and its first successor; in such cases we need to do two range */
\
/* updates; one from the node to its (former) swapped successor, the */
\
/* next from that successor to the root (with either allowed to */
\
/* bail out early if appropriate. */
\
static inline void \
a_prefix##summarize_swapped_range(a_prefix##path_entry_t *rfirst, \
a_prefix##path_entry_t *rlast, a_prefix##path_entry_t *swap_loc) { \
if (swap_loc == NULL || rlast <= swap_loc) { \
a_prefix##summarize_range(rfirst, rlast); \
} else { \
a_prefix##summarize_range(swap_loc + 1, rlast); \
(void)a_summarize(swap_loc->node, \
rbtn_left_get(a_type, a_field, swap_loc->node), \
rbtn_right_get(a_type, a_field, swap_loc->node)); \
a_prefix##summarize_range(rfirst, swap_loc - 1); \
} \
} \
a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
rb_new(a_type, a_field, rbtree); \
...
...
@@ -465,10 +732,8 @@ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
} \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
struct { \
a_type *node; \
int cmp; \
} path[sizeof(void *) << 4], *pathp; \
a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
a_prefix##path_entry_t *pathp; \
rbt_node_new(a_type, a_field, rbtree, node); \
/* Wind. */
\
path->node = rbtree->rbt_root; \
...
...
@@ -484,6 +749,13 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
} \
} \
pathp->node = node; \
/* A loop invariant we maintain is that all nodes with */
\
/* out-of-date summaries live in path[0], path[1], ..., *pathp. */
\
/* To maintain this, we have to summarize node, since we */
\
/* decrement pathp before the first iteration. */
\
assert(rbtn_left_get(a_type, a_field, node) == NULL); \
assert(rbtn_right_get(a_type, a_field, node) == NULL); \
(void)a_summarize(node, NULL, NULL); \
/* Unwind. */
\
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
a_type *cnode = pathp->node; \
...
...
@@ -498,9 +770,13 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, cnode, tnode); \
(void)a_summarize(cnode, \
rbtn_left_get(a_type, a_field, cnode), \
rbtn_right_get(a_type, a_field, cnode)); \
cnode = tnode; \
} \
} else { \
a_prefix##summarize_range(path, pathp); \
return; \
} \
} else { \
...
...
@@ -521,13 +797,20 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbtn_rotate_left(a_type, a_field, cnode, tnode); \
rbtn_color_set(a_type, a_field, tnode, tred); \
rbtn_red_set(a_type, a_field, cnode); \
(void)a_summarize(cnode, \
rbtn_left_get(a_type, a_field, cnode), \
rbtn_right_get(a_type, a_field, cnode)); \
cnode = tnode; \
} \
} else { \
a_prefix##summarize_range(path, pathp); \
return; \
} \
} \
pathp->node = cnode; \
(void)a_summarize(cnode, \
rbtn_left_get(a_type, a_field, cnode), \
rbtn_right_get(a_type, a_field, cnode)); \
} \
/* Set root, and make it black. */
\
rbtree->rbt_root = path->node; \
...
...
@@ -535,12 +818,18 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
} \
a_attr void \
a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
struct { \
a_type *node; \
int cmp; \
} *pathp, *nodep, path[sizeof(void *) << 4]; \
a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
a_prefix##path_entry_t *pathp; \
a_prefix##path_entry_t *nodep; \
a_prefix##path_entry_t *swap_loc; \
/* This is a "real" sentinel -- NULL means we didn't swap the */
\
/* node to be pruned with one of its successors, and so */
\
/* summarization can terminate early whenever some summary */
\
/* doesn't change. */
\
swap_loc = NULL; \
/* This is just to silence a compiler warning. */
\
nodep = NULL; \
/* Wind. */
\
	nodep = NULL; /* Silence compiler warning. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != NULL; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
...
...
@@ -567,6 +856,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
pathp--; \
if (pathp->node != node) { \
/* Swap node with its successor. */
\
swap_loc = nodep; \
bool tred = rbtn_red_get(a_type, a_field, pathp->node); \
rbtn_color_set(a_type, a_field, pathp->node, \
rbtn_red_get(a_type, a_field, node)); \
...
...
@@ -604,6 +894,9 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_black_set(a_type, a_field, left); \
if (pathp == path) { \
rbtree->rbt_root = left; \
/* Nothing to summarize -- the subtree rooted at the */
\
/* node's left child hasn't changed, and it's now the */
\
/* root. */
\
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
...
...
@@ -612,6 +905,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
left); \
} \
a_prefix##summarize_swapped_range(path, &pathp[-1], \
swap_loc); \
} \
return; \
} else if (pathp == path) { \
...
...
@@ -620,10 +915,15 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
return; \
} \
} \
/* We've now established the invariant that the node has no right */
\
/* child (well, morally; we didn't bother nulling it out if we */
\
/* swapped it with its successor), and that the only nodes with */
\
/* out-of-date summaries live in path[0], path[1], ..., pathp[-1].*/
\
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
/* Prune red node, which requires no fixup. */
\
assert(pathp[-1].cmp < 0); \
rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
a_prefix##summarize_swapped_range(path, &pathp[-1], swap_loc); \
return; \
} \
    /* The node to be pruned is black, so unwind until balance is */ \
...
...
@@ -657,6 +957,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(right, \
rbtn_left_get(a_type, a_field, right), \
rbtn_right_get(a_type, a_field, right)); \
} else { \
            /* || */ \
            /* pathp(r) */ \
...
...
@@ -667,7 +973,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
            /* */ \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
} \
(void)a_summarize(tnode, rbtn_left_get(a_type, a_field, \
tnode), rbtn_right_get(a_type, a_field, tnode)); \
            /* Balance restored, but rotation modified subtree */ \
            /* root. */ \
assert((uintptr_t)pathp > (uintptr_t)path); \
...
...
@@ -678,6 +989,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
a_prefix##summarize_swapped_range(path, &pathp[-1], \
swap_loc); \
return; \
} else { \
a_type *right = rbtn_right_get(a_type, a_field, \
...
...
@@ -698,6 +1011,15 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(right, \
rbtn_left_get(a_type, a_field, right), \
rbtn_right_get(a_type, a_field, right)); \
(void)a_summarize(tnode, \
rbtn_left_get(a_type, a_field, tnode), \
rbtn_right_get(a_type, a_field, tnode)); \
                /* Balance restored, but rotation modified */ \
                /* subtree root, which may actually be the tree */ \
                /* root. */ \
...
...
@@ -712,6 +1034,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
} \
a_prefix##summarize_swapped_range(path, \
&pathp[-1], swap_loc); \
} \
return; \
} else { \
...
...
@@ -725,6 +1049,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_red_set(a_type, a_field, pathp->node); \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(tnode, \
rbtn_left_get(a_type, a_field, tnode), \
rbtn_right_get(a_type, a_field, tnode)); \
pathp->node = tnode; \
} \
} \
...
...
@@ -757,6 +1087,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
tnode); \
rbtn_right_set(a_type, a_field, unode, tnode); \
rbtn_rotate_left(a_type, a_field, unode, tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(unode, \
rbtn_left_get(a_type, a_field, unode), \
rbtn_right_get(a_type, a_field, unode)); \
} else { \
            /* || */ \
            /* pathp(b) */ \
...
...
@@ -771,7 +1107,13 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
rbtn_black_set(a_type, a_field, tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
} \
(void)a_summarize(tnode, \
rbtn_left_get(a_type, a_field, tnode), \
rbtn_right_get(a_type, a_field, tnode)); \
            /* Balance restored, but rotation modified subtree */ \
            /* root, which may actually be the tree root. */ \
if (pathp == path) { \
...
...
@@ -785,6 +1127,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
a_prefix##summarize_swapped_range(path, &pathp[-1], \
swap_loc); \
} \
return; \
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
...
...
@@ -803,6 +1147,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(tnode, \
rbtn_left_get(a_type, a_field, tnode), \
rbtn_right_get(a_type, a_field, tnode)); \
            /* Balance restored, but rotation modified */ \
            /* subtree root. */ \
assert((uintptr_t)pathp > (uintptr_t)path); \
...
...
@@ -813,6 +1163,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
a_prefix##summarize_swapped_range(path, &pathp[-1], \
swap_loc); \
return; \
} else { \
            /* || */ \
...
...
@@ -824,6 +1176,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_red_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, pathp->node); \
            /* Balance restored. */ \
a_prefix##summarize_swapped_range(path, pathp, \
swap_loc); \
return; \
} \
} else { \
...
...
@@ -840,6 +1194,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
(void)a_summarize(tnode, \
rbtn_left_get(a_type, a_field, tnode), \
rbtn_right_get(a_type, a_field, tnode)); \
                /* Balance restored, but rotation modified */ \
                /* subtree root, which may actually be the tree */ \
                /* root. */ \
...
...
@@ -854,6 +1214,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
} \
a_prefix##summarize_swapped_range(path, \
&pathp[-1], swap_loc); \
} \
return; \
} else { \
...
...
@@ -864,6 +1226,9 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
            /* / */ \
            /* (b) */ \
rbtn_red_set(a_type, a_field, left); \
(void)a_summarize(pathp->node, \
rbtn_left_get(a_type, a_field, pathp->node), \
rbtn_right_get(a_type, a_field, pathp->node)); \
} \
} \
} \
...
...
@@ -1001,6 +1366,491 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
void *arg) { \
a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
rbtree->rbt_root = NULL; \
}
} \
/* BEGIN SUMMARIZED-ONLY IMPLEMENTATION */ \
rb_summarized_only_##a_is_summarized( \
static inline a_prefix##path_entry_t * \
a_prefix##wind(a_rbt_type *rbtree, \
a_prefix##path_entry_t path[RB_MAX_DEPTH], a_type *node) { \
a_prefix##path_entry_t *pathp; \
path->node = rbtree->rbt_root; \
for (pathp = path; ; pathp++) { \
assert((size_t)(pathp - path) < RB_MAX_DEPTH); \
pathp->cmp = a_cmp(node, pathp->node); \
if (pathp->cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} else if (pathp->cmp == 0) { \
return pathp; \
} else { \
pathp[1].node = rbtn_right_get(a_type, a_field, \
pathp->node); \
} \
} \
unreachable(); \
} \
a_attr void \
a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node) { \
a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
a_prefix##path_entry_t *pathp = a_prefix##wind(rbtree, path, node); \
a_prefix##summarize_range(path, pathp); \
} \
a_attr bool \
a_prefix##empty_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node = rbtree->rbt_root; \
return node == NULL || !filter_subtree(filter_ctx, node); \
} \
static inline a_type * \
a_prefix##first_filtered_from_node(a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
assert(node != NULL && filter_subtree(filter_ctx, node)); \
while (true) { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
if (left != NULL && filter_subtree(filter_ctx, left)) { \
node = left; \
} else if (filter_node(filter_ctx, node)) { \
return node; \
} else { \
assert(right != NULL \
&& filter_subtree(filter_ctx, right)); \
node = right; \
} \
} \
unreachable(); \
} \
a_attr a_type * \
a_prefix##first_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node = rbtree->rbt_root; \
if (node == NULL || !filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
return a_prefix##first_filtered_from_node(node, filter_node, \
filter_subtree, filter_ctx); \
} \
static inline a_type * \
a_prefix##last_filtered_from_node(a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
assert(node != NULL && filter_subtree(filter_ctx, node)); \
while (true) { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
if (right != NULL && filter_subtree(filter_ctx, right)) { \
node = right; \
} else if (filter_node(filter_ctx, node)) { \
return node; \
} else { \
assert(left != NULL \
&& filter_subtree(filter_ctx, left)); \
node = left; \
} \
} \
unreachable(); \
} \
a_attr a_type * \
a_prefix##last_filtered(a_rbt_type *rbtree, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node = rbtree->rbt_root; \
if (node == NULL || !filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
return a_prefix##last_filtered_from_node(node, filter_node, \
filter_subtree, filter_ctx); \
} \
/* Internal implementation function. Search for a node comparing */ \
/* equal to key matching the filter. If such a node is in the tree, */ \
/* return it. Additionally, the caller has the option to ask for */ \
/* bounds on the next / prev node in the tree passing the filter. */ \
/* If nextbound is true, then this function will do one of the */ \
/* following: */ \
/* - Fill in *nextbound_node with the smallest node in the tree */ \
/*   greater than key passing the filter, and NULL-out */ \
/*   *nextbound_subtree. */ \
/* - Fill in *nextbound_subtree with a parent of that node which is */ \
/*   not a parent of the searched-for node, and NULL-out */ \
/*   *nextbound_node. */ \
/* - NULL-out both *nextbound_node and *nextbound_subtree, in which */ \
/*   case no node greater than key but passing the filter is in the */ \
/*   tree. */ \
/* The prevbound case is similar. If the caller knows that key is in */ \
/* the tree and that the subtree rooted at key does not contain a */ \
/* node satisfying the bound being searched for, then they can pass */ \
/* false for include_subtree, in which case we won't bother searching */ \
/* there (risking a cache miss). */ \
/* */ \
/* This API is unfortunately complex; but the logic for filtered */ \
/* searches is very subtle, and otherwise we would have to repeat it */ \
/* multiple times for filtered search, nsearch, psearch, next, and */ \
/* prev. */ \
static inline a_type * \
a_prefix##search_with_filter_bounds(a_rbt_type *rbtree, \
const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx, \
bool include_subtree, \
bool nextbound, a_type **nextbound_node, a_type **nextbound_subtree, \
bool prevbound, a_type **prevbound_node, a_type **prevbound_subtree) {\
if (nextbound) { \
*nextbound_node = NULL; \
*nextbound_subtree = NULL; \
} \
if (prevbound) { \
*prevbound_node = NULL; \
*prevbound_subtree = NULL; \
} \
a_type *tnode = rbtree->rbt_root; \
while (tnode != NULL && filter_subtree(filter_ctx, tnode)) { \
int cmp = a_cmp(key, tnode); \
a_type *tleft = rbtn_left_get(a_type, a_field, tnode); \
a_type *tright = rbtn_right_get(a_type, a_field, tnode); \
if (cmp < 0) { \
if (nextbound) { \
if (filter_node(filter_ctx, tnode)) { \
*nextbound_node = tnode; \
*nextbound_subtree = NULL; \
} else if (tright != NULL && filter_subtree( \
filter_ctx, tright)) { \
*nextbound_node = NULL; \
*nextbound_subtree = tright; \
} \
} \
tnode = tleft; \
} else if (cmp > 0) { \
if (prevbound) { \
if (filter_node(filter_ctx, tnode)) { \
*prevbound_node = tnode; \
*prevbound_subtree = NULL; \
} else if (tleft != NULL && filter_subtree( \
filter_ctx, tleft)) { \
*prevbound_node = NULL; \
*prevbound_subtree = tleft; \
} \
} \
tnode = tright; \
} else { \
if (filter_node(filter_ctx, tnode)) { \
return tnode; \
} \
if (include_subtree) { \
if (prevbound && tleft != NULL && filter_subtree( \
filter_ctx, tleft)) { \
*prevbound_node = NULL; \
*prevbound_subtree = tleft; \
} \
if (nextbound && tright != NULL && filter_subtree( \
filter_ctx, tright)) { \
*nextbound_node = NULL; \
*nextbound_subtree = tright; \
} \
} \
return NULL; \
} \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *nright = rbtn_right_get(a_type, a_field, node); \
if (nright != NULL && filter_subtree(filter_ctx, nright)) { \
return a_prefix##first_filtered_from_node(nright, filter_node, \
filter_subtree, filter_ctx); \
} \
a_type *node_candidate; \
a_type *subtree_candidate; \
a_type *search_result = a_prefix##search_with_filter_bounds( \
rbtree, node, filter_node, filter_subtree, filter_ctx, \
        /* include_subtree */ false, \
        /* nextbound */ true, &node_candidate, &subtree_candidate, \
        /* prevbound */ false, NULL, NULL); \
assert(node == search_result \
|| !filter_node(filter_ctx, node)); \
if (node_candidate != NULL) { \
return node_candidate; \
} \
if (subtree_candidate != NULL) { \
return a_prefix##first_filtered_from_node( \
subtree_candidate, filter_node, filter_subtree, \
filter_ctx); \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *nleft = rbtn_left_get(a_type, a_field, node); \
if (nleft != NULL && filter_subtree(filter_ctx, nleft)) { \
return a_prefix##last_filtered_from_node(nleft, filter_node, \
filter_subtree, filter_ctx); \
} \
a_type *node_candidate; \
a_type *subtree_candidate; \
a_type *search_result = a_prefix##search_with_filter_bounds( \
rbtree, node, filter_node, filter_subtree, filter_ctx, \
        /* include_subtree */ false, \
        /* nextbound */ false, NULL, NULL, \
        /* prevbound */ true, &node_candidate, &subtree_candidate); \
assert(node == search_result \
|| !filter_node(filter_ctx, node)); \
if (node_candidate != NULL) { \
return node_candidate; \
} \
if (subtree_candidate != NULL) { \
return a_prefix##last_filtered_from_node( \
subtree_candidate, filter_node, filter_subtree, \
filter_ctx); \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
filter_node, filter_subtree, filter_ctx, \
        /* include_subtree */ false, \
        /* nextbound */ false, NULL, NULL, \
        /* prevbound */ false, NULL, NULL); \
return result; \
} \
a_attr a_type * \
a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node_candidate; \
a_type *subtree_candidate; \
a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
filter_node, filter_subtree, filter_ctx, \
        /* include_subtree */ true, \
        /* nextbound */ true, &node_candidate, &subtree_candidate, \
        /* prevbound */ false, NULL, NULL); \
if (result != NULL) { \
return result; \
} \
if (node_candidate != NULL) { \
return node_candidate; \
} \
if (subtree_candidate != NULL) { \
return a_prefix##first_filtered_from_node( \
subtree_candidate, filter_node, filter_subtree, \
filter_ctx); \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *node_candidate; \
a_type *subtree_candidate; \
a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
filter_node, filter_subtree, filter_ctx, \
        /* include_subtree */ true, \
        /* nextbound */ false, NULL, NULL, \
        /* prevbound */ true, &node_candidate, &subtree_candidate); \
if (result != NULL) { \
return result; \
} \
if (node_candidate != NULL) { \
return node_candidate; \
} \
if (subtree_candidate != NULL) { \
return a_prefix##last_filtered_from_node( \
subtree_candidate, filter_node, filter_subtree, \
filter_ctx); \
} \
return NULL; \
} \
a_attr a_type * \
a_prefix##iter_recurse_filtered(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
if (node == NULL || !filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
a_type *ret; \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
ret = a_prefix##iter_recurse_filtered(rbtree, left, cb, arg, \
filter_node, filter_subtree, filter_ctx); \
if (ret != NULL) { \
return ret; \
} \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
} \
if (ret != NULL) { \
return ret; \
} \
return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
filter_node, filter_subtree, filter_ctx); \
} \
a_attr a_type * \
a_prefix##iter_start_filtered(a_rbt_type *rbtree, a_type *start, \
a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
void *arg, bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
if (!filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
int cmp = a_cmp(start, node); \
a_type *ret; \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
if (cmp < 0) { \
ret = a_prefix##iter_start_filtered(rbtree, start, left, cb, \
arg, filter_node, filter_subtree, filter_ctx); \
if (ret != NULL) { \
return ret; \
} \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
if (ret != NULL) { \
return ret; \
} \
} \
return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
filter_node, filter_subtree, filter_ctx); \
} else if (cmp > 0) { \
return a_prefix##iter_start_filtered(rbtree, start, right, \
cb, arg, filter_node, filter_subtree, filter_ctx); \
} else { \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
if (ret != NULL) { \
return ret; \
} \
} \
return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
filter_node, filter_subtree, filter_ctx); \
} \
} \
a_attr a_type * \
a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *ret; \
if (start != NULL) { \
ret = a_prefix##iter_start_filtered(rbtree, start, \
rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
filter_ctx); \
} else { \
ret = a_prefix##iter_recurse_filtered(rbtree, rbtree->rbt_root, \
cb, arg, filter_node, filter_subtree, filter_ctx); \
} \
return ret; \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse_filtered(a_rbt_type *rbtree, \
a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
if (node == NULL || !filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
a_type *ret; \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
ret = a_prefix##reverse_iter_recurse_filtered(rbtree, right, cb, \
arg, filter_node, filter_subtree, filter_ctx); \
if (ret != NULL) { \
return ret; \
} \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
} \
if (ret != NULL) { \
return ret; \
} \
return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb, \
arg, filter_node, filter_subtree, filter_ctx); \
} \
a_attr a_type * \
a_prefix##reverse_iter_start_filtered(a_rbt_type *rbtree, a_type *start,\
a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
void *arg, bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
if (!filter_subtree(filter_ctx, node)) { \
return NULL; \
} \
int cmp = a_cmp(start, node); \
a_type *ret; \
a_type *left = rbtn_left_get(a_type, a_field, node); \
a_type *right = rbtn_right_get(a_type, a_field, node); \
if (cmp > 0) { \
ret = a_prefix##reverse_iter_start_filtered(rbtree, start, \
right, cb, arg, filter_node, filter_subtree, filter_ctx); \
if (ret != NULL) { \
return ret; \
} \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
if (ret != NULL) { \
return ret; \
} \
} \
return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\
arg, filter_node, filter_subtree, filter_ctx); \
} else if (cmp < 0) { \
return a_prefix##reverse_iter_start_filtered(rbtree, start, \
left, cb, arg, filter_node, filter_subtree, filter_ctx); \
} else { \
if (filter_node(filter_ctx, node)) { \
ret = cb(rbtree, node, arg); \
if (ret != NULL) { \
return ret; \
} \
} \
return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\
arg, filter_node, filter_subtree, filter_ctx); \
} \
} \
a_attr a_type * \
a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
bool (*filter_node)(void *, a_type *), \
bool (*filter_subtree)(void *, a_type *), \
void *filter_ctx) { \
a_type *ret; \
if (start != NULL) { \
ret = a_prefix##reverse_iter_start_filtered(rbtree, start, \
rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
filter_ctx); \
} else { \
ret = a_prefix##reverse_iter_recurse_filtered(rbtree, \
rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
filter_ctx); \
} \
return ret; \
} \
) /* end rb_summarized_only */
#endif /* RB_H_ */
#endif /* JEMALLOC_INTERNAL_RB_H */
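The filtered walkers generated above (the *_first_filtered, *_next_filtered, *_iter_filtered family) all rely on one contract between the two callbacks: filter_subtree() must return true for any subtree that could contain a node passing filter_node(), so that whole subtrees can be pruned safely. The following is a minimal standalone sketch of that contract, not jemalloc code: it uses a hand-rolled binary tree with a per-subtree maximum standing in for the summary, and all names here are illustrative assumptions.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct demo_node_s demo_node_t;
struct demo_node_s {
    int value;
    int subtree_max;            /* summary over this node and its children */
    demo_node_t *left, *right;
};

/* Matches the filter_node shape: bool (*)(void *ctx, node *). */
static bool
demo_filter_node(void *ctx, demo_node_t *node) {
    int threshold = *(int *)ctx;
    return node->value >= threshold;
}

/* Matches the filter_subtree shape: prune subtrees whose summary is too small. */
static bool
demo_filter_subtree(void *ctx, demo_node_t *node) {
    int threshold = *(int *)ctx;
    return node->subtree_max >= threshold;
}

/* In-order walk that prunes subtrees the same way iter_recurse_filtered does. */
static void
demo_walk(demo_node_t *node, void *ctx) {
    if (node == NULL || !demo_filter_subtree(ctx, node)) {
        return;
    }
    demo_walk(node->left, ctx);
    if (demo_filter_node(ctx, node)) {
        printf("visit %d\n", node->value);
    }
    demo_walk(node->right, ctx);
}

int
main(void) {
    demo_node_t l = {1, 1, NULL, NULL};
    demo_node_t r = {9, 9, NULL, NULL};
    demo_node_t root = {5, 9, &l, &r};
    int threshold = 5;
    demo_walk(&root, &threshold);   /* visits 5 and 9, prunes the left leaf */
    return 0;
}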
deps/jemalloc/include/jemalloc/internal/rtree.h
...
...
@@ -35,33 +35,52 @@
# define RTREE_LEAF_COMPACT
#endif
/* Needed for initialization only. */
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
typedef struct rtree_node_elm_s rtree_node_elm_t;
struct rtree_node_elm_s {
    atomic_p_t      child;  /* (rtree_{node,leaf}_elm_t *) */
};

typedef struct rtree_metadata_s rtree_metadata_t;
struct rtree_metadata_s {
    szind_t szind;
    extent_state_t state; /* Mirrors edata->state. */
    bool is_head; /* Mirrors edata->is_head. */
    bool slab;
};

typedef struct rtree_contents_s rtree_contents_t;
struct rtree_contents_s {
    edata_t *edata;
    rtree_metadata_t metadata;
};

#define RTREE_LEAF_STATE_WIDTH EDATA_BITS_STATE_WIDTH
#define RTREE_LEAF_STATE_SHIFT 2
#define RTREE_LEAF_STATE_MASK MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT)

struct rtree_leaf_elm_s {
#ifdef RTREE_LEAF_COMPACT
    /*
     * Single pointer-width field containing all three leaf element fields.
     * For example, on a 64-bit x64 system with 48 significant virtual
     * memory address bits, the index, extent, and slab fields are packed as
     * memory address bits, the index, edata, and slab fields are packed as
     * such:
     *
     * x: index
     * e: extent
     * e: edata
     * s: state
     * h: is_head
     * b: slab
     *
     *   00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
     *   00000000 xxxxxxxx eeeeeeee [...] eeeeeeee e00ssshb
     */
    atomic_p_t      le_bits;
#else
    atomic_p_t      le_extent;      /* (extent_t *) */
    atomic_u_t      le_szind;       /* (szind_t) */
    atomic_b_t      le_slab;        /* (bool) */
    atomic_p_t      le_edata;       /* (edata_t *) */
    /*
     * From high to low bits: szind (8 bits), state (4 bits), is_head, slab
     */
    atomic_u_t      le_metadata;
#endif
};
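The compact layout described in the struct comment above can be illustrated with a small standalone sketch (not jemalloc code). The widths below are assumptions for illustration only: 48 significant address bits, an 8-bit size-class index in the top bits, and state/is_head/slab packed into the low bits freed up by a 32-byte-aligned edata pointer.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DEMO_LG_VADDR 48
#define DEMO_STATE_SHIFT 2
#define DEMO_STATE_WIDTH 3
#define DEMO_EDATA_ALIGNMENT 32   /* assumed; enough low zero bits for metadata */

static uintptr_t
demo_encode(uintptr_t edata_addr, unsigned szind, unsigned state, bool is_head,
    bool slab) {
    /* The pointer alignment guarantees the low metadata bits start as zero. */
    assert((edata_addr & (DEMO_EDATA_ALIGNMENT - 1)) == 0);
    uintptr_t bits = edata_addr & (((uintptr_t)1 << DEMO_LG_VADDR) - 1);
    bits |= (uintptr_t)szind << DEMO_LG_VADDR;
    bits |= (uintptr_t)state << DEMO_STATE_SHIFT;
    bits |= (uintptr_t)is_head << 1;
    bits |= (uintptr_t)slab;
    return bits;
}

static void
demo_decode(uintptr_t bits, uintptr_t *edata_addr, unsigned *szind,
    unsigned *state, bool *is_head, bool *slab) {
    *szind = (unsigned)(bits >> DEMO_LG_VADDR);
    *state = (unsigned)((bits >> DEMO_STATE_SHIFT) &
        (((uintptr_t)1 << DEMO_STATE_WIDTH) - 1));
    *is_head = (bool)(bits & ((uintptr_t)1 << 1));
    *slab = (bool)(bits & (uintptr_t)1);
    /* Mask off szind (high bits) and the low metadata bits. */
    *edata_addr = bits & ((((uintptr_t)1 << DEMO_LG_VADDR) - 1) &
        ~(uintptr_t)(DEMO_EDATA_ALIGNMENT - 1));
}

int
main(void) {
    uintptr_t addr;
    unsigned szind, state;
    bool is_head, slab;
    uintptr_t packed = demo_encode((uintptr_t)0x7f12345660, 42, 3, true, false);
    demo_decode(packed, &addr, &szind, &state, &is_head, &slab);
    assert(addr == (uintptr_t)0x7f12345660 && szind == 42 && state == 3 &&
        is_head && !slab);
    return 0;
}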
...
...
@@ -78,6 +97,7 @@ struct rtree_level_s {
typedef struct rtree_s rtree_t;
struct rtree_s {
    base_t          *base;
    malloc_mutex_t      init_lock;
    /* Number of elements based on rtree_levels[0].bits. */
#if RTREE_HEIGHT > 1
...
...
@@ -109,42 +129,29 @@ static const rtree_level_t rtree_levels[] = {
#endif
};
bool rtree_new(rtree_t *rtree, bool zeroed);

typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
bool rtree_new(rtree_t *rtree, base_t *base, bool zeroed);

typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;

typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;

typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
#ifdef JEMALLOC_JET
void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
#endif
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leafkey(uintptr_t key) {
JEMALLOC_ALWAYS_INLINE unsigned
rtree_leaf_maskbits(void) {
    unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR + 3);
    unsigned cumbits = (rtree_levels[RTREE_HEIGHT - 1].cumbits -
        rtree_levels[RTREE_HEIGHT - 1].bits);
    unsigned maskbits = ptrbits - cumbits;
    uintptr_t mask = ~((ZU(1) << maskbits) - 1);
    return ptrbits - cumbits;
}

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leafkey(uintptr_t key) {
    uintptr_t mask = ~((ZU(1) << rtree_leaf_maskbits()) - 1);
    return (key & mask);
}

JEMALLOC_ALWAYS_INLINE size_t
rtree_cache_direct_map(uintptr_t key) {
    unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR + 3);
    unsigned cumbits = (rtree_levels[RTREE_HEIGHT - 1].cumbits -
        rtree_levels[RTREE_HEIGHT - 1].bits);
    unsigned maskbits = ptrbits - cumbits;
    return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
    return (size_t)((key >> rtree_leaf_maskbits()) & (RTREE_CTX_NCACHE - 1));
}

JEMALLOC_ALWAYS_INLINE uintptr_t
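As a concrete standalone illustration (not jemalloc code) of what rtree_leafkey() and rtree_cache_direct_map() above compute: with an assumed 30 mask bits (each leaf covering 1 GiB of address space, as a later comment in this file suggests) and a 16-entry direct-mapped L1 cache, two pointers in the same 1 GiB region share a leafkey and a cache slot, while a pointer 1 GiB away does not. The constants here are assumptions for illustration.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_MASKBITS 30        /* assumed rtree_leaf_maskbits() result */
#define DEMO_NCACHE 16          /* mirrors RTREE_CTX_NCACHE */

static uintptr_t
demo_leafkey(uintptr_t key) {
    uintptr_t mask = ~(((uintptr_t)1 << DEMO_MASKBITS) - 1);
    return key & mask;
}

static size_t
demo_cache_slot(uintptr_t key) {
    return (size_t)((key >> DEMO_MASKBITS) & (DEMO_NCACHE - 1));
}

int
main(void) {
    uintptr_t a = (uintptr_t)0x7f0012345000;
    uintptr_t b = a + 4096;                  /* same 1 GiB leaf */
    uintptr_t c = a + ((uintptr_t)1 << 30);  /* next leaf over */
    assert(demo_leafkey(a) == demo_leafkey(b));
    assert(demo_cache_slot(a) == demo_cache_slot(b));
    assert(demo_leafkey(a) != demo_leafkey(c));
    return 0;
}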
...
...
@@ -176,151 +183,174 @@ rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
        ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_encode(rtree_contents_t contents) {
    assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
    uintptr_t edata_bits = (uintptr_t)contents.edata
        & (((uintptr_t)1 << LG_VADDR) - 1);
    uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR;
    uintptr_t slab_bits = (uintptr_t)contents.metadata.slab;
    uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1;
    uintptr_t state_bits = (uintptr_t)contents.metadata.state <<
        RTREE_LEAF_STATE_SHIFT;
    uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits |
        slab_bits;
    assert((edata_bits & metadata_bits) == 0);
    return edata_bits | metadata_bits;
}

JEMALLOC_ALWAYS_INLINE rtree_contents_t
rtree_leaf_elm_bits_decode(uintptr_t bits) {
    rtree_contents_t contents;
    /* Do the easy things first. */
    contents.metadata.szind = bits >> LG_VADDR;
    contents.metadata.slab = (bool)(bits & 1);
    contents.metadata.is_head = (bool)(bits & (1 << 1));
    uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) >>
        RTREE_LEAF_STATE_SHIFT;
    assert(state_bits <= extent_state_max);
    contents.metadata.state = (extent_state_t)state_bits;
    uintptr_t low_bit_mask = ~((uintptr_t)EDATA_ALIGNMENT - 1);
#    ifdef __aarch64__
    /*
     * aarch64 doesn't sign extend the highest virtual address bit to set
     * the higher ones. Instead, the high bits gets zeroed.
     * the higher ones. Instead, the high bits get zeroed.
     */
    uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
    /* Mask off the slab bit. */
    uintptr_t low_bit_mask = ~(uintptr_t)1;
    /* Mask off metadata. */
    uintptr_t mask = high_bit_mask & low_bit_mask;
    return (extent_t *)(bits & mask);
    contents.edata = (edata_t *)(bits & mask);
#    else
    /* Restore sign-extended high bits, mask slab bit. */
    return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
        RTREE_NHIB) & ~((uintptr_t)0x1));
    /* Restore sign-extended high bits, mask metadata bits. */
    contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB)
        >> RTREE_NHIB) & low_bit_mask);
#    endif
    assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
    return contents;
}

JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
    return (szind_t)(bits >> LG_VADDR);
}

JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
    return (bool)(bits & (uintptr_t)0x1);
}

#  endif /* RTREE_LEAF_COMPACT */
#  endif

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, bool dependent) {
JEMALLOC_ALWAYS_INLINE rtree_contents_t
rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
#ifdef RTREE_LEAF_COMPACT
    uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
    return rtree_leaf_elm_bits_extent_get(bits);
    rtree_contents_t contents = rtree_leaf_elm_bits_decode(bits);
    return contents;
#else
    extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
    rtree_contents_t contents;
    unsigned metadata_bits = atomic_load_u(&elm->le_metadata, dependent
        ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
    return extent;
#endif
}
    contents.metadata.slab = (bool)(metadata_bits & 1);
    contents.metadata.is_head = (bool)(metadata_bits & (1 << 1));

JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
#ifdef RTREE_LEAF_COMPACT
    uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
    return rtree_leaf_elm_bits_szind_get(bits);
#else
    return (szind_t)atomic_load_u(&elm->le_szind, dependent
        ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
    uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) >>
        RTREE_LEAF_STATE_SHIFT;
    assert(state_bits <= extent_state_max);
    contents.metadata.state = (extent_state_t)state_bits;
    contents.metadata.szind = metadata_bits >> (RTREE_LEAF_STATE_SHIFT +
        RTREE_LEAF_STATE_WIDTH);
    contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent
        ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
    return contents;
#endif
}

JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
JEMALLOC_ALWAYS_INLINE void
rtree_contents_encode(rtree_contents_t contents, void **bits,
    unsigned *additional) {
#ifdef RTREE_LEAF_COMPACT
    uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
    return rtree_leaf_elm_bits_slab_get(bits);
    *bits = (void *)rtree_leaf_elm_bits_encode(contents);
#else
    return atomic_load_b(&elm->le_slab, dependent
        ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
    *additional = (unsigned)contents.metadata.slab
        | ((unsigned)contents.metadata.is_head << 1)
        | ((unsigned)contents.metadata.state << RTREE_LEAF_STATE_SHIFT)
        | ((unsigned)contents.metadata.szind << (RTREE_LEAF_STATE_SHIFT +
        RTREE_LEAF_STATE_WIDTH));
    *bits = contents.edata;
#endif
}

static inline void
rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, extent_t *extent) {
JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, void *bits, unsigned additional) {
#ifdef RTREE_LEAF_COMPACT
    uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
    uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
        LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
        | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
    atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
    atomic_store_p(&elm->le_bits, bits, ATOMIC_RELEASE);
#else
    atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
    atomic_store_u(&elm->le_metadata, additional, ATOMIC_RELEASE);
    /*
     * Write edata last, since the element is atomically considered valid
     * as soon as the edata field is non-NULL.
     */
    atomic_store_p(&elm->le_edata, bits, ATOMIC_RELEASE);
#endif
}

static inline void
rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, szind_t szind) {
    assert(szind <= SC_NSIZES);
JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, rtree_contents_t contents) {
    assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0);
    void *bits;
    unsigned additional;
#ifdef RTREE_LEAF_COMPACT
    uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
    uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
        ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
        (((uintptr_t)0x1 << LG_VADDR) - 1)) |
        ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
    atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
    atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
#endif
    rtree_contents_encode(contents, &bits, &additional);
    rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
}

static inline void
rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, bool slab) {
/* The state field can be updated independently (and more frequently). */
JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm1, rtree_leaf_elm_t *elm2, extent_state_t state) {
    assert(elm1 != NULL);
#ifdef RTREE_LEAF_COMPACT
    uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
    uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
        LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
        (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
    atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
    uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm1,
        /* dependent */ true);
    bits &= ~RTREE_LEAF_STATE_MASK;
    bits |= state << RTREE_LEAF_STATE_SHIFT;
    atomic_store_p(&elm1->le_bits, (void *)bits, ATOMIC_RELEASE);
    if (elm2 != NULL) {
        atomic_store_p(&elm2->le_bits, (void *)bits, ATOMIC_RELEASE);
    }
#else
    atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
    unsigned bits = atomic_load_u(&elm1->le_metadata, ATOMIC_RELAXED);
    bits &= ~RTREE_LEAF_STATE_MASK;
    bits |= state << RTREE_LEAF_STATE_SHIFT;
    atomic_store_u(&elm1->le_metadata, bits, ATOMIC_RELEASE);
    if (elm2 != NULL) {
        atomic_store_u(&elm2->le_metadata, bits, ATOMIC_RELEASE);
    }
#endif
}

static inline void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    extent_t *extent, szind_t szind, bool slab) {
#ifdef RTREE_LEAF_COMPACT
    uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
        ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
        ((uintptr_t)slab);
    atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
    rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
    rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
    /*
     * Write extent last, since the element is atomically considered valid
     * as soon as the extent field is non-NULL.
     */
    rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
#endif
}

/*
 * Tries to look up the key in the L1 cache, returning false if there's a hit, or
 * true if there's a miss.
 * Key is allowed to be NULL; returns true in this case.
 */
JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree,
    rtree_ctx_t *rtree_ctx, uintptr_t key, rtree_leaf_elm_t **elm) {
    size_t slot = rtree_cache_direct_map(key);
    uintptr_t leafkey = rtree_leafkey(key);
    assert(leafkey != RTREE_LEAFKEY_INVALID);

static inline void
rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
    assert(!slab || szind < SC_NBINS);
    if (unlikely(rtree_ctx->cache[slot].leafkey != leafkey)) {
        return true;
    }

    /*
     * The caller implicitly assures that it is the only writer to the szind
     * and slab fields, and that the extent field cannot currently change.
     */
    rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
    rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
    rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
    assert(leaf != NULL);
    uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT - 1);
    *elm = &leaf[subkey];

    return false;
}

JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
...
...
@@ -382,147 +412,143 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
        dependent, init_missing);
}

/*
 * Returns true on lookup failure.
 */
static inline bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    extent_t *extent, szind_t szind, bool slab) {
    /* Use rtree_clear() to set the extent to NULL. */
    assert(extent != NULL);
rtree_read_independent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, rtree_contents_t *r_contents) {
    rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
        key, false, true);
        key, /* dependent */ false, /* init_missing */ false);
    if (elm == NULL) {
        return true;
    }

    assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
    rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);
    *r_contents = rtree_leaf_elm_read(tsdn, rtree, elm,
        /* dependent */ false);

    return false;
}

JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent) {
static inline rtree_contents_t
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
    rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
        key, dependent, false);
    if (!dependent && elm == NULL) {
        return NULL;
    }
        key, /* dependent */ true, /* init_missing */ false);
    assert(elm != NULL);
    return elm;
    return rtree_leaf_elm_read(tsdn, rtree, elm, /* dependent */ true);
}

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, dependent);
    if (!dependent && elm == NULL) {
        return NULL;
    }
    return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
}

JEMALLOC_ALWAYS_INLINE szind_t
rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, dependent);
    if (!dependent && elm == NULL) {
        return SC_NSIZES;
    }
    return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
static inline rtree_metadata_t
rtree_metadata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
    rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
        key, /* dependent */ true, /* init_missing */ false);
    assert(elm != NULL);
    return rtree_leaf_elm_read(tsdn, rtree, elm,
        /* dependent */ true).metadata;
}

/*
 * rtree_slab_read() is intentionally omitted because slab is always read in
 * conjunction with szind, which makes rtree_szind_slab_read() a better choice.
 * Returns true when the request cannot be fulfilled by fastpath.
 */
JEMALLOC_ALWAYS_INLINE bool
rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, dependent);
    if (!dependent && elm == NULL) {
static inline bool
rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree,
    rtree_ctx_t *rtree_ctx, uintptr_t key, rtree_metadata_t *r_rtree_metadata) {
    rtree_leaf_elm_t *elm;
    /*
     * Should check the bool return value (lookup success or not) instead of
     * elm == NULL (which will result in an extra branch). This is because
     * when the cache lookup succeeds, there will never be a NULL pointer
     * returned (which is unknown to the compiler).
     */
    if (rtree_leaf_elm_lookup_fast(tsdn, rtree, rtree_ctx, key, &elm)) {
        return true;
    }
    *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
    *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
    assert(elm != NULL);
    *r_rtree_metadata = rtree_leaf_elm_read(tsdn, rtree, elm,
        /* dependent */ true).metadata;

    return false;
}

/*
 * Try to read szind_slab from the L1 cache. Returns true on a hit,
 * and fills in r_szind and r_slab. Otherwise returns false.
 *
 * Key is allowed to be NULL in order to save an extra branch on the
 * fastpath. returns false in this case.
 */
JEMALLOC_ALWAYS_INLINE bool
rtree_szind_slab_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, szind_t *r_szind, bool *r_slab) {
    rtree_leaf_elm_t *elm;

    size_t slot = rtree_cache_direct_map(key);
    uintptr_t leafkey = rtree_leafkey(key);
    assert(leafkey != RTREE_LEAFKEY_INVALID);

    if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
        rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
        assert(leaf != NULL);
        uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT - 1);
        elm = &leaf[subkey];
#ifdef RTREE_LEAF_COMPACT
        uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
        *r_szind = rtree_leaf_elm_bits_szind_get(bits);
        *r_slab = rtree_leaf_elm_bits_slab_get(bits);
#else
        *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, true);
        *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, true);
#endif
        return true;
    } else {
        return false;
JEMALLOC_ALWAYS_INLINE void
rtree_write_range_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t base, uintptr_t end, rtree_contents_t contents, bool clearing) {
    assert((base & PAGE_MASK) == 0 && (end & PAGE_MASK) == 0);
    /*
     * Only used for emap_(de)register_interior, which implies the
     * boundaries have been registered already. Therefore all the lookups
     * are dependent w/o init_missing, assuming the range spans across at
     * most 2 rtree leaf nodes (each covers 1 GiB of vaddr).
     */
    void *bits;
    unsigned additional;
    rtree_contents_encode(contents, &bits, &additional);

    rtree_leaf_elm_t *elm = NULL; /* Dead store. */
    for (uintptr_t addr = base; addr <= end; addr += PAGE) {
        if (addr == base ||
            (addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) {
            elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
                /* dependent */ true, /* init_missing */ false);
            assert(elm != NULL);
        }
        assert(elm == rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
            /* dependent */ true, /* init_missing */ false));
        assert(!clearing || rtree_leaf_elm_read(tsdn, rtree, elm,
            /* dependent */ true).edata != NULL);
        rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
        elm++;
    }
}

JEMALLOC_ALWAYS_INLINE void
rtree_write_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t base, uintptr_t end, rtree_contents_t contents) {
    rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
        /* clearing */ false);
}

JEMALLOC_ALWAYS_INLINE bool
rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, dependent);
    if (!dependent && elm == NULL) {
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    rtree_contents_t contents) {
    rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
        key, /* dependent */ false, /* init_missing */ true);
    if (elm == NULL) {
        return true;
    }
#ifdef RTREE_LEAF_COMPACT
    uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
    *r_szind = rtree_leaf_elm_bits_szind_get(bits);
    *r_slab = rtree_leaf_elm_bits_slab_get(bits);
#else
    *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
    *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
#endif
    return false;
}

static inline void
rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, szind_t szind, bool slab) {
    assert(!slab || szind < SC_NBINS);
    rtree_leaf_elm_write(tsdn, rtree, elm, contents);

    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
    rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
    return false;
}

static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
    assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) != NULL);
    rtree_leaf_elm_write(tsdn, rtree, elm, NULL, SC_NSIZES, false);
    rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
        key, /* dependent */ true, /* init_missing */ false);
    assert(elm != NULL);
    assert(rtree_leaf_elm_read(tsdn, rtree, elm,
        /* dependent */ true).edata != NULL);
    rtree_contents_t contents;
    contents.edata = NULL;
    contents.metadata.szind = SC_NSIZES;
    contents.metadata.slab = false;
    contents.metadata.is_head = false;
    contents.metadata.state = (extent_state_t)0;
    rtree_leaf_elm_write(tsdn, rtree, elm, contents);
}

static inline void
rtree_clear_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t base, uintptr_t end) {
    rtree_contents_t contents;
    contents.edata = NULL;
    contents.metadata.szind = SC_NSIZES;
    contents.metadata.slab = false;
    contents.metadata.is_head = false;
    contents.metadata.state = (extent_state_t)0;
    rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
        /* clearing */ true);
}

#endif /* JEMALLOC_INTERNAL_RTREE_H */
deps/jemalloc/include/jemalloc/internal/rtree_tsd.h
...
...
@@ -18,16 +18,28 @@
* cache misses if made overly large, plus the cost of linear search in the LRU
* cache.
*/
#define RTREE_CTX_LG_NCACHE 4
#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
#define RTREE_CTX_NCACHE 16
#define RTREE_CTX_NCACHE_L2 8
/* Needed for initialization only. */
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
#define RTREE_CTX_CACHE_ELM_INVALID {RTREE_LEAFKEY_INVALID, NULL}
#define RTREE_CTX_INIT_ELM_1 RTREE_CTX_CACHE_ELM_INVALID
#define RTREE_CTX_INIT_ELM_2 RTREE_CTX_INIT_ELM_1, RTREE_CTX_INIT_ELM_1
#define RTREE_CTX_INIT_ELM_4 RTREE_CTX_INIT_ELM_2, RTREE_CTX_INIT_ELM_2
#define RTREE_CTX_INIT_ELM_8 RTREE_CTX_INIT_ELM_4, RTREE_CTX_INIT_ELM_4
#define RTREE_CTX_INIT_ELM_16 RTREE_CTX_INIT_ELM_8, RTREE_CTX_INIT_ELM_8
#define _RTREE_CTX_INIT_ELM_DATA(n) RTREE_CTX_INIT_ELM_##n
#define RTREE_CTX_INIT_ELM_DATA(n) _RTREE_CTX_INIT_ELM_DATA(n)
/*
 * Zero initializer required for tsd initialization only. Proper initialization
 * done via rtree_ctx_data_init().
 * Static initializer (to invalidate the cache entries) is required because the
 * free fastpath may access the rtree cache before a full tsd initialization.
 */
#define RTREE_CTX_ZERO_INITIALIZER {{{0, 0}}, {{0, 0}}}
#define RTREE_CTX_INITIALIZER {{RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE)}, \
    {RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE_L2)}}

typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;
...
...
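The RTREE_CTX_INIT_ELM_{1,2,4,8,16} macros above use a doubling initializer-list pattern: each step pastes the previous one twice, so RTREE_CTX_INIT_ELM_DATA(16) expands to sixteen copies of the invalid cache entry. A minimal standalone sketch of the same pattern (not jemalloc code; all names here are illustrative) follows.

#include <assert.h>
#include <stddef.h>

typedef struct { int key; void *leaf; } demo_elm_t;

#define DEMO_ELM_INVALID {-1, NULL}
#define DEMO_INIT_1 DEMO_ELM_INVALID
#define DEMO_INIT_2 DEMO_INIT_1, DEMO_INIT_1
#define DEMO_INIT_4 DEMO_INIT_2, DEMO_INIT_2
#define DEMO_INIT_8 DEMO_INIT_4, DEMO_INIT_4
#define DEMO_INIT_16 DEMO_INIT_8, DEMO_INIT_8
#define DEMO_INIT_DATA_(n) DEMO_INIT_##n
#define DEMO_INIT_DATA(n) DEMO_INIT_DATA_(n)    /* extra hop so n can itself be a macro */

#define DEMO_NCACHE 16

int
main(void) {
    demo_elm_t cache[DEMO_NCACHE] = {DEMO_INIT_DATA(DEMO_NCACHE)};
    for (size_t i = 0; i < DEMO_NCACHE; i++) {
        assert(cache[i].key == -1 && cache[i].leaf == NULL);
    }
    return 0;
}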
deps/jemalloc/include/jemalloc/internal/safety_check.h
#ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H
#define JEMALLOC_INTERNAL_SAFETY_CHECK_H
void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr,
    size_t true_size, size_t input_size);
void safety_check_fail(const char *format, ...);

typedef void (*safety_check_abort_hook_t)(const char *message);

/* Can set to NULL for a default. */
void safety_check_set_abort(void (*abort_fn)());
void safety_check_set_abort(safety_check_abort_hook_t abort_fn);

JEMALLOC_ALWAYS_INLINE void
safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) {
...
...
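The hunk above replaces the loosely typed abort callback with the named safety_check_abort_hook_t type. A minimal sketch of installing such a hook follows; the typedef is restated and the setter is stubbed locally so the sketch stands alone, since the real safety_check_set_abort() lives in jemalloc's internal headers and is typically exercised from tests.

#include <stdio.h>
#include <stdlib.h>

typedef void (*safety_check_abort_hook_t)(const char *message);

/* Stub that only mimics the shape of the real safety_check_set_abort(). */
static safety_check_abort_hook_t demo_abort_hook = NULL;
static void
demo_safety_check_set_abort(safety_check_abort_hook_t abort_fn) {
    demo_abort_hook = abort_fn;
}

static void
demo_log_and_abort(const char *message) {
    fprintf(stderr, "safety check failed: %s\n", message);
    abort();
}

int
main(void) {
    demo_safety_check_set_abort(demo_log_and_abort);
    /* Passing NULL restores the default behavior, per the comment above. */
    demo_safety_check_set_abort(NULL);
    return 0;
}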
deps/jemalloc/include/jemalloc/internal/san.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_GUARD_H
#define JEMALLOC_INTERNAL_GUARD_H

#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/emap.h"

#define SAN_PAGE_GUARD PAGE
#define SAN_PAGE_GUARDS_SIZE (SAN_PAGE_GUARD * 2)

#define SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT 0
#define SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT 0

#define SAN_LG_UAF_ALIGN_DEFAULT (-1)
#define SAN_CACHE_BIN_NONFAST_MASK_DEFAULT (uintptr_t)(-1)

static const uintptr_t uaf_detect_junk = (uintptr_t)0x5b5b5b5b5b5b5b5bULL;

/* 0 means disabled, i.e. never guarded. */
extern size_t opt_san_guard_large;
extern size_t opt_san_guard_small;
/* -1 means disabled, i.e. never check for use-after-free. */
extern ssize_t opt_lg_san_uaf_align;

void san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    emap_t *emap, bool left, bool right, bool remap);
void san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    emap_t *emap, bool left, bool right);
/*
 * Unguard the extent, but don't modify emap boundaries. Must be called on an
 * extent that has been erased from emap and shouldn't be placed back.
 */
void san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks,
    edata_t *edata, emap_t *emap);
void san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize);

void tsd_san_init(tsd_t *tsd);
void san_init(ssize_t lg_san_uaf_align);

static inline void
san_guard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    emap_t *emap, bool remap) {
    san_guard_pages(tsdn, ehooks, edata, emap, true, true, remap);
}

static inline void
san_unguard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    emap_t *emap) {
    san_unguard_pages(tsdn, ehooks, edata, emap, true, true);
}

static inline size_t
san_two_side_unguarded_sz(size_t size) {
    assert(size % PAGE == 0);
    assert(size >= SAN_PAGE_GUARDS_SIZE);
    return size - SAN_PAGE_GUARDS_SIZE;
}

static inline size_t
san_two_side_guarded_sz(size_t size) {
    assert(size % PAGE == 0);
    return size + SAN_PAGE_GUARDS_SIZE;
}

static inline size_t
san_one_side_unguarded_sz(size_t size) {
    assert(size % PAGE == 0);
    assert(size >= SAN_PAGE_GUARD);
    return size - SAN_PAGE_GUARD;
}

static inline size_t
san_one_side_guarded_sz(size_t size) {
    assert(size % PAGE == 0);
    return size + SAN_PAGE_GUARD;
}

static inline bool
san_guard_enabled(void) {
    return (opt_san_guard_large != 0 || opt_san_guard_small != 0);
}

static inline bool
san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size,
    size_t alignment) {
    if (opt_san_guard_large == 0 || ehooks_guard_will_fail(ehooks) ||
        tsdn_null(tsdn)) {
        return false;
    }

    tsd_t *tsd = tsdn_tsd(tsdn);
    uint64_t n = tsd_san_extents_until_guard_large_get(tsd);
    assert(n >= 1);
    if (n > 1) {
        /*
         * Subtract conditionally because the guard may not happen due
         * to alignment or size restriction below.
         */
        *tsd_san_extents_until_guard_largep_get(tsd) = n - 1;
    }

    if (n == 1 && (alignment <= PAGE) &&
        (san_two_side_guarded_sz(size) <= SC_LARGE_MAXCLASS)) {
        *tsd_san_extents_until_guard_largep_get(tsd) = opt_san_guard_large;
        return true;
    } else {
        assert(tsd_san_extents_until_guard_large_get(tsd) >= 1);
        return false;
    }
}

static inline bool
san_slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) {
    if (opt_san_guard_small == 0 || ehooks_guard_will_fail(ehooks) ||
        tsdn_null(tsdn)) {
        return false;
    }

    tsd_t *tsd = tsdn_tsd(tsdn);
    uint64_t n = tsd_san_extents_until_guard_small_get(tsd);
    assert(n >= 1);
    if (n == 1) {
        *tsd_san_extents_until_guard_smallp_get(tsd) = opt_san_guard_small;
        return true;
    } else {
        *tsd_san_extents_until_guard_smallp_get(tsd) = n - 1;
        assert(tsd_san_extents_until_guard_small_get(tsd) >= 1);
        return false;
    }
}

static inline void
san_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid,
    void **last) {
    size_t ptr_sz = sizeof(void *);

    *first = ptr;

    *mid = (void *)((uintptr_t)ptr + ((usize >> 1) & ~(ptr_sz - 1)));
    assert(*first != *mid || usize == ptr_sz);
    assert((uintptr_t)*first <= (uintptr_t)*mid);

    /*
     * When usize > 32K, the gap between requested_size and usize might be
     * greater than 4K -- this means the last write may access an
     * likely-untouched page (default settings w/ 4K pages). However by
     * default the tcache only goes up to the 32K size class, and is usually
     * tuned lower instead of higher, which makes it less of a concern.
     */
    *last = (void *)((uintptr_t)ptr + usize - sizeof(uaf_detect_junk));
    assert(*first != *last || usize == ptr_sz);
    assert(*mid != *last || usize <= ptr_sz * 2);
    assert((uintptr_t)*mid <= (uintptr_t)*last);
}

static inline bool
san_junk_ptr_should_slow(void) {
    /*
     * The latter condition (pointer size greater than the min size class)
     * is not expected -- fall back to the slow path for simplicity.
     */
    return config_debug || (LG_SIZEOF_PTR > SC_LG_TINY_MIN);
}

static inline void
san_junk_ptr(void *ptr, size_t usize) {
    if (san_junk_ptr_should_slow()) {
        memset(ptr, (char)uaf_detect_junk, usize);
        return;
    }

    void *first, *mid, *last;
    san_junk_ptr_locations(ptr, usize, &first, &mid, &last);
    *(uintptr_t *)first = uaf_detect_junk;
    *(uintptr_t *)mid = uaf_detect_junk;
    *(uintptr_t *)last = uaf_detect_junk;
}

static inline bool
san_uaf_detection_enabled(void) {
    bool ret = config_uaf_detection && (opt_lg_san_uaf_align != -1);
    if (config_uaf_detection && ret) {
        assert(san_cache_bin_nonfast_mask == ((uintptr_t)1 <<
            opt_lg_san_uaf_align) - 1);
    }

    return ret;
}

#endif /* JEMALLOC_INTERNAL_GUARD_H */
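To make the first/mid/last placement computed by san_junk_ptr_locations() above concrete, here is a standalone check (not jemalloc code) that restates the arithmetic locally: for a 4096-byte region on a typical 64-bit build, the three junk words land at offsets 0, 2048, and 4088.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static void
demo_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid,
    void **last) {
    size_t ptr_sz = sizeof(void *);
    *first = ptr;
    *mid = (void *)((uintptr_t)ptr + ((usize >> 1) & ~(ptr_sz - 1)));
    *last = (void *)((uintptr_t)ptr + usize - sizeof(uint64_t));
}

int
main(void) {
    size_t usize = 4096;
    void *buf = malloc(usize);
    void *first, *mid, *last;
    demo_junk_ptr_locations(buf, usize, &first, &mid, &last);
    assert((uintptr_t)first - (uintptr_t)buf == 0);
    assert((uintptr_t)mid - (uintptr_t)buf == 2048);
    assert((uintptr_t)last - (uintptr_t)buf == usize - 8);
    free(buf);
    return 0;
}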
deps/jemalloc/include/jemalloc/internal/san_bump.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_SAN_BUMP_H
#define JEMALLOC_INTERNAL_SAN_BUMP_H
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/mutex.h"
#define SBA_RETAINED_ALLOC_SIZE ((size_t)4 << 20)
extern bool opt_retain;

typedef struct ehooks_s ehooks_t;
typedef struct pac_s pac_t;

typedef struct san_bump_alloc_s san_bump_alloc_t;
struct san_bump_alloc_s {
    malloc_mutex_t mtx;
    edata_t *curr_reg;
};

static inline bool
san_bump_enabled() {
    /*
     * We enable san_bump allocator only when it's possible to break up a
     * mapping and unmap a part of it (maps_coalesce). This is needed to
     * ensure the arena destruction process can destroy all retained guarded
     * extents one by one and to unmap a trailing part of a retained guarded
     * region when it's too small to fit a pending allocation.
     * opt_retain is required, because this allocator retains a large
     * virtual memory mapping and returns smaller parts of it.
     */
    return maps_coalesce && opt_retain;
}

static inline bool
san_bump_alloc_init(san_bump_alloc_t *sba) {
    bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator",
        WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive);
    if (err) {
        return true;
    }
    sba->curr_reg = NULL;

    return false;
}

edata_t *
san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
    ehooks_t *ehooks, size_t size, bool zero);

#endif /* JEMALLOC_INTERNAL_SAN_BUMP_H */
deps/jemalloc/include/jemalloc/internal/sc.h
...
...
@@ -197,30 +197,34 @@
(SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR)
/* The number of size classes that are a multiple of the page size. */
#define SC_NPSIZES ( \
/* Start with all the size classes. */
\
SC_NSIZES \
/* Subtract out those groups with too small a base. */
\
- (LG_PAGE - 1 - SC_LG_FIRST_REGULAR_BASE) * SC_NGROUP \
/* And the pseudo-group. */
\
- SC_NPSEUDO \
/* And the tiny group. */
\
- SC_NTINY \
/* Sizes where ndelta*delta is not a multiple of the page size. */
\
- (SC_LG_NGROUP * SC_NGROUP))
/*
* Note that the last line is computed as the sum of the second column in the
* following table:
* lg(base) | count of sizes to exclude
* ------------------------------|-----------------------------
* LG_PAGE - 1 | SC_NGROUP - 1
* LG_PAGE | SC_NGROUP - 1
* LG_PAGE + 1 | SC_NGROUP - 2
* LG_PAGE + 2 | SC_NGROUP - 4
* ... | ...
* LG_PAGE + (SC_LG_NGROUP - 1) | SC_NGROUP - (SC_NGROUP / 2)
* The number of size classes that are a multiple of the page size.
*
* Here are the first few bases that have a page-sized SC.
*
* lg(base) | base | highest SC | page-multiple SCs
* --------------|------------------------------------------
* LG_PAGE - 1 | PAGE / 2 | PAGE | 1
* LG_PAGE | PAGE | 2 * PAGE | 1
* LG_PAGE + 1 | 2 * PAGE | 4 * PAGE | 2
* LG_PAGE + 2 | 4 * PAGE | 8 * PAGE | 4
*
* The number of page-multiple SCs continues to grow in powers of two, up until
* lg_delta == lg_page, which corresponds to setting lg_base to lg_page +
* SC_LG_NGROUP. So, then, the number of size classes that are multiples of the
* page size whose lg_delta is less than the page size is
* 1 + (2**0 + 2**1 + ... + 2**(lg_ngroup - 1)) == 2**lg_ngroup.
*
* For each base with lg_base in [lg_page + lg_ngroup, lg_base_max), there are
* NGROUP page-sized size classes, and when lg_base == lg_base_max, there are
* NGROUP - 1.
*
* This gives us the quantity we seek.
*/
#define SC_NPSIZES (							\
	SC_NGROUP							\
	+ (SC_LG_BASE_MAX - (LG_PAGE + SC_LG_NGROUP)) * SC_NGROUP	\
	+ SC_NGROUP - 1)
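Editor's note: to make the counting argument in the comment above concrete, the brute-force check below enumerates the page-multiple size classes for one hypothetical parameter set (lg_page = 12, lg_ngroup = 2, lg_base_max = 62 are assumptions chosen for illustration, not values read from any particular build) and compares the result with the closed form used by SC_NPSIZES.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
	/* Hypothetical parameters, for illustration only. */
	const int lg_page = 12;     /* 4 KiB pages */
	const int lg_ngroup = 2;    /* 4 sizes per group */
	const int lg_base_max = 62; /* largest group base */
	const uint64_t page = (uint64_t)1 << lg_page;
	const int ngroup = 1 << lg_ngroup;

	/*
	 * Brute force: walk every group of sizes base + ndelta * delta, with
	 * delta == base >> lg_ngroup, and count the sizes that are page
	 * multiples. Groups whose sizes all fall below half a page contribute
	 * nothing and are skipped.
	 */
	uint64_t brute = 0;
	for (int lg_base = lg_page - 1; lg_base <= lg_base_max; lg_base++) {
		uint64_t base = (uint64_t)1 << lg_base;
		uint64_t delta = base >> lg_ngroup;
		/* The final group stops one delta short of 2 * base. */
		int nsizes = (lg_base == lg_base_max) ? ngroup - 1 : ngroup;
		for (int ndelta = 1; ndelta <= nsizes; ndelta++) {
			if ((base + (uint64_t)ndelta * delta) % page == 0) {
				brute++;
			}
		}
	}

	/* Closed form, mirroring the SC_NPSIZES definition. */
	uint64_t closed = (uint64_t)ngroup
	    + (uint64_t)(lg_base_max - (lg_page + lg_ngroup)) * ngroup
	    + ngroup - 1;

	printf("brute force = %llu, closed form = %llu\n",
	    (unsigned long long)brute, (unsigned long long)closed);
	assert(brute == closed);
	return 0;
}

With these assumed parameters both counts come out to 199; the interesting part is that the two agree for any consistent choice of lg_page, lg_ngroup, and lg_base_max.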
/*
* We declare a size class is binnable if size < page size * group. Or, in other
...
...
@@ -242,17 +246,23 @@
# error "Too many small size classes"
#endif
/* The largest size class in the lookup table. */
#define SC_LOOKUP_MAXCLASS ((size_t)1 << 12)
/* The largest size class in the lookup table, and its binary log. */
#define SC_LG_MAX_LOOKUP 12
#define SC_LOOKUP_MAXCLASS (1 << SC_LG_MAX_LOOKUP)
/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */
#define SC_SMALL_MAX_BASE ((size_t)1 << (LG_PAGE + SC_LG_NGROUP - 1))
#define SC_SMALL_MAX_DELTA ((size_t)1 << (LG_PAGE - 1))
#define SC_SMALL_MAX_BASE (1 << (LG_PAGE + SC_LG_NGROUP - 1))
#define SC_SMALL_MAX_DELTA (1 << (LG_PAGE - 1))
/* The largest size class allocated out of a slab. */
#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \
+ (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)
/* The fastpath assumes all lookup-able sizes are small. */
#if (SC_SMALL_MAXCLASS < SC_LOOKUP_MAXCLASS)
# error "Lookup table sizes must be small"
#endif
/* The smallest size class not allocated out of a slab. */
#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP))
#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP)
...
...
@@ -264,6 +274,19 @@
/* The largest size class supported. */
#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)
/* Maximum number of regions in one slab. */
#ifndef CONFIG_LG_SLAB_MAXREGS
# define SC_LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
#else
# if CONFIG_LG_SLAB_MAXREGS < (LG_PAGE - SC_LG_TINY_MIN)
# error "Unsupported SC_LG_SLAB_MAXREGS"
# else
# define SC_LG_SLAB_MAXREGS CONFIG_LG_SLAB_MAXREGS
# endif
#endif
#define SC_SLAB_MAXREGS (1U << SC_LG_SLAB_MAXREGS)
typedef struct sc_s sc_t;
struct sc_s {
/* Size class index, or -1 if not a valid size class. */
...
...
@@ -321,10 +344,11 @@ struct sc_data_s {
	sc_t sc[SC_NSIZES];
};

size_t reg_size_compute(int lg_base, int lg_delta, int ndelta);
void sc_data_init(sc_data_t *data);
/*
* Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
 * Otherwise, does its best to accommodate the request.
*/
void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
    int pgs);
...
...
deps/jemalloc/include/jemalloc/internal/sec.h
0 → 100644
View file @
a51eb05b
#ifndef JEMALLOC_INTERNAL_SEC_H
#define JEMALLOC_INTERNAL_SEC_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/pai.h"
/*
* Small extent cache.
*
* This includes some utilities to cache small extents. We have a per-pszind
* bin with its own list of extents of that size. We don't try to do any
* coalescing of extents (since it would in general require cross-shard locks or
* knowledge of the underlying PAI implementation).
*/
/*
* For now, this is just one field; eventually, we'll probably want to get more
* fine-grained data out (like per-size class statistics).
*/
typedef struct sec_stats_s sec_stats_t;
struct sec_stats_s {
	/* Sum of bytes_cur across all shards. */
	size_t bytes;
};

static inline void
sec_stats_accum(sec_stats_t *dst, sec_stats_t *src) {
	dst->bytes += src->bytes;
}
/* A collection of free extents, all of the same size. */
typedef struct sec_bin_s sec_bin_t;
struct sec_bin_s {
	/*
	 * When we fail to fulfill an allocation, we do a batch-alloc on the
	 * underlying allocator to fill extra items, as well. We drop the SEC
	 * lock while doing so, to allow operations on other bins to succeed.
	 * That introduces the possibility of other threads also trying to
	 * allocate out of this bin, failing, and also going to the backing
	 * allocator. To avoid a thundering herd problem in which lots of
	 * threads do batch allocs and overfill this bin as a result, we only
	 * allow one batch allocation at a time for a bin. This bool tracks
	 * whether or not some thread is already batch allocating.
	 *
	 * Eventually, the right answer may be a smarter sharding policy for the
	 * bins (e.g. a mutex per bin, which would also be more scalable
	 * generally; the batch-allocating thread could hold it while
	 * batch-allocating).
	 */
	bool being_batch_filled;

	/*
	 * Number of bytes in this particular bin (as opposed to the
	 * sec_shard_t's bytes_cur). This isn't user visible or reported in
	 * stats; rather, it allows us to quickly determine the change in the
	 * centralized counter when flushing.
	 */
	size_t bytes_cur;
	edata_list_active_t freelist;
};
typedef struct sec_shard_s sec_shard_t;
struct sec_shard_s {
	/*
	 * We don't keep per-bin mutexes, even though that would allow more
	 * sharding; this allows global cache-eviction, which in turn allows for
	 * better balancing across free lists.
	 */
	malloc_mutex_t mtx;
	/*
	 * A SEC may need to be shut down (i.e. flushed of its contents and
	 * prevented from further caching). To avoid tricky synchronization
	 * issues, we just track enabled-status in each shard, guarded by a
	 * mutex. In practice, this is only ever checked during brief races,
	 * since the arena-level atomic boolean tracking HPA enabled-ness means
	 * that we won't go down these pathways very often after custom extent
	 * hooks are installed.
	 */
	bool enabled;
	sec_bin_t *bins;
	/* Number of bytes in all bins in the shard. */
	size_t bytes_cur;
	/* The next pszind to flush in the flush-some pathways. */
	pszind_t to_flush_next;
};
typedef struct sec_s sec_t;
struct sec_s {
	pai_t pai;
	pai_t *fallback;

	sec_opts_t opts;
	sec_shard_t *shards;
	pszind_t npsizes;
};
bool sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
    const sec_opts_t *opts);
void sec_flush(tsdn_t *tsdn, sec_t *sec);
void sec_disable(tsdn_t *tsdn, sec_t *sec);
/*
* Morally, these two stats methods probably ought to be a single one (and the
* mutex_prof_data ought to live in the sec_stats_t). But splitting them apart
* lets them fit easily into the pa_shard stats framework (which also has this
* split), which simplifies the stats management.
*/
void sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats);
void sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec,
    mutex_prof_data_t *mutex_prof_data);
/*
* We use the arena lock ordering; these are acquired in phase 2 of forking, but
* should be acquired before the underlying allocator mutexes.
*/
void sec_prefork2(tsdn_t *tsdn, sec_t *sec);
void sec_postfork_parent(tsdn_t *tsdn, sec_t *sec);
void sec_postfork_child(tsdn_t *tsdn, sec_t *sec);

#endif /* JEMALLOC_INTERNAL_SEC_H */
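Editor's note: for a sense of how a small extent cache like this sits in front of a fallback allocator, here is a much-simplified, self-contained sketch. size_cache_t, cache_alloc, cache_dalloc, and the byte cap are illustrative inventions, not the sec_t API: allocations are served from a per-size-index freelist when possible, frees are parked in the freelist, and everything is flushed back to the real allocator once the cache holds too many bytes.

#include <stdlib.h>

#define NBINS     8          /* number of size indices we cache */
#define BYTES_MAX (1 << 20)  /* flush threshold for the whole cache */

/* A freed chunk doubles as a freelist node. */
typedef struct node_s {
	struct node_s *next;
} node_t;

/* Zero-initialize before use. */
typedef struct size_cache_s {
	node_t *bins[NBINS]; /* one freelist per size index */
	size_t bytes_cur;    /* bytes parked across all bins */
} size_cache_t;

/* Map a size index back to a byte size; purely illustrative. */
static size_t
bin_size(int szind) {
	return (size_t)64 << szind;
}

static void *
cache_alloc(size_cache_t *c, int szind) {
	node_t *n = c->bins[szind];
	if (n != NULL) {
		c->bins[szind] = n->next;
		c->bytes_cur -= bin_size(szind);
		return n;
	}
	return malloc(bin_size(szind)); /* fallback allocator */
}

static void
cache_dalloc(size_cache_t *c, int szind, void *ptr) {
	node_t *n = ptr;
	n->next = c->bins[szind];
	c->bins[szind] = n;
	c->bytes_cur += bin_size(szind);
	if (c->bytes_cur <= BYTES_MAX) {
		return;
	}
	/* Over the cap: flush everything back to the fallback allocator. */
	for (int i = 0; i < NBINS; i++) {
		while (c->bins[i] != NULL) {
			node_t *head = c->bins[i];
			c->bins[i] = head->next;
			free(head);
		}
	}
	c->bytes_cur = 0;
}

The real SEC differs in many ways (shards, mutexes, partial flushes via to_flush_next, batch fills), but the basic alloc-from-bin / park-on-free / flush-over-cap shape is the same.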