ruanhaishen / redis · Commits

Commit 4a884343, authored Oct 10, 2021 by Yoav Steinberg

    Delete old jemalloc before pulling in subtree.

parent 7ff7536e
Changes: 169
Too many changes to show; to preserve performance only 169 of 169+ files are displayed.
deps/jemalloc/include/jemalloc/internal/spin.h  deleted  100644 → 0

#ifndef JEMALLOC_INTERNAL_SPIN_H
#define JEMALLOC_INTERNAL_SPIN_H

#define SPIN_INITIALIZER {0U}

typedef struct {
    unsigned iteration;
} spin_t;

static inline void
spin_cpu_spinwait() {
#  if HAVE_CPU_SPINWAIT
    CPU_SPINWAIT;
#  else
    volatile int x = 0;
    x = x;
#  endif
}

static inline void
spin_adaptive(spin_t *spin) {
    volatile uint32_t i;

    if (spin->iteration < 5) {
        for (i = 0; i < (1U << spin->iteration); i++) {
            spin_cpu_spinwait();
        }
        spin->iteration++;
    } else {
#ifdef _WIN32
        SwitchToThread();
#else
        sched_yield();
#endif
    }
}

#undef SPIN_INLINE

#endif /* JEMALLOC_INTERNAL_SPIN_H */
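For illustration, a minimal standalone sketch of the backoff pattern spin_adaptive() implements, applied to a hypothetical spinlock built on C11 atomics in a POSIX environment; the names backoff_lock and try_lock_flag are illustrative and not from jemalloc.

/* Sketch only: demo names, not jemalloc API. */
#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>

static void
backoff_lock(atomic_flag *try_lock_flag) {
    uint32_t iteration = 0;
    while (atomic_flag_test_and_set_explicit(try_lock_flag,
        memory_order_acquire)) {
        if (iteration < 5) {
            /* Busy-wait 2^iteration times, like spin_adaptive(). */
            for (volatile uint32_t i = 0; i < (1U << iteration); i++) {
                /* spin */
            }
            iteration++;
        } else {
            /* Past the threshold, yield the CPU instead of burning it. */
            sched_yield();
        }
    }
}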
deps/jemalloc/include/jemalloc/internal/stats.h  deleted  100644 → 0

#ifndef JEMALLOC_INTERNAL_STATS_H
#define JEMALLOC_INTERNAL_STATS_H

/*  OPTION(opt, var_name, default, set_value_to) */
#define STATS_PRINT_OPTIONS						\
    OPTION('J', json,      false,        true)				\
    OPTION('g', general,   true,         false)				\
    OPTION('m', merged,    config_stats, false)				\
    OPTION('d', destroyed, config_stats, false)				\
    OPTION('a', unmerged,  config_stats, false)				\
    OPTION('b', bins,      true,         false)				\
    OPTION('l', large,     true,         false)				\
    OPTION('x', mutex,     true,         false)

enum {
#define OPTION(o, v, d, s) stats_print_option_num_##v,
    STATS_PRINT_OPTIONS
#undef OPTION
    stats_print_tot_num_options
};

/* Options for stats_print. */
extern bool opt_stats_print;
extern char opt_stats_print_opts[stats_print_tot_num_options+1];

/* Implements je_malloc_stats_print. */
void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts);

#endif /* JEMALLOC_INTERNAL_STATS_H */
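For illustration, a standalone sketch of the X-macro pattern stats.h relies on: one OPTION list expanded twice, first into enum constants and then into runtime data. The DEMO_OPTIONS entries and names are illustrative, not jemalloc's.

/* Sketch only: demo names, not jemalloc API. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_OPTIONS \
    OPTION('J', json,    false) \
    OPTION('g', general, true)

enum {
#define OPTION(o, v, d) demo_option_num_##v,
    DEMO_OPTIONS
#undef OPTION
    demo_tot_num_options
};

int
main(void) {
    /* The same list expands a second time into a parallel table. */
    static const char opt_chars[] = {
#define OPTION(o, v, d) o,
        DEMO_OPTIONS
#undef OPTION
    };
    for (int i = 0; i < demo_tot_num_options; i++) {
        printf("option %d: -%c\n", i, opt_chars[i]);
    }
    return 0;
}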
deps/jemalloc/include/jemalloc/internal/sz.h  deleted  100644 → 0

#ifndef JEMALLOC_INTERNAL_SIZE_H
#define JEMALLOC_INTERNAL_SIZE_H

#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"

/*
 * sz module: Size computations.
 *
 * Some abbreviations used here:
 *   p: Page
 *   ind: Index
 *   s, sz: Size
 *   u: Usable size
 *   a: Aligned
 *
 * These are not always used completely consistently, but should be enough to
 * interpret function names.  E.g. sz_psz2ind converts page size to page size
 * index; sz_sa2u converts a (size, alignment) allocation request to the usable
 * size that would result from such an allocation.
 */

/*
 * sz_pind2sz_tab encodes the same information as could be computed by
 * sz_pind2sz_compute().
 */
extern size_t const sz_pind2sz_tab[NPSIZES+1];
/*
 * sz_index2size_tab encodes the same information as could be computed (at
 * unacceptable cost in some code paths) by sz_index2size_compute().
 */
extern size_t const sz_index2size_tab[NSIZES];
/*
 * sz_size2index_tab is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via sz_size2index().
 */
extern uint8_t const sz_size2index_tab[];

static const size_t sz_large_pad =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    PAGE
#else
    0
#endif
    ;

JEMALLOC_ALWAYS_INLINE pszind_t
sz_psz2ind(size_t psz) {
    if (unlikely(psz > LARGE_MAXCLASS)) {
        return NPSIZES;
    }
    {
        pszind_t x = lg_floor((psz<<1)-1);
        pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 :
            x - (LG_SIZE_CLASS_GROUP + LG_PAGE);
        pszind_t grp = shift << LG_SIZE_CLASS_GROUP;

        pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
            LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;

        size_t delta_inverse_mask = ZU(-1) << lg_delta;
        pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
            ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

        pszind_t ind = grp + mod;
        return ind;
    }
}

static inline size_t
sz_pind2sz_compute(pszind_t pind) {
    if (unlikely(pind == NPSIZES)) {
        return LARGE_MAXCLASS + PAGE;
    }
    {
        size_t grp = pind >> LG_SIZE_CLASS_GROUP;
        size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

        size_t grp_size_mask = ~((!!grp)-1);
        size_t grp_size = ((ZU(1) << (LG_PAGE +
            (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;

        size_t shift = (grp == 0) ? 1 : grp;
        size_t lg_delta = shift + (LG_PAGE-1);
        size_t mod_size = (mod+1) << lg_delta;

        size_t sz = grp_size + mod_size;
        return sz;
    }
}

static inline size_t
sz_pind2sz_lookup(pszind_t pind) {
    size_t ret = (size_t)sz_pind2sz_tab[pind];
    assert(ret == sz_pind2sz_compute(pind));
    return ret;
}

static inline size_t
sz_pind2sz(pszind_t pind) {
    assert(pind < NPSIZES+1);
    return sz_pind2sz_lookup(pind);
}

static inline size_t
sz_psz2u(size_t psz) {
    if (unlikely(psz > LARGE_MAXCLASS)) {
        return LARGE_MAXCLASS + PAGE;
    }
    {
        size_t x = lg_floor((psz<<1)-1);
        size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
            LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
        size_t delta = ZU(1) << lg_delta;
        size_t delta_mask = delta - 1;
        size_t usize = (psz + delta_mask) & ~delta_mask;
        return usize;
    }
}

static inline szind_t
sz_size2index_compute(size_t size) {
    if (unlikely(size > LARGE_MAXCLASS)) {
        return NSIZES;
    }
#if (NTBINS != 0)
    if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
        szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
        szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
        return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
    }
#endif
    {
        szind_t x = lg_floor((size<<1)-1);
        szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
            x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
        szind_t grp = shift << LG_SIZE_CLASS_GROUP;

        szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) ?
            LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;

        size_t delta_inverse_mask = ZU(-1) << lg_delta;
        szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
            ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

        szind_t index = NTBINS + grp + mod;
        return index;
    }
}

JEMALLOC_ALWAYS_INLINE szind_t
sz_size2index_lookup(size_t size) {
    assert(size <= LOOKUP_MAXCLASS);
    {
        szind_t ret = (sz_size2index_tab[(size-1) >> LG_TINY_MIN]);
        assert(ret == sz_size2index_compute(size));
        return ret;
    }
}

JEMALLOC_ALWAYS_INLINE szind_t
sz_size2index(size_t size) {
    assert(size > 0);
    if (likely(size <= LOOKUP_MAXCLASS)) {
        return sz_size2index_lookup(size);
    }
    return sz_size2index_compute(size);
}

static inline size_t
sz_index2size_compute(szind_t index) {
#if (NTBINS > 0)
    if (index < NTBINS) {
        return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
    }
#endif
    {
        size_t reduced_index = index - NTBINS;
        size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
        size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

        size_t grp_size_mask = ~((!!grp)-1);
        size_t grp_size = ((ZU(1) << (LG_QUANTUM +
            (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;

        size_t shift = (grp == 0) ? 1 : grp;
        size_t lg_delta = shift + (LG_QUANTUM-1);
        size_t mod_size = (mod+1) << lg_delta;

        size_t usize = grp_size + mod_size;
        return usize;
    }
}

JEMALLOC_ALWAYS_INLINE size_t
sz_index2size_lookup(szind_t index) {
    size_t ret = (size_t)sz_index2size_tab[index];
    assert(ret == sz_index2size_compute(index));
    return ret;
}

JEMALLOC_ALWAYS_INLINE size_t
sz_index2size(szind_t index) {
    assert(index < NSIZES);
    return sz_index2size_lookup(index);
}

JEMALLOC_ALWAYS_INLINE size_t
sz_s2u_compute(size_t size) {
    if (unlikely(size > LARGE_MAXCLASS)) {
        return 0;
    }
#if (NTBINS > 0)
    if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
        size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
        size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
        return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
            (ZU(1) << lg_ceil));
    }
#endif
    {
        size_t x = lg_floor((size<<1)-1);
        size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) ?
            LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
        size_t delta = ZU(1) << lg_delta;
        size_t delta_mask = delta - 1;
        size_t usize = (size + delta_mask) & ~delta_mask;
        return usize;
    }
}

JEMALLOC_ALWAYS_INLINE size_t
sz_s2u_lookup(size_t size) {
    size_t ret = sz_index2size_lookup(sz_size2index_lookup(size));

    assert(ret == sz_s2u_compute(size));
    return ret;
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
sz_s2u(size_t size) {
    assert(size > 0);
    if (likely(size <= LOOKUP_MAXCLASS)) {
        return sz_s2u_lookup(size);
    }
    return sz_s2u_compute(size);
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sz_sa2u(size_t size, size_t alignment) {
    size_t usize;

    assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

    /* Try for a small size class. */
    if (size <= SMALL_MAXCLASS && alignment < PAGE) {
        /*
         * Round size up to the nearest multiple of alignment.
         *
         * This done, we can take advantage of the fact that for each
         * small size class, every object is aligned at the smallest
         * power of two that is non-zero in the base two representation
         * of the size.  For example:
         *
         *   Size |   Base 2 | Minimum alignment
         *   -----+----------+------------------
         *     96 |  1100000 |                32
         *    144 | 10100000 |                32
         *    192 | 11000000 |                64
         */
        usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
        if (usize < LARGE_MINCLASS) {
            return usize;
        }
    }

    /* Large size class.  Beware of overflow. */
    if (unlikely(alignment > LARGE_MAXCLASS)) {
        return 0;
    }

    /* Make sure result is a large size class. */
    if (size <= LARGE_MINCLASS) {
        usize = LARGE_MINCLASS;
    } else {
        usize = sz_s2u(size);
        if (usize < size) {
            /* size_t overflow. */
            return 0;
        }
    }

    /*
     * Calculate the multi-page mapping that large_palloc() would need in
     * order to guarantee the alignment.
     */
    if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
        /* size_t overflow. */
        return 0;
    }
    return usize;
}

#endif /* JEMALLOC_INTERNAL_SIZE_H */
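For illustration, a standalone sketch of the rounding formula used by sz_s2u_compute() and sz_psz2u() above: sizes are rounded up to one of four evenly spaced classes per power-of-two doubling. lg_floor() is replaced with a portable loop, and LG_SIZE_CLASS_GROUP = 2 with a 16-byte quantum is hard-coded as an assumption for the demo.

/* Sketch only: demo names and constants, not jemalloc API. */
#include <stddef.h>
#include <stdio.h>

#define DEMO_LG_SIZE_CLASS_GROUP 2  /* assumed: 4 classes per doubling */
#define DEMO_LG_QUANTUM 4           /* assumed: 16-byte quantum */

static size_t
demo_lg_floor(size_t x) {
    size_t lg = 0;
    while (x >>= 1) {
        lg++;
    }
    return lg;
}

/* Round size up to the next class boundary, as sz_s2u_compute() does. */
static size_t
demo_s2u(size_t size) {
    size_t x = demo_lg_floor((size << 1) - 1);
    size_t lg_delta = (x < DEMO_LG_SIZE_CLASS_GROUP + DEMO_LG_QUANTUM + 1) ?
        DEMO_LG_QUANTUM : x - DEMO_LG_SIZE_CLASS_GROUP - 1;
    size_t delta_mask = ((size_t)1 << lg_delta) - 1;
    return (size + delta_mask) & ~delta_mask;
}

int
main(void) {
    /* 4097 -> 5120: the class spacing in the (4096, 8192] group is 1024. */
    printf("%zu -> %zu\n", (size_t)4097, demo_s2u(4097));
    /* 100 -> 112: spacing is 16 in that group. */
    printf("%zu -> %zu\n", (size_t)100, demo_s2u(100));
    return 0;
}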
deps/jemalloc/include/jemalloc/internal/tcache_externs.h  deleted  100644 → 0

#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H

#include "jemalloc/internal/size_classes.h"

extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;

extern cache_bin_info_t *tcache_bin_info;

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern unsigned nhbins;

/* Maximum cached size class. */
extern size_t tcache_maxclass;

/*
 * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
 * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are
 * completely disjoint from this data structure.  tcaches starts off as a sparse
 * array, so it has no physical memory footprint until individual pages are
 * touched.  This allows the entire array to be allocated the first time an
 * explicit tcache is created without a disproportionate impact on memory usage.
 */
extern tcaches_t *tcaches;

size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
    szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);
void tcache_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn);
void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);
void tcache_flush(tsd_t *tsd);
bool tsd_tcache_data_init(tsd_t *tsd);
bool tsd_tcache_enabled_data_init(tsd_t *tsd);

#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/tcache_inlines.h  deleted  100644 → 0

#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H

#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"

static inline bool
tcache_enabled_get(tsd_t *tsd) {
    return tsd_tcache_enabled_get(tsd);
}

static inline void
tcache_enabled_set(tsd_t *tsd, bool enabled) {
    bool was_enabled = tsd_tcache_enabled_get(tsd);

    if (!was_enabled && enabled) {
        tsd_tcache_data_init(tsd);
    } else if (was_enabled && !enabled) {
        tcache_cleanup(tsd);
    }
    /* Commit the state last.  Above calls check current state. */
    tsd_tcache_enabled_set(tsd, enabled);
    tsd_slow_update(tsd);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache) {
    if (TCACHE_GC_INCR == 0) {
        return;
    }

    if (unlikely(ticker_tick(&tcache->gc_ticker))) {
        tcache_event_hard(tsd, tcache);
    }
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    UNUSED size_t size, szind_t binind, bool zero, bool slow_path) {
    void *ret;
    cache_bin_t *bin;
    bool tcache_success;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);

    assert(binind < NBINS);
    bin = tcache_small_bin_get(tcache, binind);
    ret = cache_bin_alloc_easy(bin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        bool tcache_hard_success;
        arena = arena_choose(tsd, arena);
        if (unlikely(arena == NULL)) {
            return NULL;
        }

        ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
            bin, binind, &tcache_hard_success);
        if (tcache_hard_success == false) {
            return NULL;
        }
    }

    assert(ret);
    /*
     * Only compute usize if required.  The checks in the following if
     * statement are all static.
     */
    if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
        usize = sz_index2size(binind);
        assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
    }

    if (likely(!zero)) {
        if (slow_path && config_fill) {
            if (unlikely(opt_junk_alloc)) {
                arena_alloc_junk_small(ret, &bin_infos[binind], false);
            } else if (unlikely(opt_zero)) {
                memset(ret, 0, usize);
            }
        }
    } else {
        if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
            arena_alloc_junk_small(ret, &bin_infos[binind], true);
        }
        memset(ret, 0, usize);
    }

    if (config_stats) {
        bin->tstats.nrequests++;
    }
    if (config_prof) {
        tcache->prof_accumbytes += usize;
    }
    tcache_event(tsd, tcache);
    return ret;
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
    void *ret;
    cache_bin_t *bin;
    bool tcache_success;

    assert(binind >= NBINS && binind < nhbins);
    bin = tcache_large_bin_get(tcache, binind);
    ret = cache_bin_alloc_easy(bin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        /*
         * Only allocate one large object at a time, because it's quite
         * expensive to create one and not use it.
         */
        arena = arena_choose(tsd, arena);
        if (unlikely(arena == NULL)) {
            return NULL;
        }

        ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
        if (ret == NULL) {
            return NULL;
        }
    } else {
        size_t usize JEMALLOC_CC_SILENCE_INIT(0);

        /* Only compute usize on demand */
        if (config_prof || (slow_path && config_fill) ||
            unlikely(zero)) {
            usize = sz_index2size(binind);
            assert(usize <= tcache_maxclass);
        }

        if (likely(!zero)) {
            if (slow_path && config_fill) {
                if (unlikely(opt_junk_alloc)) {
                    memset(ret, JEMALLOC_ALLOC_JUNK, usize);
                } else if (unlikely(opt_zero)) {
                    memset(ret, 0, usize);
                }
            }
        } else {
            memset(ret, 0, usize);
        }

        if (config_stats) {
            bin->tstats.nrequests++;
        }
        if (config_prof) {
            tcache->prof_accumbytes += usize;
        }
    }

    tcache_event(tsd, tcache);
    return ret;
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
    cache_bin_t *bin;
    cache_bin_info_t *bin_info;

    assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);

    if (slow_path && config_fill && unlikely(opt_junk_free)) {
        arena_dalloc_junk_small(ptr, &bin_infos[binind]);
    }

    bin = tcache_small_bin_get(tcache, binind);
    bin_info = &tcache_bin_info[binind];
    if (unlikely(bin->ncached == bin_info->ncached_max)) {
        tcache_bin_flush_small(tsd, tcache, bin, binind,
            (bin_info->ncached_max >> 1));
    }
    assert(bin->ncached < bin_info->ncached_max);
    bin->ncached++;
    *(bin->avail - bin->ncached) = ptr;

    tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
    cache_bin_t *bin;
    cache_bin_info_t *bin_info;

    assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
    assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

    if (slow_path && config_fill && unlikely(opt_junk_free)) {
        large_dalloc_junk(ptr, sz_index2size(binind));
    }

    bin = tcache_large_bin_get(tcache, binind);
    bin_info = &tcache_bin_info[binind];
    if (unlikely(bin->ncached == bin_info->ncached_max)) {
        tcache_bin_flush_large(tsd, bin, binind,
            (bin_info->ncached_max >> 1), tcache);
    }
    assert(bin->ncached < bin_info->ncached_max);
    bin->ncached++;
    *(bin->avail - bin->ncached) = ptr;

    tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
    tcaches_t *elm = &tcaches[ind];
    if (unlikely(elm->tcache == NULL)) {
        elm->tcache = tcache_create_explicit(tsd);
    }
    return elm->tcache;
}

#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */
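For illustration, a standalone sketch of the downward-growing pointer stack that tcache_dalloc_small() manipulates via "*(bin->avail - bin->ncached) = ptr". The demo_bin_t type, its fixed capacity, and the push/pop names are illustrative and far simpler than jemalloc's cache_bin_t.

/* Sketch only: demo names, not jemalloc API. */
#include <stddef.h>

#define DEMO_NCACHED_MAX 8

typedef struct {
    int ncached;     /* number of cached pointers */
    void **avail;    /* one past the slot holding the oldest entry */
    void *slots[DEMO_NCACHED_MAX];
} demo_bin_t;

static void
demo_bin_init(demo_bin_t *bin) {
    bin->ncached = 0;
    /* The most recently pushed pointer lives at avail - ncached. */
    bin->avail = bin->slots + DEMO_NCACHED_MAX;
}

static int
demo_bin_push(demo_bin_t *bin, void *ptr) {
    if (bin->ncached == DEMO_NCACHED_MAX) {
        return 0;    /* full: jemalloc would flush half the bin here */
    }
    bin->ncached++;
    *(bin->avail - bin->ncached) = ptr;
    return 1;
}

static void *
demo_bin_pop(demo_bin_t *bin) {
    if (bin->ncached == 0) {
        return NULL; /* empty: jemalloc falls back to the arena */
    }
    void *ret = *(bin->avail - bin->ncached);
    bin->ncached--;
    return ret;      /* LIFO: the most recently freed pointer */
}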
deps/jemalloc/include/jemalloc/internal/tcache_structs.h  deleted  100644 → 0

#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H

#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/cache_bin.h"
#include "jemalloc/internal/ticker.h"

struct tcache_s {
    /*
     * To minimize our cache-footprint, we put the frequently accessed data
     * together at the start of this struct.
     */

    /* Cleared after arena_prof_accum(). */
    uint64_t prof_accumbytes;
    /* Drives incremental GC. */
    ticker_t gc_ticker;
    /*
     * The pointer stacks associated with bins follow as a contiguous array.
     * During tcache initialization, the avail pointer in each element of
     * tbins is initialized to point to the proper offset within this array.
     */
    cache_bin_t bins_small[NBINS];

    /*
     * This data is less hot; we can be a little less careful with our
     * footprint here.
     */
    /* Lets us track all the tcaches in an arena. */
    ql_elm(tcache_t) link;
    /*
     * The descriptor lets the arena find our cache bins without seeing the
     * tcache definition.  This enables arenas to aggregate stats across
     * tcaches without having a tcache dependency.
     */
    cache_bin_array_descriptor_t cache_bin_array_descriptor;

    /* The arena this tcache is associated with. */
    arena_t *arena;
    /* Next bin to GC. */
    szind_t next_gc_bin;
    /* For small bins, fill (ncached_max >> lg_fill_div). */
    uint8_t lg_fill_div[NBINS];
    /*
     * We put the cache bins for large size classes at the end of the
     * struct, since some of them might not get used.  This might end up
     * letting us avoid touching an extra page if we don't have to.
     */
    cache_bin_t bins_large[NSIZES-NBINS];
};

/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
    union {
        tcache_t *tcache;
        tcaches_t *next;
    };
};

#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
deps/jemalloc/include/jemalloc/internal/tcache_types.h  deleted  100644 → 0

#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
#define JEMALLOC_INTERNAL_TCACHE_TYPES_H

#include "jemalloc/internal/size_classes.h"

typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
 * used for two purposes: preventing thread caching on a per thread basis and
 * cleaning up during thread shutdown.
 */
#define TCACHE_STATE_DISABLED		((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED	((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY		((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX		TCACHE_STATE_PURGATORY

/*
 * Absolute minimum number of cache slots for each small bin.
 */
#define TCACHE_NSLOTS_SMALL_MIN		20

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond that imposed as: twice the
 * number of regions per slab for this size class.
 *
 * This constant must be an even number.
 */
#define TCACHE_NSLOTS_SMALL_MAX		200

/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE		20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT	15

/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps.  Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define TCACHE_GC_SWEEP			8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR							\
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))

/* Used in TSD static initializer only. Real init in tcache_data_init(). */
#define TCACHE_ZERO_INITIALIZER {0}

/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
#define TCACHE_ENABLED_ZERO_INITIALIZER false

#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
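For illustration, a small standalone check of how TCACHE_GC_INCR follows from TCACHE_GC_SWEEP: the sweep budget is divided across the small bins, rounding up. NBINS is build-dependent; 36 is only an illustrative value here.

/* Sketch only: demo names and an assumed NBINS, not jemalloc API. */
#include <stdio.h>

#define DEMO_TCACHE_GC_SWEEP 8192
#define DEMO_NBINS 36  /* assumed value for the demo */

#define DEMO_TCACHE_GC_INCR						\
    ((DEMO_TCACHE_GC_SWEEP / DEMO_NBINS) +				\
    ((DEMO_TCACHE_GC_SWEEP / DEMO_NBINS == 0) ? 0 : 1))

int
main(void) {
    /* 8192 / 36 = 227, plus 1 -> one incremental GC every 228 events. */
    printf("events between incremental GCs: %d\n", DEMO_TCACHE_GC_INCR);
    return 0;
}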
deps/jemalloc/include/jemalloc/internal/ticker.h  deleted  100644 → 0

#ifndef JEMALLOC_INTERNAL_TICKER_H
#define JEMALLOC_INTERNAL_TICKER_H

#include "jemalloc/internal/util.h"

/**
 * A ticker makes it easy to count-down events until some limit.  You
 * ticker_init the ticker to trigger every nticks events.  You then notify it
 * that an event has occurred with calls to ticker_tick (or that nticks events
 * have occurred with a call to ticker_ticks), which will return true (and reset
 * the counter) if the countdown hit zero.
 */

typedef struct {
    int32_t tick;
    int32_t nticks;
} ticker_t;

static inline void
ticker_init(ticker_t *ticker, int32_t nticks) {
    ticker->tick = nticks;
    ticker->nticks = nticks;
}

static inline void
ticker_copy(ticker_t *ticker, const ticker_t *other) {
    *ticker = *other;
}

static inline int32_t
ticker_read(const ticker_t *ticker) {
    return ticker->tick;
}

/*
 * Not intended to be a public API.  Unfortunately, on x86, neither gcc nor
 * clang seems smart enough to turn
 *	ticker->tick -= nticks;
 *	if (unlikely(ticker->tick < 0)) {
 *		fixup ticker
 *		return true;
 *	}
 *	return false;
 * into
 *	subq %nticks_reg, (%ticker_reg)
 *	js fixup ticker
 *
 * unless we force "fixup ticker" out of line.  In that case, gcc gets it right,
 * but clang now does worse than before.  So, on x86 with gcc, we force it out
 * of line, but otherwise let the inlining occur.  Ordinarily this wouldn't be
 * worth the hassle, but this is on the fast path of both malloc and free (via
 * tcache_event).
 */
#if defined(__GNUC__) && !defined(__clang__)				\
    && (defined(__x86_64__) || defined(__i386__))
JEMALLOC_NOINLINE
#endif
static bool
ticker_fixup(ticker_t *ticker) {
    ticker->tick = ticker->nticks;
    return true;
}

static inline bool
ticker_ticks(ticker_t *ticker, int32_t nticks) {
    ticker->tick -= nticks;
    if (unlikely(ticker->tick < 0)) {
        return ticker_fixup(ticker);
    }
    return false;
}

static inline bool
ticker_tick(ticker_t *ticker) {
    return ticker_ticks(ticker, 1);
}

#endif /* JEMALLOC_INTERNAL_TICKER_H */
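For illustration, a standalone sketch of the countdown pattern the ticker implements, in the shape it is used from tcache_event(): decrement on every event, and when the counter underflows, reset it and perform the periodic work. demo_ticker_t mirrors ticker_t; the event loop and names are illustrative.

/* Sketch only: demo names, not jemalloc API. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    int32_t tick;
    int32_t nticks;
} demo_ticker_t;

static void
demo_ticker_init(demo_ticker_t *t, int32_t nticks) {
    t->tick = nticks;
    t->nticks = nticks;
}

/* Returns true (and resets) roughly once every nticks calls. */
static bool
demo_ticker_tick(demo_ticker_t *t) {
    if (--t->tick < 0) {
        t->tick = t->nticks;
        return true;
    }
    return false;
}

int
main(void) {
    demo_ticker_t gc_ticker;
    demo_ticker_init(&gc_ticker, 228);  /* e.g. a TCACHE_GC_INCR-like value */
    for (int event = 1; event <= 1000; event++) {
        if (demo_ticker_tick(&gc_ticker)) {
            printf("incremental GC after event %d\n", event);
        }
    }
    return 0;
}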
deps/jemalloc/include/jemalloc/internal/tsd.h  deleted  100644 → 0

#ifndef JEMALLOC_INTERNAL_TSD_H
#define JEMALLOC_INTERNAL_TSD_H

#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/prof_types.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/witness.h"

/*
 * Thread-Specific-Data layout
 * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
 * s: state
 * e: tcache_enabled
 * m: thread_allocated (config_stats)
 * f: thread_deallocated (config_stats)
 * p: prof_tdata (config_prof)
 * c: rtree_ctx (rtree cache accessed on deallocation)
 * t: tcache
 * --- data not accessed on tcache fast path: arena-related fields ---
 * d: arenas_tdata_bypass
 * r: reentrancy_level
 * x: narenas_tdata
 * i: iarena
 * a: arena
 * o: arenas_tdata
 * Loading TSD data is on the critical path of basically all malloc operations.
 * In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
 * Use a compact layout to reduce cache footprint.
 * +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+
 * |----------------------------  1st cacheline  ----------------------------|
 * | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32  ........ ........ .......] |
 * |----------------------------  2nd cacheline  ----------------------------|
 * | [c * 64  ........ ........ ........ ........ ........ ........ .......] |
 * |----------------------------  3rd cacheline  ----------------------------|
 * | [c * 32  ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
 * +-------------------------------------------------------------------------+
 * Note: the entire tcache is embedded into TSD and spans multiple cachelines.
 *
 * The last 3 members (i, a and o) before tcache aren't really needed on the
 * tcache fast path.  However we have a number of unused tcache bins and
 * witnesses (never touched unless config_debug) at the end of tcache, so we
 * place them there to avoid breaking the cachelines and possibly paging in an
 * extra page.
 */
#ifdef JEMALLOC_JET
typedef void (*test_callback_t)(int *);
#  define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
#  define MALLOC_TEST_TSD \
    O(test_data,		int,			int)		\
    O(test_callback,		test_callback_t,	int)
#  define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL
#else
#  define MALLOC_TEST_TSD
#  define MALLOC_TEST_TSD_INITIALIZER
#endif

/*  O(name,			type,			nullable type) */
#define MALLOC_TSD							\
    O(tcache_enabled,		bool,			bool)		\
    O(arenas_tdata_bypass,	bool,			bool)		\
    O(reentrancy_level,		int8_t,			int8_t)		\
    O(narenas_tdata,		uint32_t,		uint32_t)	\
    O(offset_state,		uint64_t,		uint64_t)	\
    O(thread_allocated,		uint64_t,		uint64_t)	\
    O(thread_deallocated,	uint64_t,		uint64_t)	\
    O(prof_tdata,		prof_tdata_t *,		prof_tdata_t *)	\
    O(rtree_ctx,		rtree_ctx_t,		rtree_ctx_t)	\
    O(iarena,			arena_t *,		arena_t *)	\
    O(arena,			arena_t *,		arena_t *)	\
    O(arenas_tdata,		arena_tdata_t *,	arena_tdata_t *)\
    O(tcache,			tcache_t,		tcache_t)	\
    O(witness_tsd,		witness_tsd_t,		witness_tsdn_t)	\
    MALLOC_TEST_TSD

#define TSD_INITIALIZER {						\
    tsd_state_uninitialized,						\
    TCACHE_ENABLED_ZERO_INITIALIZER,					\
    false,								\
    0,									\
    0,									\
    0,									\
    0,									\
    0,									\
    NULL,								\
    RTREE_CTX_ZERO_INITIALIZER,						\
    NULL,								\
    NULL,								\
    NULL,								\
    TCACHE_ZERO_INITIALIZER,						\
    WITNESS_TSD_INITIALIZER						\
    MALLOC_TEST_TSD_INITIALIZER						\
}

enum {
    tsd_state_nominal = 0, /* Common case --> jnz. */
    tsd_state_nominal_slow = 1, /* Initialized but on slow path. */
    /* the above 2 nominal states should be lower values. */
    tsd_state_nominal_max = 1, /* used for comparison only. */
    tsd_state_minimal_initialized = 2,
    tsd_state_purgatory = 3,
    tsd_state_reincarnated = 4,
    tsd_state_uninitialized = 5
};

/* Manually limit tsd_state_t to a single byte. */
typedef uint8_t tsd_state_t;

/* The actual tsd. */
struct tsd_s {
    /*
     * The contents should be treated as totally opaque outside the tsd
     * module.  Access any thread-local state through the getters and
     * setters below.
     */
    tsd_state_t state;
#define O(n, t, nt)							\
    t use_a_getter_or_setter_instead_##n;
MALLOC_TSD
#undef O
};

/*
 * Wrapper around tsd_t that makes it possible to avoid implicit conversion
 * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
 * explicitly converted to tsd_t, which is non-nullable.
 */
struct tsdn_s {
    tsd_t tsd;
};
#define TSDN_NULL ((tsdn_t *)0)

JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd) {
    return (tsdn_t *)tsd;
}

JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn) {
    return tsdn == NULL;
}

JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn) {
    assert(!tsdn_null(tsdn));

    return &tsdn->tsd;
}

void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_cleanup_register(bool (*f)(void));
tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
void tsd_cleanup(void *arg);
tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
void tsd_slow_update(tsd_t *tsd);

/*
 * We put the platform-specific data declarations and inlines into their own
 * header files to avoid cluttering this file.  They define tsd_boot0,
 * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set.
 */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#include "jemalloc/internal/tsd_malloc_thread_cleanup.h"
#elif (defined(JEMALLOC_TLS))
#include "jemalloc/internal/tsd_tls.h"
#elif (defined(_WIN32))
#include "jemalloc/internal/tsd_win.h"
#else
#include "jemalloc/internal/tsd_generic.h"
#endif

/*
 * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of
 * foo.  This omits some safety checks, and so can be used during tsd
 * initialization and cleanup.
 */
#define O(n, t, nt)							\
JEMALLOC_ALWAYS_INLINE t *						\
tsd_##n##p_get_unsafe(tsd_t *tsd) {					\
	return &tsd->use_a_getter_or_setter_instead_##n;		\
}
MALLOC_TSD
#undef O

/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
#define O(n, t, nt)							\
JEMALLOC_ALWAYS_INLINE t *						\
tsd_##n##p_get(tsd_t *tsd) {						\
	assert(tsd->state == tsd_state_nominal ||			\
	    tsd->state == tsd_state_nominal_slow ||			\
	    tsd->state == tsd_state_reincarnated ||			\
	    tsd->state == tsd_state_minimal_initialized);		\
	return tsd_##n##p_get_unsafe(tsd);				\
}
MALLOC_TSD
#undef O

/*
 * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn
 * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type.
 */
#define O(n, t, nt)							\
JEMALLOC_ALWAYS_INLINE nt *						\
tsdn_##n##p_get(tsdn_t *tsdn) {						\
	if (tsdn_null(tsdn)) {						\
		return NULL;						\
	}								\
	tsd_t *tsd = tsdn_tsd(tsdn);					\
	return (nt *)tsd_##n##p_get(tsd);				\
}
MALLOC_TSD
#undef O

/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
#define O(n, t, nt)							\
JEMALLOC_ALWAYS_INLINE t						\
tsd_##n##_get(tsd_t *tsd) {						\
	return *tsd_##n##p_get(tsd);					\
}
MALLOC_TSD
#undef O

/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
#define O(n, t, nt)							\
JEMALLOC_ALWAYS_INLINE void						\
tsd_##n##_set(tsd_t *tsd, t val) {					\
	assert(tsd->state != tsd_state_reincarnated &&			\
	    tsd->state != tsd_state_minimal_initialized);		\
	*tsd_##n##p_get(tsd) = val;					\
}
MALLOC_TSD
#undef O

JEMALLOC_ALWAYS_INLINE void
tsd_assert_fast(tsd_t *tsd) {
    assert(!malloc_slow && tsd_tcache_enabled_get(tsd) &&
        tsd_reentrancy_level_get(tsd) == 0);
}

JEMALLOC_ALWAYS_INLINE bool
tsd_fast(tsd_t *tsd) {
    bool fast = (tsd->state == tsd_state_nominal);
    if (fast) {
        tsd_assert_fast(tsd);
    }

    return fast;
}

JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_impl(bool init, bool minimal) {
    tsd_t *tsd = tsd_get(init);

    if (!init && tsd_get_allocates() && tsd == NULL) {
        return NULL;
    }
    assert(tsd != NULL);

    if (unlikely(tsd->state != tsd_state_nominal)) {
        return tsd_fetch_slow(tsd, minimal);
    }
    assert(tsd_fast(tsd));
    tsd_assert_fast(tsd);

    return tsd;
}

/* Get a minimal TSD that requires no cleanup.  See comments in free(). */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_min(void) {
    return tsd_fetch_impl(true, true);
}

/* For internal background threads use only. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_internal_fetch(void) {
    tsd_t *tsd = tsd_fetch_min();
    /* Use reincarnated state to prevent full initialization. */
    tsd->state = tsd_state_reincarnated;

    return tsd;
}

JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void) {
    return tsd_fetch_impl(true, false);
}

static inline bool
tsd_nominal(tsd_t *tsd) {
    return (tsd->state <= tsd_state_nominal_max);
}

JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void) {
    if (!tsd_booted_get()) {
        return NULL;
    }

    return tsd_tsdn(tsd_fetch_impl(false, false));
}

JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsd_rtree_ctx(tsd_t *tsd) {
    return tsd_rtree_ctxp_get(tsd);
}

JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
    /*
     * If tsd cannot be accessed, initialize the fallback rtree_ctx and
     * return a pointer to it.
     */
    if (unlikely(tsdn_null(tsdn))) {
        rtree_ctx_data_init(fallback);
        return fallback;
    }
    return tsd_rtree_ctx(tsdn_tsd(tsdn));
}

#endif /* JEMALLOC_INTERNAL_TSD_H */
deps/jemalloc/include/jemalloc/internal/tsd_generic.h  deleted  100644 → 0

#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H
#error This file should be included only once, by tsd.h.
#endif
#define JEMALLOC_INTERNAL_TSD_GENERIC_H

typedef struct tsd_init_block_s tsd_init_block_t;
struct tsd_init_block_s {
    ql_elm(tsd_init_block_t) link;
    pthread_t thread;
    void *data;
};

/* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */
typedef struct tsd_init_head_s tsd_init_head_t;

typedef struct {
    bool initialized;
    tsd_t val;
} tsd_wrapper_t;

void *tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);

extern pthread_key_t tsd_tsd;
extern tsd_init_head_t tsd_init_head;
extern tsd_wrapper_t tsd_boot_wrapper;
extern bool tsd_booted;

/* Initialization/cleanup. */
JEMALLOC_ALWAYS_INLINE void
tsd_cleanup_wrapper(void *arg) {
    tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg;

    if (wrapper->initialized) {
        wrapper->initialized = false;
        tsd_cleanup(&wrapper->val);
        if (wrapper->initialized) {
            /* Trigger another cleanup round. */
            if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
                malloc_write("<jemalloc>: Error setting TSD\n");
                if (opt_abort) {
                    abort();
                }
            }
            return;
        }
    }
    malloc_tsd_dalloc(wrapper);
}

JEMALLOC_ALWAYS_INLINE void
tsd_wrapper_set(tsd_wrapper_t *wrapper) {
    if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
        malloc_write("<jemalloc>: Error setting TSD\n");
        abort();
    }
}

JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
tsd_wrapper_get(bool init) {
    tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);

    if (init && unlikely(wrapper == NULL)) {
        tsd_init_block_t block;
        wrapper = (tsd_wrapper_t *)
            tsd_init_check_recursion(&tsd_init_head, &block);
        if (wrapper) {
            return wrapper;
        }
        wrapper = (tsd_wrapper_t *)
            malloc_tsd_malloc(sizeof(tsd_wrapper_t));
        block.data = (void *)wrapper;
        if (wrapper == NULL) {
            malloc_write("<jemalloc>: Error allocating TSD\n");
            abort();
        } else {
            wrapper->initialized = false;
            tsd_t initializer = TSD_INITIALIZER;
            wrapper->val = initializer;
        }
        tsd_wrapper_set(wrapper);
        tsd_init_finish(&tsd_init_head, &block);
    }
    return wrapper;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
    if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) {
        return true;
    }
    tsd_wrapper_set(&tsd_boot_wrapper);
    tsd_booted = true;
    return false;
}

JEMALLOC_ALWAYS_INLINE void
tsd_boot1(void) {
    tsd_wrapper_t *wrapper;
    wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t));
    if (wrapper == NULL) {
        malloc_write("<jemalloc>: Error allocating TSD\n");
        abort();
    }
    tsd_boot_wrapper.initialized = false;
    tsd_cleanup(&tsd_boot_wrapper.val);
    wrapper->initialized = false;
    tsd_t initializer = TSD_INITIALIZER;
    wrapper->val = initializer;
    tsd_wrapper_set(wrapper);
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot(void) {
    if (tsd_boot0()) {
        return true;
    }
    tsd_boot1();
    return false;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_booted_get(void) {
    return tsd_booted;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_get_allocates(void) {
    return true;
}

/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(bool init) {
    tsd_wrapper_t *wrapper;

    assert(tsd_booted);
    wrapper = tsd_wrapper_get(init);
    if (tsd_get_allocates() && !init && wrapper == NULL) {
        return NULL;
    }
    return &wrapper->val;
}

JEMALLOC_ALWAYS_INLINE void
tsd_set(tsd_t *val) {
    tsd_wrapper_t *wrapper;

    assert(tsd_booted);
    wrapper = tsd_wrapper_get(true);
    if (likely(&wrapper->val != val)) {
        wrapper->val = *(val);
    }
    wrapper->initialized = true;
}
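For illustration, a standalone sketch of the pthread_key_t pattern tsd_generic.h wraps: a lazily allocated per-thread structure with a destructor that runs at thread exit. demo_tsd_t, demo_key, and the field name are illustrative and much simpler than jemalloc's wrapper, which also guards against recursive initialization.

/* Sketch only: demo names, not jemalloc API. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    unsigned long thread_allocated;
} demo_tsd_t;

static pthread_key_t demo_key;

static void
demo_cleanup(void *arg) {
    /* Runs at thread exit, like tsd_cleanup_wrapper(). */
    free(arg);
}

static demo_tsd_t *
demo_tsd_get(void) {
    demo_tsd_t *tsd = pthread_getspecific(demo_key);
    if (tsd == NULL) {
        /* Lazily allocate and register the per-thread block. */
        tsd = calloc(1, sizeof(*tsd));
        if (tsd == NULL || pthread_setspecific(demo_key, tsd) != 0) {
            abort();  /* jemalloc writes an error message first */
        }
    }
    return tsd;
}

int
main(void) {
    if (pthread_key_create(&demo_key, demo_cleanup) != 0) {
        return 1;
    }
    demo_tsd_get()->thread_allocated += 64;
    printf("allocated: %lu\n", demo_tsd_get()->thread_allocated);
    return 0;
}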
deps/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h  deleted  100644 → 0

#ifdef JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H
#error This file should be included only once, by tsd.h.
#endif
#define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H

extern __thread tsd_t tsd_tls;
extern __thread bool tsd_initialized;
extern bool tsd_booted;

/* Initialization/cleanup. */
JEMALLOC_ALWAYS_INLINE bool
tsd_cleanup_wrapper(void) {
    if (tsd_initialized) {
        tsd_initialized = false;
        tsd_cleanup(&tsd_tls);
    }
    return tsd_initialized;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
    malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
    tsd_booted = true;
    return false;
}

JEMALLOC_ALWAYS_INLINE void
tsd_boot1(void) {
    /* Do nothing. */
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot(void) {
    return tsd_boot0();
}

JEMALLOC_ALWAYS_INLINE bool
tsd_booted_get(void) {
    return tsd_booted;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_get_allocates(void) {
    return false;
}

/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(bool init) {
    assert(tsd_booted);
    return &tsd_tls;
}

JEMALLOC_ALWAYS_INLINE void
tsd_set(tsd_t *val) {
    assert(tsd_booted);
    if (likely(&tsd_tls != val)) {
        tsd_tls = (*val);
    }
    tsd_initialized = true;
}
deps/jemalloc/include/jemalloc/internal/tsd_tls.h  deleted  100644 → 0

#ifdef JEMALLOC_INTERNAL_TSD_TLS_H
#error This file should be included only once, by tsd.h.
#endif
#define JEMALLOC_INTERNAL_TSD_TLS_H

extern __thread tsd_t tsd_tls;
extern pthread_key_t tsd_tsd;
extern bool tsd_booted;

/* Initialization/cleanup. */
JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
    if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) {
        return true;
    }
    tsd_booted = true;
    return false;
}

JEMALLOC_ALWAYS_INLINE void
tsd_boot1(void) {
    /* Do nothing. */
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot(void) {
    return tsd_boot0();
}

JEMALLOC_ALWAYS_INLINE bool
tsd_booted_get(void) {
    return tsd_booted;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_get_allocates(void) {
    return false;
}

/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(UNUSED bool init) {
    assert(tsd_booted);
    return &tsd_tls;
}

JEMALLOC_ALWAYS_INLINE void
tsd_set(tsd_t *val) {
    assert(tsd_booted);
    if (likely(&tsd_tls != val)) {
        tsd_tls = (*val);
    }
    if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) {
        malloc_write("<jemalloc>: Error setting tsd.\n");
        if (opt_abort) {
            abort();
        }
    }
}
deps/jemalloc/include/jemalloc/internal/tsd_types.h  deleted  100644 → 0

#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H
#define JEMALLOC_INTERNAL_TSD_TYPES_H

#define MALLOC_TSD_CLEANUPS_MAX 2

typedef struct tsd_s tsd_t;
typedef struct tsdn_s tsdn_t;
typedef bool (*malloc_tsd_cleanup_t)(void);

#endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */
deps/jemalloc/include/jemalloc/internal/tsd_win.h  deleted  100644 → 0

#ifdef JEMALLOC_INTERNAL_TSD_WIN_H
#error This file should be included only once, by tsd.h.
#endif
#define JEMALLOC_INTERNAL_TSD_WIN_H

typedef struct {
    bool initialized;
    tsd_t val;
} tsd_wrapper_t;

extern DWORD tsd_tsd;
extern tsd_wrapper_t tsd_boot_wrapper;
extern bool tsd_booted;

/* Initialization/cleanup. */
JEMALLOC_ALWAYS_INLINE bool
tsd_cleanup_wrapper(void) {
    DWORD error = GetLastError();
    tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd);
    SetLastError(error);

    if (wrapper == NULL) {
        return false;
    }

    if (wrapper->initialized) {
        wrapper->initialized = false;
        tsd_cleanup(&wrapper->val);
        if (wrapper->initialized) {
            /* Trigger another cleanup round. */
            return true;
        }
    }
    malloc_tsd_dalloc(wrapper);
    return false;
}

JEMALLOC_ALWAYS_INLINE void
tsd_wrapper_set(tsd_wrapper_t *wrapper) {
    if (!TlsSetValue(tsd_tsd, (void *)wrapper)) {
        malloc_write("<jemalloc>: Error setting TSD\n");
        abort();
    }
}

JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
tsd_wrapper_get(bool init) {
    DWORD error = GetLastError();
    tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd);
    SetLastError(error);

    if (init && unlikely(wrapper == NULL)) {
        wrapper = (tsd_wrapper_t *)
            malloc_tsd_malloc(sizeof(tsd_wrapper_t));
        if (wrapper == NULL) {
            malloc_write("<jemalloc>: Error allocating TSD\n");
            abort();
        } else {
            wrapper->initialized = false;
            /* MSVC is finicky about aggregate initialization. */
            tsd_t tsd_initializer = TSD_INITIALIZER;
            wrapper->val = tsd_initializer;
        }
        tsd_wrapper_set(wrapper);
    }
    return wrapper;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
    tsd_tsd = TlsAlloc();
    if (tsd_tsd == TLS_OUT_OF_INDEXES) {
        return true;
    }
    malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
    tsd_wrapper_set(&tsd_boot_wrapper);
    tsd_booted = true;
    return false;
}

JEMALLOC_ALWAYS_INLINE void
tsd_boot1(void) {
    tsd_wrapper_t *wrapper;
    wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t));
    if (wrapper == NULL) {
        malloc_write("<jemalloc>: Error allocating TSD\n");
        abort();
    }
    tsd_boot_wrapper.initialized = false;
    tsd_cleanup(&tsd_boot_wrapper.val);
    wrapper->initialized = false;
    tsd_t initializer = TSD_INITIALIZER;
    wrapper->val = initializer;
    tsd_wrapper_set(wrapper);
}

JEMALLOC_ALWAYS_INLINE bool
tsd_boot(void) {
    if (tsd_boot0()) {
        return true;
    }
    tsd_boot1();
    return false;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_booted_get(void) {
    return tsd_booted;
}

JEMALLOC_ALWAYS_INLINE bool
tsd_get_allocates(void) {
    return true;
}

/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(bool init) {
    tsd_wrapper_t *wrapper;

    assert(tsd_booted);
    wrapper = tsd_wrapper_get(init);
    if (tsd_get_allocates() && !init && wrapper == NULL) {
        return NULL;
    }
    return &wrapper->val;
}

JEMALLOC_ALWAYS_INLINE void
tsd_set(tsd_t *val) {
    tsd_wrapper_t *wrapper;

    assert(tsd_booted);
    wrapper = tsd_wrapper_get(true);
    if (likely(&wrapper->val != val)) {
        wrapper->val = *(val);
    }
    wrapper->initialized = true;
}
deps/jemalloc/include/jemalloc/internal/util.h  deleted  100644 → 0

#ifndef JEMALLOC_INTERNAL_UTIL_H
#define JEMALLOC_INTERNAL_UTIL_H

#define UTIL_INLINE static inline

/* Junk fill patterns. */
#ifndef JEMALLOC_ALLOC_JUNK
#  define JEMALLOC_ALLOC_JUNK	((uint8_t)0xa5)
#endif
#ifndef JEMALLOC_FREE_JUNK
#  define JEMALLOC_FREE_JUNK	((uint8_t)0x5a)
#endif

/*
 * Wrap a cpp argument that contains commas such that it isn't broken up into
 * multiple arguments.
 */
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__

/* cpp macro definition stringification. */
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)

/*
 * Silence compiler warnings due to uninitialized values.  This is used
 * wherever the compiler fails to recognize that the variable is never used
 * uninitialized.
 */
#define JEMALLOC_CC_SILENCE_INIT(v) = v

#ifdef __GNUC__
#  define likely(x)   __builtin_expect(!!(x), 1)
#  define unlikely(x) __builtin_expect(!!(x), 0)
#else
#  define likely(x)   !!(x)
#  define unlikely(x) !!(x)
#endif

#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
#  error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
#endif
#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()

/* Set error code. */
UTIL_INLINE void
set_errno(int errnum) {
#ifdef _WIN32
    SetLastError(errnum);
#else
    errno = errnum;
#endif
}

/* Get last error code. */
UTIL_INLINE int
get_errno(void) {
#ifdef _WIN32
    return GetLastError();
#else
    return errno;
#endif
}

#undef UTIL_INLINE

#endif /* JEMALLOC_INTERNAL_UTIL_H */
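For illustration, a standalone sketch of the likely()/unlikely() branch hints defined above, wired to __builtin_expect() on GCC-compatible compilers. demo_parse() and its error path are illustrative only.

/* Sketch only: demo names, not jemalloc API. */
#include <stdio.h>

#ifdef __GNUC__
#  define likely(x)   __builtin_expect(!!(x), 1)
#  define unlikely(x) __builtin_expect(!!(x), 0)
#else
#  define likely(x)   !!(x)
#  define unlikely(x) !!(x)
#endif

static int
demo_parse(const char *s) {
    if (unlikely(s == NULL)) {
        /* Cold path: the compiler may lay this out out-of-line. */
        return -1;
    }
    /* Hot path continues here. */
    return (int)*s;
}

int
main(void) {
    printf("%d\n", demo_parse("a"));
    return 0;
}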
deps/jemalloc/include/jemalloc/internal/witness.h  deleted  100644 → 0

#ifndef JEMALLOC_INTERNAL_WITNESS_H
#define JEMALLOC_INTERNAL_WITNESS_H

#include "jemalloc/internal/ql.h"

/******************************************************************************/
/* LOCK RANKS */
/******************************************************************************/

/*
 * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness
 * machinery.
 */
#define WITNESS_RANK_OMIT		0U

#define WITNESS_RANK_MIN		1U

#define WITNESS_RANK_INIT		1U
#define WITNESS_RANK_CTL		1U
#define WITNESS_RANK_TCACHES		2U
#define WITNESS_RANK_ARENAS		3U

#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL	4U

#define WITNESS_RANK_PROF_DUMP		5U
#define WITNESS_RANK_PROF_BT2GCTX	6U
#define WITNESS_RANK_PROF_TDATAS	7U
#define WITNESS_RANK_PROF_TDATA		8U
#define WITNESS_RANK_PROF_GCTX		9U

#define WITNESS_RANK_BACKGROUND_THREAD	10U

/*
 * Used as an argument to witness_assert_depth_to_rank() in order to validate
 * depth excluding non-core locks with lower ranks.  Since the rank argument to
 * witness_assert_depth_to_rank() is inclusive rather than exclusive, this
 * definition can have the same value as the minimally ranked core lock.
 */
#define WITNESS_RANK_CORE		11U

#define WITNESS_RANK_DECAY		11U
#define WITNESS_RANK_TCACHE_QL		12U
#define WITNESS_RANK_EXTENT_GROW	13U
#define WITNESS_RANK_EXTENTS		14U
#define WITNESS_RANK_EXTENT_AVAIL	15U

#define WITNESS_RANK_EXTENT_POOL	16U
#define WITNESS_RANK_RTREE		17U
#define WITNESS_RANK_BASE		18U
#define WITNESS_RANK_ARENA_LARGE	19U

#define WITNESS_RANK_LEAF		0xffffffffU
#define WITNESS_RANK_BIN		WITNESS_RANK_LEAF
#define WITNESS_RANK_ARENA_STATS	WITNESS_RANK_LEAF
#define WITNESS_RANK_DSS		WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACTIVE	WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACCUM		WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_DUMP_SEQ	WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_GDUMP		WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_NEXT_THR_UID	WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT	WITNESS_RANK_LEAF

/******************************************************************************/
/* PER-WITNESS DATA */
/******************************************************************************/
#if defined(JEMALLOC_DEBUG)
#  define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}}
#else
#  define WITNESS_INITIALIZER(name, rank)
#endif

typedef struct witness_s witness_t;
typedef unsigned witness_rank_t;
typedef ql_head(witness_t) witness_list_t;
typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
    void *);

struct witness_s {
    /* Name, used for printing lock order reversal messages. */
    const char *name;

    /*
     * Witness rank, where 0 is lowest and UINT_MAX is highest.  Witnesses
     * must be acquired in order of increasing rank.
     */
    witness_rank_t rank;

    /*
     * If two witnesses are of equal rank and they have the same comp
     * function pointer, it is called as a last attempt to differentiate
     * between witnesses of equal rank.
     */
    witness_comp_t *comp;

    /* Opaque data, passed to comp(). */
    void *opaque;

    /* Linkage for thread's currently owned locks. */
    ql_elm(witness_t) link;
};

/******************************************************************************/
/* PER-THREAD DATA */
/******************************************************************************/
typedef struct witness_tsd_s witness_tsd_t;
struct witness_tsd_s {
    witness_list_t witnesses;
    bool forking;
};

#define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false }
#define WITNESS_TSDN_NULL ((witness_tsdn_t *)0)

/******************************************************************************/
/* (PER-THREAD) NULLABILITY HELPERS */
/******************************************************************************/
typedef struct witness_tsdn_s witness_tsdn_t;
struct witness_tsdn_s {
    witness_tsd_t witness_tsd;
};

JEMALLOC_ALWAYS_INLINE witness_tsdn_t *
witness_tsd_tsdn(witness_tsd_t *witness_tsd) {
    return (witness_tsdn_t *)witness_tsd;
}

JEMALLOC_ALWAYS_INLINE bool
witness_tsdn_null(witness_tsdn_t *witness_tsdn) {
    return witness_tsdn == NULL;
}

JEMALLOC_ALWAYS_INLINE witness_tsd_t *
witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) {
    assert(!witness_tsdn_null(witness_tsdn));
    return &witness_tsdn->witness_tsd;
}

/******************************************************************************/
/* API */
/******************************************************************************/
void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
    witness_comp_t *comp, void *opaque);

typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
extern witness_lock_error_t *JET_MUTABLE witness_lock_error;

typedef void (witness_owner_error_t)(const witness_t *);
extern witness_owner_error_t *JET_MUTABLE witness_owner_error;

typedef void (witness_not_owner_error_t)(const witness_t *);
extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error;

typedef void (witness_depth_error_t)(const witness_list_t *,
    witness_rank_t rank_inclusive, unsigned depth);
extern witness_depth_error_t *JET_MUTABLE witness_depth_error;

void witnesses_cleanup(witness_tsd_t *witness_tsd);
void witness_prefork(witness_tsd_t *witness_tsd);
void witness_postfork_parent(witness_tsd_t *witness_tsd);
void witness_postfork_child(witness_tsd_t *witness_tsd);

/* Helper, not intended for direct use. */
static inline bool
witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) {
    witness_list_t *witnesses;
    witness_t *w;

    cassert(config_debug);

    witnesses = &witness_tsd->witnesses;
    ql_foreach(w, witnesses, link) {
        if (w == witness) {
            return true;
        }
    }

    return false;
}

static inline void
witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) {
    witness_tsd_t *witness_tsd;

    if (!config_debug) {
        return;
    }

    if (witness_tsdn_null(witness_tsdn)) {
        return;
    }
    witness_tsd = witness_tsdn_tsd(witness_tsdn);
    if (witness->rank == WITNESS_RANK_OMIT) {
        return;
    }

    if (witness_owner(witness_tsd, witness)) {
        return;
    }
    witness_owner_error(witness);
}

static inline void
witness_assert_not_owner(witness_tsdn_t *witness_tsdn,
    const witness_t *witness) {
    witness_tsd_t *witness_tsd;
    witness_list_t *witnesses;
    witness_t *w;

    if (!config_debug) {
        return;
    }

    if (witness_tsdn_null(witness_tsdn)) {
        return;
    }
    witness_tsd = witness_tsdn_tsd(witness_tsdn);
    if (witness->rank == WITNESS_RANK_OMIT) {
        return;
    }

    witnesses = &witness_tsd->witnesses;
    ql_foreach(w, witnesses, link) {
        if (w == witness) {
            witness_not_owner_error(witness);
        }
    }
}

static inline void
witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
    witness_rank_t rank_inclusive, unsigned depth) {
    witness_tsd_t *witness_tsd;
    unsigned d;
    witness_list_t *witnesses;
    witness_t *w;

    if (!config_debug) {
        return;
    }

    if (witness_tsdn_null(witness_tsdn)) {
        return;
    }
    witness_tsd = witness_tsdn_tsd(witness_tsdn);

    d = 0;
    witnesses = &witness_tsd->witnesses;
    w = ql_last(witnesses, link);
    if (w != NULL) {
        ql_reverse_foreach(w, witnesses, link) {
            if (w->rank < rank_inclusive) {
                break;
            }
            d++;
        }
    }
    if (d != depth) {
        witness_depth_error(witnesses, rank_inclusive, depth);
    }
}

static inline void
witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) {
    witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth);
}

static inline void
witness_assert_lockless(witness_tsdn_t *witness_tsdn) {
    witness_assert_depth(witness_tsdn, 0);
}

static inline void
witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
    witness_tsd_t *witness_tsd;
    witness_list_t *witnesses;
    witness_t *w;

    if (!config_debug) {
        return;
    }

    if (witness_tsdn_null(witness_tsdn)) {
        return;
    }
    witness_tsd = witness_tsdn_tsd(witness_tsdn);
    if (witness->rank == WITNESS_RANK_OMIT) {
        return;
    }

    witness_assert_not_owner(witness_tsdn, witness);

    witnesses = &witness_tsd->witnesses;
    w = ql_last(witnesses, link);
    if (w == NULL) {
        /* No other locks; do nothing. */
    } else if (witness_tsd->forking && w->rank <= witness->rank) {
        /* Forking, and relaxed ranking satisfied. */
    } else if (w->rank > witness->rank) {
        /* Not forking, rank order reversal. */
        witness_lock_error(witnesses, witness);
    } else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
        witness->comp || w->comp(w, w->opaque, witness, witness->opaque) >
        0)) {
        /*
         * Missing/incompatible comparison function, or comparison
         * function indicates rank order reversal.
         */
        witness_lock_error(witnesses, witness);
    }

    ql_elm_new(witness, link);
    ql_tail_insert(witnesses, witness, link);
}

static inline void
witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
    witness_tsd_t *witness_tsd;
    witness_list_t *witnesses;

    if (!config_debug) {
        return;
    }

    if (witness_tsdn_null(witness_tsdn)) {
        return;
    }
    witness_tsd = witness_tsdn_tsd(witness_tsdn);
    if (witness->rank == WITNESS_RANK_OMIT) {
        return;
    }

    /*
     * Check whether owner before removal, rather than relying on
     * witness_assert_owner() to abort, so that unit tests can test this
     * function's failure mode without causing undefined behavior.
     */
    if (witness_owner(witness_tsd, witness)) {
        witnesses = &witness_tsd->witnesses;
        ql_remove(witnesses, witness, link);
    } else {
        witness_assert_owner(witness_tsdn, witness);
    }
}

#endif /* JEMALLOC_INTERNAL_WITNESS_H */
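For illustration, a standalone sketch of the rank-ordering rule the witness machinery enforces: locks must be acquired in order of non-decreasing rank, so a would-be reversal is caught at lock time. demo_witness_t, demo_lock(), and demo_unlock() are illustrative and far simpler than witness_lock() above (no forking relaxation, no comparison functions).

/* Sketch only: demo names, not jemalloc API. */
#include <assert.h>
#include <stddef.h>

typedef struct {
    const char *name;
    unsigned rank;
} demo_witness_t;

#define DEMO_MAX_HELD 8

typedef struct {
    demo_witness_t *held[DEMO_MAX_HELD];
    int nheld;
} demo_witness_tsd_t;

static void
demo_lock(demo_witness_tsd_t *tsd, demo_witness_t *w) {
    if (tsd->nheld > 0) {
        demo_witness_t *last = tsd->held[tsd->nheld - 1];
        /* A rank order reversal would call witness_lock_error() for real. */
        assert(last->rank <= w->rank);
    }
    assert(tsd->nheld < DEMO_MAX_HELD);
    tsd->held[tsd->nheld++] = w;
}

static void
demo_unlock(demo_witness_tsd_t *tsd, demo_witness_t *w) {
    assert(tsd->nheld > 0 && tsd->held[tsd->nheld - 1] == w);
    tsd->nheld--;
}

int
main(void) {
    demo_witness_tsd_t tsd = {{NULL}, 0};
    demo_witness_t tcaches = {"tcaches", 2U};
    demo_witness_t arenas = {"arenas", 3U};
    demo_lock(&tsd, &tcaches);  /* rank 2 first ... */
    demo_lock(&tsd, &arenas);   /* ... then rank 3: allowed */
    demo_unlock(&tsd, &arenas);
    demo_unlock(&tsd, &tcaches);
    return 0;
}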
deps/jemalloc/include/jemalloc/jemalloc.sh  deleted  100755 → 0

#!/bin/sh

objroot=$1

cat <<EOF
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif

EOF

for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
           jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do
  cat "${objroot}include/jemalloc/${hdr}" \
      | grep -v 'Generated from .* by configure\.' \
      | sed -e 's/ $//g'
  echo
done

cat <<EOF
#ifdef __cplusplus
}
#endif
#endif /* JEMALLOC_H_ */
EOF
deps/jemalloc/include/jemalloc/jemalloc_defs.h.in  deleted  100644 → 0
/* Defined if __attribute__((...)) syntax is supported. */
#undef JEMALLOC_HAVE_ATTR
/* Defined if alloc_size attribute is supported. */
#undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
/* Defined if format(gnu_printf, ...) attribute is supported. */
#undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
/* Defined if format(printf, ...) attribute is supported. */
#undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
#undef JEMALLOC_OVERRIDE_MEMALIGN
#undef JEMALLOC_OVERRIDE_VALLOC
/*
* At least Linux omits the "const" in:
*
* size_t malloc_usable_size(const void *ptr);
*
* Match the operating system's prototype.
*/
#undef JEMALLOC_USABLE_SIZE_CONST
/*
* If defined, specify throw() for the public function prototypes when compiling
* with C++. The only justification for this is to match the prototypes that
* glibc defines.
*/
#undef JEMALLOC_USE_CXX_THROW
#ifdef _MSC_VER
# ifdef _WIN64
# define LG_SIZEOF_PTR_WIN 3
# else
# define LG_SIZEOF_PTR_WIN 2
# endif
#endif
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#undef LG_SIZEOF_PTR
deps/jemalloc/include/jemalloc/jemalloc_macros.h.in  deleted  100644 → 0
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>
#include <strings.h>
#define JEMALLOC_VERSION "@jemalloc_version@"
#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
#define MALLOCX_LG_ALIGN(la) ((int)(la))
#if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
#else
# define MALLOCX_ALIGN(a) \
((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
ffs((int)(((size_t)(a))>>32))+31))
#endif
#define MALLOCX_ZERO ((int)0x40)
/*
* Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
* encodes MALLOCX_TCACHE_NONE.
*/
#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
/*
* Bias arena index bits so that 0 encodes "use an automatically chosen arena".
*/
#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
/*
* Use as arena index in "arena.<i>.{purge,decay,dss}" and
* "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
* definition is intentionally specified in raw decimal format to support
* cpp-based string concatenation, e.g.
*
* #define STRINGIFY_HELPER(x) #x
* #define STRINGIFY(x) STRINGIFY_HELPER(x)
*
* mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
* 0);
*/
#define MALLCTL_ARENAS_ALL 4096
/*
* Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
* destroyed arenas.
*/
#define MALLCTL_ARENAS_DESTROYED 4097
#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
# define JEMALLOC_CXX_THROW throw()
#else
# define JEMALLOC_CXX_THROW
#endif
#if defined(_MSC_VER)
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# ifndef JEMALLOC_EXPORT
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# endif
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_NOINLINE __declspec(noinline)
# ifdef __cplusplus
# define JEMALLOC_NOTHROW __declspec(nothrow)
# else
# define JEMALLOC_NOTHROW
# endif
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
# if _MSC_VER >= 1900 && !defined(__EDG__)
# define JEMALLOC_ALLOCATOR __declspec(allocator)
# else
# define JEMALLOC_ALLOCATOR
# endif
#elif defined(JEMALLOC_HAVE_ATTR)
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
# else
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# endif
# ifndef JEMALLOC_EXPORT
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# endif
# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
# else
# define JEMALLOC_FORMAT_PRINTF(s, i)
# endif
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_ALLOC_SIZE(s)
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# define JEMALLOC_EXPORT
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_NOINLINE
# define JEMALLOC_NOTHROW
# define JEMALLOC_SECTION(s)
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
#endif
/* This version of Jemalloc, modified for Redis, has the je_get_defrag_hint()
* function. */
#define JEMALLOC_FRAG_HINT
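For illustration, a hedged sketch of how the MALLOCX_* flag macros above are combined by callers of jemalloc's non-standard public API (mallocx(), sallocx(), dallocx()). It assumes a program linked against jemalloc with the public header installed as <jemalloc/jemalloc.h>.

/* Sketch only: assumes jemalloc is installed and linked. */
#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void) {
    /* 4 KiB, 64-byte aligned, zeroed, bypassing the thread cache. */
    void *p = mallocx(4096,
        MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
    if (p == NULL) {
        return 1;
    }
    printf("usable size: %zu\n", sallocx(p, 0));
    dallocx(p, MALLOCX_TCACHE_NONE);
    return 0;
}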
deps/jemalloc/include/jemalloc/jemalloc_mangle.sh  deleted  100755 → 0

#!/bin/sh -eu

public_symbols_txt=$1
symbol_prefix=$2

cat <<EOF
/*
 * By default application code must explicitly refer to mangled symbol names,
 * so that it is possible to use jemalloc in conjunction with another allocator
 * in the same application.  Define JEMALLOC_MANGLE in order to cause automatic
 * name mangling that matches the API prefixing that happened as a result of
 * --with-mangling and/or --with-jemalloc-prefix configuration settings.
 */
#ifdef JEMALLOC_MANGLE
#  ifndef JEMALLOC_NO_DEMANGLE
#    define JEMALLOC_NO_DEMANGLE
#  endif
EOF

for nm in `cat ${public_symbols_txt}` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "#  define ${n} ${symbol_prefix}${n}"
done

cat <<EOF
#endif

/*
 * The ${symbol_prefix}* macros can be used as stable alternative names for the
 * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined.  This is primarily
 * meant for use in jemalloc itself, but it can be used by application code to
 * provide isolation from the name mangling specified via --with-mangling
 * and/or --with-jemalloc-prefix.
 */
#ifndef JEMALLOC_NO_DEMANGLE
EOF

for nm in `cat ${public_symbols_txt}` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "#  undef ${symbol_prefix}${n}"
done

cat <<EOF
#endif
EOF