ruanhaishen / redis · Commits · fb1f4f4e

Commit fb1f4f4e (unverified), authored Oct 25, 2019 by Wander Hillen, committed via GitHub on Oct 25, 2019.

    Merge branch 'unstable' into minor-typos

Parents: dda8cc18, 6e98214f
Changes: 203 files (the diff view shows 203 of 203+ changed files)
deps/jemalloc/include/jemalloc/internal/extent_externs.h (new file, mode 100644):

#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H

#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"

extern size_t opt_lg_extent_max_active_fit;

extern rtree_t extents_rtree;
extern const extent_hooks_t extent_hooks_default;
extern mutex_pool_t extent_mutex_pool;

extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);

extent_hooks_t *extent_hooks_get(arena_t *arena);
extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
    extent_hooks_t *extent_hooks);

#ifdef JEMALLOC_JET
size_t extent_size_quantize_floor(size_t size);
size_t extent_size_quantize_ceil(size_t size);
#endif

rb_proto(, extent_avail_, extent_tree_t, extent_t)
ph_proto(, extent_heap_, extent_heap_t, extent_t)

bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
    bool delay_coalesce);
extent_state_t extents_state_get(const extents_t *extents);
size_t extents_npages_get(extents_t *extents);
extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit);
void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
void extents_prefork(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);

extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent);
void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent);
bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);

bool extent_boot(void);

#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */
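Most of the wrapper prototypes above take an extent_hooks_t **r_extent_hooks rather than a plain pointer. A plausible reading, consistent with how lazy-initialization APIs of this shape usually work, is that callers start from EXTENT_HOOKS_INITIALIZER and let the callee resolve and cache the arena's current hooks on first use. The following sketch only illustrates that pointer-to-pointer pattern; hooks_t, default_hooks, hooks_assure_initialized and do_alloc are stand-in names for this example, not jemalloc API.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for a table of callbacks such as extent_hooks_t. */
typedef struct {
    const char *name;
} hooks_t;

static hooks_t default_hooks = { "default" };

/* Lazily resolve the hooks pointer the first time it is needed. */
static void hooks_assure_initialized(hooks_t **r_hooks) {
    if (*r_hooks == NULL) {  /* NULL plays the role of EXTENT_HOOKS_INITIALIZER here. */
        *r_hooks = &default_hooks;
    }
}

static void do_alloc(hooks_t **r_hooks) {
    hooks_assure_initialized(r_hooks);
    printf("allocating via %s hooks\n", (*r_hooks)->name);
}

int main(void) {
    hooks_t *hooks = NULL;  /* caller starts with the initializer value */
    do_alloc(&hooks);       /* first call resolves the pointer ... */
    do_alloc(&hooks);       /* ... later calls reuse it without re-resolving */
    return 0;
}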
deps/jemalloc/include/jemalloc/internal/extent_inlines.h (new file, mode 100644):

#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
#define JEMALLOC_INTERNAL_EXTENT_INLINES_H

#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sz.h"

static inline void
extent_lock(tsdn_t *tsdn, extent_t *extent) {
    assert(extent != NULL);
    mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
}

static inline void
extent_unlock(tsdn_t *tsdn, extent_t *extent) {
    assert(extent != NULL);
    mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
}

static inline void
extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
    assert(extent1 != NULL && extent2 != NULL);
    mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
        (uintptr_t)extent2);
}

static inline void
extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
    assert(extent1 != NULL && extent2 != NULL);
    mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
        (uintptr_t)extent2);
}

static inline arena_t *
extent_arena_get(const extent_t *extent) {
    unsigned arena_ind = (unsigned)((extent->e_bits &
        EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
    /*
     * The following check is omitted because we should never actually read
     * a NULL arena pointer.
     */
    if (false && arena_ind >= MALLOCX_ARENA_LIMIT) {
        return NULL;
    }
    assert(arena_ind < MALLOCX_ARENA_LIMIT);

    return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
}

static inline szind_t
extent_szind_get_maybe_invalid(const extent_t *extent) {
    szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
        EXTENT_BITS_SZIND_SHIFT);
    assert(szind <= NSIZES);
    return szind;
}

static inline szind_t
extent_szind_get(const extent_t *extent) {
    szind_t szind = extent_szind_get_maybe_invalid(extent);
    assert(szind < NSIZES); /* Never call when "invalid". */
    return szind;
}

static inline size_t
extent_usize_get(const extent_t *extent) {
    return sz_index2size(extent_szind_get(extent));
}

static inline size_t
extent_sn_get(const extent_t *extent) {
    return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
        EXTENT_BITS_SN_SHIFT);
}

static inline extent_state_t
extent_state_get(const extent_t *extent) {
    return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
        EXTENT_BITS_STATE_SHIFT);
}

static inline bool
extent_zeroed_get(const extent_t *extent) {
    return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
        EXTENT_BITS_ZEROED_SHIFT);
}

static inline bool
extent_committed_get(const extent_t *extent) {
    return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
        EXTENT_BITS_COMMITTED_SHIFT);
}

static inline bool
extent_dumpable_get(const extent_t *extent) {
    return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
        EXTENT_BITS_DUMPABLE_SHIFT);
}

static inline bool
extent_slab_get(const extent_t *extent) {
    return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
        EXTENT_BITS_SLAB_SHIFT);
}

static inline unsigned
extent_nfree_get(const extent_t *extent) {
    assert(extent_slab_get(extent));
    return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
        EXTENT_BITS_NFREE_SHIFT);
}

static inline void *
extent_base_get(const extent_t *extent) {
    assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
        !extent_slab_get(extent));
    return PAGE_ADDR2BASE(extent->e_addr);
}

static inline void *
extent_addr_get(const extent_t *extent) {
    assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
        !extent_slab_get(extent));
    return extent->e_addr;
}

static inline size_t
extent_size_get(const extent_t *extent) {
    return (extent->e_size_esn & EXTENT_SIZE_MASK);
}

static inline size_t
extent_esn_get(const extent_t *extent) {
    return (extent->e_size_esn & EXTENT_ESN_MASK);
}

static inline size_t
extent_bsize_get(const extent_t *extent) {
    return extent->e_bsize;
}

static inline void *
extent_before_get(const extent_t *extent) {
    return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
}

static inline void *
extent_last_get(const extent_t *extent) {
    return (void *)((uintptr_t)extent_base_get(extent) +
        extent_size_get(extent) - PAGE);
}

static inline void *
extent_past_get(const extent_t *extent) {
    return (void *)((uintptr_t)extent_base_get(extent) +
        extent_size_get(extent));
}

static inline arena_slab_data_t *
extent_slab_data_get(extent_t *extent) {
    assert(extent_slab_get(extent));
    return &extent->e_slab_data;
}

static inline const arena_slab_data_t *
extent_slab_data_get_const(const extent_t *extent) {
    assert(extent_slab_get(extent));
    return &extent->e_slab_data;
}

static inline prof_tctx_t *
extent_prof_tctx_get(const extent_t *extent) {
    return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
        ATOMIC_ACQUIRE);
}

static inline void
extent_arena_set(extent_t *extent, arena_t *arena) {
    unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
        MALLOCX_ARENA_BITS) - 1);
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
        ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
}

static inline void
extent_addr_set(extent_t *extent, void *addr) {
    extent->e_addr = addr;
}

static inline void
extent_addr_randomize(UNUSED tsdn_t *tsdn, extent_t *extent, size_t alignment) {
    assert(extent_base_get(extent) == extent_addr_get(extent));

    if (alignment < PAGE) {
        unsigned lg_range = LG_PAGE -
            lg_floor(CACHELINE_CEILING(alignment));
        size_t r;
        if (!tsdn_null(tsdn)) {
            tsd_t *tsd = tsdn_tsd(tsdn);
            r = (size_t)prng_lg_range_u64(
                tsd_offset_statep_get(tsd), lg_range);
        } else {
            r = prng_lg_range_zu(
                &extent_arena_get(extent)->offset_state,
                lg_range, true);
        }
        uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
            lg_range);
        extent->e_addr = (void *)((uintptr_t)extent->e_addr +
            random_offset);
        assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
            extent->e_addr);
    }
}

static inline void
extent_size_set(extent_t *extent, size_t size) {
    assert((size & ~EXTENT_SIZE_MASK) == 0);
    extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
}

static inline void
extent_esn_set(extent_t *extent, size_t esn) {
    extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
        EXTENT_ESN_MASK);
}

static inline void
extent_bsize_set(extent_t *extent, size_t bsize) {
    extent->e_bsize = bsize;
}

static inline void
extent_szind_set(extent_t *extent, szind_t szind) {
    assert(szind <= NSIZES); /* NSIZES means "invalid". */
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
        ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
}

static inline void
extent_nfree_set(extent_t *extent, unsigned nfree) {
    assert(extent_slab_get(extent));
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
        ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
}

static inline void
extent_nfree_inc(extent_t *extent) {
    assert(extent_slab_get(extent));
    extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
}

static inline void
extent_nfree_dec(extent_t *extent) {
    assert(extent_slab_get(extent));
    extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
}

static inline void
extent_sn_set(extent_t *extent, size_t sn) {
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
        ((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
}

static inline void
extent_state_set(extent_t *extent, extent_state_t state) {
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
        ((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
}

static inline void
extent_zeroed_set(extent_t *extent, bool zeroed) {
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
        ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
}

static inline void
extent_committed_set(extent_t *extent, bool committed) {
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
        ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
}

static inline void
extent_dumpable_set(extent_t *extent, bool dumpable) {
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
        ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
}

static inline void
extent_slab_set(extent_t *extent, bool slab) {
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
        ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
}

static inline void
extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
    atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
}

static inline void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
    bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
    bool committed, bool dumpable) {
    assert(addr == PAGE_ADDR2BASE(addr) || !slab);

    extent_arena_set(extent, arena);
    extent_addr_set(extent, addr);
    extent_size_set(extent, size);
    extent_slab_set(extent, slab);
    extent_szind_set(extent, szind);
    extent_sn_set(extent, sn);
    extent_state_set(extent, state);
    extent_zeroed_set(extent, zeroed);
    extent_committed_set(extent, committed);
    extent_dumpable_set(extent, dumpable);
    ql_elm_new(extent, ql_link);
    if (config_prof) {
        extent_prof_tctx_set(extent, NULL);
    }
}

static inline void
extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
    extent_arena_set(extent, NULL);
    extent_addr_set(extent, addr);
    extent_bsize_set(extent, bsize);
    extent_slab_set(extent, false);
    extent_szind_set(extent, NSIZES);
    extent_sn_set(extent, sn);
    extent_state_set(extent, extent_state_active);
    extent_zeroed_set(extent, true);
    extent_committed_set(extent, true);
    extent_dumpable_set(extent, true);
}

static inline void
extent_list_init(extent_list_t *list) {
    ql_new(list);
}

static inline extent_t *
extent_list_first(const extent_list_t *list) {
    return ql_first(list);
}

static inline extent_t *
extent_list_last(const extent_list_t *list) {
    return ql_last(list, ql_link);
}

static inline void
extent_list_append(extent_list_t *list, extent_t *extent) {
    ql_tail_insert(list, extent, ql_link);
}

static inline void
extent_list_prepend(extent_list_t *list, extent_t *extent) {
    ql_head_insert(list, extent, ql_link);
}

static inline void
extent_list_replace(extent_list_t *list, extent_t *to_remove,
    extent_t *to_insert) {
    ql_after_insert(to_remove, to_insert, ql_link);
    ql_remove(list, to_remove, ql_link);
}

static inline void
extent_list_remove(extent_list_t *list, extent_t *extent) {
    ql_remove(list, extent, ql_link);
}

static inline int
extent_sn_comp(const extent_t *a, const extent_t *b) {
    size_t a_sn = extent_sn_get(a);
    size_t b_sn = extent_sn_get(b);

    return (a_sn > b_sn) - (a_sn < b_sn);
}

static inline int
extent_esn_comp(const extent_t *a, const extent_t *b) {
    size_t a_esn = extent_esn_get(a);
    size_t b_esn = extent_esn_get(b);

    return (a_esn > b_esn) - (a_esn < b_esn);
}

static inline int
extent_ad_comp(const extent_t *a, const extent_t *b) {
    uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
    uintptr_t b_addr = (uintptr_t)extent_addr_get(b);

    return (a_addr > b_addr) - (a_addr < b_addr);
}

static inline int
extent_ead_comp(const extent_t *a, const extent_t *b) {
    uintptr_t a_eaddr = (uintptr_t)a;
    uintptr_t b_eaddr = (uintptr_t)b;

    return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
}

static inline int
extent_snad_comp(const extent_t *a, const extent_t *b) {
    int ret;

    ret = extent_sn_comp(a, b);
    if (ret != 0) {
        return ret;
    }

    ret = extent_ad_comp(a, b);
    return ret;
}

static inline int
extent_esnead_comp(const extent_t *a, const extent_t *b) {
    int ret;

    ret = extent_esn_comp(a, b);
    if (ret != 0) {
        return ret;
    }

    ret = extent_ead_comp(a, b);
    return ret;
}

#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
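The extent_*_comp helpers above all use the branch-free three-way comparison (a > b) - (a < b), which yields -1, 0, or +1 and, unlike returning a - b, cannot wrap around or lose the sign for large unsigned values. A minimal standalone check of the idiom (plain C, not jemalloc code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same branch-free three-way comparison used by extent_sn_comp() et al. */
static int cmp_size(size_t a, size_t b) {
    return (a > b) - (a < b);   /* -1 if a < b, 0 if equal, +1 if a > b */
}

int main(void) {
    assert(cmp_size(1, 2) == -1);
    assert(cmp_size(2, 2) == 0);
    /* A subtraction-based comparator would misbehave here; this does not. */
    assert(cmp_size(SIZE_MAX, 0) == 1);
    puts("ok");
    return 0;
}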
deps/jemalloc/include/jemalloc/internal/extent_mmap.h (new file, mode 100644):

#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H

extern bool opt_retain;

void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit);
bool extent_dalloc_mmap(void *addr, size_t size);

#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/extent_structs.h (new file, mode 100644):

#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/size_classes.h"

typedef enum {
    extent_state_active   = 0,
    extent_state_dirty    = 1,
    extent_state_muzzy    = 2,
    extent_state_retained = 3
} extent_state_t;

/* Extent (span of pages).  Use accessor functions for e_* fields. */
struct extent_s {
    /*
     * Bitfield containing several fields:
     *
     * a: arena_ind
     * b: slab
     * c: committed
     * d: dumpable
     * z: zeroed
     * t: state
     * i: szind
     * f: nfree
     * n: sn
     *
     * nnnnnnnn ... nnnnffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
     *
     * arena_ind: Arena from which this extent came, or all 1 bits if
     *            unassociated.
     *
     * slab: The slab flag indicates whether the extent is used for a slab
     *       of small regions.  This helps differentiate small size classes,
     *       and it indicates whether interior pointers can be looked up via
     *       iealloc().
     *
     * committed: The committed flag indicates whether physical memory is
     *            committed to the extent, whether explicitly or implicitly
     *            as on a system that overcommits and satisfies physical
     *            memory needs on demand via soft page faults.
     *
     * dumpable: The dumpable flag indicates whether or not we've set the
     *           memory in question to be dumpable.  Note that this
     *           interacts somewhat subtly with user-specified extent hooks,
     *           since we don't know if *they* are fiddling with
     *           dumpability (in which case, we don't want to undo whatever
     *           they're doing).  To deal with this scenario, we:
     *             - Make dumpable false only for memory allocated with the
     *               default hooks.
     *             - Only allow memory to go from non-dumpable to dumpable,
     *               and only once.
     *             - Never make the OS call to allow dumping when the
     *               dumpable bit is already set.
     *           These three constraints mean that we will never
     *           accidentally dump user memory that the user meant to set
     *           nondumpable with their extent hooks.
     *
     * zeroed: The zeroed flag is used by extent recycling code to track
     *         whether memory is zero-filled.
     *
     * state: The state flag is an extent_state_t.
     *
     * szind: The szind flag indicates usable size class index for
     *        allocations residing in this extent, regardless of whether the
     *        extent is a slab.  Extent size and usable size often differ
     *        even for non-slabs, either due to sz_large_pad or promotion of
     *        sampled small regions.
     *
     * nfree: Number of free regions in slab.
     *
     * sn: Serial number (potentially non-unique).
     *
     *     Serial numbers may wrap around if !opt_retain, but as long as
     *     comparison functions fall back on address comparison for equal
     *     serial numbers, stable (if imperfect) ordering is maintained.
     *
     *     Serial numbers may not be unique even in the absence of
     *     wrap-around, e.g. when splitting an extent and assigning the same
     *     serial number to both resulting adjacent extents.
     */
    uint64_t        e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))

#define EXTENT_BITS_ARENA_WIDTH  MALLOCX_ARENA_BITS
#define EXTENT_BITS_ARENA_SHIFT  0
#define EXTENT_BITS_ARENA_MASK  MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)

#define EXTENT_BITS_SLAB_WIDTH  1
#define EXTENT_BITS_SLAB_SHIFT  (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_MASK  MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)

#define EXTENT_BITS_COMMITTED_WIDTH  1
#define EXTENT_BITS_COMMITTED_SHIFT  (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_MASK  MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)

#define EXTENT_BITS_DUMPABLE_WIDTH  1
#define EXTENT_BITS_DUMPABLE_SHIFT  (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_DUMPABLE_MASK  MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)

#define EXTENT_BITS_ZEROED_WIDTH  1
#define EXTENT_BITS_ZEROED_SHIFT  (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
#define EXTENT_BITS_ZEROED_MASK  MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)

#define EXTENT_BITS_STATE_WIDTH  2
#define EXTENT_BITS_STATE_SHIFT  (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_MASK  MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)

#define EXTENT_BITS_SZIND_WIDTH  LG_CEIL_NSIZES
#define EXTENT_BITS_SZIND_SHIFT  (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_MASK  MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)

#define EXTENT_BITS_NFREE_WIDTH  (LG_SLAB_MAXREGS + 1)
#define EXTENT_BITS_NFREE_SHIFT  (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_MASK  MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)

#define EXTENT_BITS_SN_SHIFT  (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_SN_MASK  (UINT64_MAX << EXTENT_BITS_SN_SHIFT)

    /* Pointer to the extent that this structure is responsible for. */
    void            *e_addr;

    union {
        /*
         * Extent size and serial number associated with the extent
         * structure (different than the serial number for the extent at
         * e_addr).
         *
         * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
         */
        size_t          e_size_esn;
#define EXTENT_SIZE_MASK    ((size_t)~(PAGE-1))
#define EXTENT_ESN_MASK     ((size_t)PAGE-1)
        /* Base extent size, which may not be a multiple of PAGE. */
        size_t          e_bsize;
    };

    /*
     * List linkage, used by a variety of lists:
     * - bin_t's slabs_full
     * - extents_t's LRU
     * - stashed dirty extents
     * - arena's large allocations
     */
    ql_elm(extent_t)    ql_link;

    /*
     * Linkage for per size class sn/address-ordered heaps, and
     * for extent_avail
     */
    phn(extent_t)       ph_link;

    union {
        /* Small region slab metadata. */
        arena_slab_data_t   e_slab_data;

        /*
         * Profile counters, used for large objects.  Points to a
         * prof_tctx_t.
         */
        atomic_p_t      e_prof_tctx;
    };
};
typedef ql_head(extent_t) extent_list_t;
typedef ph(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;

/* Quantized collection of extents, with built-in LRU queue. */
struct extents_s {
    malloc_mutex_t      mtx;

    /*
     * Quantized per size class heaps of extents.
     *
     * Synchronization: mtx.
     */
    extent_heap_t       heaps[NPSIZES+1];

    /*
     * Bitmap for which set bits correspond to non-empty heaps.
     *
     * Synchronization: mtx.
     */
    bitmap_t        bitmap[BITMAP_GROUPS(NPSIZES+1)];

    /*
     * LRU of all extents in heaps.
     *
     * Synchronization: mtx.
     */
    extent_list_t       lru;

    /*
     * Page sum for all extents in heaps.
     *
     * The synchronization here is a little tricky.  Modifications to npages
     * must hold mtx, but reads need not (though, a reader who sees npages
     * without holding the mutex can't assume anything about the rest of the
     * state of the extents_t).
     */
    atomic_zu_t     npages;

    /* All stored extents must be in the same state. */
    extent_state_t      state;

    /*
     * If true, delay coalescing until eviction; otherwise coalesce during
     * deallocation.
     */
    bool            delay_coalesce;
};

#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
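The MASK(width, shift) macro above builds each field's mask from its width and position, and the accessors in extent_inlines.h extract a field as (e_bits & MASK) >> SHIFT and set it by clearing then OR-ing in the shifted value. A small self-contained illustration of the same pack/unpack arithmetic; the field names and widths here are invented for the example and are not jemalloc's real layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same mask construction as extent_structs.h. */
#define MASK(WIDTH, SHIFT) ((((((uint64_t)0x1U) << (WIDTH)) - 1)) << (SHIFT))

/* Example layout only: a 12-bit "arena" field followed by a 1-bit "slab" flag. */
#define ARENA_WIDTH 12
#define ARENA_SHIFT 0
#define ARENA_MASK  MASK(ARENA_WIDTH, ARENA_SHIFT)
#define SLAB_WIDTH  1
#define SLAB_SHIFT  (ARENA_WIDTH + ARENA_SHIFT)
#define SLAB_MASK   MASK(SLAB_WIDTH, SLAB_SHIFT)

int main(void) {
    uint64_t bits = 0;

    /* Set: clear the field, then OR in the shifted value (as the setters do). */
    bits = (bits & ~ARENA_MASK) | ((uint64_t)42 << ARENA_SHIFT);
    bits = (bits & ~SLAB_MASK) | ((uint64_t)1 << SLAB_SHIFT);

    /* Get: mask, then shift back down (as the getters do). */
    assert(((bits & ARENA_MASK) >> ARENA_SHIFT) == 42);
    assert(((bits & SLAB_MASK) >> SLAB_SHIFT) == 1);

    printf("bits = 0x%llx\n", (unsigned long long)bits);
    return 0;
}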
deps/jemalloc/include/jemalloc/internal/extent_types.h (new file, mode 100644):

#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
#define JEMALLOC_INTERNAL_EXTENT_TYPES_H

typedef struct extent_s extent_t;
typedef struct extents_s extents_t;

#define EXTENT_HOOKS_INITIALIZER    NULL

#define EXTENT_GROW_MAX_PIND (NPSIZES - 1)

/*
 * When reuse (and split) an active extent, (1U << opt_lg_extent_max_active_fit)
 * is the max ratio between the size of the active extent and the new extent.
 */
#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6

#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
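The comment above describes (1U << opt_lg_extent_max_active_fit) as the maximum allowed ratio between an active extent being reused (and split) and the requested size. The sketch below only illustrates that ratio test; extent_fit_ok is an invented name and not the actual check in extent.c, and a real implementation would also guard the shift against overflow for very large requests:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Mirrors LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT from extent_types.h. */
static size_t opt_lg_extent_max_active_fit = 6;

/*
 * Accept an existing extent for reuse only if it is at most
 * (1U << opt_lg_extent_max_active_fit) times larger than the request,
 * so a tiny allocation does not claim and split a huge active extent.
 */
static bool extent_fit_ok(size_t extent_size, size_t request_size) {
    return extent_size <= (request_size << opt_lg_extent_max_active_fit);
}

int main(void) {
    printf("%d\n", extent_fit_ok(4096 * 32, 4096));   /* 32x  <= 64x -> 1 */
    printf("%d\n", extent_fit_ok(4096 * 128, 4096));  /* 128x >  64x -> 0 */
    return 0;
}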
deps/jemalloc/include/jemalloc/internal/hash.h (modified; shown as a unified diff, collapsed regions marked [...]):

+#ifndef JEMALLOC_INTERNAL_HASH_H
+#define JEMALLOC_INTERNAL_HASH_H
+
+#include "jemalloc/internal/assert.h"
+
 /*
  * The following hash function is based on MurmurHash3, placed into the public
- * domain by Austin Appleby.  See http://code.google.com/p/smhasher/ for
+ * domain by Austin Appleby.  See https://github.com/aappleby/smhasher for
  * details.
  */
 
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
-void hash_x86_128(const void *key, const int len, uint32_t seed,
-    uint64_t r_out[2]);
-void hash_x64_128(const void *key, const int len, const uint32_t seed,
-    uint64_t r_out[2]);
-void hash(const void *key, size_t len, const uint32_t seed,
-    size_t r_hash[2]);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
 /******************************************************************************/
 /* Internal implementation. */
-JEMALLOC_INLINE uint32_t
-hash_rotl_32(uint32_t x, int8_t r)
-{
+static inline uint32_t
+hash_rotl_32(uint32_t x, int8_t r) {
 	return ((x << r) | (x >> (32 - r)));
 }
 
-JEMALLOC_INLINE uint64_t
-hash_rotl_64(uint64_t x, int8_t r)
-{
+static inline uint64_t
+hash_rotl_64(uint64_t x, int8_t r) {
 	return ((x << r) | (x >> (64 - r)));
 }
 
-JEMALLOC_INLINE uint32_t
-hash_get_block_32(const uint32_t *p, int i)
-{
-	return (p[i]);
+static inline uint32_t
+hash_get_block_32(const uint32_t *p, int i) {
+	/* Handle unaligned read. */
+	if (unlikely((uintptr_t)p & (sizeof(uint32_t) - 1)) != 0) {
+		uint32_t ret;
+
+		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
+		return ret;
+	}
+
+	return p[i];
 }
 
-JEMALLOC_INLINE uint64_t
-hash_get_block_64(const uint64_t *p, int i)
-{
-	return (p[i]);
+static inline uint64_t
+hash_get_block_64(const uint64_t *p, int i) {
+	/* Handle unaligned read. */
+	if (unlikely((uintptr_t)p & (sizeof(uint64_t) - 1)) != 0) {
+		uint64_t ret;
+
+		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
+		return ret;
+	}
+
+	return p[i];
 }
 
-JEMALLOC_INLINE uint32_t
-hash_fmix_32(uint32_t h)
-{
+static inline uint32_t
+hash_fmix_32(uint32_t h) {
 	h ^= h >> 16;
 	h *= 0x85ebca6b;
 	h ^= h >> 13;
 	h *= 0xc2b2ae35;
 	h ^= h >> 16;
 
-	return (h);
+	return h;
 }
 
-JEMALLOC_INLINE uint64_t
-hash_fmix_64(uint64_t k)
-{
+static inline uint64_t
+hash_fmix_64(uint64_t k) {
 	k ^= k >> 33;
 	k *= KQU(0xff51afd7ed558ccd);
 	k ^= k >> 33;
 	k *= KQU(0xc4ceb9fe1a85ec53);
 	k ^= k >> 33;
 
-	return (k);
+	return k;
 }
 
-JEMALLOC_INLINE uint32_t
-hash_x86_32(const void *key, int len, uint32_t seed)
-{
+static inline uint32_t
+hash_x86_32(const void *key, int len, uint32_t seed) {
 	const uint8_t *data = (const uint8_t *) key;
 	const int nblocks = len / 4;

[...]

@@ -133,13 +116,12 @@ hash_x86_32(const void *key, int len, uint32_t seed)
 
 	h1 = hash_fmix_32(h1);
 
-	return (h1);
+	return h1;
 }
 
-UNUSED JEMALLOC_INLINE void
+UNUSED static inline void
 hash_x86_128(const void *key, const int len, uint32_t seed,
-    uint64_t r_out[2])
-{
+    uint64_t r_out[2]) {
 	const uint8_t *data = (const uint8_t *) key;
 	const int nblocks = len / 16;

[...]

@@ -238,10 +220,9 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
 	r_out[1] = (((uint64_t)h4) << 32) | h3;
 }
 
-UNUSED JEMALLOC_INLINE void
+UNUSED static inline void
 hash_x64_128(const void *key, const int len, const uint32_t seed,
-    uint64_t r_out[2])
-{
+    uint64_t r_out[2]) {
 	const uint8_t *data = (const uint8_t *) key;
 	const int nblocks = len / 16;

[...]

@@ -279,22 +260,22 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
 	uint64_t k2 = 0;
 
 	switch (len & 15) {
-	case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
-	case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
-	case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
-	case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
-	case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
-	case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
+	case 15: k2 ^= ((uint64_t)(tail[14])) << 48; /* falls through */
+	case 14: k2 ^= ((uint64_t)(tail[13])) << 40; /* falls through */
+	case 13: k2 ^= ((uint64_t)(tail[12])) << 32; /* falls through */
+	case 12: k2 ^= ((uint64_t)(tail[11])) << 24; /* falls through */
+	case 11: k2 ^= ((uint64_t)(tail[10])) << 16; /* falls through */
+	case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; /* falls through */
 	case  9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
 		k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
-
-	case  8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
-	case  7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
-	case  6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
-	case  5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
-	case  4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
-	case  3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
-	case  2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
+		/* falls through */
+	case  8: k1 ^= ((uint64_t)(tail[ 7])) << 56; /* falls through */
+	case  7: k1 ^= ((uint64_t)(tail[ 6])) << 48; /* falls through */
+	case  6: k1 ^= ((uint64_t)(tail[ 5])) << 40; /* falls through */
+	case  5: k1 ^= ((uint64_t)(tail[ 4])) << 32; /* falls through */
+	case  4: k1 ^= ((uint64_t)(tail[ 3])) << 24; /* falls through */
+	case  3: k1 ^= ((uint64_t)(tail[ 2])) << 16; /* falls through */
+	case  2: k1 ^= ((uint64_t)(tail[ 1])) << 8; /* falls through */
 	case  1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
 		k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
 	}

[...]

@@ -318,19 +299,20 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
 /******************************************************************************/
 /* API. */
-JEMALLOC_INLINE void
-hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
-{
+static inline void
+hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
+	assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
+
 #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
-	hash_x64_128(key, len, seed, (uint64_t *)r_hash);
+	hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
 #else
-	uint64_t hashes[2];
-	hash_x86_128(key, len, seed, hashes);
-	r_hash[0] = (size_t)hashes[0];
-	r_hash[1] = (size_t)hashes[1];
+	{
+		uint64_t hashes[2];
+		hash_x86_128(key, (int)len, seed, hashes);
+		r_hash[0] = (size_t)hashes[0];
+		r_hash[1] = (size_t)hashes[1];
+	}
 #endif
 }
-#endif
 
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_HASH_H */
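hash_fmix_64() in the diff above is MurmurHash3's 64-bit finalizer; the change only touches its declaration style and return statement, not the mixing. As a small standalone check of those mixing steps, with the multiplication constants copied from the code above (the fmix64 name and UINT64_C in place of jemalloc's KQU macro are just for this sketch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same avalanche steps as hash_fmix_64() above (MurmurHash3 finalizer). */
static uint64_t fmix64(uint64_t k) {
    k ^= k >> 33;
    k *= UINT64_C(0xff51afd7ed558ccd);
    k ^= k >> 33;
    k *= UINT64_C(0xc4ceb9fe1a85ec53);
    k ^= k >> 33;
    return k;
}

int main(void) {
    /* Nearby inputs end up far apart after mixing, which is the point. */
    printf("%" PRIx64 "\n", fmix64(1));
    printf("%" PRIx64 "\n", fmix64(2));
    return 0;
}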
deps/jemalloc/include/jemalloc/internal/hooks.h (new file, mode 100644):

#ifndef JEMALLOC_INTERNAL_HOOKS_H
#define JEMALLOC_INTERNAL_HOOKS_H

extern JEMALLOC_EXPORT void (*hooks_arena_new_hook)();
extern JEMALLOC_EXPORT void (*hooks_libc_hook)();

#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)

#define open JEMALLOC_HOOK(open, hooks_libc_hook)
#define read JEMALLOC_HOOK(read, hooks_libc_hook)
#define write JEMALLOC_HOOK(write, hooks_libc_hook)
#define readlink JEMALLOC_HOOK(readlink, hooks_libc_hook)
#define close JEMALLOC_HOOK(close, hooks_libc_hook)
#define creat JEMALLOC_HOOK(creat, hooks_libc_hook)
#define secure_getenv JEMALLOC_HOOK(secure_getenv, hooks_libc_hook)
/* Note that this is undef'd and re-define'd in src/prof.c. */
#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)

#endif /* JEMALLOC_INTERNAL_HOOKS_H */
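The JEMALLOC_HOOK(fn, hook) macro wraps a libc symbol so that, whenever the hook pointer is non-NULL, the hook runs first and the overall expression still evaluates to fn thanks to the comma operator, so the original call goes through unchanged. A standalone reduction of the same trick; HOOKED, my_open, real_open and libc_hook are illustrative names for this example, not jemalloc's:

#include <stdio.h>

static void (*libc_hook)(void) = NULL;

static int real_open(const char *path) {
    printf("open(%s)\n", path);
    return 3;
}

/* Same shape as JEMALLOC_HOOK: run the hook (if any), then yield `fn`. */
#define HOOKED(fn, hook) ((void)((hook) != NULL && ((hook)(), 0)), fn)
#define my_open HOOKED(real_open, libc_hook)

static void on_libc_call(void) {
    puts("hook fired");
}

int main(void) {
    my_open("/tmp/a");        /* no hook installed: just calls real_open */
    libc_hook = on_libc_call;
    my_open("/tmp/b");        /* hook fires first, then real_open runs */
    return 0;
}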
deps/jemalloc/include/jemalloc/internal/huge.h (deleted, previously mode 100644):

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
    tcache_t *tcache);
void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
    bool zero, tcache_t *tcache);
bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero);
void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
arena_t *huge_aalloc(const void *ptr);
size_t huge_salloc(const void *ptr);
prof_tctx_t *huge_prof_tctx_get(const void *ptr);
void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
void huge_prof_tctx_reset(const void *ptr);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in (deleted, previously mode 100644):
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
# define JEMALLOC_NO_RENAME
# include "../jemalloc@install_suffix@.h"
# undef JEMALLOC_NO_RENAME
#else
# define JEMALLOC_N(n) @private_namespace@##n
# include "../jemalloc@install_suffix@.h"
#endif
#include "jemalloc/internal/private_namespace.h"
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
true
#else
false
#endif
;
static const bool have_dss =
#ifdef JEMALLOC_DSS
true
#else
false
#endif
;
static const bool config_fill =
#ifdef JEMALLOC_FILL
true
#else
false
#endif
;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
true
#else
false
#endif
;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
#else
false
#endif
;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
true
#else
false
#endif
;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
true
#else
false
#endif
;
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
true
#else
false
#endif
;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
true
#else
false
#endif
;
static const bool config_stats =
#ifdef JEMALLOC_STATS
true
#else
false
#endif
;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
true
#else
false
#endif
;
static const bool config_tls =
#ifdef JEMALLOC_TLS
true
#else
false
#endif
;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
true
#else
false
#endif
;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
true
#else
false
#endif
;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
true
#else
false
#endif
;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
true
#else
false
#endif
;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
true
#else
false
#endif
;
#ifdef JEMALLOC_C11ATOMICS
#include <stdatomic.h>
#endif
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif
#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif
#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
/*
* jemalloc can conceptually be broken into components (arena, tcache, etc.),
* but there are circular dependencies that cannot be broken without
* substantial performance degradation. In order to reduce the effect on
* visual code flow, read the header files in multiple passes, with one of the
* following cpp variables defined during each pass:
*
* JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data
* types.
* JEMALLOC_H_STRUCTS : Data structures.
* JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
* JEMALLOC_H_INLINES : Inline functions.
*/
/******************************************************************************/
#define JEMALLOC_H_TYPES
#include "jemalloc/internal/jemalloc_internal_macros.h"
/* Size class index type. */
typedef unsigned szind_t;
/*
* Flags bits:
*
* a: arena
* t: tcache
* 0: unused
* z: zero
* n: alignment
*
* aaaaaaaa aaaatttt tttttttt 0znnnnnn
*/
#define MALLOCX_ARENA_MASK ((int)~0xfffff)
#define MALLOCX_ARENA_MAX 0xffe
#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU)
#define MALLOCX_TCACHE_MAX 0xffd
#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
(ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags) \
(MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags) \
((bool)(flags & MALLOCX_ZERO))
#define MALLOCX_TCACHE_GET(flags) \
(((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
#define MALLOCX_ARENA_GET(flags) \
(((unsigned)(((unsigned)flags) >> 20)) - 1)
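/*
 * Illustrative aside (not part of the original header): with the layout
 * above, a caller passing flags = MALLOCX_LG_ALIGN(4) sets the low six
 * alignment bits to 4, so MALLOCX_ALIGN_GET_SPECIFIED(flags) evaluates to
 * (ZU(1) << 4) == 16, i.e. a 16-byte alignment request; the tcache and
 * arena fields decode the same way through their masks and shifts.
 */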
/* Smallest size class to support. */
#define TINY_MIN (1U << LG_TINY_MIN)
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
# define LG_QUANTUM 4
# endif
# ifdef __ia64__
# define LG_QUANTUM 4
# endif
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
# if (defined(__sparc64__) || defined(__sparcv9))
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
# define LG_QUANTUM 4
# endif
# ifdef __arm__
# define LG_QUANTUM 3
# endif
# ifdef __aarch64__
# define LG_QUANTUM 4
# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __mips__
# define LG_QUANTUM 3
# endif
# ifdef __or1k__
# define LG_QUANTUM 3
# endif
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
# ifdef __SH4__
# define LG_QUANTUM 4
# endif
# ifdef __tile__
# define LG_QUANTUM 4
# endif
# ifdef __le32__
# define LG_QUANTUM 4
# endif
# ifndef LG_QUANTUM
# error "Unknown minimum alignment for architecture; specify via "
"--with-lg-quantum"
# endif
#endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)
/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)
/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)
/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
* In addition, this controls the spacing of cacheline-spaced size classes.
*
* CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
* only handle raw constants.
*/
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)
/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/* Page size. LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
((void *)((uintptr_t)(a) & (-(alignment))))
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
((size_t)((uintptr_t)(a) & (alignment - 1)))
/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + (alignment - 1)) & (-(alignment)))
/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
# include <malloc.h>
# define alloca _alloca
# else
# ifdef JEMALLOC_HAS_ALLOCA_H
# include <alloca.h>
# else
# include <stdlib.h>
# endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
type *name = alloca(sizeof(type) * (count))
#else
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#define JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/extent.h"
#define JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/tsd.h"
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;
extern bool in_valgrind;
/* Number of CPUs. */
extern unsigned ncpus;
/*
* index2size_tab encodes the same information as could be computed (at
* unacceptable cost in some code paths) by index2size_compute().
*/
extern size_t const index2size_tab[NSIZES];
/*
* size2index_tab is a compact lookup table that rounds request sizes up to
* size classes. In order to reduce cache footprint, the table is compressed,
* and all accesses are via size2index().
*/
extern uint8_t const size2index_tab[];
arena_t *a0get(void);
void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
arena_t *arenas_extend(unsigned ind);
arena_t *arena_init(unsigned ind);
unsigned narenas_total_get(void);
arena_t *arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing);
arena_t *arena_choose_hard(tsd_t *tsd);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
unsigned arena_nbound(unsigned ind);
void thread_allocated_cleanup(tsd_t *tsd);
void thread_deallocated_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_cache_cleanup(tsd_t *tsd);
void narenas_cache_cleanup(tsd_t *tsd);
void arenas_cache_bypass_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/tsd.h"
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
szind_t size2index_compute(size_t size);
szind_t size2index_lookup(size_t size);
szind_t size2index(size_t size);
size_t index2size_compute(szind_t index);
size_t index2size_lookup(szind_t index);
size_t index2size(szind_t index);
size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
bool refresh_if_missing);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{
#if (NTBINS != 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
size_t lg_ceil = lg_floor(pow2_ceil(size));
return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
}
#endif
{
size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
(ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
: lg_floor((size<<1)-1);
size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
size_t grp = shift << LG_SIZE_CLASS_GROUP;
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta_inverse_mask = ZI(-1) << lg_delta;
size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
size_t index = NTBINS + grp + mod;
return (index);
}
}
JEMALLOC_ALWAYS_INLINE szind_t
size2index_lookup(size_t size)
{
assert(size <= LOOKUP_MAXCLASS);
{
size_t ret = ((size_t)(size2index_tab[(size-1) >>
LG_TINY_MIN]));
assert(ret == size2index_compute(size));
return (ret);
}
}
JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size)
{
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS))
return (size2index_lookup(size));
return (size2index_compute(size));
}
JEMALLOC_INLINE size_t
index2size_compute(szind_t index)
{
#if (NTBINS > 0)
if (index < NTBINS)
return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
#endif
{
size_t reduced_index = index - NTBINS;
size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
1);
size_t grp_size_mask = ~((!!grp)-1);
size_t grp_size = ((ZU(1) << (LG_QUANTUM +
(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
size_t shift = (grp == 0) ? 1 : grp;
size_t lg_delta = shift + (LG_QUANTUM-1);
size_t mod_size = (mod+1) << lg_delta;
size_t usize = grp_size + mod_size;
return (usize);
}
}
JEMALLOC_ALWAYS_INLINE size_t
index2size_lookup(szind_t index)
{
size_t ret = (size_t)index2size_tab[index];
assert(ret == index2size_compute(index));
return (ret);
}
JEMALLOC_ALWAYS_INLINE size_t
index2size(szind_t index)
{
assert(index < NSIZES);
return (index2size_lookup(index));
}
JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{
#if (NTBINS > 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
size_t lg_ceil = lg_floor(pow2_ceil(size));
return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
(ZU(1) << lg_ceil));
}
#endif
{
size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
(ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
: lg_floor((size<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (size + delta_mask) & ~delta_mask;
return (usize);
}
}
JEMALLOC_ALWAYS_INLINE size_t
s2u_lookup(size_t size)
{
size_t ret = index2size_lookup(size2index_lookup(size));
assert(ret == s2u_compute(size));
return (ret);
}
/*
* Compute usable size that would result from allocating an object with the
* specified size.
*/
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS))
return (s2u_lookup(size));
return (s2u_compute(size));
}
/*
* Compute usable size that would result from allocating an object with the
* specified size and alignment.
*/
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
size_t usize;
assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
/* Try for a small size class. */
if (size <= SMALL_MAXCLASS && alignment < PAGE) {
/*
* Round size up to the nearest multiple of alignment.
*
* This done, we can take advantage of the fact that for each
* small size class, every object is aligned at the smallest
* power of two that is non-zero in the base two representation
* of the size. For example:
*
* Size | Base 2 | Minimum alignment
* -----+----------+------------------
* 96 | 1100000 | 32
* 144 | 10100000 | 32
* 192 | 11000000 | 64
*/
usize = s2u(ALIGNMENT_CEILING(size, alignment));
if (usize < LARGE_MINCLASS)
return (usize);
}
/* Try for a large size class. */
if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
/*
* We can't achieve subpage alignment, so round up alignment
* to the minimum that can actually be supported.
*/
alignment = PAGE_CEILING(alignment);
/* Make sure result is a large size class. */
usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);
/*
* Calculate the size of the over-size run that arena_palloc()
* would need to allocate in order to guarantee the alignment.
*/
if (usize + large_pad + alignment - PAGE <= arena_maxrun)
return (usize);
}
/* Huge size class. Beware of size_t overflow. */
/*
* We can't achieve subchunk alignment, so round up alignment to the
* minimum that can actually be supported.
*/
alignment = CHUNK_CEILING(alignment);
if (alignment == 0) {
/* size_t overflow. */
return (0);
}
/* Make sure result is a huge size class. */
if (size <= chunksize)
usize = chunksize;
else {
usize = s2u(size);
if (usize < size) {
/* size_t overflow. */
return (0);
}
}
/*
* Calculate the multi-chunk mapping that huge_palloc() would need in
* order to guarantee the alignment.
*/
if (usize + alignment - PAGE < usize) {
/* size_t overflow. */
return (0);
}
return (usize);
}
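/*
 * Illustrative aside (not part of the original header): for example,
 * sa2u(100, 32) first rounds the size up to the alignment,
 * ALIGNMENT_CEILING(100, 32) == 128, and s2u(128) == 128 is already a
 * small size class whose base-2 form (10000000) guarantees at least
 * 128-byte (hence 32-byte) natural alignment, so the usable size is 128.
 */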
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
arena_choose(tsd_t *tsd, arena_t *arena)
{
arena_t *ret;
if (arena != NULL)
return (arena);
if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
ret = arena_choose_hard(tsd);
return (ret);
}
JEMALLOC_INLINE arena_t *
arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
bool refresh_if_missing)
{
arena_t *arena;
arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
/* init_if_missing requires refresh_if_missing. */
assert(!init_if_missing || refresh_if_missing);
if (unlikely(arenas_cache == NULL)) {
/* arenas_cache hasn't been initialized yet. */
return (arena_get_hard(tsd, ind, init_if_missing));
}
if (unlikely(ind >= tsd_narenas_cache_get(tsd))) {
/*
* ind is invalid, cache is old (too small), or arena to be
* initialized.
*/
return (refresh_if_missing ? arena_get_hard(tsd, ind,
init_if_missing) : NULL);
}
arena = arenas_cache[ind];
if (likely(arena != NULL) || !refresh_if_missing)
return (arena);
return (arena_get_hard(tsd, ind, init_if_missing));
}
#endif
#include "jemalloc/internal/bitmap.h"
/*
* Include portions of arena.h interleaved with tcache.h in order to resolve
* circular dependencies.
*/
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *iaalloc(const void *ptr);
size_t isalloc(const void *ptr, bool demote);
void *iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache,
bool is_metadata, arena_t *arena);
void *imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
void *imalloc(tsd_t *tsd, size_t size);
void *icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
void *icalloc(tsd_t *tsd, size_t size);
void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena);
void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata);
void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
void idalloc(tsd_t *tsd, void *ptr);
void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache,
arena_t *arena);
void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero);
bool ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(const void *ptr)
{
assert(ptr != NULL);
return (arena_aalloc(ptr));
}
/*
* Typical usage:
* void *ptr = [...]
* size_t sz = isalloc(ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || !demote);
return (arena_salloc(ptr, demote));
}
JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata,
arena_t *arena)
{
void *ret;
assert(size != 0);
ret = arena_malloc(tsd, arena, size, zero, tcache);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
{
return (iallocztm(tsd, size, false, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
imalloc(tsd_t *tsd, size_t size)
{
return (iallocztm(tsd, size, false, tcache_get(tsd, true), false, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
{
return (iallocztm(tsd, size, true, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
icalloc(tsd_t *tsd, size_t size)
{
return (iallocztm(tsd, size, true, tcache_get(tsd, true), false, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena)
{
void *ret;
assert(usize != 0);
assert(usize == sa2u(usize, alignment));
ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena)
{
return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{
return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd,
NULL), false, NULL));
}
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{
extent_node_t *node;
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
node = chunk_lookup(ptr, false);
if (node == NULL)
return (0);
/* Only arena chunks should be looked up via interior pointers. */
assert(extent_node_addr_get(node) == ptr ||
extent_node_achunk_get(node));
return (isalloc(ptr, demote));
}
JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
size_t ret;
if (usize <= SMALL_MAXCLASS) {
szind_t binind = size2index(usize);
ret = arena_bin_info[binind].redzone_size;
} else
ret = 0;
return (ret);
}
JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
size_t usize = isalloc(ptr, false);
return (u2rz(usize));
}
JEMALLOC_ALWAYS_INLINE void
idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata)
{
assert(ptr != NULL);
if (config_stats && is_metadata) {
arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
config_prof));
}
arena_dalloc(tsd, ptr, tcache);
}
JEMALLOC_ALWAYS_INLINE void
idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
idalloctm(tsd, ptr, tcache, false);
}
JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr)
{
idalloctm(tsd, ptr, tcache_get(tsd, false), false);
}
JEMALLOC_ALWAYS_INLINE void
iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
if (config_fill && unlikely(opt_quarantine))
quarantine(tsd, ptr);
else
idalloctm(tsd, ptr, tcache, false);
}
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{
arena_sdalloc(tsd, ptr, size, tcache);
}
JEMALLOC_ALWAYS_INLINE void
isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{
if (config_fill && unlikely(opt_quarantine))
quarantine(tsd, ptr);
else
isdalloct(tsd, ptr, size, tcache);
}
JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
{
void *p;
size_t usize, copysize;
usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
if (p == NULL) {
if (extra == 0)
return (NULL);
/* Try again, without extra this time. */
usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
if (p == NULL)
return (NULL);
}
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache);
return (p);
}
JEMALLOC_ALWAYS_INLINE void *
iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
bool zero, tcache_t *tcache, arena_t *arena)
{
assert(ptr != NULL);
assert(size != 0);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
zero, tcache, arena));
}
return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
tcache));
}
JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
bool zero)
{
return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
tcache_get(tsd, true), NULL));
}
JEMALLOC_ALWAYS_INLINE bool
ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
bool zero)
{
assert(ptr != NULL);
assert(size != 0);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/* Existing object alignment is inadequate. */
return (true);
}
return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
}
#endif
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
# ifdef _WIN64
# if LG_VADDR <= 32
# error Generate the headers using x64 vcargs
# endif
# else
# if LG_VADDR > 32
# undef LG_VADDR
# define LG_VADDR 32
# endif
# endif
#else
# include <sys/param.h>
# include <sys/mman.h>
...
...
@@ -14,10 +23,27 @@
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# if defined(SYS_open) && defined(__aarch64__)
/* Android headers may define SYS_open to __NR_open even though
* __NR_open may not exist on AArch64 (superseded by __NR_openat). */
# undef SYS_open
# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
# include <signal.h>
# ifdef JEMALLOC_OS_UNFAIR_LOCK
# include <os/lock.h>
# endif
# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
# include <sched.h>
# endif
# include <errno.h>
# include <sys/time.h>
# include <time.h>
# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# include <mach/mach_time.h>
# endif
#endif
#include <sys/types.h>
...
...
@@ -25,6 +51,9 @@
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#ifndef SSIZE_MAX
# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
...
...
@@ -50,9 +79,7 @@ typedef intptr_t ssize_t;
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
isblank(int c) {
	return (c == '\t' || c == ' ');
}
#endif
...
...
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
...
...
@@ -8,6 +8,18 @@
#undef JEMALLOC_PREFIX
#undef JEMALLOC_CPREFIX
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
#undef JEMALLOC_OVERRIDE___LIBC_CALLOC
#undef JEMALLOC_OVERRIDE___LIBC_FREE
#undef JEMALLOC_OVERRIDE___LIBC_MALLOC
#undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#undef JEMALLOC_OVERRIDE___LIBC_REALLOC
#undef JEMALLOC_OVERRIDE___LIBC_VALLOC
#undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
...
...
@@ -21,18 +33,24 @@
* order to yield to another virtual CPU.
*/
#undef CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#undef HAVE_CPU_SPINWAIT
/*
* Number of significant bits in virtual addresses. This may be less than the
* total number of bits in a pointer, e.g. on x64, for which the uppermost 16
* bits are the same as bit 47.
*/
#undef LG_VADDR
/* Defined if C11 atomics are available. */
#undef JEMALLOC_C11_ATOMICS
/* Defined if GCC __atomic atomics are available. */
#undef JEMALLOC_GCC_ATOMIC_ATOMICS
/*
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
* documented in the atomic(3) manual page.
*/
#undef JEMALLOC_OSATOMIC
/* Defined if GCC __sync atomics are available. */
#undef JEMALLOC_GCC_SYNC_ATOMICS
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
...
...
@@ -56,9 +74,9 @@
#undef JEMALLOC_HAVE_BUILTIN_CLZ
/*
 * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
 */
#undef JEMALLOC_OS_UNFAIR_LOCK
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
...
...
@@ -66,6 +84,9 @@
*/
#undef JEMALLOC_OSSPIN
/* Defined if syscall(2) is usable. */
#undef JEMALLOC_USE_SYSCALL
/*
* Defined if secure_getenv(3) is available.
*/
...
...
@@ -76,6 +97,27 @@
*/
#undef JEMALLOC_HAVE_ISSETUGID
/* Defined if pthread_atfork(3) is available. */
#undef JEMALLOC_HAVE_PTHREAD_ATFORK
/* Defined if pthread_setname_np(3) is available. */
#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
/*
* Defined if mach_absolute_time() is available.
*/
#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
...
...
@@ -102,12 +144,6 @@
/* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL
/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
#undef JEMALLOC_CC_SILENCE
/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
#undef JEMALLOC_CODE_COVERAGE
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
...
...
@@ -130,36 +166,23 @@
#undef JEMALLOC_PROF_GCC
/*
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
* This makes it possible to allocate/deallocate objects without any locking
* when the cache is in the steady state.
*/
#undef JEMALLOC_TCACHE
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
* JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
* segment (DSS).
*/
#undef JEMALLOC_DSS
/* Support memory filling (junk/zero). */
#undef JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
#undef JEMALLOC_UTRACE
/* Support Valgrind. */
#undef JEMALLOC_VALGRIND
/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC
/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK
/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
#undef LG_TINY_MIN
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
...
...
@@ -169,6 +192,13 @@
/* One page is 2^LG_PAGE bytes. */
#undef LG_PAGE
/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
* explicit huge page support are separately configured.
*/
#undef LG_HUGEPAGE
/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
...
...
@@ -179,27 +209,29 @@
#undef JEMALLOC_MAPS_COALESCE
/*
 * If defined, retain memory for later reuse by default rather than using e.g.
 * munmap() to unmap freed extents.  This is enabled on 64-bit Linux because
 * common sequences of mmap()/munmap() calls will cause virtual memory map
 * holes.
 */
#undef JEMALLOC_RETAIN
/* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS
/*
 * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
 * Don't use this directly; instead use unreachable() from util.h
 */
#undef JEMALLOC_INTERNAL_UNREACHABLE
/*
 * ffs*() functions to use for bitmapping.  Don't use these directly; instead,
 * use ffs_*() from util.h.
 */
#undef JEMALLOC_INTERNAL_FFSLL
#undef JEMALLOC_INTERNAL_FFSL
#undef JEMALLOC_INTERNAL_FFS
/*
* If defined, explicitly attempt to more uniformly distribute large allocation
...
...
@@ -207,24 +239,65 @@
*/
#undef JEMALLOC_CACHE_OBLIVIOUS
/*
* If defined, enable logging facilities. We make this a configure option to
* avoid taking extra branches everywhere.
*/
#undef JEMALLOC_LOG
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
#undef JEMALLOC_ZONE_VERSION
/*
* Methods for determining whether the OS overcommits.
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
* /proc/sys/vm.overcommit_memory file.
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
*/
#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/* Defined if madvise(2) is available. */
#undef JEMALLOC_HAVE_MADVISE
/*
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
* arguments to madvise(2).
*/
#undef JEMALLOC_HAVE_MADVISE_HUGE
/*
 * Methods for purging unused pages differ between operating systems.
 *
 *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
 *                             will be discarded rather than swapped out.
 *   madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
 *                                 defined, this immediately discards pages,
 *                                 such that new pages will be demand-zeroed if
 *                                 the address region is later touched;
 *                                 otherwise this behaves similarly to
 *                                 MADV_FREE, though typically with higher
 *                                 system overhead.
 */
#undef JEMALLOC_PURGE_MADVISE_FREE
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
#undef JEMALLOC_DEFINE_MADVISE_FREE
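The two purging strategies described above can be hard to picture from the configure template alone. Below is a minimal, hedged sketch of what the purge call boils down to on Linux; it is not jemalloc's actual pages.c (which adds error handling, overcommit logic, and platform fallbacks), and the helper name is hypothetical.

#include <stddef.h>
#include <sys/mman.h>

/* Hypothetical helper: MADV_FREE keeps the mapping and lets the kernel
 * reclaim pages lazily; MADV_DONTNEED drops them now, so the next touch is
 * demand-zeroed.  jemalloc selects between them based on the
 * JEMALLOC_PURGE_MADVISE_* results above.  Returns 0 on success. */
static int
purge_pages(void *addr, size_t len, int lazy) {
#ifdef MADV_FREE
	if (lazy) {
		return madvise(addr, len, MADV_FREE);
	}
#endif
	return madvise(addr, len, MADV_DONTNEED);
}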
/*
* Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
*/
#undef JEMALLOC_MADVISE_DONTDUMP
/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
#undef JEMALLOC_THP
/* Define if operating system has alloca.h header. */
#undef JEMALLOC_HAS_ALLOCA_H
...
...
@@ -241,6 +314,9 @@
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#undef LG_SIZEOF_LONG_LONG
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
...
...
@@ -250,13 +326,41 @@
/* glibc memalign hook. */
#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
/* pthread support */
#undef JEMALLOC_HAVE_PTHREAD
/* dlsym() support */
#undef JEMALLOC_HAVE_DLSYM
/* Adaptive mutex support in pthreads. */
#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
/* GNU specific sched_getcpu support */
#undef JEMALLOC_HAVE_SCHED_GETCPU
/* GNU specific sched_setaffinity support */
#undef JEMALLOC_HAVE_SCHED_SETAFFINITY
/*
* If defined, all the features necessary for background threads are present.
*/
#undef JEMALLOC_BACKGROUND_THREAD
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
*/
#undef JEMALLOC_EXPORT
/* config.malloc_conf options string. */
#undef JEMALLOC_CONFIG_MALLOC_CONF
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#undef JEMALLOC_IS_MALLOC
/*
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
*/
#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTERNS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/tsd_types.h"
/* TSD checks this to set thread local slow state accordingly. */
extern bool malloc_slow;

/* Run-time options. */
extern bool opt_abort;
extern bool opt_abort_conf;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
extern unsigned opt_narenas;

/* Number of CPUs. */
extern unsigned ncpus;

/* Number of arenas used for automatic multiplexing of threads and arenas. */
extern unsigned narenas_auto;

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern atomic_p_t arenas[];

void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
void arena_set(unsigned ind, arena_t *arena);
unsigned narenas_total_get(void);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_tdata_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
bool malloc_initialized(void);

#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
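The opt_* globals above are the parsed forms of the malloc_conf options. Application code would normally not touch these internal symbols but read them through the public mallctl interface instead. A small sketch, assuming an unprefixed jemalloc build (Redis builds jemalloc with a je_ prefix, so the calls there would be je_mallctl):

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	unsigned narenas;
	bool abort_opt;
	size_t sz;

	/* "opt.narenas" reflects opt_narenas after option parsing. */
	sz = sizeof(narenas);
	if (mallctl("opt.narenas", &narenas, &sz, NULL, 0) == 0) {
		printf("opt.narenas = %u\n", narenas);
	}
	/* "opt.abort" reflects opt_abort. */
	sz = sizeof(abort_opt);
	if (mallctl("opt.abort", &abort_opt, &sz, NULL, 0) == 0) {
		printf("opt.abort = %d\n", (int)abort_opt);
	}
	return 0;
}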
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_INCLUDES_H
#define JEMALLOC_INTERNAL_INCLUDES_H
/*
* jemalloc can conceptually be broken into components (arena, tcache, etc.),
* but there are circular dependencies that cannot be broken without
* substantial performance degradation.
*
 * Historically, we dealt with this by splitting each header into four sections (types,
* structs, externs, and inlines), and included each header file multiple times
* in this file, picking out the portion we want on each pass using the
* following #defines:
 * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
* types.
* JEMALLOC_H_STRUCTS : Data structures.
* JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
* JEMALLOC_H_INLINES : Inline functions.
*
* We're moving toward a world in which the dependencies are explicit; each file
* will #include the headers it depends on (rather than relying on them being
* implicitly available via this file including every header file in the
* project).
*
* We're now in an intermediate state: we've broken up the header files to avoid
* having to include each one multiple times, but have not yet moved the
* dependency information into the header files (i.e. we still rely on the
* ordering in this file to ensure all a header's dependencies are available in
* its translation unit). Each component is now broken up into multiple header
* files, corresponding to the sections above (e.g. instead of "foo.h", we now
* have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h").
*
* Those files which have been converted to explicitly include their
* inter-component dependencies are now in the initial HERMETIC HEADERS
* section. All headers may still rely on jemalloc_preamble.h (which, by fiat,
* must be included first in every translation unit) for system headers and
* global jemalloc definitions, however.
*/
/******************************************************************************/
/* TYPES */
/******************************************************************************/
#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/base_types.h"
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/prof_types.h"
/******************************************************************************/
/* STRUCTS */
/******************************************************************************/
#include "jemalloc/internal/arena_structs_a.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/base_structs.h"
#include "jemalloc/internal/prof_structs.h"
#include "jemalloc/internal/arena_structs_b.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/background_thread_structs.h"
/******************************************************************************/
/* EXTERNS */
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/extent_externs.h"
#include "jemalloc/internal/base_externs.h"
#include "jemalloc/internal/arena_externs.h"
#include "jemalloc/internal/large_externs.h"
#include "jemalloc/internal/tcache_externs.h"
#include "jemalloc/internal/prof_externs.h"
#include "jemalloc/internal/background_thread_externs.h"
/******************************************************************************/
/* INLINES */
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
#include "jemalloc/internal/base_inlines.h"
/*
* Include portions of arena code interleaved with tcache code in order to
* resolve circular dependencies.
*/
#include "jemalloc/internal/prof_inlines_a.h"
#include "jemalloc/internal/arena_inlines_a.h"
#include "jemalloc/internal/extent_inlines.h"
#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
#include "jemalloc/internal/tcache_inlines.h"
#include "jemalloc/internal/arena_inlines_b.h"
#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
#include "jemalloc/internal/prof_inlines_b.h"
#include "jemalloc/internal/background_thread_inlines.h"
#endif
/* JEMALLOC_INTERNAL_INCLUDES_H */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_INLINES_A_H
#define JEMALLOC_INTERNAL_INLINES_A_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/ticker.h"
JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
malloc_getcpu(void) {
	assert(have_percpu_arena);
#if defined(JEMALLOC_HAVE_SCHED_GETCPU)
	return (malloc_cpuid_t)sched_getcpu();
#else
	not_reached();
	return -1;
#endif
}

/* Return the chosen arena index based on current cpu. */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_choose(void) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));

	malloc_cpuid_t cpuid = malloc_getcpu();
	assert(cpuid >= 0);

	unsigned arena_ind;
	if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
	    2)) {
		arena_ind = cpuid;
	} else {
		assert(opt_percpu_arena == per_phycpu_arena);
		/* Hyper threads on the same physical CPU share arena. */
		arena_ind = cpuid - ncpus / 2;
	}

	return arena_ind;
}

/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_ind_limit(percpu_arena_mode_t mode) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
	if (mode == per_phycpu_arena && ncpus > 1) {
		if (ncpus % 2) {
			/* This likely means a misconfig. */
			return ncpus / 2 + 1;
		}
		return ncpus / 2;
	} else {
		return ncpus;
	}
}

static inline arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
	arena_tdata_t *tdata;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);

	if (unlikely(arenas_tdata == NULL)) {
		/* arenas_tdata hasn't been initialized yet. */
		return arena_tdata_get_hard(tsd, ind);
	}
	if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
		/*
		 * ind is invalid, cache is old (too small), or tdata to be
		 * initialized.
		 */
		return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
		    NULL);
	}

	tdata = &arenas_tdata[ind];
	if (likely(tdata != NULL) || !refresh_if_missing) {
		return tdata;
	}
	return arena_tdata_get_hard(tsd, ind);
}

static inline arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
	arena_t *ret;

	assert(ind < MALLOCX_ARENA_LIMIT);

	ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
	if (unlikely(ret == NULL)) {
		if (init_if_missing) {
			ret = arena_init(tsdn, ind,
			    (extent_hooks_t *)&extent_hooks_default);
		}
	}
	return ret;
}

static inline ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind) {
	arena_tdata_t *tdata;

	tdata = arena_tdata_get(tsd, ind, true);
	if (unlikely(tdata == NULL)) {
		return NULL;
	}
	return &tdata->decay_ticker;
}

JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
	assert(binind < NBINS);
	return &tcache->bins_small[binind];
}

JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
	assert(binind >= NBINS && binind < nhbins);
	return &tcache->bins_large[binind - NBINS];
}

JEMALLOC_ALWAYS_INLINE bool
tcache_available(tsd_t *tsd) {
	/*
	 * Thread specific auto tcache might be unavailable if: 1) during tcache
	 * initialization, or 2) disabled through thread.tcache.enabled mallctl
	 * or config options.  This check covers all cases.
	 */
	if (likely(tsd_tcache_enabled_get(tsd))) {
		/* Associated arena == NULL implies tcache init in progress. */
		assert(tsd_tcachep_get(tsd)->arena == NULL ||
		    tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail !=
		    NULL);
		return true;
	}

	return false;
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd) {
	if (!tcache_available(tsd)) {
		return NULL;
	}

	return tsd_tcachep_get(tsd);
}

static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
	/* arena is the current context.  Reentry from a0 is not allowed. */
	assert(arena != arena_get(tsd_tsdn(tsd), 0, false));

	bool fast = tsd_fast(tsd);
	assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
	++*tsd_reentrancy_levelp_get(tsd);
	if (fast) {
		/* Prepare slow path for reentrancy. */
		tsd_slow_update(tsd);
		assert(tsd->state == tsd_state_nominal_slow);
	}
}

static inline void
post_reentrancy(tsd_t *tsd) {
	int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
	assert(*reentrancy_level > 0);
	if (--*reentrancy_level == 0) {
		tsd_slow_update(tsd);
	}
}

#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
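The per-physical-CPU mapping in percpu_arena_choose() above is easiest to see with numbers. The following standalone sketch mirrors only that arithmetic, under the assumption (implied by the "hyper threads share arena" comment) that logical CPU IDs ncpus/2 .. ncpus-1 are the hyperthread siblings of IDs 0 .. ncpus/2-1; the helper name is hypothetical and not part of jemalloc.

#include <stdio.h>

/* Mirrors the cpuid -> arena index arithmetic of per_phycpu_arena mode. */
static unsigned
phycpu_arena_index(unsigned cpuid, unsigned ncpus) {
	if (cpuid < ncpus / 2) {
		return cpuid;             /* first sibling: direct mapping */
	}
	return cpuid - ncpus / 2;         /* second sibling shares the arena */
}

int main(void) {
	unsigned ncpus = 8;
	for (unsigned cpu = 0; cpu < ncpus; cpu++) {
		printf("cpu %u -> arena %u\n", cpu,
		    phycpu_arena_index(cpu, ncpus));
	}
	return 0;
}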
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_INLINES_B_H
#define JEMALLOC_INTERNAL_INLINES_B_H
#include "jemalloc/internal/rtree.h"
/* Choose an arena based on a per-thread value. */
static inline arena_t *
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
	arena_t *ret;

	if (arena != NULL) {
		return arena;
	}

	/* During reentrancy, arena 0 is the safest bet. */
	if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
		return arena_get(tsd_tsdn(tsd), 0, true);
	}

	ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
	if (unlikely(ret == NULL)) {
		ret = arena_choose_hard(tsd, internal);
		assert(ret);
		if (tcache_available(tsd)) {
			tcache_t *tcache = tcache_get(tsd);
			if (tcache->arena != NULL) {
				/* See comments in tcache_data_init().*/
				assert(tcache->arena ==
				    arena_get(tsd_tsdn(tsd), 0, false));
				if (tcache->arena != ret) {
					tcache_arena_reassociate(tsd_tsdn(tsd),
					    tcache, ret);
				}
			} else {
				tcache_arena_associate(tsd_tsdn(tsd), tcache,
				    ret);
			}
		}
	}

	/*
	 * Note that for percpu arena, if the current arena is outside of the
	 * auto percpu arena range, (i.e. thread is assigned to a manually
	 * managed arena), then percpu arena is skipped.
	 */
	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
	    !internal && (arena_ind_get(ret) <
	    percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
	    tsd_tsdn(tsd))) {
		unsigned ind = percpu_arena_choose();
		if (arena_ind_get(ret) != ind) {
			percpu_arena_update(tsd, ind);
			ret = tsd_arena_get(tsd);
		}
		ret->last_thd = tsd_tsdn(tsd);
	}

	return ret;
}

static inline arena_t *
arena_choose(tsd_t *tsd, arena_t *arena) {
	return arena_choose_impl(tsd, arena, false);
}

static inline arena_t *
arena_ichoose(tsd_t *tsd, arena_t *arena) {
	return arena_choose_impl(tsd, arena, true);
}

static inline bool
arena_is_auto(arena_t *arena) {
	assert(narenas_auto > 0);
	return (arena_ind_get(arena) < narenas_auto);
}

JEMALLOC_ALWAYS_INLINE extent_t *
iealloc(tsdn_t *tsdn, const void *ptr) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
}

#endif /* JEMALLOC_INTERNAL_INLINES_B_H */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_INLINES_C_H
#define JEMALLOC_INTERNAL_INLINES_C_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/witness.h"
/*
* Translating the names of the 'i' functions:
* Abbreviations used in the first part of the function name (before
* alloc/dalloc) describe what that function accomplishes:
* a: arena (query)
* s: size (query, or sized deallocation)
* e: extent (query)
* p: aligned (allocates)
* vs: size (query, without knowing that the pointer is into the heap)
* r: rallocx implementation
* x: xallocx implementation
* Abbreviations used in the second part of the function name (after
* alloc/dalloc) describe the arguments it takes
* z: whether to return zeroed memory
* t: accepts a tcache_t * parameter
* m: accepts an arena_t * parameter
*/
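As a concrete reading of that legend, here is how a few of the names defined below decompose (informal, derived directly from the abbreviations above):

/*
 * ipallocztm:  i + p (aligned) + alloc + z (bool zero) + t (tcache_t *)
 *              + m (arena_t *)
 * isdalloct:   i + s (sized deallocation) + dalloc + t (tcache_t *)
 * ivsalloc:    i + vs (size query without assuming the pointer is in the heap)
 */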
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);

	return arena_aalloc(tsdn, ptr);
}

JEMALLOC_ALWAYS_INLINE size_t
isalloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);

	return arena_salloc(tsdn, ptr);
}

JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
    bool is_internal, arena_t *arena, bool slow_path) {
	void *ret;

	assert(size != 0);
	assert(!is_internal || tcache == NULL);
	assert(!is_internal || arena == NULL || arena_is_auto(arena));
	if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
		witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
		    WITNESS_RANK_CORE, 0);
	}

	ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
	if (config_stats && is_internal && likely(ret != NULL)) {
		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
	}
	return ret;
}

JEMALLOC_ALWAYS_INLINE void *
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
	return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
	    NULL, slow_path);
}

JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_internal, arena_t *arena) {
	void *ret;

	assert(usize != 0);
	assert(usize == sz_sa2u(usize, alignment));
	assert(!is_internal || tcache == NULL);
	assert(!is_internal || arena == NULL || arena_is_auto(arena));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	if (config_stats && is_internal && likely(ret != NULL)) {
		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
	}
	return ret;
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena) {
	return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
	return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
	    tcache_get(tsd), false, NULL);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(tsdn_t *tsdn, const void *ptr) {
	return arena_vsalloc(tsdn, ptr);
}

JEMALLOC_ALWAYS_INLINE void
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
    bool is_internal, bool slow_path) {
	assert(ptr != NULL);
	assert(!is_internal || tcache == NULL);
	assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	if (config_stats && is_internal) {
		arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
	}
	if (!is_internal && !tsdn_null(tsdn) &&
	    tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
		assert(tcache == NULL);
	}
	arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr) {
	idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
}

JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache,
    arena_t *arena) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	void *p;
	size_t usize, copysize;

	usize = sz_sa2u(size + extra, alignment);
	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
		return NULL;
	}
	p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
	if (p == NULL) {
		if (extra == 0) {
			return NULL;
		}
		/* Try again, without extra this time. */
		usize = sz_sa2u(size, alignment);
		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
			return NULL;
		}
		p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
		if (p == NULL) {
			return NULL;
		}
	}
	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(p, ptr, copysize);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return p;
}

JEMALLOC_ALWAYS_INLINE void *
iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero, tcache_t *tcache, arena_t *arena) {
	assert(ptr != NULL);
	assert(size != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		return iralloct_realign(tsdn, ptr, oldsize, size, 0, alignment,
		    zero, tcache, arena);
	}

	return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
	    tcache);
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero) {
	return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
	    tcache_get(tsd), NULL);
}

JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero) {
	assert(ptr != NULL);
	assert(size != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/* Existing object alignment is inadequate. */
		return true;
	}

	return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero);
}

JEMALLOC_ALWAYS_INLINE int
iget_defrag_hint(tsdn_t *tsdn, void *ptr, int *bin_util, int *run_util) {
	int defrag = 0;
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	szind_t szind;
	bool is_slab;
	rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    true, &szind, &is_slab);
	if (likely(is_slab)) {
		/* Small allocation. */
		extent_t *slab = iealloc(tsdn, ptr);
		arena_t *arena = extent_arena_get(slab);
		szind_t binind = extent_szind_get(slab);
		bin_t *bin = &arena->bins[binind];
		malloc_mutex_lock(tsdn, &bin->lock);
		/*
		 * Don't bother moving allocations from the slab currently used
		 * for new allocations.
		 */
		if (slab != bin->slabcur) {
			const bin_info_t *bin_info = &bin_infos[binind];
			size_t availregs = bin_info->nregs *
			    bin->stats.curslabs;
			*bin_util = ((long long)bin->stats.curregs << 16) /
			    availregs;
			*run_util = ((long long)(bin_info->nregs -
			    extent_nfree_get(slab)) << 16) / bin_info->nregs;
			defrag = 1;
		}
		malloc_mutex_unlock(tsdn, &bin->lock);
	}
	return defrag;
}

#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
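iget_defrag_hint() reports both utilizations as 16.16 fixed-point fractions (the << 16 above). The sketch below shows one way a caller such as Redis's active defragmentation might turn those values into a decision; the threshold logic is illustrative only and not the exact Redis heuristic.

#include <stdio.h>

/* Illustrative only: decide whether to move an allocation, given the
 * 16.16 fixed-point utilizations produced by iget_defrag_hint(). */
static int
should_defrag(int bin_util, int run_util) {
	/* A fully utilized slab: moving its regions cannot free anything. */
	if (run_util == (1 << 16)) {
		return 0;
	}
	/* Move regions out of slabs that are emptier than the bin average. */
	return run_util < bin_util;
}

int main(void) {
	int bin_util = (70 << 16) / 100;  /* bin is ~70% full on average */
	int run_util = (30 << 16) / 100;  /* this slab is only ~30% full */
	printf("defrag? %d\n", should_defrag(bin_util, run_util));
	return 0;
}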
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
View file @
fb1f4f4e
/*
* JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
* functions that are static inline functions if inlining is enabled, and
* single-definition library-private functions if inlining is disabled.
*
* JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
* which case the denoted functions are always static, regardless of whether
* inlining is enabled.
*/
#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
/* Disable inlining to make debugging/profiling easier. */
# define JEMALLOC_ALWAYS_INLINE
# define JEMALLOC_ALWAYS_INLINE_C static
# define JEMALLOC_INLINE
# define JEMALLOC_INLINE_C static
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ALWAYS_INLINE \
static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
# define JEMALLOC_ALWAYS_INLINE_C \
static inline JEMALLOC_ATTR(always_inline)
# else
# define JEMALLOC_ALWAYS_INLINE static inline
# define JEMALLOC_ALWAYS_INLINE_C static inline
# endif
# define JEMALLOC_INLINE static inline
# define JEMALLOC_INLINE_C static inline
# ifdef _MSC_VER
# define inline _inline
# endif
#endif
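The prototypes-then-definitions split that these macros support can be seen earlier in this diff (the #ifndef JEMALLOC_ENABLE_INLINE block of the old jemalloc_internal.h). A minimal sketch of the pattern for a hypothetical helper; the names example_double and JEMALLOC_EXAMPLE_C_ are made up for illustration.

#ifndef JEMALLOC_ENABLE_INLINE
/* Prototype only: with inlining disabled, the body is compiled once, in the
 * .c file that defines JEMALLOC_EXAMPLE_C_. */
size_t example_double(size_t x);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXAMPLE_C_))
JEMALLOC_INLINE size_t
example_double(size_t x) {
	return x * 2;
}
#endif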
#ifndef JEMALLOC_INTERNAL_MACROS_H
#define JEMALLOC_INTERNAL_MACROS_H
#ifdef JEMALLOC_DEBUG
#  define JEMALLOC_ALWAYS_INLINE static inline
#else
#  define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
#endif
#ifdef _MSC_VER
#  define inline _inline
#endif

#define UNUSED JEMALLOC_ATTR(unused)

#define ZU(z)	((size_t)z)
#define ZD(z)	((ssize_t)z)
#define QU(q)	((uint64_t)q)
#define QD(q)	((int64_t)q)

#define KZU(z)	ZU(z##ULL)
#define KZD(z)	ZD(z##LL)
#define KQU(q)	QU(q##ULL)
#define KQD(q)	QD(q##LL)

#ifndef __DECONST
#  define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif

#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
#  define restrict
#endif

/* Various function pointers are static and immutable except during testing. */
#ifdef JEMALLOC_JET
#  define JET_MUTABLE
#else
#  define JET_MUTABLE const
#endif

#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__

#endif /* JEMALLOC_INTERNAL_MACROS_H */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_TYPES_H
#define JEMALLOC_INTERNAL_TYPES_H
/* Page size index type. */
typedef unsigned pszind_t;

/* Size class index type. */
typedef unsigned szind_t;

/* Processor / core id type. */
typedef int malloc_cpuid_t;
/*
* Flags bits:
*
* a: arena
* t: tcache
* 0: unused
* z: zero
* n: alignment
*
* aaaaaaaa aaaatttt tttttttt 0znnnnnn
*/
#define MALLOCX_ARENA_BITS 12
#define MALLOCX_TCACHE_BITS 12
#define MALLOCX_LG_ALIGN_BITS 6
#define MALLOCX_ARENA_SHIFT 20
#define MALLOCX_TCACHE_SHIFT 8
#define MALLOCX_ARENA_MASK \
(((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
/* NB: Arena index bias decreases the maximum number of arenas by 1. */
#define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1)
#define MALLOCX_TCACHE_MASK \
(((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
(ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags) \
(MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags) \
((bool)(flags & MALLOCX_ZERO))
#define MALLOCX_TCACHE_GET(flags) \
(((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
#define MALLOCX_ARENA_GET(flags) \
(((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
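A worked example of the bit layout described above, with the relevant constants copied so the snippet runs standalone (the real definitions live in this header and in the public jemalloc.h; the composed flag values are purely illustrative):

#include <stdio.h>
#include <stddef.h>

#define MALLOCX_LG_ALIGN_BITS 6
#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
#define MALLOCX_ARENA_SHIFT   20

int main(void) {
	/* The low 6 bits hold lg(alignment): 4 -> 16-byte alignment. */
	int flags = 4;
	/* The arena index is stored biased by one in bits 20..31. */
	unsigned arena_ind = 3;
	flags |= (int)((arena_ind + 1) << MALLOCX_ARENA_SHIFT);

	size_t alignment = (size_t)1 << (flags & MALLOCX_LG_ALIGN_MASK);
	unsigned decoded_arena = ((unsigned)flags >> MALLOCX_ARENA_SHIFT) - 1;

	/* Prints: alignment = 16, arena = 3 */
	printf("alignment = %zu, arena = %u\n", alignment, decoded_arena);
	return 0;
}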
/* Smallest size class to support. */
#define TINY_MIN (1U << LG_TINY_MIN)
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
# define LG_QUANTUM 4
# endif
# ifdef __ia64__
# define LG_QUANTUM 4
# endif
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
# define LG_QUANTUM 4
# endif
# ifdef __arm__
# define LG_QUANTUM 3
# endif
# ifdef __aarch64__
# define LG_QUANTUM 4
# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __m68k__
# define LG_QUANTUM 3
# endif
# ifdef __mips__
# define LG_QUANTUM 3
# endif
# ifdef __nios2__
# define LG_QUANTUM 3
# endif
# ifdef __or1k__
# define LG_QUANTUM 3
# endif
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
# if defined(__riscv) || defined(__riscv__)
# define LG_QUANTUM 4
# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
# if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \
defined(__SH4_SINGLE_ONLY__))
# define LG_QUANTUM 4
# endif
# ifdef __tile__
# define LG_QUANTUM 4
# endif
# ifdef __le32__
# define LG_QUANTUM 4
# endif
# ifndef LG_QUANTUM
# error "Unknown minimum alignment for architecture; specify via "
"--with-lg-quantum"
# endif
#endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)
/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)
/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)
/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
* In addition, this controls the spacing of cacheline-spaced size classes.
*
* CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
* only handle raw constants.
*/
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)
/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
((size_t)((uintptr_t)(a) & (alignment - 1)))
/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + (alignment - 1)) & ((~(alignment)) + 1))
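A quick check of the alignment helpers above with concrete numbers; the macros are copied verbatim so the snippet is self-contained.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
#define ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))
#define ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & ((~(alignment)) + 1))

int main(void) {
	size_t alignment = 64;
	uintptr_t addr = 1000;

	/* 1000 rounds down to 960 (15*64) and up to 1024 (16*64). */
	printf("base   = %p\n", ALIGNMENT_ADDR2BASE(addr, alignment));
	printf("offset = %zu\n", ALIGNMENT_ADDR2OFFSET(addr, alignment));
	printf("ceil   = %zu\n", (size_t)ALIGNMENT_CEILING(addr, alignment));
	return 0;
}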
/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
# include <malloc.h>
# define alloca _alloca
# else
# ifdef JEMALLOC_HAS_ALLOCA_H
# include <alloca.h>
# else
# include <stdlib.h>
# endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
type *name = alloca(sizeof(type) * (count))
#else
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
#endif
/* JEMALLOC_INTERNAL_TYPES_H */
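The VARIABLE_ARRAY macro above uses a C99 variable-length array when available and falls back to alloca() otherwise. A tiny usage sketch with a hypothetical caller (not from jemalloc), using the C99 branch of the macro:

#include <stddef.h>
#include <stdio.h>

#define VARIABLE_ARRAY(type, name, count) type name[(count)]

static size_t
sum_squares(size_t n) {
	VARIABLE_ARRAY(size_t, sq, n);	/* stack-allocated, length n */
	size_t total = 0;
	for (size_t i = 0; i < n; i++) {
		sq[i] = i * i;
		total += sq[i];
	}
	return total;
}

int main(void) {
	printf("%zu\n", sum_squares(5));	/* 0+1+4+9+16 = 30 */
	return 0;
}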
deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_PREAMBLE_H
#define JEMALLOC_PREAMBLE_H
#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# undef JEMALLOC_IS_MALLOC
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
# define JEMALLOC_NO_RENAME
# include "../jemalloc@install_suffix@.h"
# undef JEMALLOC_NO_RENAME
#else
# define JEMALLOC_N(n) @private_namespace@##n
# include "../jemalloc@install_suffix@.h"
#endif
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif
#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#endif
#include "jemalloc/internal/jemalloc_internal_macros.h"
/*
* Note that the ordering matters here; the hook itself is name-mangled. We
* want the inclusion of hooks to happen early, so that we hook as much as
* possible.
*/
#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
# ifndef JEMALLOC_JET
# include "jemalloc/internal/private_namespace.h"
# else
# include "jemalloc/internal/private_namespace_jet.h"
# endif
#endif
#include "jemalloc/internal/hooks.h"
#ifdef JEMALLOC_DEFINE_MADVISE_FREE
# define JEMALLOC_MADV_FREE 8
#endif
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
true
#else
false
#endif
;
static const bool have_dss =
#ifdef JEMALLOC_DSS
true
#else
false
#endif
;
static const bool have_madvise_huge =
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
true
#else
false
#endif
;
static const bool config_fill =
#ifdef JEMALLOC_FILL
true
#else
false
#endif
;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
true
#else
false
#endif
;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
#else
false
#endif
;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
true
#else
false
#endif
;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
true
#else
false
#endif
;
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
true
#else
false
#endif
;
static const bool config_stats =
#ifdef JEMALLOC_STATS
true
#else
false
#endif
;
static const bool config_tls =
#ifdef JEMALLOC_TLS
true
#else
false
#endif
;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
true
#else
false
#endif
;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
true
#else
false
#endif
;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
true
#else
false
#endif
;
/*
* Undocumented, for jemalloc development use only at the moment. See the note
* in jemalloc/internal/log.h.
*/
static const bool config_log =
#ifdef JEMALLOC_LOG
true
#else
false
#endif
;
#ifdef JEMALLOC_HAVE_SCHED_GETCPU
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
#endif
static const bool have_percpu_arena =
#ifdef JEMALLOC_PERCPU_ARENA
true
#else
false
#endif
;
/*
* Undocumented, and not recommended; the application should take full
* responsibility for tracking provenance.
*/
static const bool force_ivsalloc =
#ifdef JEMALLOC_FORCE_IVSALLOC
true
#else
false
#endif
;
static const bool have_background_thread =
#ifdef JEMALLOC_BACKGROUND_THREAD
true
#else
false
#endif
;
#endif /* JEMALLOC_PREAMBLE_H */
deps/jemalloc/include/jemalloc/internal/large_externs.h
0 → 100644
View file @
fb1f4f4e
#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H
void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero);
bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_max, bool zero);
void *large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache);

typedef void (large_dalloc_junk_t)(void *, size_t);
extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;

typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;

void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
void large_dalloc(tsdn_t *tsdn, extent_t *extent);
size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);

#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
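The large_dalloc_junk and large_dalloc_maybe_junk pointers are declared with JET_MUTABLE (defined in jemalloc_internal_macros.h above), so test builds with JEMALLOC_JET can swap in instrumented hooks while normal builds keep them const. A hedged sketch of that pattern in isolation, with hypothetical names:

#include <stdio.h>
#include <stddef.h>

/* JET_MUTABLE expands to nothing under JEMALLOC_JET, and to const otherwise. */
#ifdef JEMALLOC_JET
#  define JET_MUTABLE
#else
#  define JET_MUTABLE const
#endif

typedef void (junk_hook_t)(void *, size_t);

static void
default_junk(void *ptr, size_t usize) {
	(void)ptr;
	printf("junking %zu bytes\n", usize);
}

/* In a JEMALLOC_JET build, a unit test may reassign this pointer to an
 * instrumented hook; otherwise it is a const pointer to the default. */
junk_hook_t *JET_MUTABLE junk_hook = default_junk;

int main(void) {
	char buf[32];
	junk_hook(buf, sizeof(buf));
	return 0;
}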