Repository: ruanhaishen/redis

Commit 1f72ec7d, authored Feb 10, 2017 by flowly, committed by GitHub on Feb 10, 2017

    Merge pull request #1 from antirez/unstable

    update to upstream

Parents: dfc98dcc, f917e0da
Changes: 150 files (too many changes to show; only 150 of 150+ files are displayed to preserve performance)
deps/jemalloc/src/arena.c
@@ -4,1020 +4,2020 @@
/******************************************************************************/
/* Data. */
purge_mode_t	opt_purge = PURGE_DEFAULT;

const char	*purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};

ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t	lg_dirty_mult_default;

ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t	decay_time_default;

arena_bin_info_t	arena_bin_info[NBINS];
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t	small_size2bin[] = {
#define S2B_8(i) i,
#define S2B_16(i) S2B_8(i) S2B_8(i)
#define S2B_32(i) S2B_16(i) S2B_16(i)
#define S2B_64(i) S2B_32(i) S2B_32(i)
#define S2B_128(i) S2B_64(i) S2B_64(i)
#define S2B_256(i) S2B_128(i) S2B_128(i)
#define S2B_512(i) S2B_256(i) S2B_256(i)
#define S2B_1024(i) S2B_512(i) S2B_512(i)
#define S2B_2048(i) S2B_1024(i) S2B_1024(i)
#define S2B_4096(i) S2B_2048(i) S2B_2048(i)
#define S2B_8192(i) S2B_4096(i) S2B_4096(i)
#define SIZE_CLASS(bin, delta, size) \
S2B_##delta(bin)
SIZE_CLASSES
#undef S2B_8
#undef S2B_16
#undef S2B_32
#undef S2B_64
#undef S2B_128
#undef S2B_256
#undef S2B_512
#undef S2B_1024
#undef S2B_2048
#undef S2B_4096
#undef S2B_8192
#undef SIZE_CLASS
};
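/*
 * Editor's note -- illustrative sketch, not part of the upstream diff.  The
 * table above is built entirely by macro expansion: SIZE_CLASSES invokes
 * SIZE_CLASS(bin, delta, size) once per small size class, and S2B_<delta>
 * repeats the bin index once per 8-byte quantum covered by that class, so a
 * request size can be mapped to its bin by quantum-granular indexing
 * (roughly small_size2bin[(size - 1) >> 3] when the smallest class is 8
 * bytes; the exact shift is an assumption here).  A minimal stand-alone
 * analogue with hypothetical 8/16/32-byte classes only:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static const uint8_t demo_size2bin[] = {
	0,	/* sizes  1..8  -> bin 0 */
	1,	/* sizes  9..16 -> bin 1 */
	2, 2	/* sizes 17..32 -> bin 2 (two 8-byte quanta) */
};

int
main(void)
{
	size_t size = 24;

	/* Same lookup shape as small_size2bin: quantum-index the table. */
	printf("size %zu -> bin %u\n", size,
	    (unsigned)demo_size2bin[(size - 1) >> 3]);
	return (0);
}
#endif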
size_t		map_bias;
size_t		map_misc_offset;
size_t		arena_maxrun; /* Max run size for arenas. */
size_t		large_maxclass; /* Max large size class. */
unsigned	nlclasses; /* Number of large size classes. */
unsigned	nhclasses; /* Number of huge size classes. */
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void	arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
    size_t pageind, size_t npages, bool maybe_adjac_pred,
    bool maybe_adjac_succ);
static void	arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
    size_t pageind, size_t npages, bool maybe_adjac_pred,
    bool maybe_adjac_succ);
static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
    bool large, size_t binind, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
    bool large, size_t binind, bool zero);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
    size_t binind, bool zero);
static arena_chunk_t *chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
    arena_chunk_t *chunk, void *arg);
static void	arena_purge(arena_t *arena, bool all);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned);
static void	arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize);
static void	arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t *arena_bin_runs_first(arena_bin_t *bin);
static void	arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
static void	arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void	*arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void	arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
static void	arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk);
static void	arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
    size_t ndirty_limit);
static void	arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
    bool dirty, bool cleaned, bool decommitted);
static void	arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_run_t *run,
    arena_bin_t *bin);
static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size);
static bool	arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool	arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
static size_t	bin_info_run_size_calc(arena_bin_info_t *bin_info,
    size_t min_run_size);
static void	bin_info_init(void);
/******************************************************************************/
static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	uintptr_t a_mapelm = (uintptr_t)a;
	uintptr_t b_mapelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}
/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
    u.rb_link, arena_run_comp)
static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
JEMALLOC_INLINE_C const extent_node_t *
arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
{
	int ret;
	size_t a_size = a->bits & ~PAGE_MASK;
	size_t b_size = b->bits & ~PAGE_MASK;
	arena_chunk_t *chunk;

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0) {
		uintptr_t a_mapelm, b_mapelm;
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	return (&chunk->node);
}

		if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
			a_mapelm = (uintptr_t)a;
		else {
			/*
			 * Treat keys as though they are lower than anything
			 * else.
			 */
			a_mapelm = 0;
		}
		b_mapelm = (uintptr_t)b;
JEMALLOC_INLINE_C int
arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
{
	size_t a_sn, b_sn;

		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
	}
	assert(a != NULL);
	assert(b != NULL);

	return (ret);
}
	a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
	b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
    u.rb_link, arena_avail_comp)
	return ((a_sn > b_sn) - (a_sn < b_sn));
}
static inline int
arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
JEMALLOC_INLINE_C int
arena_ad_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	/*
	 * Short-circuit for self comparison.  The following comparison code
	 * would come to the same result, but at the cost of executing the slow
	 * path.
	 */
	if (a == b)
		return (0);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

	/*
	 * Order such that chunks with higher fragmentation are "less than"
	 * those with lower fragmentation -- purging order is from "least" to
	 * "greatest".  Fragmentation is measured as:
	 *
	 *     mean current avail run size
	 *   --------------------------------
	 *   mean defragmented avail run size
	 *
	 *            navail
	 *         -----------
	 *         nruns_avail           nruns_avail-nruns_adjac
	 * = ========================= = -----------------------
	 *    navail                          nruns_avail
	 *    -----------------------
	 *    nruns_avail-nruns_adjac
	 *
	 * The following code multiplies away the denominator prior to
	 * comparison, in order to avoid division.
	 */
	{
		size_t a_val = (a->nruns_avail - a->nruns_adjac) *
		    b->nruns_avail;
		size_t b_val = (b->nruns_avail - b->nruns_adjac) *
		    a->nruns_avail;
JEMALLOC_INLINE_C int
arena_snad_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	int ret;

		if (a_val < b_val)
			return (1);
		if (a_val > b_val)
			return (-1);
	}
	/*
	 * Break ties by chunk address.  For fragmented chunks, report lower
	 * addresses as "lower", so that fragmentation reduction happens first
	 * at lower addresses.  However, use the opposite ordering for
	 * unfragmented chunks, in order to increase the chances of
	 * re-allocating dirty runs.
	 */
	{
		uintptr_t a_chunk = (uintptr_t)a;
		uintptr_t b_chunk = (uintptr_t)b;
		int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));

		if (a->nruns_adjac == 0) {
			assert(b->nruns_adjac == 0);
			ret = -ret;
		}
	assert(a != NULL);
	assert(b != NULL);

	ret = arena_sn_comp(a, b);
	if (ret != 0)
		return (ret);
	}

	ret = arena_ad_comp(a, b);
	return (ret);
}
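/*
 * Editor's note -- illustrative only, not part of the upstream diff.  The
 * fragmentation comparison above avoids division by cross-multiplying the two
 * ratios (nruns_avail - nruns_adjac) / nruns_avail.  A small worked example of
 * the equivalence:
 */
#if 0
#include <assert.h>
#include <stddef.h>

int
main(void)
{
	/* Hypothetical chunks: A is heavily fragmented, B is not. */
	size_t a_avail = 4, a_adjac = 3;	/* ratio = 1/4 = 0.25 */
	size_t b_avail = 5, b_adjac = 1;	/* ratio = 4/5 = 0.80 */

	/* Same cross-multiplication as arena_chunk_dirty_comp(). */
	size_t a_val = (a_avail - a_adjac) * b_avail;	/* 1 * 5 = 5  */
	size_t b_val = (b_avail - b_adjac) * a_avail;	/* 4 * 4 = 16 */

	/* a_val < b_val iff A's ratio is smaller, i.e. A is more fragmented. */
	assert(a_val < b_val);
	return (0);
}
#endif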
/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
    dirty_link, arena_chunk_dirty_comp)
/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t,
    arena_chunk_map_misc_t, ph_link, arena_snad_comp)
static inline bool
arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t
run_quantize_floor(size_t size)
{
	bool ret;
	size_t ret;
	pszind_t pind;

	if (pageind-1 < map_bias)
		ret = false;
	else {
		ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
		assert(ret == false || arena_mapbits_dirty_get(chunk,
		    pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
	assert(size > 0);
	assert(size <= HUGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	pind = psz2ind(size - large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow.  This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return (size);
	}
	ret = pind2sz(pind - 1) + large_pad;
	assert(ret <= size);
	return (ret);
}
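/*
 * Editor's note -- illustrative only.  run_quantize_floor()/_ceil() snap a
 * run size to the page-size-class grid used to index runs_avail: floor rounds
 * down to the largest class <= size, ceil rounds up to the smallest class >=
 * size (large_pad is handled separately).  A toy stand-alone sketch with a
 * hypothetical class grid, not the real psz2ind()/pind2sz() tables:
 */
#if 0
#include <assert.h>
#include <stddef.h>

/* Hypothetical size classes, in pages. */
static const size_t demo_classes[] = {1, 2, 3, 4, 5, 6, 7, 8, 10, 12};
#define	NDEMO	(sizeof(demo_classes) / sizeof(demo_classes[0]))

static size_t
demo_floor(size_t npages)
{
	size_t i, ret = demo_classes[0];

	for (i = 0; i < NDEMO && demo_classes[i] <= npages; i++)
		ret = demo_classes[i];
	return (ret);
}

static size_t
demo_ceil(size_t npages)
{
	size_t i;

	for (i = 0; i < NDEMO; i++) {
		if (demo_classes[i] >= npages)
			return (demo_classes[i]);
	}
	return (demo_classes[NDEMO - 1]);
}

int
main(void)
{
	assert(demo_floor(9) == 8);	/* round down to a class boundary */
	assert(demo_ceil(9) == 10);	/* round up to a class boundary */
	return (0);
}
#endif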
static inline bool
arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	bool ret;
	size_t ret;

	if (pageind+npages == chunk_npages)
		ret = false;
	else {
		assert(pageind+npages < chunk_npages);
		ret = (arena_mapbits_allocated_get(chunk, pageind+npages) ==
		    0);
		assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
		    != arena_mapbits_dirty_get(chunk, pageind+npages));
	assert(size > 0);
	assert(size <= HUGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in.  This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
	}
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif
static inline bool
arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	return (arena_avail_adjac_pred(chunk, pageind) ||
	    arena_avail_adjac_succ(chunk, pageind, npages));
	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert((npages << LG_PAGE) < chunksize);
	assert(pind2sz(pind) <= chunksize);
	arena_run_heap_insert(&arena->runs_avail[pind],
	    arena_miscelm_get_mutable(chunk, pageind));
}
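/*
 * Editor's note -- illustrative only.  The new arena_avail_insert() keys each
 * free run into one of several size-class-indexed heaps
 * (arena->runs_avail[pind]), where pind is derived from the quantized run
 * size; first-best-fit allocation (arena_run_first_best_fit(), later in this
 * file) then scans heaps from the requested class upward instead of searching
 * one global tree.  Toy sketch of that lookup pattern with hypothetical types:
 */
#if 0
#include <stddef.h>

#define	DEMO_NCLASSES	8

typedef struct demo_run_s demo_run_t;
struct demo_run_s {
	demo_run_t	*next;	/* intrusive freelist standing in for a heap */
	size_t		npages;
};

static demo_run_t	*demo_avail[DEMO_NCLASSES];	/* runs_avail analogue */

/* Insert a free run into the list for its size class. */
static void
demo_avail_insert(demo_run_t *run, size_t pind)
{
	run->next = demo_avail[pind];
	demo_avail[pind] = run;
}

/* First-best-fit: take the first run in the smallest class that fits. */
static demo_run_t *
demo_first_best_fit(size_t pind)
{
	size_t i;

	for (i = pind; i < DEMO_NCLASSES; i++) {
		if (demo_avail[i] != NULL) {
			demo_run_t *run = demo_avail[i];
			demo_avail[i] = run->next;
			return (run);
		}
	}
	return (NULL);
}
#endif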
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert((npages << LG_PAGE) < chunksize);
	assert(pind2sz(pind) <= chunksize);
	arena_run_heap_remove(&arena->runs_avail[pind],
	    arena_miscelm_get_mutable(chunk, pageind));
}
	/*
	 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
	 * removed and reinserted even if the run to be inserted is clean.
	 */
	if (chunk->ndirty != 0)
		arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

	if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
		chunk->nruns_adjac++;
	if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
		chunk->nruns_adjac++;
	chunk->nruns_avail++;
	assert(chunk->nruns_avail > chunk->nruns_adjac);
static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
		arena->ndirty += npages;
		chunk->ndirty += npages;
	}
	if (chunk->ndirty != 0)
		arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
	    pageind));
	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}
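/*
 * Editor's note -- illustrative only.  arena_run_dirty_insert() links the
 * run's map_misc element into arena->runs_dirty, an intrusive circular ring
 * (the qr_*() macros), so dirty runs can be walked in purge order without any
 * per-node allocation.  A minimal hand-rolled analogue of that linkage (not
 * the real qr.h macros):
 */
#if 0
#include <assert.h>
#include <stddef.h>

typedef struct demo_rd_s demo_rd_t;
struct demo_rd_s {
	demo_rd_t	*prev;
	demo_rd_t	*next;
};

/* qr_new() analogue: a node starts as a ring of one. */
static void
demo_ring_init(demo_rd_t *rd)
{
	rd->prev = rd;
	rd->next = rd;
}

/* qr_meld() analogue: splice a single node into the ring before head. */
static void
demo_ring_insert_before(demo_rd_t *head, demo_rd_t *rd)
{
	rd->prev = head->prev;
	rd->next = head;
	rd->prev->next = rd;
	head->prev = rd;
}

int
main(void)
{
	demo_rd_t head, a, b;

	demo_ring_init(&head);
	demo_ring_init(&a);
	demo_ring_init(&b);
	demo_ring_insert_before(&head, &a);
	demo_ring_insert_before(&head, &b);
	/* Ring order is head -> a -> b -> head. */
	assert(head.next == &a && a.next == &b && b.next == &head);
	return (0);
}
#endif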
static
void
arena_
avail
_remove
(
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
size_t
pageind
,
size_t
npages
,
bool
maybe_adjac_pred
,
bool
maybe_adjac_succ
)
arena_
run_dirty
_remove
(
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
size_t
pageind
,
size_t
npages
)
{
arena_chunk_map_misc_t
*
miscelm
=
arena_miscelm_get_mutable
(
chunk
,
pageind
);
assert
(
npages
==
(
arena_mapbits_unallocated_size_get
(
chunk
,
pageind
)
>>
LG_PAGE
));
assert
(
arena_mapbits_dirty_get
(
chunk
,
pageind
)
==
CHUNK_MAP_DIRTY
);
assert
(
arena_mapbits_dirty_get
(
chunk
,
pageind
+
npages
-
1
)
==
CHUNK_MAP_DIRTY
);
/*
* chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
* removed and reinserted even if the run to be removed is clean.
*/
if
(
chunk
->
ndirty
!=
0
)
arena_chunk_dirty_remove
(
&
arena
->
chunks_dirty
,
chunk
);
qr_remove
(
&
miscelm
->
rd
,
rd_link
);
assert
(
arena
->
ndirty
>=
npages
);
arena
->
ndirty
-=
npages
;
}
static
size_t
arena_chunk_dirty_npages
(
const
extent_node_t
*
node
)
{
return
(
extent_node_size_get
(
node
)
>>
LG_PAGE
);
}
if
(
maybe_adjac_pred
&&
arena_avail_adjac_pred
(
chunk
,
pageind
))
chunk
->
nruns_adjac
--
;
if
(
maybe_adjac_succ
&&
arena_avail_adjac_succ
(
chunk
,
pageind
,
npages
))
chunk
->
nruns_adjac
--
;
chunk
->
nruns_avail
--
;
assert
(
chunk
->
nruns_avail
>
chunk
->
nruns_adjac
||
(
chunk
->
nruns_avail
==
0
&&
chunk
->
nruns_adjac
==
0
));
void
arena_chunk_cache_maybe_insert
(
arena_t
*
arena
,
extent_node_t
*
node
,
bool
cache
)
{
if
(
arena_mapbits_dirty_get
(
chunk
,
pageind
)
!=
0
)
{
arena
->
ndirty
-=
npages
;
chunk
->
ndirty
-=
npages
;
if
(
cache
)
{
extent_node_dirty_linkage_init
(
node
);
extent_node_dirty_insert
(
node
,
&
arena
->
runs_dirty
,
&
arena
->
chunks_cache
);
arena
->
ndirty
+=
arena_chunk_dirty_npages
(
node
);
}
if
(
chunk
->
ndirty
!=
0
)
arena_chunk_dirty_insert
(
&
arena
->
chunks_dirty
,
chunk
);
}
void
arena_chunk_cache_maybe_remove
(
arena_t
*
arena
,
extent_node_t
*
node
,
bool
dirty
)
{
arena_avail_tree_remove
(
&
arena
->
runs_avail
,
arena_mapp_get
(
chunk
,
pageind
));
if
(
dirty
)
{
extent_node_dirty_remove
(
node
);
assert
(
arena
->
ndirty
>=
arena_chunk_dirty_npages
(
node
));
arena
->
ndirty
-=
arena_chunk_dirty_npages
(
node
);
}
}
static
inline
void
*
JEMALLOC_INLINE_C
void
*
arena_run_reg_alloc
(
arena_run_t
*
run
,
arena_bin_info_t
*
bin_info
)
{
void
*
ret
;
unsigned
regind
;
bitmap_t
*
bitmap
=
(
bitmap_t
*
)((
uintptr_t
)
run
+
(
uintptr_t
)
bin_info
->
bitmap_offset
)
;
size_t
regind
;
arena_chunk_map_misc_t
*
miscelm
;
void
*
rpages
;
assert
(
run
->
nfree
>
0
);
assert
(
bitmap_full
(
bitmap
,
&
bin_info
->
bitmap_info
)
==
false
);
assert
(
!
bitmap_full
(
run
->
bitmap
,
&
bin_info
->
bitmap_info
));
regind
=
bitmap_sfu
(
bitmap
,
&
bin_info
->
bitmap_info
);
ret
=
(
void
*
)((
uintptr_t
)
run
+
(
uintptr_t
)
bin_info
->
reg0_offset
+
regind
=
(
unsigned
)
bitmap_sfu
(
run
->
bitmap
,
&
bin_info
->
bitmap_info
);
miscelm
=
arena_run_to_miscelm
(
run
);
rpages
=
arena_miscelm_to_rpages
(
miscelm
);
ret
=
(
void
*
)((
uintptr_t
)
rpages
+
(
uintptr_t
)
bin_info
->
reg0_offset
+
(
uintptr_t
)(
bin_info
->
reg_interval
*
regind
));
run
->
nfree
--
;
if
(
regind
==
run
->
nextind
)
run
->
nextind
++
;
assert
(
regind
<
run
->
nextind
);
return
(
ret
);
}
static
inline
void
JEMALLOC_INLINE_C
void
arena_run_reg_dalloc
(
arena_run_t
*
run
,
void
*
ptr
)
{
arena_chunk_t
*
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
run
);
size_t
pageind
=
((
uintptr_t
)
ptr
-
(
uintptr_t
)
chunk
)
>>
LG_PAGE
;
size_t
mapbits
=
arena_mapbits_get
(
chunk
,
pageind
);
s
ize
_t
binind
=
arena_ptr_small_binind_get
(
ptr
,
mapbits
);
s
zind
_t
binind
=
arena_ptr_small_binind_get
(
ptr
,
mapbits
);
arena_bin_info_t
*
bin_info
=
&
arena_bin_info
[
binind
];
unsigned
regind
=
arena_run_regind
(
run
,
bin_info
,
ptr
);
bitmap_t
*
bitmap
=
(
bitmap_t
*
)((
uintptr_t
)
run
+
(
uintptr_t
)
bin_info
->
bitmap_offset
);
size_t
regind
=
arena_run_regind
(
run
,
bin_info
,
ptr
);
assert
(
run
->
nfree
<
bin_info
->
nregs
);
/* Freeing an interior pointer can cause assertion failure. */
assert
(((
uintptr_t
)
ptr
-
((
uintptr_t
)
run
+
assert
(((
uintptr_t
)
ptr
-
((
uintptr_t
)
arena_miscelm_to_rpages
(
arena_run_to_miscelm
(
run
))
+
(
uintptr_t
)
bin_info
->
reg0_offset
))
%
(
uintptr_t
)
bin_info
->
reg_interval
==
0
);
assert
((
uintptr_t
)
ptr
>=
(
uintptr_t
)
run
+
assert
((
uintptr_t
)
ptr
>=
(
uintptr_t
)
arena_miscelm_to_rpages
(
arena_run_to_miscelm
(
run
))
+
(
uintptr_t
)
bin_info
->
reg0_offset
);
/* Freeing an unallocated pointer can cause assertion failure. */
assert
(
bitmap_get
(
bitmap
,
&
bin_info
->
bitmap_info
,
regind
));
assert
(
bitmap_get
(
run
->
bitmap
,
&
bin_info
->
bitmap_info
,
regind
));
bitmap_unset
(
bitmap
,
&
bin_info
->
bitmap_info
,
regind
);
bitmap_unset
(
run
->
bitmap
,
&
bin_info
->
bitmap_info
,
regind
);
run
->
nfree
++
;
}
static
inline
void
arena_chunk_validate_zeroed
(
arena_chunk_t
*
chunk
,
size_t
run_ind
)
JEMALLOC_INLINE_C
void
arena_run_zero
(
arena_chunk_t
*
chunk
,
size_t
run_ind
,
size_t
npages
)
{
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED
((
void
*
)((
uintptr_t
)
chunk
+
(
run_ind
<<
LG_PAGE
)),
(
npages
<<
LG_PAGE
));
memset
((
void
*
)((
uintptr_t
)
chunk
+
(
run_ind
<<
LG_PAGE
)),
0
,
(
npages
<<
LG_PAGE
));
}
JEMALLOC_INLINE_C
void
arena_run_page_mark_zeroed
(
arena_chunk_t
*
chunk
,
size_t
run_ind
)
{
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED
((
void
*
)((
uintptr_t
)
chunk
+
(
run_ind
<<
LG_PAGE
)),
PAGE
);
}
JEMALLOC_INLINE_C
void
arena_run_page_validate_zeroed
(
arena_chunk_t
*
chunk
,
size_t
run_ind
)
{
size_t
i
;
UNUSED
size_t
*
p
=
(
size_t
*
)((
uintptr_t
)
chunk
+
(
run_ind
<<
LG_PAGE
));
arena_run_page_mark_zeroed
(
chunk
,
run_ind
);
for
(
i
=
0
;
i
<
PAGE
/
sizeof
(
size_t
);
i
++
)
assert
(
p
[
i
]
==
0
);
}
static
void
arena_run_split
(
arena_t
*
arena
,
arena_run_t
*
run
,
size_t
size
,
bool
large
,
size_t
binind
,
bool
zero
)
arena_nactive_add
(
arena_t
*
arena
,
size_t
add_pages
)
{
arena_chunk_t
*
chunk
;
size_t
run_ind
,
total_pages
,
need_pages
,
rem_pages
,
i
;
size_t
flag_dirty
;
assert
((
large
&&
binind
==
BININD_INVALID
)
||
(
large
==
false
&&
binind
!=
BININD_INVALID
));
if
(
config_stats
)
{
size_t
cactive_add
=
CHUNK_CEILING
((
arena
->
nactive
+
add_pages
)
<<
LG_PAGE
)
-
CHUNK_CEILING
(
arena
->
nactive
<<
LG_PAGE
);
if
(
cactive_add
!=
0
)
stats_cactive_add
(
cactive_add
);
}
arena
->
nactive
+=
add_pages
;
}
static
void
arena_nactive_sub
(
arena_t
*
arena
,
size_t
sub_pages
)
{
if
(
config_stats
)
{
size_t
cactive_sub
=
CHUNK_CEILING
(
arena
->
nactive
<<
LG_PAGE
)
-
CHUNK_CEILING
((
arena
->
nactive
-
sub_pages
)
<<
LG_PAGE
);
if
(
cactive_sub
!=
0
)
stats_cactive_sub
(
cactive_sub
);
}
arena
->
nactive
-=
sub_pages
;
}
static
void
arena_run_split_remove
(
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
size_t
run_ind
,
size_t
flag_dirty
,
size_t
flag_decommitted
,
size_t
need_pages
)
{
size_t
total_pages
,
rem_pages
;
assert
(
flag_dirty
==
0
||
flag_decommitted
==
0
);
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
run
);
run_ind
=
(
unsigned
)(((
uintptr_t
)
run
-
(
uintptr_t
)
chunk
)
>>
LG_PAGE
);
flag_dirty
=
arena_mapbits_dirty_get
(
chunk
,
run_ind
);
total_pages
=
arena_mapbits_unallocated_size_get
(
chunk
,
run_ind
)
>>
LG_PAGE
;
assert
(
arena_mapbits_dirty_get
(
chunk
,
run_ind
+
total_pages
-
1
)
==
flag_dirty
);
need_pages
=
(
size
>>
LG_PAGE
);
assert
(
need_pages
>
0
);
assert
(
need_pages
<=
total_pages
);
rem_pages
=
total_pages
-
need_pages
;
arena_avail_remove
(
arena
,
chunk
,
run_ind
,
total_pages
,
true
,
true
);
if
(
config_stats
)
{
/*
* Update stats_cactive if nactive is crossing a chunk
* multiple.
*/
size_t
cactive_diff
=
CHUNK_CEILING
((
arena
->
nactive
+
need_pages
)
<<
LG_PAGE
)
-
CHUNK_CEILING
(
arena
->
nactive
<<
LG_PAGE
);
if
(
cactive_diff
!=
0
)
stats_cactive_add
(
cactive_diff
);
}
arena
->
nactive
+=
need_pages
;
arena_avail_remove
(
arena
,
chunk
,
run_ind
,
total_pages
);
if
(
flag_dirty
!=
0
)
arena_run_dirty_remove
(
arena
,
chunk
,
run_ind
,
total_pages
);
arena_nactive_add
(
arena
,
need_pages
);
/* Keep track of trailing unused pages for later use. */
if
(
rem_pages
>
0
)
{
size_t
flags
=
flag_dirty
|
flag_decommitted
;
size_t
flag_unzeroed_mask
=
(
flags
==
0
)
?
CHUNK_MAP_UNZEROED
:
0
;
arena_mapbits_unallocated_set
(
chunk
,
run_ind
+
need_pages
,
(
rem_pages
<<
LG_PAGE
),
flags
|
(
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
+
need_pages
)
&
flag_unzeroed_mask
));
arena_mapbits_unallocated_set
(
chunk
,
run_ind
+
total_pages
-
1
,
(
rem_pages
<<
LG_PAGE
),
flags
|
(
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
+
total_pages
-
1
)
&
flag_unzeroed_mask
));
if
(
flag_dirty
!=
0
)
{
arena_mapbits_unallocated_set
(
chunk
,
run_ind
+
need_pages
,
(
rem_pages
<<
LG_PAGE
),
CHUNK_MAP_DIRTY
);
arena_mapbits_unallocated_set
(
chunk
,
run_ind
+
total_pages
-
1
,
(
rem_pages
<<
LG_PAGE
),
CHUNK_MAP_DIRTY
);
}
else
{
arena_mapbits_unallocated_set
(
chunk
,
run_ind
+
need_pages
,
(
rem_pages
<<
LG_PAGE
),
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
+
need_pages
));
arena_mapbits_unallocated_set
(
chunk
,
run_ind
+
total_pages
-
1
,
(
rem_pages
<<
LG_PAGE
),
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
+
total_pages
-
1
));
arena_run_dirty_insert
(
arena
,
chunk
,
run_ind
+
need_pages
,
rem_pages
);
}
arena_avail_insert
(
arena
,
chunk
,
run_ind
+
need_pages
,
rem_pages
,
false
,
true
);
arena_avail_insert
(
arena
,
chunk
,
run_ind
+
need_pages
,
rem_pages
);
}
}
/*
* Update the page map separately for large vs. small runs, since it is
* possible to avoid iteration for large mallocs.
*/
if
(
large
)
{
if
(
zero
)
{
if
(
flag_dirty
==
0
)
{
/*
* The run is clean, so some pages may be
* zeroed (i.e. never before touched).
*/
for
(
i
=
0
;
i
<
need_pages
;
i
++
)
{
if
(
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
+
i
)
!=
0
)
{
VALGRIND_MAKE_MEM_UNDEFINED
(
(
void
*
)((
uintptr_t
)
chunk
+
((
run_ind
+
i
)
<<
LG_PAGE
)),
PAGE
);
memset
((
void
*
)((
uintptr_t
)
chunk
+
((
run_ind
+
i
)
<<
LG_PAGE
)),
0
,
PAGE
);
}
else
if
(
config_debug
)
{
VALGRIND_MAKE_MEM_DEFINED
(
(
void
*
)((
uintptr_t
)
chunk
+
((
run_ind
+
i
)
<<
LG_PAGE
)),
PAGE
);
arena_chunk_validate_zeroed
(
chunk
,
run_ind
+
i
);
}
static
bool
arena_run_split_large_helper
(
arena_t
*
arena
,
arena_run_t
*
run
,
size_t
size
,
bool
remove
,
bool
zero
)
{
arena_chunk_t
*
chunk
;
arena_chunk_map_misc_t
*
miscelm
;
size_t
flag_dirty
,
flag_decommitted
,
run_ind
,
need_pages
;
size_t
flag_unzeroed_mask
;
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
run
);
miscelm
=
arena_run_to_miscelm
(
run
);
run_ind
=
arena_miscelm_to_pageind
(
miscelm
);
flag_dirty
=
arena_mapbits_dirty_get
(
chunk
,
run_ind
);
flag_decommitted
=
arena_mapbits_decommitted_get
(
chunk
,
run_ind
);
need_pages
=
(
size
>>
LG_PAGE
);
assert
(
need_pages
>
0
);
if
(
flag_decommitted
!=
0
&&
arena
->
chunk_hooks
.
commit
(
chunk
,
chunksize
,
run_ind
<<
LG_PAGE
,
size
,
arena
->
ind
))
return
(
true
);
if
(
remove
)
{
arena_run_split_remove
(
arena
,
chunk
,
run_ind
,
flag_dirty
,
flag_decommitted
,
need_pages
);
}
if
(
zero
)
{
if
(
flag_decommitted
!=
0
)
{
/* The run is untouched, and therefore zeroed. */
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED
((
void
*
)((
uintptr_t
)
chunk
+
(
run_ind
<<
LG_PAGE
)),
(
need_pages
<<
LG_PAGE
));
}
else
if
(
flag_dirty
!=
0
)
{
/* The run is dirty, so all pages must be zeroed. */
arena_run_zero
(
chunk
,
run_ind
,
need_pages
);
}
else
{
/*
* The run is clean, so some pages may be zeroed (i.e.
* never before touched).
*/
size_t
i
;
for
(
i
=
0
;
i
<
need_pages
;
i
++
)
{
if
(
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
+
i
)
!=
0
)
arena_run_zero
(
chunk
,
run_ind
+
i
,
1
);
else
if
(
config_debug
)
{
arena_run_page_validate_zeroed
(
chunk
,
run_ind
+
i
);
}
else
{
arena_run_page_mark_zeroed
(
chunk
,
run_ind
+
i
);
}
}
else
{
/*
* The run is dirty, so all pages must be
* zeroed.
*/
VALGRIND_MAKE_MEM_UNDEFINED
((
void
*
)((
uintptr_t
)
chunk
+
(
run_ind
<<
LG_PAGE
)),
(
need_pages
<<
LG_PAGE
));
memset
((
void
*
)((
uintptr_t
)
chunk
+
(
run_ind
<<
LG_PAGE
)),
0
,
(
need_pages
<<
LG_PAGE
));
}
}
/*
* Set the last element first, in case the run only contains one
* page (i.e. both statements set the same element).
*/
arena_mapbits_large_set
(
chunk
,
run_ind
+
need_pages
-
1
,
0
,
flag_dirty
);
arena_mapbits_large_set
(
chunk
,
run_ind
,
size
,
flag_dirty
);
}
else
{
assert
(
zero
==
false
);
/*
* Propagate the dirty and unzeroed flags to the allocated
* small run, so that arena_dalloc_bin_run() has the ability to
* conditionally trim clean pages.
*/
arena_mapbits_small_set
(
chunk
,
run_ind
,
0
,
binind
,
flag_dirty
);
/*
* The first page will always be dirtied during small run
* initialization, so a validation failure here would not
* actually cause an observable failure.
*/
if
(
config_debug
&&
flag_dirty
==
0
&&
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
)
==
0
)
arena_chunk_validate_zeroed
(
chunk
,
run_ind
);
for
(
i
=
1
;
i
<
need_pages
-
1
;
i
++
)
{
arena_mapbits_small_set
(
chunk
,
run_ind
+
i
,
i
,
binind
,
0
);
if
(
config_debug
&&
flag_dirty
==
0
&&
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
+
i
)
==
0
)
arena_chunk_validate_zeroed
(
chunk
,
run_ind
+
i
);
}
arena_mapbits_small_set
(
chunk
,
run_ind
+
need_pages
-
1
,
need_pages
-
1
,
binind
,
flag_dirty
);
if
(
config_debug
&&
flag_dirty
==
0
&&
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
+
need_pages
-
1
)
==
0
)
{
arena_chunk_validate_zeroed
(
chunk
,
run_ind
+
need_pages
-
1
);
}
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED
((
void
*
)((
uintptr_t
)
chunk
+
(
run_ind
<<
LG_PAGE
)),
(
need_pages
<<
LG_PAGE
));
}
/*
* Set the last element first, in case the run only contains one page
* (i.e. both statements set the same element).
*/
flag_unzeroed_mask
=
(
flag_dirty
|
flag_decommitted
)
==
0
?
CHUNK_MAP_UNZEROED
:
0
;
arena_mapbits_large_set
(
chunk
,
run_ind
+
need_pages
-
1
,
0
,
flag_dirty
|
(
flag_unzeroed_mask
&
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
+
need_pages
-
1
)));
arena_mapbits_large_set
(
chunk
,
run_ind
,
size
,
flag_dirty
|
(
flag_unzeroed_mask
&
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
)));
return
(
false
);
}
static
arena_chunk_t
*
arena_
chunk_alloc
(
arena_t
*
arena
)
static
bool
arena_
run_split_large
(
arena_t
*
arena
,
arena_run_t
*
run
,
size_t
size
,
bool
zero
)
{
arena_chunk_t
*
chunk
;
size_t
i
;
if
(
arena
->
spare
!=
NULL
)
{
chunk
=
arena
->
spare
;
arena
->
spare
=
NULL
;
return
(
arena_run_split_large_helper
(
arena
,
run
,
size
,
true
,
zero
));
}
assert
(
arena_mapbits_allocated_get
(
chunk
,
map_bias
)
==
0
);
assert
(
arena_mapbits_allocated_get
(
chunk
,
chunk_npages
-
1
)
==
0
);
assert
(
arena_mapbits_unallocated_size_get
(
chunk
,
map_bias
)
==
arena_maxclass
);
assert
(
arena_mapbits_unallocated_size_get
(
chunk
,
chunk_npages
-
1
)
==
arena_maxclass
);
assert
(
arena_mapbits_dirty_get
(
chunk
,
map_bias
)
==
arena_mapbits_dirty_get
(
chunk
,
chunk_npages
-
1
));
}
else
{
bool
zero
;
size_t
unzeroed
;
zero
=
false
;
malloc_mutex_unlock
(
&
arena
->
lock
);
chunk
=
(
arena_chunk_t
*
)
chunk_alloc
(
chunksize
,
chunksize
,
false
,
&
zero
,
arena
->
dss_prec
);
malloc_mutex_lock
(
&
arena
->
lock
);
if
(
chunk
==
NULL
)
return
(
NULL
);
if
(
config_stats
)
arena
->
stats
.
mapped
+=
chunksize
;
static
bool
arena_run_init_large
(
arena_t
*
arena
,
arena_run_t
*
run
,
size_t
size
,
bool
zero
)
{
chunk
->
arena
=
arena
;
return
(
arena_run_split_large_helper
(
arena
,
run
,
size
,
false
,
zero
));
}
/*
* Claim that no pages are in use, since the header is merely
* overhead.
*/
chunk
->
ndirty
=
0
;
static
bool
arena_run_split_small
(
arena_t
*
arena
,
arena_run_t
*
run
,
size_t
size
,
szind_t
binind
)
{
arena_chunk_t
*
chunk
;
arena_chunk_map_misc_t
*
miscelm
;
size_t
flag_dirty
,
flag_decommitted
,
run_ind
,
need_pages
,
i
;
chunk
->
nruns_avail
=
0
;
chunk
->
nruns_adjac
=
0
;
assert
(
binind
!=
BININD_INVALID
);
/*
* Initialize the map to contain one maximal free untouched run.
* Mark the pages as zeroed iff chunk_alloc() returned a zeroed
* chunk.
*/
unzeroed
=
zero
?
0
:
CHUNK_MAP_UNZEROED
;
arena_mapbits_unallocated_set
(
chunk
,
map_bias
,
arena_maxclass
,
unzeroed
);
/*
* There is no need to initialize the internal page map entries
* unless the chunk is not zeroed.
*/
if
(
zero
==
false
)
{
for
(
i
=
map_bias
+
1
;
i
<
chunk_npages
-
1
;
i
++
)
arena_mapbits_unzeroed_set
(
chunk
,
i
,
unzeroed
);
}
else
if
(
config_debug
)
{
for
(
i
=
map_bias
+
1
;
i
<
chunk_npages
-
1
;
i
++
)
{
assert
(
arena_mapbits_unzeroed_get
(
chunk
,
i
)
==
unzeroed
);
}
}
arena_mapbits_unallocated_set
(
chunk
,
chunk_npages
-
1
,
arena_maxclass
,
unzeroed
);
}
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
run
);
miscelm
=
arena_run_to_miscelm
(
run
);
run_ind
=
arena_miscelm_to_pageind
(
miscelm
);
flag_dirty
=
arena_mapbits_dirty_get
(
chunk
,
run_ind
);
flag_decommitted
=
arena_mapbits_decommitted_get
(
chunk
,
run_ind
);
need_pages
=
(
size
>>
LG_PAGE
);
assert
(
need_pages
>
0
);
/* Insert the run into the runs_avail tree. */
arena_avail_insert
(
arena
,
chunk
,
map_bias
,
chunk_npages
-
map_bias
,
false
,
fals
e
);
if
(
flag_decommitted
!=
0
&&
arena
->
chunk_hooks
.
commit
(
chunk
,
chunksize
,
run_ind
<<
LG_PAGE
,
size
,
arena
->
ind
))
return
(
tru
e
);
return
(
chunk
);
arena_run_split_remove
(
arena
,
chunk
,
run_ind
,
flag_dirty
,
flag_decommitted
,
need_pages
);
for
(
i
=
0
;
i
<
need_pages
;
i
++
)
{
size_t
flag_unzeroed
=
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
+
i
);
arena_mapbits_small_set
(
chunk
,
run_ind
+
i
,
i
,
binind
,
flag_unzeroed
);
if
(
config_debug
&&
flag_dirty
==
0
&&
flag_unzeroed
==
0
)
arena_run_page_validate_zeroed
(
chunk
,
run_ind
+
i
);
}
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED
((
void
*
)((
uintptr_t
)
chunk
+
(
run_ind
<<
LG_PAGE
)),
(
need_pages
<<
LG_PAGE
));
return
(
false
);
}
static
void
arena_chunk_
dealloc
(
arena_t
*
arena
,
arena_chunk_t
*
chunk
)
static
arena_chunk_t
*
arena_chunk_
init_spare
(
arena_t
*
arena
)
{
arena_chunk_t
*
chunk
;
assert
(
arena
->
spare
!=
NULL
);
chunk
=
arena
->
spare
;
arena
->
spare
=
NULL
;
assert
(
arena_mapbits_allocated_get
(
chunk
,
map_bias
)
==
0
);
assert
(
arena_mapbits_allocated_get
(
chunk
,
chunk_npages
-
1
)
==
0
);
assert
(
arena_mapbits_unallocated_size_get
(
chunk
,
map_bias
)
==
arena_max
class
);
arena_max
run
);
assert
(
arena_mapbits_unallocated_size_get
(
chunk
,
chunk_npages
-
1
)
==
arena_max
class
);
arena_max
run
);
assert
(
arena_mapbits_dirty_get
(
chunk
,
map_bias
)
==
arena_mapbits_dirty_get
(
chunk
,
chunk_npages
-
1
));
return
(
chunk
);
}
static
bool
arena_chunk_register
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
size_t
sn
,
bool
zero
)
{
/*
* Remove run from the runs_avail tree, so that the arena does not use
* it.
* The extent node notion of "committed" doesn't directly apply to
* arena chunks. Arbitrarily mark them as committed. The commit state
* of runs is tracked individually, and upon chunk deallocation the
* entire chunk is in a consistent commit state.
*/
arena_avail_remove
(
arena
,
chunk
,
map_bias
,
chunk_npages
-
map_bias
,
false
,
false
);
extent_node_init
(
&
chunk
->
node
,
arena
,
chunk
,
chunksize
,
sn
,
zero
,
true
);
extent_node_achunk_set
(
&
chunk
->
node
,
true
);
return
(
chunk_register
(
tsdn
,
chunk
,
&
chunk
->
node
));
}
if
(
arena
->
spare
!=
NULL
)
{
arena_chunk_t
*
spare
=
arena
->
spare
;
static
arena_chunk_t
*
arena_chunk_alloc_internal_hard
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
chunk_hooks_t
*
chunk_hooks
,
bool
*
zero
,
bool
*
commit
)
{
arena_chunk_t
*
chunk
;
size_t
sn
;
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
chunk
=
(
arena_chunk_t
*
)
chunk_alloc_wrapper
(
tsdn
,
arena
,
chunk_hooks
,
NULL
,
chunksize
,
chunksize
,
&
sn
,
zero
,
commit
);
if
(
chunk
!=
NULL
&&
!*
commit
)
{
/* Commit header. */
if
(
chunk_hooks
->
commit
(
chunk
,
chunksize
,
0
,
map_bias
<<
LG_PAGE
,
arena
->
ind
))
{
chunk_dalloc_wrapper
(
tsdn
,
arena
,
chunk_hooks
,
(
void
*
)
chunk
,
chunksize
,
sn
,
*
zero
,
*
commit
);
chunk
=
NULL
;
}
}
if
(
chunk
!=
NULL
&&
arena_chunk_register
(
tsdn
,
arena
,
chunk
,
sn
,
*
zero
))
{
if
(
!*
commit
)
{
/* Undo commit of header. */
chunk_hooks
->
decommit
(
chunk
,
chunksize
,
0
,
map_bias
<<
LG_PAGE
,
arena
->
ind
);
}
chunk_dalloc_wrapper
(
tsdn
,
arena
,
chunk_hooks
,
(
void
*
)
chunk
,
chunksize
,
sn
,
*
zero
,
*
commit
);
chunk
=
NULL
;
}
arena
->
spare
=
chunk
;
malloc_mutex_unlock
(
&
arena
->
lock
);
chunk_dealloc
((
void
*
)
spare
,
chunksize
,
true
);
malloc_mutex_lock
(
&
arena
->
lock
);
if
(
config_stats
)
arena
->
stats
.
mapped
-=
chunksize
;
}
else
arena
->
spare
=
chunk
;
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
return
(
chunk
);
}
static
arena_
r
un_t
*
arena_
r
un_alloc_
helper
(
arena_t
*
arena
,
size_t
size
,
bool
large
,
size_t
binind
,
bool
zero
)
static
arena_
ch
un
k
_t
*
arena_
ch
un
k
_alloc_
internal
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
bool
*
zero
,
bool
*
commit
)
{
arena_run_t
*
run
;
arena_chunk_map_t
*
mapelm
,
key
;
arena_chunk_t
*
chunk
;
chunk_hooks_t
chunk_hooks
=
CHUNK_HOOKS_INITIALIZER
;
size_t
sn
;
key
.
bits
=
size
|
CHUNK_MAP_KEY
;
mapelm
=
arena_avail_tree_nsearch
(
&
arena
->
runs_avail
,
&
key
);
if
(
mapelm
!=
NULL
)
{
arena_chunk_t
*
run_chunk
=
CHUNK_ADDR2BASE
(
mapelm
);
size_t
pageind
=
(((
uintptr_t
)
mapelm
-
(
uintptr_t
)
run_chunk
->
map
)
/
sizeof
(
arena_chunk_map_t
))
+
map_bias
;
chunk
=
chunk_alloc_cache
(
tsdn
,
arena
,
&
chunk_hooks
,
NULL
,
chunksize
,
chunksize
,
&
sn
,
zero
,
commit
,
true
);
if
(
chunk
!=
NULL
)
{
if
(
arena_chunk_register
(
tsdn
,
arena
,
chunk
,
sn
,
*
zero
))
{
chunk_dalloc_cache
(
tsdn
,
arena
,
&
chunk_hooks
,
chunk
,
chunksize
,
sn
,
true
);
return
(
NULL
);
}
}
if
(
chunk
==
NULL
)
{
chunk
=
arena_chunk_alloc_internal_hard
(
tsdn
,
arena
,
&
chunk_hooks
,
zero
,
commit
);
}
run
=
(
arena_run_t
*
)((
uintptr_t
)
run_chunk
+
(
pageind
<<
LG_PAGE
));
arena_run_split
(
arena
,
run
,
size
,
large
,
binind
,
zero
);
return
(
run
);
if
(
config_stats
&&
chunk
!=
NULL
)
{
arena
->
stats
.
mapped
+=
chunksize
;
arena
->
stats
.
metadata_mapped
+=
(
map_bias
<<
LG_PAGE
);
}
return
(
NULL
);
return
(
chunk
);
}
static
arena_run_t
*
arena_run_alloc
(
arena_t
*
arena
,
size_t
size
,
bool
large
,
size_t
binind
,
bool
zero
)
static
arena_chunk_t
*
arena_chunk_init_hard
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
arena_chunk_t
*
chunk
;
arena_run_t
*
run
;
bool
zero
,
commit
;
size_t
flag_unzeroed
,
flag_decommitted
,
i
;
assert
(
size
<=
arena_maxclass
);
assert
((
size
&
PAGE_MASK
)
==
0
);
assert
((
large
&&
binind
==
BININD_INVALID
)
||
(
large
==
false
&&
binind
!=
BININD_INVALID
));
assert
(
arena
->
spare
==
NULL
);
/* Search the arena's chunks for the lowest best fit. */
run
=
arena_run_alloc_helper
(
arena
,
size
,
large
,
binind
,
zero
);
if
(
run
!=
NULL
)
return
(
run
);
zero
=
false
;
commit
=
false
;
chunk
=
arena_chunk_alloc_internal
(
tsdn
,
arena
,
&
zero
,
&
commit
);
if
(
chunk
==
NULL
)
return
(
NULL
);
chunk
->
hugepage
=
true
;
/*
* No usable runs. Create a new chunk from which to allocate the run.
* Initialize the map to contain one maximal free untouched run. Mark
* the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
* or decommitted chunk.
*/
chunk
=
arena_chunk_alloc
(
arena
);
if
(
chunk
!=
NULL
)
{
run
=
(
arena_run_t
*
)((
uintptr_t
)
chunk
+
(
map_bias
<<
LG_PAGE
));
arena_run_split
(
arena
,
run
,
size
,
large
,
binind
,
zero
);
return
(
run
);
}
flag_unzeroed
=
(
zero
||
!
commit
)
?
0
:
CHUNK_MAP_UNZEROED
;
flag_decommitted
=
commit
?
0
:
CHUNK_MAP_DECOMMITTED
;
arena_mapbits_unallocated_set
(
chunk
,
map_bias
,
arena_maxrun
,
flag_unzeroed
|
flag_decommitted
);
/*
* arena_chunk_alloc() failed, but another thread may have made
* There is no need to initialize the internal page map entries unless
* the chunk is not zeroed.
*/
if
(
!
zero
)
{
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED
(
(
void
*
)
arena_bitselm_get_const
(
chunk
,
map_bias
+
1
),
(
size_t
)((
uintptr_t
)
arena_bitselm_get_const
(
chunk
,
chunk_npages
-
1
)
-
(
uintptr_t
)
arena_bitselm_get_const
(
chunk
,
map_bias
+
1
)));
for
(
i
=
map_bias
+
1
;
i
<
chunk_npages
-
1
;
i
++
)
arena_mapbits_internal_set
(
chunk
,
i
,
flag_unzeroed
);
}
else
{
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED
((
void
*
)
arena_bitselm_get_const
(
chunk
,
map_bias
+
1
),
(
size_t
)((
uintptr_t
)
arena_bitselm_get_const
(
chunk
,
chunk_npages
-
1
)
-
(
uintptr_t
)
arena_bitselm_get_const
(
chunk
,
map_bias
+
1
)));
if
(
config_debug
)
{
for
(
i
=
map_bias
+
1
;
i
<
chunk_npages
-
1
;
i
++
)
{
assert
(
arena_mapbits_unzeroed_get
(
chunk
,
i
)
==
flag_unzeroed
);
}
}
}
arena_mapbits_unallocated_set
(
chunk
,
chunk_npages
-
1
,
arena_maxrun
,
flag_unzeroed
);
return
(
chunk
);
}
static
arena_chunk_t
*
arena_chunk_alloc
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
arena_chunk_t
*
chunk
;
if
(
arena
->
spare
!=
NULL
)
chunk
=
arena_chunk_init_spare
(
arena
);
else
{
chunk
=
arena_chunk_init_hard
(
tsdn
,
arena
);
if
(
chunk
==
NULL
)
return
(
NULL
);
}
ql_elm_new
(
&
chunk
->
node
,
ql_link
);
ql_tail_insert
(
&
arena
->
achunks
,
&
chunk
->
node
,
ql_link
);
arena_avail_insert
(
arena
,
chunk
,
map_bias
,
chunk_npages
-
map_bias
);
return
(
chunk
);
}
static
void
arena_chunk_discard
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_chunk_t
*
chunk
)
{
size_t
sn
,
hugepage
;
bool
committed
;
chunk_hooks_t
chunk_hooks
=
CHUNK_HOOKS_INITIALIZER
;
chunk_deregister
(
chunk
,
&
chunk
->
node
);
sn
=
extent_node_sn_get
(
&
chunk
->
node
);
hugepage
=
chunk
->
hugepage
;
committed
=
(
arena_mapbits_decommitted_get
(
chunk
,
map_bias
)
==
0
);
if
(
!
committed
)
{
/*
* Decommit the header. Mark the chunk as decommitted even if
* header decommit fails, since treating a partially committed
* chunk as committed has a high potential for causing later
* access of decommitted memory.
*/
chunk_hooks
=
chunk_hooks_get
(
tsdn
,
arena
);
chunk_hooks
.
decommit
(
chunk
,
chunksize
,
0
,
map_bias
<<
LG_PAGE
,
arena
->
ind
);
}
if
(
!
hugepage
)
{
/*
* Convert chunk back to the default state, so that all
* subsequent chunk allocations start out with chunks that can
* be backed by transparent huge pages.
*/
pages_huge
(
chunk
,
chunksize
);
}
chunk_dalloc_cache
(
tsdn
,
arena
,
&
chunk_hooks
,
(
void
*
)
chunk
,
chunksize
,
sn
,
committed
);
if
(
config_stats
)
{
arena
->
stats
.
mapped
-=
chunksize
;
arena
->
stats
.
metadata_mapped
-=
(
map_bias
<<
LG_PAGE
);
}
}
static
void
arena_spare_discard
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_chunk_t
*
spare
)
{
assert
(
arena
->
spare
!=
spare
);
if
(
arena_mapbits_dirty_get
(
spare
,
map_bias
)
!=
0
)
{
arena_run_dirty_remove
(
arena
,
spare
,
map_bias
,
chunk_npages
-
map_bias
);
}
arena_chunk_discard
(
tsdn
,
arena
,
spare
);
}
static
void
arena_chunk_dalloc
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_chunk_t
*
chunk
)
{
arena_chunk_t
*
spare
;
assert
(
arena_mapbits_allocated_get
(
chunk
,
map_bias
)
==
0
);
assert
(
arena_mapbits_allocated_get
(
chunk
,
chunk_npages
-
1
)
==
0
);
assert
(
arena_mapbits_unallocated_size_get
(
chunk
,
map_bias
)
==
arena_maxrun
);
assert
(
arena_mapbits_unallocated_size_get
(
chunk
,
chunk_npages
-
1
)
==
arena_maxrun
);
assert
(
arena_mapbits_dirty_get
(
chunk
,
map_bias
)
==
arena_mapbits_dirty_get
(
chunk
,
chunk_npages
-
1
));
assert
(
arena_mapbits_decommitted_get
(
chunk
,
map_bias
)
==
arena_mapbits_decommitted_get
(
chunk
,
chunk_npages
-
1
));
/* Remove run from runs_avail, so that the arena does not use it. */
arena_avail_remove
(
arena
,
chunk
,
map_bias
,
chunk_npages
-
map_bias
);
ql_remove
(
&
arena
->
achunks
,
&
chunk
->
node
,
ql_link
);
spare
=
arena
->
spare
;
arena
->
spare
=
chunk
;
if
(
spare
!=
NULL
)
arena_spare_discard
(
tsdn
,
arena
,
spare
);
}
static
void
arena_huge_malloc_stats_update
(
arena_t
*
arena
,
size_t
usize
)
{
szind_t
index
=
size2index
(
usize
)
-
nlclasses
-
NBINS
;
cassert
(
config_stats
);
arena
->
stats
.
nmalloc_huge
++
;
arena
->
stats
.
allocated_huge
+=
usize
;
arena
->
stats
.
hstats
[
index
].
nmalloc
++
;
arena
->
stats
.
hstats
[
index
].
curhchunks
++
;
}
static
void
arena_huge_malloc_stats_update_undo
(
arena_t
*
arena
,
size_t
usize
)
{
szind_t
index
=
size2index
(
usize
)
-
nlclasses
-
NBINS
;
cassert
(
config_stats
);
arena
->
stats
.
nmalloc_huge
--
;
arena
->
stats
.
allocated_huge
-=
usize
;
arena
->
stats
.
hstats
[
index
].
nmalloc
--
;
arena
->
stats
.
hstats
[
index
].
curhchunks
--
;
}
static
void
arena_huge_dalloc_stats_update
(
arena_t
*
arena
,
size_t
usize
)
{
szind_t
index
=
size2index
(
usize
)
-
nlclasses
-
NBINS
;
cassert
(
config_stats
);
arena
->
stats
.
ndalloc_huge
++
;
arena
->
stats
.
allocated_huge
-=
usize
;
arena
->
stats
.
hstats
[
index
].
ndalloc
++
;
arena
->
stats
.
hstats
[
index
].
curhchunks
--
;
}
static
void
arena_huge_reset_stats_cancel
(
arena_t
*
arena
,
size_t
usize
)
{
szind_t
index
=
size2index
(
usize
)
-
nlclasses
-
NBINS
;
cassert
(
config_stats
);
arena
->
stats
.
ndalloc_huge
++
;
arena
->
stats
.
hstats
[
index
].
ndalloc
--
;
}
static
void
arena_huge_dalloc_stats_update_undo
(
arena_t
*
arena
,
size_t
usize
)
{
szind_t
index
=
size2index
(
usize
)
-
nlclasses
-
NBINS
;
cassert
(
config_stats
);
arena
->
stats
.
ndalloc_huge
--
;
arena
->
stats
.
allocated_huge
+=
usize
;
arena
->
stats
.
hstats
[
index
].
ndalloc
--
;
arena
->
stats
.
hstats
[
index
].
curhchunks
++
;
}
static
void
arena_huge_ralloc_stats_update
(
arena_t
*
arena
,
size_t
oldsize
,
size_t
usize
)
{
arena_huge_dalloc_stats_update
(
arena
,
oldsize
);
arena_huge_malloc_stats_update
(
arena
,
usize
);
}
static
void
arena_huge_ralloc_stats_update_undo
(
arena_t
*
arena
,
size_t
oldsize
,
size_t
usize
)
{
arena_huge_dalloc_stats_update_undo
(
arena
,
oldsize
);
arena_huge_malloc_stats_update_undo
(
arena
,
usize
);
}
extent_node_t
*
arena_node_alloc
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
extent_node_t
*
node
;
malloc_mutex_lock
(
tsdn
,
&
arena
->
node_cache_mtx
);
node
=
ql_last
(
&
arena
->
node_cache
,
ql_link
);
if
(
node
==
NULL
)
{
malloc_mutex_unlock
(
tsdn
,
&
arena
->
node_cache_mtx
);
return
(
base_alloc
(
tsdn
,
sizeof
(
extent_node_t
)));
}
ql_tail_remove
(
&
arena
->
node_cache
,
extent_node_t
,
ql_link
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
node_cache_mtx
);
return
(
node
);
}
void
arena_node_dalloc
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_node_t
*
node
)
{
malloc_mutex_lock
(
tsdn
,
&
arena
->
node_cache_mtx
);
ql_elm_new
(
node
,
ql_link
);
ql_tail_insert
(
&
arena
->
node_cache
,
node
,
ql_link
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
node_cache_mtx
);
}
static
void
*
arena_chunk_alloc_huge_hard
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
chunk_hooks_t
*
chunk_hooks
,
size_t
usize
,
size_t
alignment
,
size_t
*
sn
,
bool
*
zero
,
size_t
csize
)
{
void
*
ret
;
bool
commit
=
true
;
ret
=
chunk_alloc_wrapper
(
tsdn
,
arena
,
chunk_hooks
,
NULL
,
csize
,
alignment
,
sn
,
zero
,
&
commit
);
if
(
ret
==
NULL
)
{
/* Revert optimistic stats updates. */
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
if
(
config_stats
)
{
arena_huge_malloc_stats_update_undo
(
arena
,
usize
);
arena
->
stats
.
mapped
-=
usize
;
}
arena_nactive_sub
(
arena
,
usize
>>
LG_PAGE
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
}
return
(
ret
);
}
void
*
arena_chunk_alloc_huge
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
size_t
usize
,
size_t
alignment
,
size_t
*
sn
,
bool
*
zero
)
{
void
*
ret
;
chunk_hooks_t
chunk_hooks
=
CHUNK_HOOKS_INITIALIZER
;
size_t
csize
=
CHUNK_CEILING
(
usize
);
bool
commit
=
true
;
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
/* Optimistically update stats. */
if
(
config_stats
)
{
arena_huge_malloc_stats_update
(
arena
,
usize
);
arena
->
stats
.
mapped
+=
usize
;
}
arena_nactive_add
(
arena
,
usize
>>
LG_PAGE
);
ret
=
chunk_alloc_cache
(
tsdn
,
arena
,
&
chunk_hooks
,
NULL
,
csize
,
alignment
,
sn
,
zero
,
&
commit
,
true
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
if
(
ret
==
NULL
)
{
ret
=
arena_chunk_alloc_huge_hard
(
tsdn
,
arena
,
&
chunk_hooks
,
usize
,
alignment
,
sn
,
zero
,
csize
);
}
return
(
ret
);
}
void
arena_chunk_dalloc_huge
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
void
*
chunk
,
size_t
usize
,
size_t
sn
)
{
chunk_hooks_t
chunk_hooks
=
CHUNK_HOOKS_INITIALIZER
;
size_t
csize
;
csize
=
CHUNK_CEILING
(
usize
);
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
if
(
config_stats
)
{
arena_huge_dalloc_stats_update
(
arena
,
usize
);
arena
->
stats
.
mapped
-=
usize
;
}
arena_nactive_sub
(
arena
,
usize
>>
LG_PAGE
);
chunk_dalloc_cache
(
tsdn
,
arena
,
&
chunk_hooks
,
chunk
,
csize
,
sn
,
true
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
}
void
arena_chunk_ralloc_huge_similar
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
void
*
chunk
,
size_t
oldsize
,
size_t
usize
)
{
assert
(
CHUNK_CEILING
(
oldsize
)
==
CHUNK_CEILING
(
usize
));
assert
(
oldsize
!=
usize
);
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
if
(
config_stats
)
arena_huge_ralloc_stats_update
(
arena
,
oldsize
,
usize
);
if
(
oldsize
<
usize
)
arena_nactive_add
(
arena
,
(
usize
-
oldsize
)
>>
LG_PAGE
);
else
arena_nactive_sub
(
arena
,
(
oldsize
-
usize
)
>>
LG_PAGE
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
}
void
arena_chunk_ralloc_huge_shrink
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
void
*
chunk
,
size_t
oldsize
,
size_t
usize
,
size_t
sn
)
{
size_t
udiff
=
oldsize
-
usize
;
size_t
cdiff
=
CHUNK_CEILING
(
oldsize
)
-
CHUNK_CEILING
(
usize
);
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
if
(
config_stats
)
{
arena_huge_ralloc_stats_update
(
arena
,
oldsize
,
usize
);
if
(
cdiff
!=
0
)
arena
->
stats
.
mapped
-=
cdiff
;
}
arena_nactive_sub
(
arena
,
udiff
>>
LG_PAGE
);
if
(
cdiff
!=
0
)
{
chunk_hooks_t
chunk_hooks
=
CHUNK_HOOKS_INITIALIZER
;
void
*
nchunk
=
(
void
*
)((
uintptr_t
)
chunk
+
CHUNK_CEILING
(
usize
));
chunk_dalloc_cache
(
tsdn
,
arena
,
&
chunk_hooks
,
nchunk
,
cdiff
,
sn
,
true
);
}
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
}
static
bool
arena_chunk_ralloc_huge_expand_hard
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
chunk_hooks_t
*
chunk_hooks
,
void
*
chunk
,
size_t
oldsize
,
size_t
usize
,
size_t
*
sn
,
bool
*
zero
,
void
*
nchunk
,
size_t
udiff
,
size_t
cdiff
)
{
bool
err
;
bool
commit
=
true
;
err
=
(
chunk_alloc_wrapper
(
tsdn
,
arena
,
chunk_hooks
,
nchunk
,
cdiff
,
chunksize
,
sn
,
zero
,
&
commit
)
==
NULL
);
if
(
err
)
{
/* Revert optimistic stats updates. */
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
if
(
config_stats
)
{
arena_huge_ralloc_stats_update_undo
(
arena
,
oldsize
,
usize
);
arena
->
stats
.
mapped
-=
cdiff
;
}
arena_nactive_sub
(
arena
,
udiff
>>
LG_PAGE
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
}
else
if
(
chunk_hooks
->
merge
(
chunk
,
CHUNK_CEILING
(
oldsize
),
nchunk
,
cdiff
,
true
,
arena
->
ind
))
{
chunk_dalloc_wrapper
(
tsdn
,
arena
,
chunk_hooks
,
nchunk
,
cdiff
,
*
sn
,
*
zero
,
true
);
err
=
true
;
}
return
(
err
);
}
bool
arena_chunk_ralloc_huge_expand
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
void
*
chunk
,
size_t
oldsize
,
size_t
usize
,
bool
*
zero
)
{
bool
err
;
chunk_hooks_t
chunk_hooks
=
chunk_hooks_get
(
tsdn
,
arena
);
void
*
nchunk
=
(
void
*
)((
uintptr_t
)
chunk
+
CHUNK_CEILING
(
oldsize
));
size_t
udiff
=
usize
-
oldsize
;
size_t
cdiff
=
CHUNK_CEILING
(
usize
)
-
CHUNK_CEILING
(
oldsize
);
size_t
sn
;
bool
commit
=
true
;
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
/* Optimistically update stats. */
if
(
config_stats
)
{
arena_huge_ralloc_stats_update
(
arena
,
oldsize
,
usize
);
arena
->
stats
.
mapped
+=
cdiff
;
}
arena_nactive_add
(
arena
,
udiff
>>
LG_PAGE
);
err
=
(
chunk_alloc_cache
(
tsdn
,
arena
,
&
chunk_hooks
,
nchunk
,
cdiff
,
chunksize
,
&
sn
,
zero
,
&
commit
,
true
)
==
NULL
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
if
(
err
)
{
err
=
arena_chunk_ralloc_huge_expand_hard
(
tsdn
,
arena
,
&
chunk_hooks
,
chunk
,
oldsize
,
usize
,
&
sn
,
zero
,
nchunk
,
udiff
,
cdiff
);
}
else
if
(
chunk_hooks
.
merge
(
chunk
,
CHUNK_CEILING
(
oldsize
),
nchunk
,
cdiff
,
true
,
arena
->
ind
))
{
chunk_dalloc_wrapper
(
tsdn
,
arena
,
&
chunk_hooks
,
nchunk
,
cdiff
,
sn
,
*
zero
,
true
);
err
=
true
;
}
return
(
err
);
}
/*
* Do first-best-fit run selection, i.e. select the lowest run that best fits.
* Run sizes are indexed, so not all candidate runs are necessarily exactly the
* same size.
*/
static
arena_run_t
*
arena_run_first_best_fit
(
arena_t
*
arena
,
size_t
size
)
{
pszind_t
pind
,
i
;
pind
=
psz2ind
(
run_quantize_ceil
(
size
));
for
(
i
=
pind
;
pind2sz
(
i
)
<=
chunksize
;
i
++
)
{
arena_chunk_map_misc_t
*
miscelm
=
arena_run_heap_first
(
&
arena
->
runs_avail
[
i
]);
if
(
miscelm
!=
NULL
)
return
(
&
miscelm
->
run
);
}
return
(
NULL
);
}
static
arena_run_t
*
arena_run_alloc_large_helper
(
arena_t
*
arena
,
size_t
size
,
bool
zero
)
{
arena_run_t
*
run
=
arena_run_first_best_fit
(
arena
,
size
);
if
(
run
!=
NULL
)
{
if
(
arena_run_split_large
(
arena
,
run
,
size
,
zero
))
run
=
NULL
;
}
return
(
run
);
}
static
arena_run_t
*
arena_run_alloc_large
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
size_t
size
,
bool
zero
)
{
arena_chunk_t
*
chunk
;
arena_run_t
*
run
;
assert
(
size
<=
arena_maxrun
);
assert
(
size
==
PAGE_CEILING
(
size
));
/* Search the arena's chunks for the lowest best fit. */
run
=
arena_run_alloc_large_helper
(
arena
,
size
,
zero
);
if
(
run
!=
NULL
)
return
(
run
);
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
chunk
=
arena_chunk_alloc
(
tsdn
,
arena
);
if
(
chunk
!=
NULL
)
{
run
=
&
arena_miscelm_get_mutable
(
chunk
,
map_bias
)
->
run
;
if
(
arena_run_split_large
(
arena
,
run
,
size
,
zero
))
run
=
NULL
;
return
(
run
);
}
/*
* arena_chunk_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped arena->lock in
* arena_chunk_alloc(), so search one more time.
*/
return
(
arena_run_alloc_helper
(
arena
,
size
,
large
,
binind
,
zero
));
return
(
arena_run_alloc_
large_
helper
(
arena
,
size
,
zero
));
}
static
inline
void
arena_
maybe_purge
(
arena_t
*
arena
)
static
arena_run_t
*
arena_
run_alloc_small_helper
(
arena_t
*
arena
,
size_t
size
,
szind_t
binind
)
{
size_t
npurgeable
,
threshold
;
arena_run_t
*
run
=
arena_run_first_best_fit
(
arena
,
size
);
if
(
run
!=
NULL
)
{
if
(
arena_run_split_small
(
arena
,
run
,
size
,
binind
))
run
=
NULL
;
}
return
(
run
);
}
static
arena_run_t
*
arena_run_alloc_small
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
size_t
size
,
szind_t
binind
)
{
arena_chunk_t
*
chunk
;
arena_run_t
*
run
;
assert
(
size
<=
arena_maxrun
);
assert
(
size
==
PAGE_CEILING
(
size
));
assert
(
binind
!=
BININD_INVALID
);
/* Search the arena's chunks for the lowest best fit. */
run
=
arena_run_alloc_small_helper
(
arena
,
size
,
binind
);
if
(
run
!=
NULL
)
return
(
run
);
/* Don't purge if the option is disabled. */
if
(
opt_lg_dirty_mult
<
0
)
return
;
/* Don't purge if all dirty pages are already being purged. */
if
(
arena
->
ndirty
<=
arena
->
npurgatory
)
return
;
npurgeable
=
arena
->
ndirty
-
arena
->
npurgatory
;
threshold
=
(
arena
->
nactive
>>
opt_lg_dirty_mult
);
/*
* Don't purge unless the number of purgeable pages exceeds the
* threshold.
* No usable runs. Create a new chunk from which to allocate the run.
*/
if
(
npurgeable
<=
threshold
)
return
;
chunk
=
arena_chunk_alloc
(
tsdn
,
arena
);
if
(
chunk
!=
NULL
)
{
run
=
&
arena_miscelm_get_mutable
(
chunk
,
map_bias
)
->
run
;
if
(
arena_run_split_small
(
arena
,
run
,
size
,
binind
))
run
=
NULL
;
return
(
run
);
}
arena_purge
(
arena
,
false
);
/*
* arena_chunk_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped arena->lock in
* arena_chunk_alloc(), so search one more time.
*/
return
(
arena_run_alloc_small_helper
(
arena
,
size
,
binind
));
}
static
inline
size_t
arena_
chunk_purge
(
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
bool
all
)
static
bool
arena_
lg_dirty_mult_valid
(
ssize_t
lg_dirty_mult
)
{
size_t
npurged
;
ql_head
(
arena_chunk_map_t
)
mapelms
;
arena_chunk_map_t
*
mapelm
;
size_t
pageind
,
npages
;
size_t
nmadvise
;
ql_new
(
&
mapelms
);
return
(
lg_dirty_mult
>=
-
1
&&
lg_dirty_mult
<
(
ssize_t
)(
sizeof
(
size_t
)
<<
3
));
}
ssize_t
arena_lg_dirty_mult_get
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
ssize_t
lg_dirty_mult
;
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
lg_dirty_mult
=
arena
->
lg_dirty_mult
;
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
return
(
lg_dirty_mult
);
}
bool
arena_lg_dirty_mult_set
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
ssize_t
lg_dirty_mult
)
{
if
(
!
arena_lg_dirty_mult_valid
(
lg_dirty_mult
))
return
(
true
);
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
arena
->
lg_dirty_mult
=
lg_dirty_mult
;
arena_maybe_purge
(
tsdn
,
arena
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
return
(
false
);
}
+static void
+arena_decay_deadline_init(arena_t *arena)
+{
+	assert(opt_purge == purge_mode_decay);

	/*
-	 * If chunk is the spare, temporarily re-allocate it, 1) so that its
-	 * run is reinserted into runs_avail, and 2) so that it cannot be
-	 * completely discarded by another thread while arena->lock is dropped
-	 * by this thread.  Note that the arena_run_dalloc() call will
-	 * implicitly deallocate the chunk, so no explicit action is required
-	 * in this function to deallocate the chunk.
-	 *
-	 * Note that once a chunk contains dirty pages, it cannot again contain
-	 * a single run unless 1) it is a dirty run, or 2) this function purges
-	 * dirty pages and causes the transition to a single clean run.  Thus
-	 * (chunk == arena->spare) is possible, but it is not possible for
-	 * this function to be called on the spare unless it contains a dirty
-	 * run.
+	 * Generate a new deadline that is uniformly random within the next
+	 * epoch after the current one.
	 */
-	if (chunk == arena->spare) {
-		assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
-		assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
+	nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
+	nstime_add(&arena->decay.deadline, &arena->decay.interval);
+	if (arena->decay.time > 0) {
+		nstime_t jitter;

-		arena_chunk_alloc(arena);
+		nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
+		    nstime_ns(&arena->decay.interval)));
+		nstime_add(&arena->decay.deadline, &jitter);
+	}
+}

-	if (config_stats)
-		arena->stats.purged += chunk->ndirty;

+static bool
+arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
+{
+	assert(opt_purge == purge_mode_decay);
+
+	return (nstime_compare(&arena->decay.deadline, time) <= 0);
+}

+static size_t
+arena_decay_backlog_npages_limit(const arena_t *arena)
+{
+	static const uint64_t h_steps[] = {
+#define	STEP(step, h, x, y) \
+		h,
+		SMOOTHSTEP
+#undef STEP
+	};
+	uint64_t sum;
+	size_t npages_limit_backlog;
+	unsigned i;
+
+	assert(opt_purge == purge_mode_decay);

	/*
-	 * Operate on all dirty runs if there is no clean/dirty run
-	 * fragmentation.
+	 * For each element of decay_backlog, multiply by the corresponding
+	 * fixed-point smoothstep decay factor.  Sum the products, then divide
+	 * to round down to the nearest whole number of pages.
	 */
-	if (chunk->nruns_adjac == 0)
-		all = true;

+	sum = 0;
+	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
+		sum += arena->decay.backlog[i] * h_steps[i];
+	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
+
+	return (npages_limit_backlog);
+}

+static void
+arena_decay_backlog_update_last(arena_t *arena)
+{
+	size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
+	    arena->ndirty - arena->decay.ndirty : 0;
+	arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
+}

+static void
+arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
+{
+	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
+		memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
+		    sizeof(size_t));
+	} else {
+		size_t nadvance_z = (size_t)nadvance_u64;
+
+		assert((uint64_t)nadvance_z == nadvance_u64);
+
+		memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
+		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
+		if (nadvance_z > 1) {
+			memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
+			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
+		}
+	}
+
+	arena_decay_backlog_update_last(arena);
+}

+static void
+arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
+{
+	uint64_t nadvance_u64;
+	nstime_t delta;
+
+	assert(opt_purge == purge_mode_decay);
+	assert(arena_decay_deadline_reached(arena, time));
+
+	nstime_copy(&delta, time);
+	nstime_subtract(&delta, &arena->decay.epoch);
+	nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
+	assert(nadvance_u64 > 0);
+
+	/* Add nadvance_u64 decay intervals to epoch. */
+	nstime_copy(&delta, &arena->decay.interval);
+	nstime_imultiply(&delta, nadvance_u64);
+	nstime_add(&arena->decay.epoch, &delta);
+
+	/* Set a new deadline. */
+	arena_decay_deadline_init(arena);
+
+	/* Update the backlog. */
+	arena_decay_backlog_update(arena, nadvance_u64);
+}

+static void
+arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
+{
+	size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
+
+	if (arena->ndirty > ndirty_limit)
+		arena_purge_to_limit(tsdn, arena, ndirty_limit);
+	arena->decay.ndirty = arena->ndirty;
+}

+static void
+arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
+{
+	arena_decay_epoch_advance_helper(arena, time);
+	arena_decay_epoch_advance_purge(tsdn, arena);
+}

+static void
+arena_decay_init(arena_t *arena, ssize_t decay_time)
+{
+	arena->decay.time = decay_time;
+	if (decay_time > 0) {
+		nstime_init2(&arena->decay.interval, decay_time, 0);
+		nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
+	}
+
+	nstime_init(&arena->decay.epoch, 0);
+	nstime_update(&arena->decay.epoch);
+	arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
+	arena_decay_deadline_init(arena);
+	arena->decay.ndirty = arena->ndirty;
+	memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
+}
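The backlog limit computed in arena_decay_backlog_npages_limit() above is a weighted sum over the last SMOOTHSTEP_NSTEPS epochs: weights rise from near 0 for the oldest epoch to near 1 for the newest, so recently dirtied pages still count toward the retained limit while older ones decay out of it. A rough standalone sketch with made-up fixed-point weights; NSTEPS, BFP, and h_steps here are illustrative stand-ins, not jemalloc's generated constants:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSTEPS 4      /* jemalloc generates SMOOTHSTEP_NSTEPS entries */
    #define BFP    24     /* fixed-point fractional bits (illustrative) */

    /* Toy weights: oldest epoch gets ~0.06, newest ~0.94 of full weight. */
    static const uint64_t h_steps[NSTEPS] = {
        1 << 20, 4 << 20, 10 << 20, 15 << 20
    };

    /* Mirrors the weighted-sum-then-shift computation above. */
    static size_t
    backlog_npages_limit(const size_t backlog[NSTEPS])
    {
        uint64_t sum = 0;
        for (unsigned i = 0; i < NSTEPS; i++)
            sum += (uint64_t)backlog[i] * h_steps[i];
        return (size_t)(sum >> BFP);
    }

    int
    main(void)
    {
        /* Pages dirtied in each of the last NSTEPS epochs, oldest first. */
        size_t backlog[NSTEPS] = {100, 100, 100, 100};
        printf("%zu\n", backlog_npages_limit(backlog)); /* 187 with these toy numbers */
        return 0;
    }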
+static bool
+arena_decay_time_valid(ssize_t decay_time)
+{
+	if (decay_time < -1)
+		return (false);
+	if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
+		return (true);
+	return (false);
+}

+ssize_t
+arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
+{
+	ssize_t decay_time;
+
+	malloc_mutex_lock(tsdn, &arena->lock);
+	decay_time = arena->decay.time;
+	malloc_mutex_unlock(tsdn, &arena->lock);
+
+	return (decay_time);
+}

+bool
+arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
+{
+	if (!arena_decay_time_valid(decay_time))
+		return (true);
+
+	malloc_mutex_lock(tsdn, &arena->lock);
	/*
-	 * Temporarily allocate free dirty runs within chunk.  If all is false,
-	 * only operate on dirty runs that are fragments; otherwise operate on
-	 * all dirty runs.
+	 * Restart decay backlog from scratch, which may cause many dirty pages
+	 * to be immediately purged.  It would conceptually be possible to map
+	 * the old backlog onto the new backlog, but there is no justification
+	 * for such complexity since decay_time changes are intended to be
+	 * infrequent, either between the {-1, 0, >0} states, or a one-time
+	 * arbitrary change during initial arena configuration.
	 */
-	for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
-		mapelm = arena_mapp_get(chunk, pageind);
-		if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
+	arena_decay_init(arena, decay_time);
+	arena_maybe_purge(tsdn, arena);
+	malloc_mutex_unlock(tsdn, &arena->lock);
+
+	return (false);
+}

+static void
+arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
+{
+	assert(opt_purge == purge_mode_ratio);
+
+	/* Don't purge if the option is disabled. */
+	if (arena->lg_dirty_mult < 0)
+		return;
+
+	/*
+	 * Iterate, since preventing recursive purging could otherwise leave too
+	 * many dirty pages.
+	 */
+	while (true) {
+		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
+		if (threshold < chunk_npages)
+			threshold = chunk_npages;
+		/*
+		 * Don't purge unless the number of purgeable pages exceeds the
+		 * threshold.
+		 */
+		if (arena->ndirty <= threshold)
+			return;
+		arena_purge_to_limit(tsdn, arena, threshold);
+	}
+}

+static void
+arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
+{
+	nstime_t time;
+
+	assert(opt_purge == purge_mode_decay);
+
+	/* Purge all or nothing if the option is disabled. */
+	if (arena->decay.time <= 0) {
+		if (arena->decay.time == 0)
+			arena_purge_to_limit(tsdn, arena, 0);
+		return;
+	}
+
+	nstime_init(&time, 0);
+	nstime_update(&time);
+	if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
+	    &time) > 0)) {
+		/*
+		 * Time went backwards.  Move the epoch back in time and
+		 * generate a new deadline, with the expectation that time
+		 * typically flows forward for long enough periods of time that
+		 * epochs complete.  Unfortunately, this strategy is susceptible
+		 * to clock jitter triggering premature epoch advances, but
+		 * clock jitter estimation and compensation isn't feasible here
+		 * because calls into this code are event-driven.
+		 */
+		nstime_copy(&arena->decay.epoch, &time);
+		arena_decay_deadline_init(arena);
+	} else {
+		/* Verify that time does not go backwards. */
+		assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
+	}
+
+	/*
+	 * If the deadline has been reached, advance to the current epoch and
+	 * purge to the new limit if necessary.  Note that dirty pages created
+	 * during the current epoch are not subject to purge until a future
+	 * epoch, so as a result purging only happens during epoch advances.
+	 */
+	if (arena_decay_deadline_reached(arena, &time))
+		arena_decay_epoch_advance(tsdn, arena, &time);
+}

+void
+arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
+{
+	/* Don't recursively purge. */
+	if (arena->purging)
+		return;
+
+	if (opt_purge == purge_mode_ratio)
+		arena_maybe_purge_ratio(tsdn, arena);
+	else
+		arena_maybe_purge_decay(tsdn, arena);
+}
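For the decay path dispatched above, each deadline is the epoch start plus one interval plus uniformly random jitter in [0, interval), so different arenas do not purge in lockstep. A simplified stand-in for the nstime_t/prng helpers using plain 64-bit nanosecond arithmetic; all names below are hypothetical sketches, not jemalloc APIs:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for arena_decay_deadline_init(): pure uint64 ns. */
    static uint64_t
    decay_deadline(uint64_t epoch_ns, uint64_t interval_ns, uint64_t *prng_state)
    {
        /* xorshift-style step standing in for prng_range_u64(). */
        uint64_t x = *prng_state;
        x ^= x << 13; x ^= x >> 7; x ^= x << 17;
        *prng_state = x;
        return epoch_ns + interval_ns + (x % interval_ns); /* jitter in [0, interval) */
    }

    int
    main(void)
    {
        uint64_t state = 0x9e3779b97f4a7c15ULL;
        /* A 10 s decay time split into 4 intervals of 2.5 s (illustrative). */
        uint64_t deadline = decay_deadline(0, 2500000000ULL, &state);
        printf("%llu\n", (unsigned long long)deadline);
        return 0;
    }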
static
size_t
arena_dirty_count
(
arena_t
*
arena
)
{
size_t
ndirty
=
0
;
arena_runs_dirty_link_t
*
rdelm
;
extent_node_t
*
chunkselm
;
for
(
rdelm
=
qr_next
(
&
arena
->
runs_dirty
,
rd_link
),
chunkselm
=
qr_next
(
&
arena
->
chunks_cache
,
cc_link
);
rdelm
!=
&
arena
->
runs_dirty
;
rdelm
=
qr_next
(
rdelm
,
rd_link
))
{
size_t
npages
;
if
(
rdelm
==
&
chunkselm
->
rd
)
{
npages
=
extent_node_size_get
(
chunkselm
)
>>
LG_PAGE
;
chunkselm
=
qr_next
(
chunkselm
,
cc_link
);
}
else
{
arena_chunk_t
*
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
rdelm
);
arena_chunk_map_misc_t
*
miscelm
=
arena_rd_to_miscelm
(
rdelm
);
size_t
pageind
=
arena_miscelm_to_pageind
(
miscelm
);
assert
(
arena_mapbits_allocated_get
(
chunk
,
pageind
)
==
0
);
assert
(
arena_mapbits_large_get
(
chunk
,
pageind
)
==
0
);
assert
(
arena_mapbits_dirty_get
(
chunk
,
pageind
)
!=
0
);
npages
=
arena_mapbits_unallocated_size_get
(
chunk
,
pageind
)
>>
LG_PAGE
;
}
ndirty
+=
npages
;
}
return
(
ndirty
);
}
static
size_t
arena_stash_dirty
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
chunk_hooks_t
*
chunk_hooks
,
size_t
ndirty_limit
,
arena_runs_dirty_link_t
*
purge_runs_sentinel
,
extent_node_t
*
purge_chunks_sentinel
)
{
arena_runs_dirty_link_t
*
rdelm
,
*
rdelm_next
;
extent_node_t
*
chunkselm
;
size_t
nstashed
=
0
;
/* Stash runs/chunks according to ndirty_limit. */
for
(
rdelm
=
qr_next
(
&
arena
->
runs_dirty
,
rd_link
),
chunkselm
=
qr_next
(
&
arena
->
chunks_cache
,
cc_link
);
rdelm
!=
&
arena
->
runs_dirty
;
rdelm
=
rdelm_next
)
{
size_t
npages
;
rdelm_next
=
qr_next
(
rdelm
,
rd_link
);
if
(
rdelm
==
&
chunkselm
->
rd
)
{
extent_node_t
*
chunkselm_next
;
size_t
sn
;
bool
zero
,
commit
;
UNUSED
void
*
chunk
;
npages
=
extent_node_size_get
(
chunkselm
)
>>
LG_PAGE
;
if
(
opt_purge
==
purge_mode_decay
&&
arena
->
ndirty
-
(
nstashed
+
npages
)
<
ndirty_limit
)
break
;
chunkselm_next
=
qr_next
(
chunkselm
,
cc_link
);
/*
* Allocate. chunkselm remains valid due to the
* dalloc_node=false argument to chunk_alloc_cache().
*/
zero
=
false
;
commit
=
false
;
chunk
=
chunk_alloc_cache
(
tsdn
,
arena
,
chunk_hooks
,
extent_node_addr_get
(
chunkselm
),
extent_node_size_get
(
chunkselm
),
chunksize
,
&
sn
,
&
zero
,
&
commit
,
false
);
assert
(
chunk
==
extent_node_addr_get
(
chunkselm
));
assert
(
zero
==
extent_node_zeroed_get
(
chunkselm
));
extent_node_dirty_insert
(
chunkselm
,
purge_runs_sentinel
,
purge_chunks_sentinel
);
assert
(
npages
==
(
extent_node_size_get
(
chunkselm
)
>>
LG_PAGE
));
chunkselm
=
chunkselm_next
;
}
else
{
arena_chunk_t
*
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
rdelm
);
arena_chunk_map_misc_t
*
miscelm
=
arena_rd_to_miscelm
(
rdelm
);
size_t
pageind
=
arena_miscelm_to_pageind
(
miscelm
);
arena_run_t
*
run
=
&
miscelm
->
run
;
size_t
run_size
=
arena_mapbits_unallocated_size_get
(
chunk
,
pageind
);
npages
=
run_size
>>
LG_PAGE
;
if
(
opt_purge
==
purge_mode_decay
&&
arena
->
ndirty
-
(
nstashed
+
npages
)
<
ndirty_limit
)
break
;
assert
(
pageind
+
npages
<=
chunk_npages
);
assert
(
arena_mapbits_dirty_get
(
chunk
,
pageind
)
==
arena_mapbits_dirty_get
(
chunk
,
pageind
+
npages
-
1
));
if
(
arena_mapbits_dirty_get
(
chunk
,
pageind
)
!=
0
&&
(
all
||
arena_avail_adjac
(
chunk
,
pageind
,
npages
)))
{
arena_run_t
*
run
=
(
arena_run_t
*
)((
uintptr_t
)
chunk
+
(
uintptr_t
)(
pageind
<<
LG_PAGE
));
arena_run_split
(
arena
,
run
,
run_size
,
true
,
BININD_INVALID
,
false
);
/* Append to list for later processing. */
ql_elm_new
(
mapelm
,
u
.
ql_link
);
ql_tail_insert
(
&
mapelms
,
mapelm
,
u
.
ql_link
);
}
}
else
{
/* Skip run. */
if
(
arena_mapbits_large_get
(
chunk
,
pageind
)
!=
0
)
{
npages
=
arena_mapbits_large_size_get
(
chunk
,
pageind
)
>>
LG_PAGE
;
}
else
{
size_t
binind
;
arena_bin_info_t
*
bin_info
;
arena_run_t
*
run
=
(
arena_run_t
*
)((
uintptr_t
)
chunk
+
(
uintptr_t
)(
pageind
<<
LG_PAGE
));
assert
(
arena_mapbits_small_runind_get
(
chunk
,
pageind
)
==
0
);
binind
=
arena_bin_index
(
arena
,
run
->
bin
);
bin_info
=
&
arena_bin_info
[
binind
];
npages
=
bin_info
->
run_size
>>
LG_PAGE
;
/*
* If purging the spare chunk's run, make it available
* prior to allocation.
*/
if
(
chunk
==
arena
->
spare
)
arena_chunk_alloc
(
tsdn
,
arena
);
/* Temporarily allocate the free dirty run. */
arena_run_split_large
(
arena
,
run
,
run_size
,
false
);
/* Stash. */
if
(
false
)
qr_new
(
rdelm
,
rd_link
);
/* Redundant. */
else
{
assert
(
qr_next
(
rdelm
,
rd_link
)
==
rdelm
);
assert
(
qr_prev
(
rdelm
,
rd_link
)
==
rdelm
);
}
qr_meld
(
purge_runs_sentinel
,
rdelm
,
rd_link
);
}
nstashed
+=
npages
;
if
(
opt_purge
==
purge_mode_ratio
&&
arena
->
ndirty
-
nstashed
<=
ndirty_limit
)
break
;
}
assert
(
pageind
==
chunk_npages
);
assert
(
chunk
->
ndirty
==
0
||
all
==
false
);
assert
(
chunk
->
nruns_adjac
==
0
);
malloc_mutex_unlock
(
&
arena
->
lock
);
return
(
nstashed
);
}
static
size_t
arena_purge_stashed
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
chunk_hooks_t
*
chunk_hooks
,
arena_runs_dirty_link_t
*
purge_runs_sentinel
,
extent_node_t
*
purge_chunks_sentinel
)
{
size_t
npurged
,
nmadvise
;
arena_runs_dirty_link_t
*
rdelm
;
extent_node_t
*
chunkselm
;
if
(
config_stats
)
nmadvise
=
0
;
npurged
=
0
;
ql_foreach
(
mapelm
,
&
mapelms
,
u
.
ql_link
)
{
bool
unzeroed
;
size_t
flag_unzeroed
,
i
;
pageind
=
(((
uintptr_t
)
mapelm
-
(
uintptr_t
)
chunk
->
map
)
/
sizeof
(
arena_chunk_map_t
))
+
map_bias
;
npages
=
arena_mapbits_large_size_get
(
chunk
,
pageind
)
>>
LG_PAGE
;
assert
(
pageind
+
npages
<=
chunk_npages
);
unzeroed
=
pages_purge
((
void
*
)((
uintptr_t
)
chunk
+
(
pageind
<<
LG_PAGE
)),
(
npages
<<
LG_PAGE
));
flag_unzeroed
=
unzeroed
?
CHUNK_MAP_UNZEROED
:
0
;
/*
* Set the unzeroed flag for all pages, now that pages_purge()
* has returned whether the pages were zeroed as a side effect
* of purging. This chunk map modification is safe even though
* the arena mutex isn't currently owned by this thread,
* because the run is marked as allocated, thus protecting it
* from being modified by any other thread. As long as these
* writes don't perturb the first and last elements'
* CHUNK_MAP_ALLOCATED bits, behavior is well defined.
*/
for
(
i
=
0
;
i
<
npages
;
i
++
)
{
arena_mapbits_unzeroed_set
(
chunk
,
pageind
+
i
,
flag_unzeroed
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
for
(
rdelm
=
qr_next
(
purge_runs_sentinel
,
rd_link
),
chunkselm
=
qr_next
(
purge_chunks_sentinel
,
cc_link
);
rdelm
!=
purge_runs_sentinel
;
rdelm
=
qr_next
(
rdelm
,
rd_link
))
{
size_t
npages
;
if
(
rdelm
==
&
chunkselm
->
rd
)
{
/*
* Don't actually purge the chunk here because 1)
* chunkselm is embedded in the chunk and must remain
* valid, and 2) we deallocate the chunk in
* arena_unstash_purged(), where it is destroyed,
* decommitted, or purged, depending on chunk
* deallocation policy.
*/
size_t
size
=
extent_node_size_get
(
chunkselm
);
npages
=
size
>>
LG_PAGE
;
chunkselm
=
qr_next
(
chunkselm
,
cc_link
);
}
else
{
size_t
pageind
,
run_size
,
flag_unzeroed
,
flags
,
i
;
bool
decommitted
;
arena_chunk_t
*
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
rdelm
);
arena_chunk_map_misc_t
*
miscelm
=
arena_rd_to_miscelm
(
rdelm
);
pageind
=
arena_miscelm_to_pageind
(
miscelm
);
run_size
=
arena_mapbits_large_size_get
(
chunk
,
pageind
);
npages
=
run_size
>>
LG_PAGE
;
/*
* If this is the first run purged within chunk, mark
* the chunk as non-huge. This will prevent all use of
* transparent huge pages for this chunk until the chunk
* as a whole is deallocated.
*/
if
(
chunk
->
hugepage
)
{
pages_nohuge
(
chunk
,
chunksize
);
chunk
->
hugepage
=
false
;
}
assert
(
pageind
+
npages
<=
chunk_npages
);
assert
(
!
arena_mapbits_decommitted_get
(
chunk
,
pageind
));
assert
(
!
arena_mapbits_decommitted_get
(
chunk
,
pageind
+
npages
-
1
));
decommitted
=
!
chunk_hooks
->
decommit
(
chunk
,
chunksize
,
pageind
<<
LG_PAGE
,
npages
<<
LG_PAGE
,
arena
->
ind
);
if
(
decommitted
)
{
flag_unzeroed
=
0
;
flags
=
CHUNK_MAP_DECOMMITTED
;
}
else
{
flag_unzeroed
=
chunk_purge_wrapper
(
tsdn
,
arena
,
chunk_hooks
,
chunk
,
chunksize
,
pageind
<<
LG_PAGE
,
run_size
)
?
CHUNK_MAP_UNZEROED
:
0
;
flags
=
flag_unzeroed
;
}
arena_mapbits_large_set
(
chunk
,
pageind
+
npages
-
1
,
0
,
flags
);
arena_mapbits_large_set
(
chunk
,
pageind
,
run_size
,
flags
);
/*
* Set the unzeroed flag for internal pages, now that
* chunk_purge_wrapper() has returned whether the pages
* were zeroed as a side effect of purging. This chunk
* map modification is safe even though the arena mutex
* isn't currently owned by this thread, because the run
* is marked as allocated, thus protecting it from being
* modified by any other thread. As long as these
* writes don't perturb the first and last elements'
* CHUNK_MAP_ALLOCATED bits, behavior is well defined.
*/
for
(
i
=
1
;
i
<
npages
-
1
;
i
++
)
{
arena_mapbits_internal_set
(
chunk
,
pageind
+
i
,
flag_unzeroed
);
}
}
npurged
+=
npages
;
if
(
config_stats
)
nmadvise
++
;
}
malloc_mutex_lock
(
&
arena
->
lock
);
if
(
config_stats
)
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
if
(
config_stats
)
{
arena
->
stats
.
nmadvise
+=
nmadvise
;
arena
->
stats
.
purged
+=
npurged
;
}
/* Deallocate runs. */
for
(
mapelm
=
ql_first
(
&
mapelms
);
mapelm
!=
NULL
;
mapelm
=
ql_first
(
&
mapelms
))
{
arena_run_t
*
run
;
return
(
npurged
);
}
pageind
=
(((
uintptr_t
)
mapelm
-
(
uintptr_t
)
chunk
->
map
)
/
sizeof
(
arena_chunk_map_t
))
+
map_bias
;
run
=
(
arena_run_t
*
)((
uintptr_t
)
chunk
+
(
uintptr_t
)(
pageind
<<
LG_PAGE
));
ql_remove
(
&
mapelms
,
mapelm
,
u
.
ql_link
);
arena_run_dalloc
(
arena
,
run
,
false
,
true
);
static
void
arena_unstash_purged
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
chunk_hooks_t
*
chunk_hooks
,
arena_runs_dirty_link_t
*
purge_runs_sentinel
,
extent_node_t
*
purge_chunks_sentinel
)
{
arena_runs_dirty_link_t
*
rdelm
,
*
rdelm_next
;
extent_node_t
*
chunkselm
;
/* Deallocate chunks/runs. */
for
(
rdelm
=
qr_next
(
purge_runs_sentinel
,
rd_link
),
chunkselm
=
qr_next
(
purge_chunks_sentinel
,
cc_link
);
rdelm
!=
purge_runs_sentinel
;
rdelm
=
rdelm_next
)
{
rdelm_next
=
qr_next
(
rdelm
,
rd_link
);
if
(
rdelm
==
&
chunkselm
->
rd
)
{
extent_node_t
*
chunkselm_next
=
qr_next
(
chunkselm
,
cc_link
);
void
*
addr
=
extent_node_addr_get
(
chunkselm
);
size_t
size
=
extent_node_size_get
(
chunkselm
);
size_t
sn
=
extent_node_sn_get
(
chunkselm
);
bool
zeroed
=
extent_node_zeroed_get
(
chunkselm
);
bool
committed
=
extent_node_committed_get
(
chunkselm
);
extent_node_dirty_remove
(
chunkselm
);
arena_node_dalloc
(
tsdn
,
arena
,
chunkselm
);
chunkselm
=
chunkselm_next
;
chunk_dalloc_wrapper
(
tsdn
,
arena
,
chunk_hooks
,
addr
,
size
,
sn
,
zeroed
,
committed
);
}
else
{
arena_chunk_t
*
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
rdelm
);
arena_chunk_map_misc_t
*
miscelm
=
arena_rd_to_miscelm
(
rdelm
);
size_t
pageind
=
arena_miscelm_to_pageind
(
miscelm
);
bool
decommitted
=
(
arena_mapbits_decommitted_get
(
chunk
,
pageind
)
!=
0
);
arena_run_t
*
run
=
&
miscelm
->
run
;
qr_remove
(
rdelm
,
rd_link
);
arena_run_dalloc
(
tsdn
,
arena
,
run
,
false
,
true
,
decommitted
);
}
}
}
return
(
npurged
);
+/*
+ * NB: ndirty_limit is interpreted differently depending on opt_purge:
+ *   - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
+ *                       desired state:
+ *                       (arena->ndirty <= ndirty_limit)
+ *   - purge_mode_decay: Purge as many dirty runs/chunks as possible without
+ *                       violating the invariant:
+ *                       (arena->ndirty >= ndirty_limit)
+ */
+static void
+arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
+{
+	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
+	size_t npurge, npurged;
+	arena_runs_dirty_link_t purge_runs_sentinel;
+	extent_node_t purge_chunks_sentinel;
+
+	arena->purging = true;
+
+	/*
+	 * Calls to arena_dirty_count() are disabled even for debug builds
+	 * because overhead grows nonlinearly as memory usage increases.
+	 */
+	if (false && config_debug) {
+		size_t ndirty = arena_dirty_count(arena);
+		assert(ndirty == arena->ndirty);
+	}
+	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
+	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
+
+	qr_new(&purge_runs_sentinel, rd_link);
+	extent_node_dirty_linkage_init(&purge_chunks_sentinel);
+
+	npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
+	    &purge_runs_sentinel, &purge_chunks_sentinel);
+	if (npurge == 0)
+		goto label_return;
+	npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
+	    &purge_runs_sentinel, &purge_chunks_sentinel);
+	assert(npurged == npurge);
+	arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
+	    &purge_chunks_sentinel);
+
+	if (config_stats)
+		arena->stats.npurge++;
+
+label_return:
+	arena->purging = false;
+}

-static arena_chunk_t *
-chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
+void
+arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
 {
+	malloc_mutex_lock(tsdn, &arena->lock);
+	if (all)
+		arena_purge_to_limit(tsdn, arena, 0);
+	else
+		arena_maybe_purge(tsdn, arena);
+	malloc_mutex_unlock(tsdn, &arena->lock);
 }
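To make the two readings of ndirty_limit in the comment above concrete: in decay mode, arena_stash_dirty() stops before a run that would drop ndirty below the limit, while in ratio mode it stops once ndirty has been brought down to the limit. A purely illustrative sketch of those break conditions; stop_stashing() is hypothetical and only mirrors the inequalities:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Would stashing a run of npages pages end the stashing loop? */
    static bool
    stop_stashing(bool decay_mode, size_t ndirty, size_t nstashed,
        size_t npages, size_t ndirty_limit)
    {
        if (decay_mode)     /* stop if this run would violate ndirty >= limit */
            return ndirty - (nstashed + npages) < ndirty_limit;
        /* ratio mode: stop once ndirty <= limit would be reached */
        return ndirty - (nstashed + npages) <= ndirty_limit;
    }

    int
    main(void)
    {
        /* 1000 dirty pages, 200 already stashed, candidate run of 250,
         * limit 550: decay mode still takes the run (leaving exactly the
         * limit), ratio mode stops as soon as the limit is reached. */
        printf("decay stops: %d, ratio stops: %d\n",
            stop_stashing(true, 1000, 200, 250, 550),
            stop_stashing(false, 1000, 200, 250, 550));
        return 0;
    }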
static
void
arena_achunk_prof_reset
(
tsd_t
*
tsd
,
arena_t
*
arena
,
arena_chunk_t
*
chunk
)
{
size_t
*
ndirty
=
(
size_t
*
)
arg
;
size_t
pageind
,
npages
;
cassert
(
config_prof
);
assert
(
opt_prof
);
assert
(
chunk
->
ndirty
!=
0
);
*
ndirty
+=
chunk
->
ndirty
;
return
(
NULL
);
/*
* Iterate over the allocated runs and remove profiled allocations from
* the sample set.
*/
for
(
pageind
=
map_bias
;
pageind
<
chunk_npages
;
pageind
+=
npages
)
{
if
(
arena_mapbits_allocated_get
(
chunk
,
pageind
)
!=
0
)
{
if
(
arena_mapbits_large_get
(
chunk
,
pageind
)
!=
0
)
{
void
*
ptr
=
(
void
*
)((
uintptr_t
)
chunk
+
(
pageind
<<
LG_PAGE
));
size_t
usize
=
isalloc
(
tsd_tsdn
(
tsd
),
ptr
,
config_prof
);
prof_free
(
tsd
,
ptr
,
usize
);
npages
=
arena_mapbits_large_size_get
(
chunk
,
pageind
)
>>
LG_PAGE
;
}
else
{
/* Skip small run. */
size_t
binind
=
arena_mapbits_binind_get
(
chunk
,
pageind
);
arena_bin_info_t
*
bin_info
=
&
arena_bin_info
[
binind
];
npages
=
bin_info
->
run_size
>>
LG_PAGE
;
}
}
else
{
/* Skip unallocated run. */
npages
=
arena_mapbits_unallocated_size_get
(
chunk
,
pageind
)
>>
LG_PAGE
;
}
assert
(
pageind
+
npages
<=
chunk_npages
);
}
}
static
void
arena_
purge
(
arena_t
*
arena
,
bool
all
)
void
arena_
reset
(
tsd_t
*
tsd
,
arena_t
*
arena
)
{
arena_chunk_t
*
chunk
;
size_t
npurgatory
;
if
(
config_debug
)
{
size_t
ndirty
=
0
;
arena_chunk_dirty_iter
(
&
arena
->
chunks_dirty
,
NULL
,
chunks_dirty_iter_cb
,
(
void
*
)
&
ndirty
);
assert
(
ndirty
==
arena
->
ndirty
);
}
assert
(
arena
->
ndirty
>
arena
->
npurgatory
||
all
);
assert
((
arena
->
nactive
>>
opt_lg_dirty_mult
)
<
(
arena
->
ndirty
-
arena
->
npurgatory
)
||
all
);
if
(
config_stats
)
arena
->
stats
.
npurge
++
;
unsigned
i
;
extent_node_t
*
node
;
/*
* Compute the minimum number of pages that this thread should try to
* purge, and add the result to arena->npurgatory. This will keep
* multiple threads from racing to reduce ndirty below the threshold.
* Locking in this function is unintuitive. The caller guarantees that
* no concurrent operations are happening in this arena, but there are
* still reasons that some locking is necessary:
*
* - Some of the functions in the transitive closure of calls assume
* appropriate locks are held, and in some cases these locks are
* temporarily dropped to avoid lock order reversal or deadlock due to
* reentry.
* - mallctl("epoch", ...) may concurrently refresh stats. While
* strictly speaking this is a "concurrent operation", disallowing
* stats refreshes would impose an inconvenient burden.
*/
{
size_t
npurgeable
=
arena
->
ndirty
-
arena
->
npurgatory
;
if
(
all
==
false
)
{
size_t
threshold
=
(
arena
->
nactive
>>
opt_lg_dirty_mult
);
/* Remove large allocations from prof sample set. */
if
(
config_prof
&&
opt_prof
)
{
ql_foreach
(
node
,
&
arena
->
achunks
,
ql_link
)
{
arena_achunk_prof_reset
(
tsd
,
arena
,
extent_node_addr_get
(
node
));
}
}
npurgatory
=
npurgeable
-
threshold
;
}
else
npurgatory
=
npurgeable
;
/* Reset curruns for large size classes. */
if
(
config_stats
)
{
for
(
i
=
0
;
i
<
nlclasses
;
i
++
)
arena
->
stats
.
lstats
[
i
].
curruns
=
0
;
}
/* Huge allocations. */
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
arena
->
huge_mtx
);
for
(
node
=
ql_last
(
&
arena
->
huge
,
ql_link
);
node
!=
NULL
;
node
=
ql_last
(
&
arena
->
huge
,
ql_link
))
{
void
*
ptr
=
extent_node_addr_get
(
node
);
size_t
usize
;
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
arena
->
huge_mtx
);
if
(
config_stats
||
(
config_prof
&&
opt_prof
))
usize
=
isalloc
(
tsd_tsdn
(
tsd
),
ptr
,
config_prof
);
/* Remove huge allocation from prof sample set. */
if
(
config_prof
&&
opt_prof
)
prof_free
(
tsd
,
ptr
,
usize
);
huge_dalloc
(
tsd_tsdn
(
tsd
),
ptr
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
arena
->
huge_mtx
);
/* Cancel out unwanted effects on stats. */
if
(
config_stats
)
arena_huge_reset_stats_cancel
(
arena
,
usize
);
}
arena
->
npurgatory
+=
npurgatory
;
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
arena
->
huge_mtx
)
;
while
(
npurgatory
>
0
)
{
size_t
npurgeable
,
npurged
,
nunpurged
;
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
arena
->
lock
);
/* Get next chunk with dirty pages. */
chunk
=
arena_chunk_dirty_first
(
&
arena
->
chunks_dirty
);
if
(
chunk
==
NULL
)
{
/*
* This thread was unable to purge as many pages as
* originally intended, due to races with other threads
* that either did some of the purging work, or re-used
* dirty pages.
*/
arena
->
npurgatory
-=
npurgatory
;
return
;
/* Bins. */
for
(
i
=
0
;
i
<
NBINS
;
i
++
)
{
arena_bin_t
*
bin
=
&
arena
->
bins
[
i
];
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
bin
->
lock
);
bin
->
runcur
=
NULL
;
arena_run_heap_new
(
&
bin
->
runs
);
if
(
config_stats
)
{
bin
->
stats
.
curregs
=
0
;
bin
->
stats
.
curruns
=
0
;
}
npurgeable
=
chunk
->
ndirty
;
assert
(
npurgeable
!=
0
);
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
bin
->
lock
)
;
}
if
(
npurgeable
>
npurgatory
&&
chunk
->
nruns_adjac
==
0
)
{
/*
* This thread will purge all the dirty pages in chunk,
* so set npurgatory to reflect this thread's intent to
* purge the pages. This tends to reduce the chances
* of the following scenario:
*
* 1) This thread sets arena->npurgatory such that
* (arena->ndirty - arena->npurgatory) is at the
* threshold.
* 2) This thread drops arena->lock.
* 3) Another thread causes one or more pages to be
* dirtied, and immediately determines that it must
* purge dirty pages.
*
* If this scenario *does* play out, that's okay,
* because all of the purging work being done really
* needs to happen.
*/
arena
->
npurgatory
+=
npurgeable
-
npurgatory
;
npurgatory
=
npurgeable
;
}
/*
* Re-initialize runs_dirty such that the chunks_cache and runs_dirty
* chains directly correspond.
*/
qr_new
(
&
arena
->
runs_dirty
,
rd_link
);
for
(
node
=
qr_next
(
&
arena
->
chunks_cache
,
cc_link
);
node
!=
&
arena
->
chunks_cache
;
node
=
qr_next
(
node
,
cc_link
))
{
qr_new
(
&
node
->
rd
,
rd_link
);
qr_meld
(
&
arena
->
runs_dirty
,
&
node
->
rd
,
rd_link
);
}
/*
* Keep track of how many pages are purgeable, versus how many
* actually get purged, and adjust counters accordingly.
*/
arena
->
npurgatory
-=
npurgeable
;
npurgatory
-=
npurgeable
;
npurged
=
arena_chunk_purge
(
arena
,
chunk
,
all
);
nunpurged
=
npurgeable
-
npurged
;
arena
->
npurgatory
+=
nunpurged
;
npurgatory
+=
nunpurged
;
/* Arena chunks. */
for
(
node
=
ql_last
(
&
arena
->
achunks
,
ql_link
);
node
!=
NULL
;
node
=
ql_last
(
&
arena
->
achunks
,
ql_link
))
{
ql_remove
(
&
arena
->
achunks
,
node
,
ql_link
);
arena_chunk_discard
(
tsd_tsdn
(
tsd
),
arena
,
extent_node_addr_get
(
node
));
}
}
void
arena_purge_all
(
arena_t
*
arena
)
{
/* Spare. */
if
(
arena
->
spare
!=
NULL
)
{
arena_chunk_discard
(
tsd_tsdn
(
tsd
),
arena
,
arena
->
spare
);
arena
->
spare
=
NULL
;
}
assert
(
!
arena
->
purging
);
arena
->
nactive
=
0
;
for
(
i
=
0
;
i
<
NPSIZES
;
i
++
)
arena_run_heap_new
(
&
arena
->
runs_avail
[
i
]);
malloc_mutex_lock
(
&
arena
->
lock
);
arena_purge
(
arena
,
true
);
malloc_mutex_unlock
(
&
arena
->
lock
);
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
arena
->
lock
);
}
static
void
arena_run_dalloc
(
arena_t
*
arena
,
arena_run_t
*
run
,
bool
dirty
,
bool
cleaned
)
arena_run_coalesce
(
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
size_t
*
p_size
,
size_t
*
p_run_ind
,
size_t
*
p_run_pages
,
size_t
flag_dirty
,
size_t
flag_decommitted
)
{
arena_chunk_t
*
chunk
;
size_t
size
,
run_ind
,
run_pages
,
flag_dirty
;
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
run
);
run_ind
=
(
size_t
)(((
uintptr_t
)
run
-
(
uintptr_t
)
chunk
)
>>
LG_PAGE
);
assert
(
run_ind
>=
map_bias
);
assert
(
run_ind
<
chunk_npages
);
if
(
arena_mapbits_large_get
(
chunk
,
run_ind
)
!=
0
)
{
size
=
arena_mapbits_large_size_get
(
chunk
,
run_ind
);
assert
(
size
==
PAGE
||
arena_mapbits_large_size_get
(
chunk
,
run_ind
+
(
size
>>
LG_PAGE
)
-
1
)
==
0
);
}
else
{
size_t
binind
=
arena_bin_index
(
arena
,
run
->
bin
);
arena_bin_info_t
*
bin_info
=
&
arena_bin_info
[
binind
];
size
=
bin_info
->
run_size
;
}
run_pages
=
(
size
>>
LG_PAGE
);
if
(
config_stats
)
{
/*
* Update stats_cactive if nactive is crossing a chunk
* multiple.
*/
size_t
cactive_diff
=
CHUNK_CEILING
(
arena
->
nactive
<<
LG_PAGE
)
-
CHUNK_CEILING
((
arena
->
nactive
-
run_pages
)
<<
LG_PAGE
);
if
(
cactive_diff
!=
0
)
stats_cactive_sub
(
cactive_diff
);
}
arena
->
nactive
-=
run_pages
;
/*
* The run is dirty if the caller claims to have dirtied it, as well as
* if it was already dirty before being allocated and the caller
* doesn't claim to have cleaned it.
*/
assert
(
arena_mapbits_dirty_get
(
chunk
,
run_ind
)
==
arena_mapbits_dirty_get
(
chunk
,
run_ind
+
run_pages
-
1
));
if
(
cleaned
==
false
&&
arena_mapbits_dirty_get
(
chunk
,
run_ind
)
!=
0
)
dirty
=
true
;
flag_dirty
=
dirty
?
CHUNK_MAP_DIRTY
:
0
;
/* Mark pages as unallocated in the chunk map. */
if
(
dirty
)
{
arena_mapbits_unallocated_set
(
chunk
,
run_ind
,
size
,
CHUNK_MAP_DIRTY
);
arena_mapbits_unallocated_set
(
chunk
,
run_ind
+
run_pages
-
1
,
size
,
CHUNK_MAP_DIRTY
);
}
else
{
arena_mapbits_unallocated_set
(
chunk
,
run_ind
,
size
,
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
));
arena_mapbits_unallocated_set
(
chunk
,
run_ind
+
run_pages
-
1
,
size
,
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
+
run_pages
-
1
));
}
size_t
size
=
*
p_size
;
size_t
run_ind
=
*
p_run_ind
;
size_t
run_pages
=
*
p_run_pages
;
/* Try to coalesce forward. */
if
(
run_ind
+
run_pages
<
chunk_npages
&&
arena_mapbits_allocated_get
(
chunk
,
run_ind
+
run_pages
)
==
0
&&
arena_mapbits_dirty_get
(
chunk
,
run_ind
+
run_pages
)
==
flag_dirty
)
{
arena_mapbits_dirty_get
(
chunk
,
run_ind
+
run_pages
)
==
flag_dirty
&&
arena_mapbits_decommitted_get
(
chunk
,
run_ind
+
run_pages
)
==
flag_decommitted
)
{
size_t
nrun_size
=
arena_mapbits_unallocated_size_get
(
chunk
,
run_ind
+
run_pages
);
size_t
nrun_pages
=
nrun_size
>>
LG_PAGE
;
...
...
@@ -1030,8 +2030,18 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
run_ind
+
run_pages
+
nrun_pages
-
1
)
==
nrun_size
);
assert
(
arena_mapbits_dirty_get
(
chunk
,
run_ind
+
run_pages
+
nrun_pages
-
1
)
==
flag_dirty
);
arena_avail_remove
(
arena
,
chunk
,
run_ind
+
run_pages
,
nrun_pages
,
false
,
true
);
assert
(
arena_mapbits_decommitted_get
(
chunk
,
run_ind
+
run_pages
+
nrun_pages
-
1
)
==
flag_decommitted
);
arena_avail_remove
(
arena
,
chunk
,
run_ind
+
run_pages
,
nrun_pages
);
/*
* If the successor is dirty, remove it from the set of dirty
* pages.
*/
if
(
flag_dirty
!=
0
)
{
arena_run_dirty_remove
(
arena
,
chunk
,
run_ind
+
run_pages
,
nrun_pages
);
}
size
+=
nrun_size
;
run_pages
+=
nrun_pages
;
...
...
@@ -1042,8 +2052,10 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
}
/* Try to coalesce backward. */
if
(
run_ind
>
map_bias
&&
arena_mapbits_allocated_get
(
chunk
,
run_ind
-
1
)
==
0
&&
arena_mapbits_dirty_get
(
chunk
,
run_ind
-
1
)
==
flag_dirty
)
{
if
(
run_ind
>
map_bias
&&
arena_mapbits_allocated_get
(
chunk
,
run_ind
-
1
)
==
0
&&
arena_mapbits_dirty_get
(
chunk
,
run_ind
-
1
)
==
flag_dirty
&&
arena_mapbits_decommitted_get
(
chunk
,
run_ind
-
1
)
==
flag_decommitted
)
{
size_t
prun_size
=
arena_mapbits_unallocated_size_get
(
chunk
,
run_ind
-
1
);
size_t
prun_pages
=
prun_size
>>
LG_PAGE
;
...
...
@@ -1057,8 +2069,18 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
assert
(
arena_mapbits_unallocated_size_get
(
chunk
,
run_ind
)
==
prun_size
);
assert
(
arena_mapbits_dirty_get
(
chunk
,
run_ind
)
==
flag_dirty
);
arena_avail_remove
(
arena
,
chunk
,
run_ind
,
prun_pages
,
true
,
false
);
assert
(
arena_mapbits_decommitted_get
(
chunk
,
run_ind
)
==
flag_decommitted
);
arena_avail_remove
(
arena
,
chunk
,
run_ind
,
prun_pages
);
/*
* If the predecessor is dirty, remove it from the set of dirty
* pages.
*/
if
(
flag_dirty
!=
0
)
{
arena_run_dirty_remove
(
arena
,
chunk
,
run_ind
,
prun_pages
);
}
size
+=
prun_size
;
run_pages
+=
prun_pages
;
...
...
@@ -1068,18 +2090,95 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
size
);
}
*
p_size
=
size
;
*
p_run_ind
=
run_ind
;
*
p_run_pages
=
run_pages
;
}
static
size_t
arena_run_size_get
(
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
arena_run_t
*
run
,
size_t
run_ind
)
{
size_t
size
;
assert
(
run_ind
>=
map_bias
);
assert
(
run_ind
<
chunk_npages
);
if
(
arena_mapbits_large_get
(
chunk
,
run_ind
)
!=
0
)
{
size
=
arena_mapbits_large_size_get
(
chunk
,
run_ind
);
assert
(
size
==
PAGE
||
arena_mapbits_large_size_get
(
chunk
,
run_ind
+
(
size
>>
LG_PAGE
)
-
1
)
==
0
);
}
else
{
arena_bin_info_t
*
bin_info
=
&
arena_bin_info
[
run
->
binind
];
size
=
bin_info
->
run_size
;
}
return
(
size
);
}
static
void
arena_run_dalloc
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_run_t
*
run
,
bool
dirty
,
bool
cleaned
,
bool
decommitted
)
{
arena_chunk_t
*
chunk
;
arena_chunk_map_misc_t
*
miscelm
;
size_t
size
,
run_ind
,
run_pages
,
flag_dirty
,
flag_decommitted
;
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
run
);
miscelm
=
arena_run_to_miscelm
(
run
);
run_ind
=
arena_miscelm_to_pageind
(
miscelm
);
assert
(
run_ind
>=
map_bias
);
assert
(
run_ind
<
chunk_npages
);
size
=
arena_run_size_get
(
arena
,
chunk
,
run
,
run_ind
);
run_pages
=
(
size
>>
LG_PAGE
);
arena_nactive_sub
(
arena
,
run_pages
);
/*
* The run is dirty if the caller claims to have dirtied it, as well as
* if it was already dirty before being allocated and the caller
* doesn't claim to have cleaned it.
*/
assert
(
arena_mapbits_dirty_get
(
chunk
,
run_ind
)
==
arena_mapbits_dirty_get
(
chunk
,
run_ind
+
run_pages
-
1
));
if
(
!
cleaned
&&
!
decommitted
&&
arena_mapbits_dirty_get
(
chunk
,
run_ind
)
!=
0
)
dirty
=
true
;
flag_dirty
=
dirty
?
CHUNK_MAP_DIRTY
:
0
;
flag_decommitted
=
decommitted
?
CHUNK_MAP_DECOMMITTED
:
0
;
/* Mark pages as unallocated in the chunk map. */
if
(
dirty
||
decommitted
)
{
size_t
flags
=
flag_dirty
|
flag_decommitted
;
arena_mapbits_unallocated_set
(
chunk
,
run_ind
,
size
,
flags
);
arena_mapbits_unallocated_set
(
chunk
,
run_ind
+
run_pages
-
1
,
size
,
flags
);
}
else
{
arena_mapbits_unallocated_set
(
chunk
,
run_ind
,
size
,
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
));
arena_mapbits_unallocated_set
(
chunk
,
run_ind
+
run_pages
-
1
,
size
,
arena_mapbits_unzeroed_get
(
chunk
,
run_ind
+
run_pages
-
1
));
}
arena_run_coalesce
(
arena
,
chunk
,
&
size
,
&
run_ind
,
&
run_pages
,
flag_dirty
,
flag_decommitted
);
/* Insert into runs_avail, now that coalescing is complete. */
assert
(
arena_mapbits_unallocated_size_get
(
chunk
,
run_ind
)
==
arena_mapbits_unallocated_size_get
(
chunk
,
run_ind
+
run_pages
-
1
));
assert
(
arena_mapbits_dirty_get
(
chunk
,
run_ind
)
==
arena_mapbits_dirty_get
(
chunk
,
run_ind
+
run_pages
-
1
));
arena_avail_insert
(
arena
,
chunk
,
run_ind
,
run_pages
,
true
,
true
);
assert
(
arena_mapbits_decommitted_get
(
chunk
,
run_ind
)
==
arena_mapbits_decommitted_get
(
chunk
,
run_ind
+
run_pages
-
1
));
arena_avail_insert
(
arena
,
chunk
,
run_ind
,
run_pages
);
if
(
dirty
)
arena_run_dirty_insert
(
arena
,
chunk
,
run_ind
,
run_pages
);
/* Deallocate chunk if it is now completely unused. */
if
(
size
==
arena_max
class
)
{
if
(
size
==
arena_max
run
)
{
assert
(
run_ind
==
map_bias
);
assert
(
run_pages
==
(
arena_max
class
>>
LG_PAGE
));
arena_chunk_d
e
alloc
(
arena
,
chunk
);
assert
(
run_pages
==
(
arena_max
run
>>
LG_PAGE
));
arena_chunk_dalloc
(
tsdn
,
arena
,
chunk
);
}
/*
...
...
@@ -1090,16 +2189,20 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
* chances of spuriously crossing the dirty page purging threshold.
*/
if
(
dirty
)
arena_maybe_purge
(
arena
);
arena_maybe_purge
(
tsdn
,
arena
);
}
static
void
arena_run_trim_head
(
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
arena_run_t
*
run
,
size_t
oldsize
,
size_t
newsize
)
arena_run_trim_head
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
arena_run_t
*
run
,
size_t
oldsize
,
size_t
newsize
)
{
size_t
pageind
=
((
uintptr_t
)
run
-
(
uintptr_t
)
chunk
)
>>
LG_PAGE
;
arena_chunk_map_misc_t
*
miscelm
=
arena_run_to_miscelm
(
run
);
size_t
pageind
=
arena_miscelm_to_pageind
(
miscelm
);
size_t
head_npages
=
(
oldsize
-
newsize
)
>>
LG_PAGE
;
size_t
flag_dirty
=
arena_mapbits_dirty_get
(
chunk
,
pageind
);
size_t
flag_decommitted
=
arena_mapbits_decommitted_get
(
chunk
,
pageind
);
size_t
flag_unzeroed_mask
=
(
flag_dirty
|
flag_decommitted
)
==
0
?
CHUNK_MAP_UNZEROED
:
0
;
assert
(
oldsize
>
newsize
);
...
...
@@ -1109,8 +2212,11 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
* run first, in case of single-page runs.
*/
assert
(
arena_mapbits_large_size_get
(
chunk
,
pageind
)
==
oldsize
);
arena_mapbits_large_set
(
chunk
,
pageind
+
head_npages
-
1
,
0
,
flag_dirty
);
arena_mapbits_large_set
(
chunk
,
pageind
,
oldsize
-
newsize
,
flag_dirty
);
arena_mapbits_large_set
(
chunk
,
pageind
+
head_npages
-
1
,
0
,
flag_dirty
|
(
flag_unzeroed_mask
&
arena_mapbits_unzeroed_get
(
chunk
,
pageind
+
head_npages
-
1
)));
arena_mapbits_large_set
(
chunk
,
pageind
,
oldsize
-
newsize
,
flag_dirty
|
(
flag_unzeroed_mask
&
arena_mapbits_unzeroed_get
(
chunk
,
pageind
)));
if
(
config_debug
)
{
UNUSED
size_t
tail_npages
=
newsize
>>
LG_PAGE
;
...
...
@@ -1120,18 +2226,26 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
pageind
+
head_npages
+
tail_npages
-
1
)
==
flag_dirty
);
}
arena_mapbits_large_set
(
chunk
,
pageind
+
head_npages
,
newsize
,
flag_dirty
);
flag_dirty
|
(
flag_unzeroed_mask
&
arena_mapbits_unzeroed_get
(
chunk
,
pageind
+
head_npages
)));
arena_run_dalloc
(
arena
,
run
,
false
,
false
);
arena_run_dalloc
(
tsdn
,
arena
,
run
,
false
,
false
,
(
flag_decommitted
!=
0
));
}
static
void
arena_run_trim_tail
(
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
arena_run_t
*
run
,
size_t
oldsize
,
size_t
newsize
,
bool
dirty
)
arena_run_trim_tail
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
arena_run_t
*
run
,
size_t
oldsize
,
size_t
newsize
,
bool
dirty
)
{
size_t
pageind
=
((
uintptr_t
)
run
-
(
uintptr_t
)
chunk
)
>>
LG_PAGE
;
arena_chunk_map_misc_t
*
miscelm
=
arena_run_to_miscelm
(
run
);
size_t
pageind
=
arena_miscelm_to_pageind
(
miscelm
);
size_t
head_npages
=
newsize
>>
LG_PAGE
;
size_t
flag_dirty
=
arena_mapbits_dirty_get
(
chunk
,
pageind
);
size_t
flag_decommitted
=
arena_mapbits_decommitted_get
(
chunk
,
pageind
);
size_t
flag_unzeroed_mask
=
(
flag_dirty
|
flag_decommitted
)
==
0
?
CHUNK_MAP_UNZEROED
:
0
;
arena_chunk_map_misc_t
*
tail_miscelm
;
arena_run_t
*
tail_run
;
assert
(
oldsize
>
newsize
);
...
...
@@ -1141,8 +2255,11 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
* run first, in case of single-page runs.
*/
assert
(
arena_mapbits_large_size_get
(
chunk
,
pageind
)
==
oldsize
);
arena_mapbits_large_set
(
chunk
,
pageind
+
head_npages
-
1
,
0
,
flag_dirty
);
arena_mapbits_large_set
(
chunk
,
pageind
,
newsize
,
flag_dirty
);
arena_mapbits_large_set
(
chunk
,
pageind
+
head_npages
-
1
,
0
,
flag_dirty
|
(
flag_unzeroed_mask
&
arena_mapbits_unzeroed_get
(
chunk
,
pageind
+
head_npages
-
1
)));
arena_mapbits_large_set
(
chunk
,
pageind
,
newsize
,
flag_dirty
|
(
flag_unzeroed_mask
&
arena_mapbits_unzeroed_get
(
chunk
,
pageind
)));
if
(
config_debug
)
{
UNUSED
size_t
tail_npages
=
(
oldsize
-
newsize
)
>>
LG_PAGE
;
...
...
@@ -1152,74 +2269,42 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
pageind
+
head_npages
+
tail_npages
-
1
)
==
flag_dirty
);
}
arena_mapbits_large_set
(
chunk
,
pageind
+
head_npages
,
oldsize
-
newsize
,
flag_dirty
);
arena_run_dalloc
(
arena
,
(
arena_run_t
*
)((
uintptr_t
)
run
+
newsize
),
dirty
,
false
);
}
flag_dirty
|
(
flag_unzeroed_mask
&
arena_mapbits_unzeroed_get
(
chunk
,
pageind
+
head_npages
)));
static
arena_run_t
*
arena_bin_runs_first
(
arena_bin_t
*
bin
)
{
arena_chunk_map_t
*
mapelm
=
arena_run_tree_first
(
&
bin
->
runs
);
if
(
mapelm
!=
NULL
)
{
arena_chunk_t
*
chunk
;
size_t
pageind
;
arena_run_t
*
run
;
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
mapelm
);
pageind
=
((((
uintptr_t
)
mapelm
-
(
uintptr_t
)
chunk
->
map
)
/
sizeof
(
arena_chunk_map_t
)))
+
map_bias
;
run
=
(
arena_run_t
*
)((
uintptr_t
)
chunk
+
(
uintptr_t
)((
pageind
-
arena_mapbits_small_runind_get
(
chunk
,
pageind
))
<<
LG_PAGE
));
return
(
run
);
}
return
(
NULL
);
tail_miscelm
=
arena_miscelm_get_mutable
(
chunk
,
pageind
+
head_npages
);
tail_run
=
&
tail_miscelm
->
run
;
arena_run_dalloc
(
tsdn
,
arena
,
tail_run
,
dirty
,
false
,
(
flag_decommitted
!=
0
));
}
static
void
arena_bin_runs_insert
(
arena_bin_t
*
bin
,
arena_run_t
*
run
)
{
arena_chunk_t
*
chunk
=
CHUNK_ADDR2BASE
(
run
);
size_t
pageind
=
((
uintptr_t
)
run
-
(
uintptr_t
)
chunk
)
>>
LG_PAGE
;
arena_chunk_map_t
*
mapelm
=
arena_mapp_get
(
chunk
,
pageind
);
assert
(
arena_run_tree_search
(
&
bin
->
runs
,
mapelm
)
==
NULL
);
arena_run_tree_insert
(
&
bin
->
runs
,
mapelm
);
}
static
void
arena_bin_runs_remove
(
arena_bin_t
*
bin
,
arena_run_t
*
run
)
{
arena_chunk_t
*
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
run
);
size_t
pageind
=
((
uintptr_t
)
run
-
(
uintptr_t
)
chunk
)
>>
LG_PAGE
;
arena_chunk_map_t
*
mapelm
=
arena_mapp_get
(
chunk
,
pageind
);
assert
(
arena_run_tree_search
(
&
bin
->
runs
,
mapelm
)
!=
NULL
);
arena_chunk_map_misc_t
*
miscelm
=
arena_run_to_miscelm
(
run
);
arena_run_
tree_remove
(
&
bin
->
runs
,
m
ap
elm
);
arena_run_
heap_insert
(
&
bin
->
runs
,
m
isc
elm
);
}
static
arena_run_t
*
arena_bin_nonfull_run_tryget
(
arena_bin_t
*
bin
)
{
arena_run_t
*
run
=
arena_bin_runs_first
(
bin
);
if
(
run
!=
NULL
)
{
arena_bin_runs_remove
(
bin
,
run
);
if
(
config_stats
)
bin
->
stats
.
reruns
++
;
}
return
(
run
);
arena_chunk_map_misc_t
*
miscelm
;
miscelm
=
arena_run_heap_remove_first
(
&
bin
->
runs
);
if
(
miscelm
==
NULL
)
return
(
NULL
);
if
(
config_stats
)
bin
->
stats
.
reruns
++
;
return
(
&
miscelm
->
run
);
}
static
arena_run_t
*
arena_bin_nonfull_run_get
(
arena_t
*
arena
,
arena_bin_t
*
bin
)
arena_bin_nonfull_run_get
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_bin_t
*
bin
)
{
arena_run_t
*
run
;
s
ize
_t
binind
;
s
zind
_t
binind
;
arena_bin_info_t
*
bin_info
;
/* Look for a usable run. */
...
...
@@ -1232,25 +2317,19 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
bin_info
=
&
arena_bin_info
[
binind
];
/* Allocate a new run. */
malloc_mutex_unlock
(
&
bin
->
lock
);
malloc_mutex_unlock
(
tsdn
,
&
bin
->
lock
);
/******************************/
malloc_mutex_lock
(
&
arena
->
lock
);
run
=
arena_run_alloc
(
arena
,
bin_info
->
run_size
,
false
,
binind
,
false
);
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
run
=
arena_run_alloc
_small
(
tsdn
,
arena
,
bin_info
->
run_size
,
binind
);
if
(
run
!=
NULL
)
{
bitmap_t
*
bitmap
=
(
bitmap_t
*
)((
uintptr_t
)
run
+
(
uintptr_t
)
bin_info
->
bitmap_offset
);
/* Initialize run internals. */
VALGRIND_MAKE_MEM_UNDEFINED
(
run
,
bin_info
->
reg0_offset
-
bin_info
->
redzone_size
);
run
->
bin
=
bin
;
run
->
nextind
=
0
;
run
->
binind
=
binind
;
run
->
nfree
=
bin_info
->
nregs
;
bitmap_init
(
bitmap
,
&
bin_info
->
bitmap_info
);
bitmap_init
(
run
->
bitmap
,
&
bin_info
->
bitmap_info
);
}
malloc_mutex_unlock
(
&
arena
->
lock
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
/********************************/
malloc_mutex_lock
(
&
bin
->
lock
);
malloc_mutex_lock
(
tsdn
,
&
bin
->
lock
);
if
(
run
!=
NULL
)
{
if
(
config_stats
)
{
bin
->
stats
.
nruns
++
;
...
...
@@ -1260,7 +2339,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
}
/*
* arena_run_alloc() failed, but another thread may have made
* arena_run_alloc
_small
() failed, but another thread may have made
* sufficient memory available while this one dropped bin->lock above,
* so search one more time.
*/
...
...
@@ -1273,40 +2352,41 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static
void
*
arena_bin_malloc_hard
(
arena_t
*
arena
,
arena_bin_t
*
bin
)
arena_bin_malloc_hard
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_bin_t
*
bin
)
{
void
*
ret
;
size_t
binind
;
szind_t
binind
;
arena_bin_info_t
*
bin_info
;
arena_run_t
*
run
;
binind
=
arena_bin_index
(
arena
,
bin
);
bin_info
=
&
arena_bin_info
[
binind
];
bin
->
runcur
=
NULL
;
run
=
arena_bin_nonfull_run_get
(
arena
,
bin
);
run
=
arena_bin_nonfull_run_get
(
tsdn
,
arena
,
bin
);
if
(
bin
->
runcur
!=
NULL
&&
bin
->
runcur
->
nfree
>
0
)
{
/*
* Another thread updated runcur while this one ran without the
* bin lock in arena_bin_nonfull_run_get().
*/
void
*
ret
;
assert
(
bin
->
runcur
->
nfree
>
0
);
ret
=
arena_run_reg_alloc
(
bin
->
runcur
,
bin_info
);
if
(
run
!=
NULL
)
{
arena_chunk_t
*
chunk
;
/*
* arena_run_alloc() may have allocated run, or
it may
* have pulled run from the bin's run tree.
Therefore
* it is unsafe to make any assumptions about
how run
* has previously been used, and
arena_bin_lower_run()
* must be called, as if a region
were just deallocated
* from the run.
* arena_run_alloc
_small
() may have allocated run, or
*
it may
have pulled run from the bin's run tree.
*
Therefore
it is unsafe to make any assumptions about
*
how run
has previously been used, and
*
arena_bin_lower_run()
must be called, as if a region
*
were just deallocated
from the run.
*/
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
run
);
if
(
run
->
nfree
==
bin_info
->
nregs
)
arena_dalloc_bin_run
(
arena
,
chunk
,
run
,
bin
);
else
arena_bin_lower_run
(
arena
,
chunk
,
run
,
bin
);
if
(
run
->
nfree
==
bin_info
->
nregs
)
{
arena_dalloc_bin_run
(
tsdn
,
arena
,
chunk
,
run
,
bin
);
}
else
arena_bin_lower_run
(
arena
,
run
,
bin
);
}
return
(
ret
);
}
...
...
@@ -1322,282 +2402,447 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
}
void
arena_prof_accum
(
arena_t
*
arena
,
uint64_t
accumbytes
)
{
cassert
(
config_prof
);
if
(
config_prof
&&
prof_interval
!=
0
)
{
arena
->
prof_accumbytes
+=
accumbytes
;
if
(
arena
->
prof_accumbytes
>=
prof_interval
)
{
prof_idump
();
arena
->
prof_accumbytes
-=
prof_interval
;
}
}
}
void
arena_tcache_fill_small
(
arena_t
*
arena
,
tcache_bin_t
*
tbin
,
size_t
binind
,
uint64_t
prof_accumbytes
)
arena_tcache_fill_small
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
tcache_bin_t
*
tbin
,
szind_t
binind
,
uint64_t
prof_accumbytes
)
{
unsigned
i
,
nfill
;
arena_bin_t
*
bin
;
arena_run_t
*
run
;
void
*
ptr
;
assert
(
tbin
->
ncached
==
0
);
if
(
config_prof
)
{
malloc_mutex_lock
(
&
arena
->
lock
);
arena_prof_accum
(
arena
,
prof_accumbytes
);
malloc_mutex_unlock
(
&
arena
->
lock
);
}
if
(
config_prof
&&
arena_prof_accum
(
tsdn
,
arena
,
prof_accumbytes
))
prof_idump
(
tsdn
);
bin
=
&
arena
->
bins
[
binind
];
malloc_mutex_lock
(
&
bin
->
lock
);
malloc_mutex_lock
(
tsdn
,
&
bin
->
lock
);
for
(
i
=
0
,
nfill
=
(
tcache_bin_info
[
binind
].
ncached_max
>>
tbin
->
lg_fill_div
);
i
<
nfill
;
i
++
)
{
arena_run_t
*
run
;
void
*
ptr
;
if
((
run
=
bin
->
runcur
)
!=
NULL
&&
run
->
nfree
>
0
)
ptr
=
arena_run_reg_alloc
(
run
,
&
arena_bin_info
[
binind
]);
else
ptr
=
arena_bin_malloc_hard
(
arena
,
bin
);
if
(
ptr
==
NULL
)
ptr
=
arena_bin_malloc_hard
(
tsdn
,
arena
,
bin
);
if
(
ptr
==
NULL
)
{
/*
* OOM. tbin->avail isn't yet filled down to its first
* element, so the successful allocations (if any) must
* be moved just before tbin->avail before bailing out.
*/
if
(
i
>
0
)
{
memmove
(
tbin
->
avail
-
i
,
tbin
->
avail
-
nfill
,
i
*
sizeof
(
void
*
));
}
break
;
if
(
config_fill
&&
opt_junk
)
{
}
if
(
config_fill
&&
unlikely
(
opt_junk_alloc
))
{
arena_alloc_junk_small
(
ptr
,
&
arena_bin_info
[
binind
],
true
);
}
/* Insert such that low regions get used first. */
tbin
->
avail
[
nfill
-
1
-
i
]
=
ptr
;
*
(
tbin
->
avail
-
nfill
+
i
)
=
ptr
;
}
if
(
config_stats
)
{
bin
->
stats
.
allocated
+=
i
*
arena_bin_info
[
binind
].
reg_size
;
bin
->
stats
.
nmalloc
+=
i
;
bin
->
stats
.
nrequests
+=
tbin
->
tstats
.
nrequests
;
bin
->
stats
.
curregs
+=
i
;
bin
->
stats
.
nfills
++
;
tbin
->
tstats
.
nrequests
=
0
;
}
malloc_mutex_unlock
(
&
bin
->
lock
);
malloc_mutex_unlock
(
tsdn
,
&
bin
->
lock
);
tbin
->
ncached
=
i
;
arena_decay_tick
(
tsdn
,
arena
);
}
void
arena_alloc_junk_small
(
void
*
ptr
,
arena_bin_info_t
*
bin_info
,
bool
zero
)
{
size_t
redzone_size
=
bin_info
->
redzone_size
;
if
(
zero
)
{
size_t
redzone_size
=
bin_info
->
redzone_size
;
memset
((
void
*
)((
uintptr_t
)
ptr
-
redzone_size
),
0xa5
,
redzone_size
);
memset
((
void
*
)((
uintptr_t
)
ptr
+
bin_info
->
reg_size
),
0xa5
,
redzone_size
);
memset
((
void
*
)((
uintptr_t
)
ptr
-
redzone_size
),
JEMALLOC_ALLOC_JUNK
,
redzone_size
);
memset
((
void
*
)((
uintptr_t
)
ptr
+
bin_info
->
reg_size
),
JEMALLOC_ALLOC_JUNK
,
redzone_size
);
}
else
{
memset
((
void
*
)((
uintptr_t
)
ptr
-
bin_info
->
redzone_size
),
0xa5
,
bin_info
->
reg_interval
);
memset
((
void
*
)((
uintptr_t
)
ptr
-
redzone_size
),
JEMALLOC_ALLOC_JUNK
,
bin_info
->
reg_interval
);
}
}
void
arena_dalloc_junk_small
(
void
*
ptr
,
arena_bin_info_t
*
bin_info
)
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
#endif
static
void
arena_redzone_corruption
(
void
*
ptr
,
size_t
usize
,
bool
after
,
size_t
offset
,
uint8_t
byte
)
{
malloc_printf
(
"<jemalloc>: Corrupt redzone %zu byte%s %s %p "
"(size %zu), byte=%#x
\n
"
,
offset
,
(
offset
==
1
)
?
""
:
"s"
,
after
?
"after"
:
"before"
,
ptr
,
usize
,
byte
);
}
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t
*
arena_redzone_corruption
=
JEMALLOC_N
(
n_arena_redzone_corruption
);
#endif
static
void
arena_redzones_validate
(
void
*
ptr
,
arena_bin_info_t
*
bin_info
,
bool
reset
)
{
size_t
size
=
bin_info
->
reg_size
;
size_t
redzone_size
=
bin_info
->
redzone_size
;
size_t
i
;
bool
error
=
false
;
for
(
i
=
1
;
i
<=
redzone_size
;
i
++
)
{
unsigned
byte
;
if
((
byte
=
*
(
uint8_t
*
)((
uintptr_t
)
ptr
-
i
))
!=
0xa5
)
{
error
=
true
;
malloc_printf
(
"<jemalloc>: Corrupt redzone "
"%zu byte%s before %p (size %zu), byte=%#x
\n
"
,
i
,
(
i
==
1
)
?
""
:
"s"
,
ptr
,
size
,
byte
);
if
(
opt_junk_alloc
)
{
size_t
size
=
bin_info
->
reg_size
;
size_t
redzone_size
=
bin_info
->
redzone_size
;
size_t
i
;
for
(
i
=
1
;
i
<=
redzone_size
;
i
++
)
{
uint8_t
*
byte
=
(
uint8_t
*
)((
uintptr_t
)
ptr
-
i
);
if
(
*
byte
!=
JEMALLOC_ALLOC_JUNK
)
{
error
=
true
;
arena_redzone_corruption
(
ptr
,
size
,
false
,
i
,
*
byte
);
if
(
reset
)
*
byte
=
JEMALLOC_ALLOC_JUNK
;
}
}
}
for
(
i
=
0
;
i
<
redzone_size
;
i
++
)
{
unsigned
byte
;
if
((
byte
=
*
(
uint8_t
*
)((
uintptr_t
)
ptr
+
size
+
i
))
!=
0xa5
)
{
error
=
true
;
malloc_printf
(
"<jemalloc>: Corrupt redzone "
"%zu byte%s after end of %p (size %zu), byte=%#x
\n
"
,
i
,
(
i
==
1
)
?
""
:
"s"
,
ptr
,
size
,
byte
);
for
(
i
=
0
;
i
<
redzone_size
;
i
++
)
{
uint8_t
*
byte
=
(
uint8_t
*
)((
uintptr_t
)
ptr
+
size
+
i
);
if
(
*
byte
!=
JEMALLOC_ALLOC_JUNK
)
{
error
=
true
;
arena_redzone_corruption
(
ptr
,
size
,
true
,
i
,
*
byte
);
if
(
reset
)
*
byte
=
JEMALLOC_ALLOC_JUNK
;
}
}
}
if
(
opt_abort
&&
error
)
abort
();
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
#endif
void
arena_dalloc_junk_small
(
void
*
ptr
,
arena_bin_info_t
*
bin_info
)
{
size_t
redzone_size
=
bin_info
->
redzone_size
;
memset
((
void
*
)((
uintptr_t
)
ptr
-
redzone_size
),
0x5a
,
arena_redzones_validate
(
ptr
,
bin_info
,
false
);
memset
((
void
*
)((
uintptr_t
)
ptr
-
redzone_size
),
JEMALLOC_FREE_JUNK
,
bin_info
->
reg_interval
);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t
*
arena_dalloc_junk_small
=
JEMALLOC_N
(
n_arena_dalloc_junk_small
);
#endif
void
*
arena_malloc_small
(
arena_t
*
arena
,
size_t
size
,
bool
zero
)
void
arena_quarantine_junk_small
(
void
*
ptr
,
size_t
usize
)
{
szind_t
binind
;
arena_bin_info_t
*
bin_info
;
cassert
(
config_fill
);
assert
(
opt_junk_free
);
assert
(
opt_quarantine
);
assert
(
usize
<=
SMALL_MAXCLASS
);
binind
=
size2index
(
usize
);
bin_info
=
&
arena_bin_info
[
binind
];
arena_redzones_validate
(
ptr
,
bin_info
,
true
);
}
static
void
*
arena_malloc_small
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
szind_t
binind
,
bool
zero
)
{
void
*
ret
;
arena_bin_t
*
bin
;
size_t
usize
;
arena_run_t
*
run
;
size_t
binind
;
binind
=
SMALL_SIZE2BIN
(
size
);
assert
(
binind
<
NBINS
);
bin
=
&
arena
->
bins
[
binind
];
size
=
arena_bin_info
[
binind
].
reg_size
;
u
size
=
index2size
(
binind
)
;
malloc_mutex_lock
(
&
bin
->
lock
);
malloc_mutex_lock
(
tsdn
,
&
bin
->
lock
);
if
((
run
=
bin
->
runcur
)
!=
NULL
&&
run
->
nfree
>
0
)
ret
=
arena_run_reg_alloc
(
run
,
&
arena_bin_info
[
binind
]);
else
ret
=
arena_bin_malloc_hard
(
arena
,
bin
);
ret
=
arena_bin_malloc_hard
(
tsdn
,
arena
,
bin
);
if
(
ret
==
NULL
)
{
malloc_mutex_unlock
(
&
bin
->
lock
);
malloc_mutex_unlock
(
tsdn
,
&
bin
->
lock
);
return
(
NULL
);
}
if
(
config_stats
)
{
bin
->
stats
.
allocated
+=
size
;
bin
->
stats
.
nmalloc
++
;
bin
->
stats
.
nrequests
++
;
bin
->
stats
.
curregs
++
;
}
malloc_mutex_unlock
(
&
bin
->
lock
);
if
(
config_prof
&&
isthreaded
==
false
)
{
malloc_mutex_lock
(
&
arena
->
lock
);
arena_prof_accum
(
arena
,
size
);
malloc_mutex_unlock
(
&
arena
->
lock
);
}
malloc_mutex_unlock
(
tsdn
,
&
bin
->
lock
);
if
(
config_prof
&&
!
isthreaded
&&
arena_prof_accum
(
tsdn
,
arena
,
usize
))
prof_idump
(
tsdn
);
if
(
zero
==
false
)
{
if
(
!
zero
)
{
if
(
config_fill
)
{
if
(
opt_junk
)
{
if
(
unlikely
(
opt_junk
_alloc
)
)
{
arena_alloc_junk_small
(
ret
,
&
arena_bin_info
[
binind
],
false
);
}
else
if
(
opt_zero
)
memset
(
ret
,
0
,
size
);
}
else
if
(
unlikely
(
opt_zero
)
)
memset
(
ret
,
0
,
u
size
);
}
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED
(
ret
,
usize
);
}
else
{
if
(
config_fill
&&
opt_junk
)
{
if
(
config_fill
&&
unlikely
(
opt_junk
_alloc
)
)
{
arena_alloc_junk_small
(
ret
,
&
arena_bin_info
[
binind
],
true
);
}
VALGRIND_MAKE_MEM_UNDEFINED
(
ret
,
size
);
memset
(
ret
,
0
,
size
);
JEMALLOC_
VALGRIND_MAKE_MEM_UNDEFINED
(
ret
,
u
size
);
memset
(
ret
,
0
,
u
size
);
}
arena_decay_tick
(
tsdn
,
arena
);
return
(
ret
);
}
void *
arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
{
	void *ret;
	size_t usize;
	uintptr_t random_offset;
	arena_run_t *run;
	arena_chunk_map_misc_t *miscelm;
	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

	/* Large allocation. */
	usize = index2size(binind);
	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_cache_oblivious) {
		uint64_t r;

		/*
		 * Compute a uniformly distributed offset within the first page
		 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
		 * for 4 KiB pages and 64-byte cachelines.
		 */
		r = prng_lg_range_zu(&arena->offset_state, LG_PAGE -
		    LG_CACHELINE, false);
		random_offset = ((uintptr_t)r) << LG_CACHELINE;
	} else
		random_offset = 0;
	run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
	if (run == NULL) {
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (NULL);
	}
	miscelm = arena_run_to_miscelm(run);
	ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
	    random_offset);
	if (config_stats) {
		szind_t index = binind - NBINS;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += usize;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	if (config_prof)
		idump = arena_prof_accum_locked(arena, usize);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (config_prof && idump)
		prof_idump(tsdn);

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc))
				memset(ret, JEMALLOC_ALLOC_JUNK, usize);
			else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
	}

	arena_decay_tick(tsdn, arena);
	return (ret);
}
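The cache-oblivious path above picks a random, cacheline-aligned offset inside the first page of each large run so that identical allocation patterns do not all land on the same cache index. The following standalone sketch shows only that arithmetic; the xorshift generator and the 4 KiB page / 64-byte cacheline constants are illustrative stand-ins, not jemalloc's prng_lg_range_zu() or its real configuration.

#include <stdint.h>
#include <stdio.h>

#define LG_PAGE      12  /* assume 4 KiB pages */
#define LG_CACHELINE 6   /* assume 64-byte cachelines */

/* Tiny xorshift PRNG standing in for jemalloc's per-arena offset_state. */
static uint64_t
prng_next(uint64_t *state)
{
	uint64_t x = *state;

	x ^= x << 13;
	x ^= x >> 7;
	x ^= x << 17;
	return (*state = x);
}

/* Pick one of the 2^(LG_PAGE - LG_CACHELINE) cacheline-aligned offsets in [0, PAGE). */
static uintptr_t
random_cacheline_offset(uint64_t *state)
{
	uint64_t r = prng_next(state) & ((1u << (LG_PAGE - LG_CACHELINE)) - 1);

	return ((uintptr_t)r << LG_CACHELINE);
}

int
main(void)
{
	uint64_t state = 0x9e3779b97f4a7c15ULL;
	int i;

	for (i = 0; i < 4; i++) {
		printf("offset %#lx\n",
		    (unsigned long)random_cacheline_offset(&state));
	}
	return (0);
}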
void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero)
{

	assert(!tsdn_null(tsdn) || arena != NULL);

	if (likely(!tsdn_null(tsdn)))
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	if (unlikely(arena == NULL))
		return (NULL);

	if (likely(size <= SMALL_MAXCLASS))
		return (arena_malloc_small(tsdn, arena, ind, zero));
	if (likely(size <= large_maxclass))
		return (arena_malloc_large(tsdn, arena, ind, zero));
	return (huge_malloc(tsdn, arena, index2size(ind), zero));
}
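arena_malloc_hard() is essentially a three-way dispatcher on the request's size class. A minimal sketch of that routing logic follows; the SMALL_MAX and LARGE_MAX thresholds are illustrative placeholders, since jemalloc derives its real boundaries from the generated size-class tables.

#include <stddef.h>
#include <stdio.h>

/* Illustrative thresholds only; not jemalloc's computed size-class limits. */
#define SMALL_MAX  (14 * 1024)
#define LARGE_MAX  (1792 * 1024)

typedef enum { ALLOC_SMALL, ALLOC_LARGE, ALLOC_HUGE } alloc_path_t;

/* Route a request to the small, large, or huge allocation path. */
static alloc_path_t
choose_path(size_t size)
{
	if (size <= SMALL_MAX)
		return (ALLOC_SMALL);
	if (size <= LARGE_MAX)
		return (ALLOC_LARGE);
	return (ALLOC_HUGE);
}

int
main(void)
{
	size_t sizes[] = {64, 32 * 1024, 4 * 1024 * 1024};
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%zu bytes -> path %d\n", sizes[i], (int)choose_path(sizes[i]));
	return (0);
}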
/* Only handles large allocations that require more than page alignment. */
static void *
arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero)
{
	void *ret;
	size_t alloc_size, leadsize, trailsize;
	arena_run_t *run;
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(!tsdn_null(tsdn) || arena != NULL);
	assert(usize == PAGE_CEILING(usize));

	if (likely(!tsdn_null(tsdn)))
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	if (unlikely(arena == NULL))
		return (NULL);

	alignment = PAGE_CEILING(alignment);
	alloc_size = usize + large_pad + alignment - PAGE;

	malloc_mutex_lock(tsdn, &arena->lock);
	run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
	if (run == NULL) {
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (NULL);
	}
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);

	leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
	    (uintptr_t)rpages;
	assert(alloc_size >= leadsize + usize);
	trailsize = alloc_size - leadsize - usize - large_pad;
	if (leadsize != 0) {
		arena_chunk_map_misc_t *head_miscelm = miscelm;
		arena_run_t *head_run = run;

		miscelm = arena_miscelm_get_mutable(chunk,
		    arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
		    LG_PAGE));
		run = &miscelm->run;

		arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
		    alloc_size - leadsize);
	}
	if (trailsize != 0) {
		arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
		    trailsize, usize + large_pad, false);
	}
	if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
		size_t run_ind =
		    arena_miscelm_to_pageind(arena_run_to_miscelm(run));
		bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
		bool decommitted = (arena_mapbits_decommitted_get(chunk,
		    run_ind) != 0);

		assert(decommitted); /* Cause of OOM. */
		arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (NULL);
	}
	ret = arena_miscelm_to_rpages(miscelm);

	if (config_stats) {
		szind_t index = size2index(usize) - NBINS;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += usize;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	malloc_mutex_unlock(tsdn, &arena->lock);

	if (config_fill && !zero) {
		if (unlikely(opt_junk_alloc))
			memset(ret, JEMALLOC_ALLOC_JUNK, usize);
		else if (unlikely(opt_zero))
			memset(ret, 0, usize);
	}
	arena_decay_tick(tsdn, arena);
	return (ret);
}
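arena_palloc_large() over-allocates by alignment - PAGE and then trims the unaligned head and the unused tail from the run. jemalloc does the trimming with arena_run_trim_head()/arena_run_trim_tail(); the standalone sketch below only shows the lead/trail arithmetic, using plain malloc() in place of run allocation.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Round addr up to the next multiple of alignment (a power of two). */
static uintptr_t
align_up(uintptr_t addr, uintptr_t alignment)
{
	return ((addr + alignment - 1) & ~(alignment - 1));
}

int
main(void)
{
	size_t usize = 3 * 4096, alignment = 16 * 4096;
	/* Over-allocate so an aligned region of usize bytes must exist inside. */
	size_t alloc_size = usize + alignment - 1;
	unsigned char *raw = malloc(alloc_size);
	uintptr_t base, aligned;
	size_t leadsize, trailsize;

	if (raw == NULL)
		return (1);
	base = (uintptr_t)raw;
	aligned = align_up(base, alignment);
	leadsize = aligned - base;                 /* would be trimmed from the head */
	trailsize = alloc_size - leadsize - usize; /* would be trimmed from the tail */
	printf("lead=%zu trail=%zu\n", leadsize, trailsize);
	free(raw);
	return (0);
}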
void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;

	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
	    && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special run placement. */
		ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
		    tcache, true);
	} else if (usize <= large_maxclass && alignment <= PAGE) {
		/*
		 * Large; alignment doesn't require special run placement.
		 * However, the cached pointer may be at a random offset from
		 * the base of the run, so do some bit manipulation to retrieve
		 * the base.
		 */
		ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
		    tcache, true);
		if (config_cache_oblivious)
			ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
	} else {
		if (likely(usize <= large_maxclass)) {
			ret = arena_palloc_large(tsdn, arena, usize, alignment,
			    zero);
		} else if (likely(alignment <= chunksize))
			ret = huge_malloc(tsdn, arena, usize, zero);
		else
			ret = huge_palloc(tsdn, arena, usize, alignment, zero);
	}
	return (ret);
}
void
arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
{
	arena_chunk_t *chunk;
	size_t pageind;
	szind_t binind;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);
	assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
	assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
	assert(size <= SMALL_MAXCLASS);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	binind = size2index(size);
	assert(binind < NBINS);
	arena_mapbits_large_binind_set(chunk, pageind, binind);

	assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
	assert(isalloc(tsdn, ptr, true) == size);
}
static
void
...
...
@@ -1609,82 +2854,55 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
	if (run == bin->runcur)
		bin->runcur = NULL;
	else {
		szind_t binind = arena_bin_index(extent_node_arena_get(
		    &chunk->node), bin);
		arena_bin_info_t *bin_info = &arena_bin_info[binind];

		/*
		 * The following block's conditional is necessary because if the
		 * run only contains one region, then it never gets inserted
		 * into the non-full runs tree.
		 */
		if (bin_info->nregs != 1) {
			arena_chunk_map_misc_t *miscelm =
			    arena_run_to_miscelm(run);

			arena_run_heap_remove(&bin->runs, miscelm);
		}
	}
}
static void
arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin)
{

	assert(run != bin->runcur);

	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	malloc_mutex_lock(tsdn, &arena->lock);
	arena_run_dalloc(tsdn, arena, run, true, false, false);
	malloc_mutex_unlock(tsdn, &arena->lock);
	/****************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats)
		bin->stats.curruns--;
}
static void
arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
{

	/*
	 * Make sure that if bin->runcur is non-NULL, it refers to the
	 * oldest/lowest non-full run.  It is okay to NULL runcur out rather
	 * than proactively keeping it pointing at the oldest/lowest non-full
	 * run.
	 */
	if (bin->runcur != NULL &&
	    arena_snad_comp(arena_run_to_miscelm(bin->runcur),
	    arena_run_to_miscelm(run)) > 0) {
		/* Switch runcur. */
		if (bin->runcur->nfree > 0)
			arena_bin_runs_insert(bin, bin->runcur);
...
...
@@ -1695,105 +2913,152 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
		arena_bin_runs_insert(bin, run);
}

static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
{
	size_t pageind, rpages_ind;
	arena_run_t *run;
	arena_bin_t *bin;
	arena_bin_info_t *bin_info;
	szind_t binind;

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
	run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
	binind = run->binind;
	bin = &arena->bins[binind];
	bin_info = &arena_bin_info[binind];

	if (!junked && config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_small(ptr, bin_info);

	arena_run_reg_dalloc(run, ptr);
	if (run->nfree == bin_info->nregs) {
		arena_dissociate_bin_run(chunk, run, bin);
		arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
	} else if (run->nfree == 1 && run != bin->runcur)
		arena_bin_lower_run(arena, run, bin);

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}
void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
{

	arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
}

void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_bits_t *bitselm)
{
	arena_run_t *run;
	arena_bin_t *bin;
	size_t rpages_ind;

	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
	run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
	bin = &arena->bins[run->binind];
	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
	malloc_mutex_unlock(tsdn, &bin->lock);
}
void
arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t pageind)
{
	arena_chunk_map_bits_t *bitselm;

	if (config_debug) {
		/* arena_ptr_small_binind_get() does extra sanity checking. */
		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
		    pageind)) != BININD_INVALID);
	}
	bitselm = arena_bitselm_get_mutable(chunk, pageind);
	arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
	arena_decay_tick(tsdn, arena);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define	arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
#endif
void
arena_dalloc_junk_large(void *ptr, size_t usize)
{

	if (config_fill && unlikely(opt_junk_free))
		memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
    JEMALLOC_N(n_arena_dalloc_junk_large);
#endif

static void
arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr, bool junked)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);
	arena_run_t *run = &miscelm->run;

	if (config_fill || config_stats) {
		size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
		    large_pad;

		if (!junked)
			arena_dalloc_junk_large(ptr, usize);
		if (config_stats) {
			szind_t index = size2index(usize) - NBINS;

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= usize;
			arena->stats.lstats[index].ndalloc++;
			arena->stats.lstats[index].curruns--;
		}
	}

	arena_run_dalloc(tsdn, arena, run, true, false, false);
}
void
arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr)
{

	arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
}

void
arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr)
{

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
	malloc_mutex_unlock(tsdn, &arena->lock);
	arena_decay_tick(tsdn, arena);
}
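The "junked" flag exists because the freed region may already have been poisoned (for example by the tcache flush path) before the locked deallocator runs; when it has not, the region is filled with a junk byte so stale reads are obvious. A minimal standalone sketch of that poisoning step follows; the 0x5a poison value is illustrative (jemalloc uses its JEMALLOC_FREE_JUNK constant).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FREE_JUNK 0x5a  /* illustrative poison byte */

/* Poison a region as it is returned to the allocator. */
static void
junk_on_free(void *ptr, size_t usize)
{
	memset(ptr, FREE_JUNK, usize);
}

int
main(void)
{
	size_t n = 32;
	unsigned char *p = malloc(n);

	if (p == NULL)
		return (1);
	junk_on_free(p, n);
	printf("first byte after junking: 0x%02x\n", p[0]);
	free(p);
	return (0);
}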
static
void
arena_ralloc_large_shrink
(
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
void
*
ptr
,
size_t
oldsize
,
size_t
size
)
arena_ralloc_large_shrink
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
void
*
ptr
,
size_t
oldsize
,
size_t
size
)
{
size_t
pageind
=
((
uintptr_t
)
ptr
-
(
uintptr_t
)
chunk
)
>>
LG_PAGE
;
arena_chunk_map_misc_t
*
miscelm
=
arena_miscelm_get_mutable
(
chunk
,
pageind
);
arena_run_t
*
run
=
&
miscelm
->
run
;
assert
(
size
<
oldsize
);
...
...
@@ -1801,56 +3066,85 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
* Shrink the run, and make trailing pages available for other
* allocations.
*/
malloc_mutex_lock
(
&
arena
->
lock
);
arena_run_trim_tail
(
arena
,
chunk
,
(
arena_run_t
*
)
ptr
,
oldsize
,
size
,
true
);
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
arena_run_trim_tail
(
tsdn
,
arena
,
chunk
,
run
,
oldsize
+
large_pad
,
size
+
large_pad
,
true
);
if
(
config_stats
)
{
szind_t
oldindex
=
size2index
(
oldsize
)
-
NBINS
;
szind_t
index
=
size2index
(
size
)
-
NBINS
;
arena
->
stats
.
ndalloc_large
++
;
arena
->
stats
.
allocated_large
-=
oldsize
;
arena
->
stats
.
lstats
[
(
old
size
>>
LG_PAGE
)
-
1
].
ndalloc
++
;
arena
->
stats
.
lstats
[
(
old
size
>>
LG_PAGE
)
-
1
].
curruns
--
;
arena
->
stats
.
lstats
[
old
index
].
ndalloc
++
;
arena
->
stats
.
lstats
[
old
index
].
curruns
--
;
arena
->
stats
.
nmalloc_large
++
;
arena
->
stats
.
nrequests_large
++
;
arena
->
stats
.
allocated_large
+=
size
;
arena
->
stats
.
lstats
[
(
size
>>
LG_PAGE
)
-
1
].
nmalloc
++
;
arena
->
stats
.
lstats
[
(
size
>>
LG_PAGE
)
-
1
].
nrequests
++
;
arena
->
stats
.
lstats
[
(
size
>>
LG_PAGE
)
-
1
].
curruns
++
;
arena
->
stats
.
lstats
[
index
].
nmalloc
++
;
arena
->
stats
.
lstats
[
index
].
nrequests
++
;
arena
->
stats
.
lstats
[
index
].
curruns
++
;
}
malloc_mutex_unlock
(
&
arena
->
lock
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
}
static
bool
arena_ralloc_large_grow
(
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
void
*
ptr
,
size_t
oldsize
,
size_t
size
,
size_t
extra
,
bool
zero
)
arena_ralloc_large_grow
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_chunk_t
*
chunk
,
void
*
ptr
,
size_t
oldsize
,
size_t
u
size
_min
,
size_t
usize_max
,
bool
zero
)
{
size_t
pageind
=
((
uintptr_t
)
ptr
-
(
uintptr_t
)
chunk
)
>>
LG_PAGE
;
size_t
npages
=
oldsize
>>
LG_PAGE
;
size_t
npages
=
(
oldsize
+
large_pad
)
>>
LG_PAGE
;
size_t
followsize
;
assert
(
oldsize
==
arena_mapbits_large_size_get
(
chunk
,
pageind
));
assert
(
oldsize
==
arena_mapbits_large_size_get
(
chunk
,
pageind
)
-
large_pad
);
/* Try to extend the run. */
assert
(
size
+
extra
>
oldsize
);
malloc_mutex_lock
(
&
arena
->
lock
);
if
(
pageind
+
npages
<
chunk_npages
&&
arena_mapbits_allocated_get
(
chunk
,
pageind
+
npages
)
==
0
&&
(
followsize
=
arena_mapbits_unallocated_size_get
(
chunk
,
pageind
+
npages
))
>=
size
-
oldsize
)
{
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
if
(
pageind
+
npages
>=
chunk_npages
||
arena_mapbits_allocated_get
(
chunk
,
pageind
+
npages
)
!=
0
)
goto
label_fail
;
followsize
=
arena_mapbits_unallocated_size_get
(
chunk
,
pageind
+
npages
);
if
(
oldsize
+
followsize
>=
usize_min
)
{
/*
* The next run is available and sufficiently large. Split the
* following run, then merge the first part with the existing
* allocation.
*/
size_t
flag_dirty
;
size_t
splitsize
=
(
oldsize
+
followsize
<=
size
+
extra
)
?
followsize
:
size
+
extra
-
oldsize
;
arena_run_split
(
arena
,
(
arena_run_t
*
)((
uintptr_t
)
chunk
+
((
pageind
+
npages
)
<<
LG_PAGE
)),
splitsize
,
true
,
BININD_INVALID
,
zero
);
arena_run_t
*
run
;
size_t
usize
,
splitsize
,
size
,
flag_dirty
,
flag_unzeroed_mask
;
usize
=
usize_max
;
while
(
oldsize
+
followsize
<
usize
)
usize
=
index2size
(
size2index
(
usize
)
-
1
);
assert
(
usize
>=
usize_min
);
assert
(
usize
>=
oldsize
);
splitsize
=
usize
-
oldsize
;
if
(
splitsize
==
0
)
goto
label_fail
;
run
=
&
arena_miscelm_get_mutable
(
chunk
,
pageind
+
npages
)
->
run
;
if
(
arena_run_split_large
(
arena
,
run
,
splitsize
,
zero
))
goto
label_fail
;
if
(
config_cache_oblivious
&&
zero
)
{
/*
* Zero the trailing bytes of the original allocation's
* last page, since they are in an indeterminate state.
* There will always be trailing bytes, because ptr's
* offset from the beginning of the run is a multiple of
* CACHELINE in [0 .. PAGE).
*/
void
*
zbase
=
(
void
*
)((
uintptr_t
)
ptr
+
oldsize
);
void
*
zpast
=
PAGE_ADDR2BASE
((
void
*
)((
uintptr_t
)
zbase
+
PAGE
));
size_t
nzero
=
(
uintptr_t
)
zpast
-
(
uintptr_t
)
zbase
;
assert
(
nzero
>
0
);
memset
(
zbase
,
0
,
nzero
);
}
size
=
oldsize
+
splitsize
;
npages
=
size
>>
LG_PAGE
;
npages
=
(
size
+
large_pad
)
>>
LG_PAGE
;
/*
* Mark the extended run as dirty if either portion of the run
...
...
@@ -1862,210 +3156,320 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
*/
flag_dirty
=
arena_mapbits_dirty_get
(
chunk
,
pageind
)
|
arena_mapbits_dirty_get
(
chunk
,
pageind
+
npages
-
1
);
arena_mapbits_large_set
(
chunk
,
pageind
,
size
,
flag_dirty
);
arena_mapbits_large_set
(
chunk
,
pageind
+
npages
-
1
,
0
,
flag_dirty
);
flag_unzeroed_mask
=
flag_dirty
==
0
?
CHUNK_MAP_UNZEROED
:
0
;
arena_mapbits_large_set
(
chunk
,
pageind
,
size
+
large_pad
,
flag_dirty
|
(
flag_unzeroed_mask
&
arena_mapbits_unzeroed_get
(
chunk
,
pageind
)));
arena_mapbits_large_set
(
chunk
,
pageind
+
npages
-
1
,
0
,
flag_dirty
|
(
flag_unzeroed_mask
&
arena_mapbits_unzeroed_get
(
chunk
,
pageind
+
npages
-
1
)));
if
(
config_stats
)
{
szind_t
oldindex
=
size2index
(
oldsize
)
-
NBINS
;
szind_t
index
=
size2index
(
size
)
-
NBINS
;
arena
->
stats
.
ndalloc_large
++
;
arena
->
stats
.
allocated_large
-=
oldsize
;
arena
->
stats
.
lstats
[
(
old
size
>>
LG_PAGE
)
-
1
].
ndalloc
++
;
arena
->
stats
.
lstats
[
(
old
size
>>
LG_PAGE
)
-
1
].
curruns
--
;
arena
->
stats
.
lstats
[
old
index
].
ndalloc
++
;
arena
->
stats
.
lstats
[
old
index
].
curruns
--
;
arena
->
stats
.
nmalloc_large
++
;
arena
->
stats
.
nrequests_large
++
;
arena
->
stats
.
allocated_large
+=
size
;
arena
->
stats
.
lstats
[
(
size
>>
LG_PAGE
)
-
1
].
nmalloc
++
;
arena
->
stats
.
lstats
[
(
size
>>
LG_PAGE
)
-
1
].
nrequests
++
;
arena
->
stats
.
lstats
[
(
size
>>
LG_PAGE
)
-
1
].
curruns
++
;
arena
->
stats
.
lstats
[
index
].
nmalloc
++
;
arena
->
stats
.
lstats
[
index
].
nrequests
++
;
arena
->
stats
.
lstats
[
index
].
curruns
++
;
}
malloc_mutex_unlock
(
&
arena
->
lock
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
return
(
false
);
}
malloc_mutex_unlock
(
&
arena
->
lock
);
label_fail:
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
return
(
true
);
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define	arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{

	if (config_fill && unlikely(opt_junk_free)) {
		memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
		    old_usize - usize);
	}
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
    JEMALLOC_N(n_arena_ralloc_junk_large);
#endif
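The JEMALLOC_JET blocks around functions like the one above turn an internal helper into a function pointer so that unit tests can intercept or replace it. The sketch below shows roughly how such an indirection behaves; the names and the counting hook are illustrative, not jemalloc's actual test harness.

#include <stdio.h>
#include <string.h>

/* Default implementation: junk the trailing bytes exposed by a shrink. */
static void
junk_impl(void *ptr, size_t old_usize, size_t usize)
{
	memset((char *)ptr + usize, 0x5a, old_usize - usize);
}

/*
 * Indirection point: normal code calls through the pointer, and a test can
 * swap in its own hook to observe or suppress the junking.
 */
typedef void (junk_hook_t)(void *, size_t, size_t);
static junk_hook_t *junk_hook = junk_impl;

static void
counting_hook(void *ptr, size_t old_usize, size_t usize)
{
	printf("junking %zu trailing bytes\n", old_usize - usize);
	junk_impl(ptr, old_usize, usize);
}

int
main(void)
{
	char buf[64];

	junk_hook(buf, sizeof(buf), 16);   /* default behaviour */
	junk_hook = counting_hook;         /* test override */
	junk_hook(buf, sizeof(buf), 16);
	return (0);
}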
/*
* Try to resize a large allocation, in order to avoid copying. This will
* always fail if growing an object, and the following run is already in use.
*/
static
bool
arena_ralloc_large
(
void
*
ptr
,
size_t
oldsize
,
size_t
size
,
size_t
extra
,
bool
zero
)
arena_ralloc_large
(
tsdn_t
*
tsdn
,
void
*
ptr
,
size_t
oldsize
,
size_t
u
size
_min
,
size_t
usize_max
,
bool
zero
)
{
size_t
psize
;
arena_chunk_t
*
chunk
;
arena_t
*
arena
;
psize
=
PAGE_CEILING
(
size
+
extra
);
if
(
psize
==
oldsize
)
{
/* Same size class. */
if
(
config_fill
&&
opt_junk
&&
size
<
oldsize
)
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
size
),
0x5a
,
oldsize
-
size
);
}
if
(
oldsize
==
usize_max
)
{
/* Current size class is compatible and maximal. */
return
(
false
);
}
else
{
arena_chunk_t
*
chunk
;
arena_t
*
arena
;
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
ptr
);
arena
=
chunk
->
arena
;
}
if
(
psize
<
oldsize
)
{
/* Fill before shrinking in order avoid a race. */
if
(
config_fill
&&
opt_junk
)
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
size
),
0x5a
,
oldsize
-
size
);
}
arena_ralloc_large_shrink
(
arena
,
chunk
,
ptr
,
oldsize
,
psize
);
return
(
false
);
}
else
{
bool
ret
=
arena_ralloc_large_grow
(
arena
,
chunk
,
ptr
,
oldsize
,
PAGE_CEILING
(
size
),
psize
-
PAGE_CEILING
(
size
),
zero
);
if
(
config_fill
&&
ret
==
false
&&
zero
==
false
&&
opt_zero
)
{
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
ptr
);
arena
=
extent_node_arena_get
(
&
chunk
->
node
);
if
(
oldsize
<
usize_max
)
{
bool
ret
=
arena_ralloc_large_grow
(
tsdn
,
arena
,
chunk
,
ptr
,
oldsize
,
usize_min
,
usize_max
,
zero
);
if
(
config_fill
&&
!
ret
&&
!
zero
)
{
if
(
unlikely
(
opt_junk_alloc
))
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
oldsize
),
JEMALLOC_ALLOC_JUNK
,
isalloc
(
tsdn
,
ptr
,
config_prof
)
-
oldsize
);
}
else
if
(
unlikely
(
opt_zero
))
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
oldsize
),
0
,
size
-
oldsize
);
isalloc
(
tsdn
,
ptr
,
config_prof
)
-
oldsize
);
}
return
(
ret
);
}
return
(
ret
);
}
assert
(
oldsize
>
usize_max
);
/* Fill before shrinking in order avoid a race. */
arena_ralloc_junk_large
(
ptr
,
oldsize
,
usize_max
);
arena_ralloc_large_shrink
(
tsdn
,
arena
,
chunk
,
ptr
,
oldsize
,
usize_max
);
return
(
false
);
}
void
*
arena_ralloc_no_move
(
void
*
ptr
,
size_t
oldsize
,
size_t
size
,
size_t
extra
,
bool
zero
)
bool
arena_ralloc_no_move
(
tsdn_t
*
tsdn
,
void
*
ptr
,
size_t
oldsize
,
size_t
size
,
size_t
extra
,
bool
zero
)
{
size_t
usize_min
,
usize_max
;
/*
* Avoid moving the allocation if the size class can be left the same.
*/
if
(
oldsize
<=
arena_maxclass
)
{
/* Calls with non-zero extra had to clamp extra. */
assert
(
extra
==
0
||
size
+
extra
<=
HUGE_MAXCLASS
);
if
(
unlikely
(
size
>
HUGE_MAXCLASS
))
return
(
true
);
usize_min
=
s2u
(
size
);
usize_max
=
s2u
(
size
+
extra
);
if
(
likely
(
oldsize
<=
large_maxclass
&&
usize_min
<=
large_maxclass
))
{
arena_chunk_t
*
chunk
;
/*
* Avoid moving the allocation if the size class can be left the
* same.
*/
if
(
oldsize
<=
SMALL_MAXCLASS
)
{
assert
(
arena_bin_info
[
SMALL_SIZE2BIN
(
oldsize
)].
reg_size
==
oldsize
);
if
((
size
+
extra
<=
SMALL_MAXCLASS
&&
SMALL_SIZE2BIN
(
size
+
extra
)
==
SMALL_SIZE2BIN
(
oldsize
))
||
(
size
<=
oldsize
&&
size
+
extra
>=
oldsize
))
{
if
(
config_fill
&&
opt_junk
&&
size
<
oldsize
)
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
size
),
0x5a
,
oldsize
-
size
);
}
return
(
ptr
);
}
assert
(
arena_bin_info
[
size2index
(
oldsize
)].
reg_size
==
oldsize
);
if
((
usize_max
>
SMALL_MAXCLASS
||
size2index
(
usize_max
)
!=
size2index
(
oldsize
))
&&
(
size
>
oldsize
||
usize_max
<
oldsize
))
return
(
true
);
}
else
{
assert
(
size
<=
arena_maxclass
);
if
(
size
+
extra
>
SMALL_MAXCLASS
)
{
if
(
arena_ralloc_large
(
ptr
,
oldsize
,
size
,
extra
,
zero
)
==
false
)
return
(
ptr
);
}
if
(
usize_max
<=
SMALL_MAXCLASS
)
return
(
true
);
if
(
arena_ralloc_large
(
tsdn
,
ptr
,
oldsize
,
usize_min
,
usize_max
,
zero
))
return
(
true
);
}
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
ptr
);
arena_decay_tick
(
tsdn
,
extent_node_arena_get
(
&
chunk
->
node
));
return
(
false
);
}
else
{
return
(
huge_ralloc_no_move
(
tsdn
,
ptr
,
oldsize
,
usize_min
,
usize_max
,
zero
));
}
}
/* Reallocation would require a move. */
return
(
NULL
);
static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment == 0)
		return (arena_malloc(tsdn, arena, usize, size2index(usize),
		    zero, tcache, true));
	usize = sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
		return (NULL);
	return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
}
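arena_ralloc() first calls arena_ralloc_no_move() to try to satisfy the new size in place, and only falls back to this move helper (allocate, copy the smaller of the old and new sizes, free the original) when that fails. A minimal standalone sketch of that overall pattern follows; resize_in_place(), my_ralloc(), and the "shrink only" rule are illustrative stand-ins for the real run-resizing logic.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Stand-in for an in-place resize attempt; a real allocator would try to
 * extend or trim the backing run.  Here it only succeeds when shrinking.
 */
static int
resize_in_place(void *ptr, size_t oldsize, size_t newsize)
{
	(void)ptr;
	return ((newsize <= oldsize) ? 0 : -1);
}

static void *
my_ralloc(void *ptr, size_t oldsize, size_t newsize)
{
	void *new_ptr;
	size_t copysize;

	if (resize_in_place(ptr, oldsize, newsize) == 0)
		return (ptr);
	/* Moving path: allocate, copy the smaller of the two sizes, free. */
	new_ptr = malloc(newsize);
	if (new_ptr == NULL)
		return (NULL);
	copysize = (newsize < oldsize) ? newsize : oldsize;
	memcpy(new_ptr, ptr, copysize);
	free(ptr);
	return (new_ptr);
}

int
main(void)
{
	char *p = malloc(16), *q;

	if (p == NULL)
		return (1);
	strcpy(p, "hello");
	q = my_ralloc(p, 16, 64);
	if (q == NULL) {
		free(p);
		return (1);
	}
	printf("%s\n", q);
	free(q);
	return (0);
}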
void
*
arena_ralloc
(
arena_t
*
arena
,
void
*
ptr
,
size_t
oldsize
,
size_t
size
,
size_t
extra
,
size_t
alignment
,
bool
zero
,
bool
try_tcache_alloc
,
bool
try_tcache_dalloc
)
arena_ralloc
(
tsd_t
*
tsd
,
arena_t
*
arena
,
void
*
ptr
,
size_t
oldsize
,
size_t
size
,
size_t
alignment
,
bool
zero
,
tcache_t
*
tcache
)
{
void
*
ret
;
size_t
copy
size
;
size_t
u
size
;
/* Try to avoid moving the allocation. */
ret
=
arena_ralloc_no_move
(
ptr
,
oldsize
,
size
,
extra
,
zero
);
if
(
ret
!=
NULL
)
return
(
ret
);
usize
=
s2u
(
size
);
if
(
unlikely
(
usize
==
0
||
size
>
HUGE_MAXCLASS
))
return
(
NULL
);
/*
* size and oldsize are different enough that we need to move the
* object. In that case, fall back to allocating new space and
* copying.
*/
if
(
alignment
!=
0
)
{
size_t
usize
=
sa2u
(
size
+
extra
,
alignment
);
if
(
usize
==
0
)
return
(
NULL
);
ret
=
ipallocx
(
usize
,
alignment
,
zero
,
try_tcache_alloc
,
arena
);
}
else
ret
=
arena_malloc
(
arena
,
size
+
extra
,
zero
,
try_tcache_alloc
);
if
(
likely
(
usize
<=
large_maxclass
))
{
size_t
copysize
;
if
(
ret
==
NULL
)
{
if
(
extra
==
0
)
return
(
NULL
);
/* Try again, this time without extra. */
if
(
alignment
!=
0
)
{
size_t
usize
=
sa2u
(
size
,
alignment
);
if
(
usize
==
0
)
return
(
NULL
);
ret
=
ipallocx
(
usize
,
alignment
,
zero
,
try_tcache_alloc
,
arena
);
}
else
ret
=
arena_malloc
(
arena
,
size
,
zero
,
try_tcache_alloc
);
/* Try to avoid moving the allocation. */
if
(
!
arena_ralloc_no_move
(
tsd_tsdn
(
tsd
),
ptr
,
oldsize
,
usize
,
0
,
zero
))
return
(
ptr
);
/*
* size and oldsize are different enough that we need to move
* the object. In that case, fall back to allocating new space
* and copying.
*/
ret
=
arena_ralloc_move_helper
(
tsd_tsdn
(
tsd
),
arena
,
usize
,
alignment
,
zero
,
tcache
);
if
(
ret
==
NULL
)
return
(
NULL
);
}
/* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
/*
* Junk/zero-filling were already done by
* ipalloc()/arena_malloc().
*/
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize
=
(
size
<
oldsize
)
?
size
:
oldsize
;
VALGRIND_MAKE_MEM_UNDEFINED
(
ret
,
copysize
);
memcpy
(
ret
,
ptr
,
copysiz
e
);
iqallocx
(
ptr
,
try_tcache_dalloc
);
copysize
=
(
usize
<
oldsize
)
?
usize
:
oldsize
;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED
(
ret
,
copysize
);
memcpy
(
ret
,
ptr
,
copysize
);
isqalloc
(
tsd
,
ptr
,
oldsize
,
tcache
,
true
);
}
else
{
ret
=
huge_ralloc
(
tsd
,
arena
,
ptr
,
oldsize
,
usize
,
alignment
,
zero
,
tcach
e
);
}
return
(
ret
);
}
dss_prec_t
arena_dss_prec_get
(
arena_t
*
arena
)
arena_dss_prec_get
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
dss_prec_t
ret
;
malloc_mutex_lock
(
&
arena
->
lock
);
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
ret
=
arena
->
dss_prec
;
malloc_mutex_unlock
(
&
arena
->
lock
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
return
(
ret
);
}
void
arena_dss_prec_set
(
arena_t
*
arena
,
dss_prec_t
dss_prec
)
bool
arena_dss_prec_set
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
dss_prec_t
dss_prec
)
{
malloc_mutex_lock
(
&
arena
->
lock
);
if
(
!
have_dss
)
return
(
dss_prec
!=
dss_prec_disabled
);
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
arena
->
dss_prec
=
dss_prec
;
malloc_mutex_unlock
(
&
arena
->
lock
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
return
(
false
);
}
void
arena_stats_merge
(
arena_t
*
arena
,
const
char
**
dss
,
size_t
*
nactive
,
size_t
*
ndirty
,
arena_stats_t
*
astats
,
malloc_bin_stats_t
*
bstats
,
malloc_large_stats_t
*
lstats
)
ssize_t
arena_lg_dirty_mult_default_get
(
void
)
{
return
((
ssize_t
)
atomic_read_z
((
size_t
*
)
&
lg_dirty_mult_default
));
}
bool
arena_lg_dirty_mult_default_set
(
ssize_t
lg_dirty_mult
)
{
if
(
opt_purge
!=
purge_mode_ratio
)
return
(
true
);
if
(
!
arena_lg_dirty_mult_valid
(
lg_dirty_mult
))
return
(
true
);
atomic_write_z
((
size_t
*
)
&
lg_dirty_mult_default
,
(
size_t
)
lg_dirty_mult
);
return
(
false
);
}
ssize_t
arena_decay_time_default_get
(
void
)
{
return
((
ssize_t
)
atomic_read_z
((
size_t
*
)
&
decay_time_default
));
}
bool
arena_decay_time_default_set
(
ssize_t
decay_time
)
{
if
(
opt_purge
!=
purge_mode_decay
)
return
(
true
);
if
(
!
arena_decay_time_valid
(
decay_time
))
return
(
true
);
atomic_write_z
((
size_t
*
)
&
decay_time_default
,
(
size_t
)
decay_time
);
return
(
false
);
}
static void
arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena->dss_prec];
	*lg_dirty_mult = arena->lg_dirty_mult;
	*decay_time = arena->decay.time;
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;
}

void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);
	malloc_mutex_unlock(tsdn, &arena->lock);
}
void
arena_stats_merge
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
unsigned
*
nthreads
,
const
char
**
dss
,
ssize_t
*
lg_dirty_mult
,
ssize_t
*
decay_time
,
size_t
*
nactive
,
size_t
*
ndirty
,
arena_stats_t
*
astats
,
malloc_bin_stats_t
*
bstats
,
malloc_large_stats_t
*
lstats
,
malloc_huge_stats_t
*
hstats
)
{
unsigned
i
;
cassert
(
config_stats
);
malloc_mutex_lock
(
tsdn
,
&
arena
->
lock
);
arena_basic_stats_merge_locked
(
arena
,
nthreads
,
dss
,
lg_dirty_mult
,
decay_time
,
nactive
,
ndirty
);
astats
->
mapped
+=
arena
->
stats
.
mapped
;
astats
->
retained
+=
arena
->
stats
.
retained
;
astats
->
npurge
+=
arena
->
stats
.
npurge
;
astats
->
nmadvise
+=
arena
->
stats
.
nmadvise
;
astats
->
purged
+=
arena
->
stats
.
purged
;
astats
->
metadata_mapped
+=
arena
->
stats
.
metadata_mapped
;
astats
->
metadata_allocated
+=
arena_metadata_allocated_get
(
arena
);
astats
->
allocated_large
+=
arena
->
stats
.
allocated_large
;
astats
->
nmalloc_large
+=
arena
->
stats
.
nmalloc_large
;
astats
->
ndalloc_large
+=
arena
->
stats
.
ndalloc_large
;
astats
->
nrequests_large
+=
arena
->
stats
.
nrequests_large
;
astats
->
allocated_huge
+=
arena
->
stats
.
allocated_huge
;
astats
->
nmalloc_huge
+=
arena
->
stats
.
nmalloc_huge
;
astats
->
ndalloc_huge
+=
arena
->
stats
.
ndalloc_huge
;
for
(
i
=
0
;
i
<
nlclasses
;
i
++
)
{
lstats
[
i
].
nmalloc
+=
arena
->
stats
.
lstats
[
i
].
nmalloc
;
...
...
@@ -2073,16 +3477,22 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
lstats
[
i
].
nrequests
+=
arena
->
stats
.
lstats
[
i
].
nrequests
;
lstats
[
i
].
curruns
+=
arena
->
stats
.
lstats
[
i
].
curruns
;
}
malloc_mutex_unlock
(
&
arena
->
lock
);
for
(
i
=
0
;
i
<
nhclasses
;
i
++
)
{
hstats
[
i
].
nmalloc
+=
arena
->
stats
.
hstats
[
i
].
nmalloc
;
hstats
[
i
].
ndalloc
+=
arena
->
stats
.
hstats
[
i
].
ndalloc
;
hstats
[
i
].
curhchunks
+=
arena
->
stats
.
hstats
[
i
].
curhchunks
;
}
malloc_mutex_unlock
(
tsdn
,
&
arena
->
lock
);
for
(
i
=
0
;
i
<
NBINS
;
i
++
)
{
arena_bin_t
*
bin
=
&
arena
->
bins
[
i
];
malloc_mutex_lock
(
&
bin
->
lock
);
bstats
[
i
].
allocated
+=
bin
->
stats
.
allocated
;
malloc_mutex_lock
(
tsdn
,
&
bin
->
lock
);
bstats
[
i
].
nmalloc
+=
bin
->
stats
.
nmalloc
;
bstats
[
i
].
ndalloc
+=
bin
->
stats
.
ndalloc
;
bstats
[
i
].
nrequests
+=
bin
->
stats
.
nrequests
;
bstats
[
i
].
curregs
+=
bin
->
stats
.
curregs
;
if
(
config_tcache
)
{
bstats
[
i
].
nfills
+=
bin
->
stats
.
nfills
;
bstats
[
i
].
nflushes
+=
bin
->
stats
.
nflushes
;
...
...
@@ -2090,31 +3500,74 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
bstats
[
i
].
nruns
+=
bin
->
stats
.
nruns
;
bstats
[
i
].
reruns
+=
bin
->
stats
.
reruns
;
bstats
[
i
].
curruns
+=
bin
->
stats
.
curruns
;
malloc_mutex_unlock
(
&
bin
->
lock
);
malloc_mutex_unlock
(
tsdn
,
&
bin
->
lock
);
}
}
unsigned
arena_nthreads_get(arena_t *arena, bool internal)
{

	return (atomic_read_u(&arena->nthreads[internal]));
}

void
arena_nthreads_inc(arena_t *arena, bool internal)
{

	atomic_add_u(&arena->nthreads[internal], 1);
}

void
arena_nthreads_dec(arena_t *arena, bool internal)
{

	atomic_sub_u(&arena->nthreads[internal], 1);
}

size_t
arena_extent_sn_next(arena_t *arena)
{

	return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
}
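arena_extent_sn_next() hands out a unique, monotonically increasing serial number per extent with a single atomic add and no lock. The standalone sketch below shows the same idea with C11 atomics; note that atomic_fetch_add() returns the pre-increment value, whereas jemalloc's atomic_add_z() returns the post-increment value, which is why the code above subtracts 1.

#include <stdatomic.h>
#include <stdio.h>

/* Counter shared by all callers in this sketch; zero-initialized. */
static atomic_size_t sn_next;

/* Each call returns a distinct, monotonically increasing serial number. */
static size_t
sn_alloc(void)
{
	return (atomic_fetch_add(&sn_next, 1));
}

int
main(void)
{
	size_t a = sn_alloc(), b = sn_alloc(), c = sn_alloc();

	printf("%zu %zu %zu\n", a, b, c);
	return (0);
}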
arena_t
*
arena_new
(
tsdn_t
*
tsdn
,
unsigned
ind
)
{
arena_t
*
arena
;
unsigned
i
;
arena_bin_t
*
bin
;
arena
->
ind
=
ind
;
arena
->
nthreads
=
0
;
/*
* Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
* because there is no way to clean up if base_alloc() OOMs.
*/
if
(
config_stats
)
{
arena
=
(
arena_t
*
)
base_alloc
(
tsdn
,
CACHELINE_CEILING
(
sizeof
(
arena_t
))
+
QUANTUM_CEILING
((
nlclasses
*
sizeof
(
malloc_large_stats_t
)))
+
(
nhclasses
*
sizeof
(
malloc_huge_stats_t
)));
}
else
arena
=
(
arena_t
*
)
base_alloc
(
tsdn
,
sizeof
(
arena_t
));
if
(
arena
==
NULL
)
return
(
NULL
);
if
(
malloc_mutex_init
(
&
arena
->
lock
))
return
(
true
);
arena
->
ind
=
ind
;
arena
->
nthreads
[
0
]
=
arena
->
nthreads
[
1
]
=
0
;
if
(
malloc_mutex_init
(
&
arena
->
lock
,
"arena"
,
WITNESS_RANK_ARENA
))
return
(
NULL
);
if
(
config_stats
)
{
memset
(
&
arena
->
stats
,
0
,
sizeof
(
arena_stats_t
));
arena
->
stats
.
lstats
=
(
malloc_large_stats_t
*
)
base_alloc
(
nlclasses
*
sizeof
(
malloc_large_stats_t
));
if
(
arena
->
stats
.
lstats
==
NULL
)
return
(
true
);
arena
->
stats
.
lstats
=
(
malloc_large_stats_t
*
)((
uintptr_t
)
arena
+
CACHELINE_CEILING
(
sizeof
(
arena_t
)));
memset
(
arena
->
stats
.
lstats
,
0
,
nlclasses
*
sizeof
(
malloc_large_stats_t
));
arena
->
stats
.
hstats
=
(
malloc_huge_stats_t
*
)((
uintptr_t
)
arena
+
CACHELINE_CEILING
(
sizeof
(
arena_t
))
+
QUANTUM_CEILING
(
nlclasses
*
sizeof
(
malloc_large_stats_t
)));
memset
(
arena
->
stats
.
hstats
,
0
,
nhclasses
*
sizeof
(
malloc_huge_stats_t
));
if
(
config_tcache
)
ql_new
(
&
arena
->
tcache_ql
);
}
...
...
@@ -2122,56 +3575,89 @@ arena_new(arena_t *arena, unsigned ind)
if
(
config_prof
)
arena
->
prof_accumbytes
=
0
;
if
(
config_cache_oblivious
)
{
/*
* A nondeterministic seed based on the address of arena reduces
* the likelihood of lockstep non-uniform cache index
* utilization among identical concurrent processes, but at the
* cost of test repeatability. For debug builds, instead use a
* deterministic seed.
*/
arena
->
offset_state
=
config_debug
?
ind
:
(
size_t
)(
uintptr_t
)
arena
;
}
arena
->
dss_prec
=
chunk_dss_prec_get
();
/* Initialize chunks. */
arena_chunk_dirty_new
(
&
arena
->
chunks_dirty
);
ql_new
(
&
arena
->
achunks
);
arena
->
extent_sn_next
=
0
;
arena
->
spare
=
NULL
;
arena
->
lg_dirty_mult
=
arena_lg_dirty_mult_default_get
();
arena
->
purging
=
false
;
arena
->
nactive
=
0
;
arena
->
ndirty
=
0
;
arena
->
npurgatory
=
0
;
arena_avail_tree_new
(
&
arena
->
runs_avail
);
for
(
i
=
0
;
i
<
NPSIZES
;
i
++
)
arena_run_heap_new
(
&
arena
->
runs_avail
[
i
]);
qr_new
(
&
arena
->
runs_dirty
,
rd_link
);
qr_new
(
&
arena
->
chunks_cache
,
cc_link
);
if
(
opt_purge
==
purge_mode_decay
)
arena_decay_init
(
arena
,
arena_decay_time_default_get
());
ql_new
(
&
arena
->
huge
);
if
(
malloc_mutex_init
(
&
arena
->
huge_mtx
,
"arena_huge"
,
WITNESS_RANK_ARENA_HUGE
))
return
(
NULL
);
extent_tree_szsnad_new
(
&
arena
->
chunks_szsnad_cached
);
extent_tree_ad_new
(
&
arena
->
chunks_ad_cached
);
extent_tree_szsnad_new
(
&
arena
->
chunks_szsnad_retained
);
extent_tree_ad_new
(
&
arena
->
chunks_ad_retained
);
if
(
malloc_mutex_init
(
&
arena
->
chunks_mtx
,
"arena_chunks"
,
WITNESS_RANK_ARENA_CHUNKS
))
return
(
NULL
);
ql_new
(
&
arena
->
node_cache
);
if
(
malloc_mutex_init
(
&
arena
->
node_cache_mtx
,
"arena_node_cache"
,
WITNESS_RANK_ARENA_NODE_CACHE
))
return
(
NULL
);
arena
->
chunk_hooks
=
chunk_hooks_default
;
/* Initialize bins. */
for
(
i
=
0
;
i
<
NBINS
;
i
++
)
{
bin
=
&
arena
->
bins
[
i
];
if
(
malloc_mutex_init
(
&
bin
->
lock
))
return
(
true
);
arena_bin_t
*
bin
=
&
arena
->
bins
[
i
];
if
(
malloc_mutex_init
(
&
bin
->
lock
,
"arena_bin"
,
WITNESS_RANK_ARENA_BIN
))
return
(
NULL
);
bin
->
runcur
=
NULL
;
arena_run_
tree
_new
(
&
bin
->
runs
);
arena_run_
heap
_new
(
&
bin
->
runs
);
if
(
config_stats
)
memset
(
&
bin
->
stats
,
0
,
sizeof
(
malloc_bin_stats_t
));
}
return
(
false
);
return
(
arena
);
}
/*
* Calculate bin_info->run_size such that it meets the following constraints:
*
* *) bin_info->run_size >= min_run_size
* *) bin_info->run_size <= arena_maxclass
* *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
* *) bin_info->run_size <= arena_maxrun
* *) bin_info->nregs <= RUN_MAXREGS
*
* bin_info->nregs
, bin_info->bitmap_offset,
and bin_info->reg0_offset are also
*
calculated here, since
these settings are all interdependent.
* bin_info->nregs and bin_info->reg0_offset are also
calculated here, since
* these settings are all interdependent.
*/
static
size_t
bin_info_run_size_calc
(
arena_bin_info_t
*
bin_info
,
size_t
min_run_size
)
static
void
bin_info_run_size_calc
(
arena_bin_info_t
*
bin_info
)
{
size_t
pad_size
;
size_t
try_run_size
,
good_run_size
;
uint32_t
try_nregs
,
good_nregs
;
uint32_t
try_hdr_size
,
good_hdr_size
;
uint32_t
try_bitmap_offset
,
good_bitmap_offset
;
uint32_t
try_ctx0_offset
,
good_ctx0_offset
;
uint32_t
try_redzone0_offset
,
good_redzone0_offset
;
assert
(
min_run_size
>=
PAGE
);
assert
(
min_run_size
<=
arena_maxclass
);
size_t
try_run_size
,
perfect_run_size
,
actual_run_size
;
uint32_t
try_nregs
,
perfect_nregs
,
actual_nregs
;
/*
* Determine redzone size based on minimum alignment and minimum
...
...
@@ -2180,8 +3666,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
* minimum alignment; without the padding, each redzone would have to
* be twice as large in order to maintain alignment.
*/
if
(
config_fill
&&
opt_redzone
)
{
size_t
align_min
=
ZU
(
1
)
<<
(
ffs
(
bin_info
->
reg_size
)
-
1
);
if
(
config_fill
&&
unlikely
(
opt_redzone
)
)
{
size_t
align_min
=
ZU
(
1
)
<<
(
ffs
_zu
(
bin_info
->
reg_size
)
-
1
);
if
(
align_min
<=
REDZONE_MINSIZE
)
{
bin_info
->
redzone_size
=
REDZONE_MINSIZE
;
pad_size
=
0
;
...
...
@@ -2197,128 +3683,86 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
(
bin_info
->
redzone_size
<<
1
);
/*
* Calculate known-valid settings before entering the run_size
* expansion loop, so that the first part of the loop always copies
* valid settings.
*
* The do..while loop iteratively reduces the number of regions until
* the run header and the regions no longer overlap. A closed formula
* would be quite messy, since there is an interdependency between the
* header's mask length and the number of regions.
* Compute run size under ideal conditions (no redzones, no limit on run
* size).
*/
try_run_size
=
min_run_size
;
try_nregs
=
((
try_run_size
-
sizeof
(
arena_run_t
))
/
bin_info
->
reg_interval
)
+
1
;
/* Counter-act try_nregs-- in loop. */
if
(
try_nregs
>
RUN_MAXREGS
)
{
try_nregs
=
RUN_MAXREGS
+
1
;
/* Counter-act try_nregs-- in loop. */
}
try_run_size
=
PAGE
;
try_nregs
=
(
uint32_t
)(
try_run_size
/
bin_info
->
reg_size
);
do
{
try_nregs
--
;
try_hdr_size
=
sizeof
(
arena_run_t
);
/* Pad to a long boundary. */
try_hdr_size
=
LONG_CEILING
(
try_hdr_size
);
try_bitmap_offset
=
try_hdr_size
;
/* Add space for bitmap. */
try_hdr_size
+=
bitmap_size
(
try_nregs
);
if
(
config_prof
&&
opt_prof
&&
prof_promote
==
false
)
{
/* Pad to a quantum boundary. */
try_hdr_size
=
QUANTUM_CEILING
(
try_hdr_size
);
try_ctx0_offset
=
try_hdr_size
;
/* Add space for one (prof_ctx_t *) per region. */
try_hdr_size
+=
try_nregs
*
sizeof
(
prof_ctx_t
*
);
}
else
try_ctx0_offset
=
0
;
try_redzone0_offset
=
try_run_size
-
(
try_nregs
*
bin_info
->
reg_interval
)
-
pad_size
;
}
while
(
try_hdr_size
>
try_redzone0_offset
);
/* run_size expansion loop. */
do
{
/*
* Copy valid settings before trying more aggressive settings.
*/
good_run_size
=
try_run_size
;
good_nregs
=
try_nregs
;
good_hdr_size
=
try_hdr_size
;
good_bitmap_offset
=
try_bitmap_offset
;
good_ctx0_offset
=
try_ctx0_offset
;
good_redzone0_offset
=
try_redzone0_offset
;
/* Try more aggressive settings. */
perfect_run_size
=
try_run_size
;
perfect_nregs
=
try_nregs
;
try_run_size
+=
PAGE
;
try_nregs
=
((
try_run_size
-
sizeof
(
arena_run_t
)
-
pad_size
)
/
bin_info
->
reg_interval
)
+
1
;
/* Counter-act try_nregs-- in loop. */
if
(
try_nregs
>
RUN_MAXREGS
)
{
try_nregs
=
RUN_MAXREGS
+
1
;
/* Counter-act try_nregs-- in loop. */
}
do
{
try_nregs
--
;
try_hdr_size
=
sizeof
(
arena_run_t
);
/* Pad to a long boundary. */
try_hdr_size
=
LONG_CEILING
(
try_hdr_size
);
try_bitmap_offset
=
try_hdr_size
;
/* Add space for bitmap. */
try_hdr_size
+=
bitmap_size
(
try_nregs
);
if
(
config_prof
&&
opt_prof
&&
prof_promote
==
false
)
{
/* Pad to a quantum boundary. */
try_hdr_size
=
QUANTUM_CEILING
(
try_hdr_size
);
try_ctx0_offset
=
try_hdr_size
;
/*
* Add space for one (prof_ctx_t *) per region.
*/
try_hdr_size
+=
try_nregs
*
sizeof
(
prof_ctx_t
*
);
}
try_redzone0_offset
=
try_run_size
-
(
try_nregs
*
bin_info
->
reg_interval
)
-
pad_size
;
}
while
(
try_hdr_size
>
try_redzone0_offset
);
}
while
(
try_run_size
<=
arena_maxclass
&&
try_run_size
<=
arena_maxclass
&&
RUN_MAX_OVRHD
*
(
bin_info
->
reg_interval
<<
3
)
>
RUN_MAX_OVRHD_RELAX
&&
(
try_redzone0_offset
<<
RUN_BFP
)
>
RUN_MAX_OVRHD
*
try_run_size
&&
try_nregs
<
RUN_MAXREGS
);
assert
(
good_hdr_size
<=
good_redzone0_offset
);
try_nregs
=
(
uint32_t
)(
try_run_size
/
bin_info
->
reg_size
);
}
while
(
perfect_run_size
!=
perfect_nregs
*
bin_info
->
reg_size
);
assert
(
perfect_nregs
<=
RUN_MAXREGS
);
actual_run_size
=
perfect_run_size
;
actual_nregs
=
(
uint32_t
)((
actual_run_size
-
pad_size
)
/
bin_info
->
reg_interval
);
/*
* Redzones can require enough padding that not even a single region can
* fit within the number of pages that would normally be dedicated to a
* run for this size class. Increase the run size until at least one
* region fits.
*/
while
(
actual_nregs
==
0
)
{
assert
(
config_fill
&&
unlikely
(
opt_redzone
));
actual_run_size
+=
PAGE
;
actual_nregs
=
(
uint32_t
)((
actual_run_size
-
pad_size
)
/
bin_info
->
reg_interval
);
}
/*
* Make sure that the run will fit within an arena chunk.
*/
while
(
actual_run_size
>
arena_maxrun
)
{
actual_run_size
-=
PAGE
;
actual_nregs
=
(
uint32_t
)((
actual_run_size
-
pad_size
)
/
bin_info
->
reg_interval
);
}
assert
(
actual_nregs
>
0
);
assert
(
actual_run_size
==
s2u
(
actual_run_size
));
/* Copy final settings. */
bin_info
->
run_size
=
good_run_size
;
bin_info
->
nregs
=
good_nregs
;
bin_info
->
bitmap_offset
=
good_bitmap_offset
;
bin_info
->
ctx0_offset
=
good_ctx0_offset
;
bin_info
->
reg0_offset
=
good_redzone0_offset
+
bin_info
->
redzone_size
;
bin_info
->
run_size
=
actual_run_size
;
bin_info
->
nregs
=
actual_nregs
;
bin_info
->
reg0_offset
=
(
uint32_t
)(
actual_run_size
-
(
actual_nregs
*
bin_info
->
reg_interval
)
-
pad_size
+
bin_info
->
redzone_size
);
assert
(
bin_info
->
reg0_offset
-
bin_info
->
redzone_size
+
(
bin_info
->
nregs
*
bin_info
->
reg_interval
)
+
pad_size
==
bin_info
->
run_size
);
return
(
good_run_size
);
}
static
void
bin_info_init
(
void
)
{
arena_bin_info_t
*
bin_info
;
size_t
prev_run_size
=
PAGE
;
#define
SIZE_CLASS(bin, delta
, size)
\
bin_info = &arena_bin_info[
b
in]; \
#define
BIN_INFO_INIT_bin_yes(index
, size) \
bin_info = &arena_bin_info[in
dex
]; \
bin_info->reg_size = size; \
prev_run_size =
bin_info_run_size_calc(bin_info
, prev_run_size);
\
bin_info_run_size_calc(bin_info
);
\
bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define BIN_INFO_INIT_bin_no(index, size)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
SIZE_CLASSES
#undef SIZE_CLASS
#undef BIN_INFO_INIT_bin_yes
#undef BIN_INFO_INIT_bin_no
#undef SC
}
void
arena_boot
(
void
)
{
size_t
header_size
;
unsigned
i
;
arena_lg_dirty_mult_default_set
(
opt_lg_dirty_mult
);
arena_decay_time_default_set
(
opt_decay_time
);
/*
* Compute the header size such that it is large enough to contain the
* page map. The page map is biased to omit entries for the header
...
...
@@ -2333,44 +3777,87 @@ arena_boot(void)
*/
map_bias
=
0
;
for
(
i
=
0
;
i
<
3
;
i
++
)
{
header_size
=
offsetof
(
arena_chunk_t
,
map
)
+
(
sizeof
(
arena_chunk_map_
t
)
*
(
chunk_npages
-
map_bias
));
map_bias
=
(
header_size
>>
LG_PAGE
)
+
(
(
header_size
&
PAGE_MASK
)
!=
0
)
;
size_t
header_size
=
offsetof
(
arena_chunk_t
,
map
_bits
)
+
(
(
sizeof
(
arena_chunk_map_
bits_t
)
+
sizeof
(
arena_chunk_map_misc_t
)
)
*
(
chunk_npages
-
map_bias
));
map_bias
=
(
header_size
+
PAGE_MASK
)
>>
LG_PAGE
;
}
assert
(
map_bias
>
0
);
arena_maxclass
=
chunksize
-
(
map_bias
<<
LG_PAGE
);
map_misc_offset
=
offsetof
(
arena_chunk_t
,
map_bits
)
+
sizeof
(
arena_chunk_map_bits_t
)
*
(
chunk_npages
-
map_bias
);
arena_maxrun
=
chunksize
-
(
map_bias
<<
LG_PAGE
);
assert
(
arena_maxrun
>
0
);
large_maxclass
=
index2size
(
size2index
(
chunksize
)
-
1
);
if
(
large_maxclass
>
arena_maxrun
)
{
/*
* For small chunk sizes it's possible for there to be fewer
* non-header pages available than are necessary to serve the
* size classes just below chunksize.
*/
large_maxclass
=
arena_maxrun
;
}
assert
(
large_maxclass
>
0
);
nlclasses
=
size2index
(
large_maxclass
)
-
size2index
(
SMALL_MAXCLASS
);
nhclasses
=
NSIZES
-
nlclasses
-
NBINS
;
bin_info_init
();
}
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->lock);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
	malloc_mutex_prefork(tsdn, &arena->huge_mtx);
}

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->lock);
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->lock);
}
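The prefork/postfork functions above implement the standard fork-safety pattern: acquire every arena lock before fork() and release them in both the parent and the child, so the child never inherits a mutex held by a thread that no longer exists. A minimal standalone sketch of that pattern with pthread_atfork() follows; it uses plain pthread mutexes and an unlock in the child, whereas jemalloc's postfork_child re-initializes its own malloc_mutex_t objects instead.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Acquire every lock before fork() so no lock is mid-critical-section. */
static void
prefork(void)
{
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
}

/* Release in reverse order in the parent... */
static void
postfork_parent(void)
{
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

/* ...and release the child's copies so it can allocate immediately. */
static void
postfork_child(void)
{
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

int
main(void)
{
	pthread_atfork(prefork, postfork_parent, postfork_child);
	if (fork() == 0) {
		printf("child can lock safely\n");
		_exit(0);
	}
	printf("parent continues\n");
	return (0);
}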
deps/jemalloc/src/base.c
...
...
@@ -5,135 +5,183 @@
/* Data. */
static
malloc_mutex_t
base_mtx
;
/*
* Current pages that are being used for internal memory allocations. These
* pages are carved up in cacheline-size quanta, so that there is no chance of
* false cache line sharing.
*/
static
void
*
base_pages
;
static
void
*
base_next_addr
;
static
void
*
base_past_addr
;
/* Addr immediately past base_pages. */
static
size_t
base_extent_sn_next
;
static
extent_tree_t
base_avail_szsnad
;
static
extent_node_t
*
base_nodes
;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static
bool
base_pages_alloc
(
size_t
minsize
);
static
size_t
base_allocated
;
static
size_t
base_resident
;
static
size_t
base_mapped
;
/******************************************************************************/
static
bool
base_
pages_alloc
(
size_t
minsize
)
static
extent_node_t
*
base_
node_try_alloc
(
tsdn_t
*
tsdn
)
{
size_t
csize
;
bool
zero
;
extent_node_t
*
node
;
assert
(
minsize
!=
0
);
csize
=
CHUNK_CEILING
(
minsize
);
zero
=
false
;
base_pages
=
chunk_alloc
(
csize
,
chunksize
,
true
,
&
zero
,
chunk_dss_prec_get
());
if
(
base_pages
==
NULL
)
return
(
true
);
base_next_addr
=
base_pages
;
base_past_addr
=
(
void
*
)((
uintptr_t
)
base_pages
+
csize
);
malloc_mutex_assert_owner
(
tsdn
,
&
base_mtx
);
return
(
false
);
if
(
base_nodes
==
NULL
)
return
(
NULL
);
node
=
base_nodes
;
base_nodes
=
*
(
extent_node_t
**
)
node
;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED
(
node
,
sizeof
(
extent_node_t
));
return
(
node
);
}
void
*
base_
alloc
(
size_t
siz
e
)
static
void
base_
node_dalloc
(
tsdn_t
*
tsdn
,
extent_node_t
*
nod
e
)
{
void
*
ret
;
size_t
csize
;
/* Round size up to nearest multiple of the cacheline size. */
csize
=
CACHELINE_CEILING
(
size
);
malloc_mutex_lock
(
&
base_mtx
);
/* Make sure there's enough space for the allocation. */
if
((
uintptr_t
)
base_next_addr
+
csize
>
(
uintptr_t
)
base_past_addr
)
{
if
(
base_pages_alloc
(
csize
))
{
malloc_mutex_unlock
(
&
base_mtx
);
return
(
NULL
);
}
}
/* Allocate. */
ret
=
base_next_addr
;
base_next_addr
=
(
void
*
)((
uintptr_t
)
base_next_addr
+
csize
);
malloc_mutex_unlock
(
&
base_mtx
);
malloc_mutex_assert_owner
(
tsdn
,
&
base_mtx
);
return
(
ret
);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED
(
node
,
sizeof
(
extent_node_t
));
*
(
extent_node_t
**
)
node
=
base_nodes
;
base_nodes
=
node
;
}
void
*
base_
calloc
(
size_t
numbe
r
,
size_t
size
)
static
void
base_
extent_node_init
(
extent_node_t
*
node
,
void
*
add
r
,
size_t
size
)
{
void
*
ret
=
base_alloc
(
number
*
size
)
;
size_t
sn
=
atomic_add_z
(
&
base_extent_sn_next
,
1
)
-
1
;
if
(
ret
!=
NULL
)
memset
(
ret
,
0
,
number
*
size
);
extent_node_init
(
node
,
NULL
,
addr
,
size
,
sn
,
true
,
true
);
}
return
(
ret
);
static
extent_node_t
*
base_chunk_alloc
(
tsdn_t
*
tsdn
,
size_t
minsize
)
{
extent_node_t
*
node
;
size_t
csize
,
nsize
;
void
*
addr
;
malloc_mutex_assert_owner
(
tsdn
,
&
base_mtx
);
assert
(
minsize
!=
0
);
node
=
base_node_try_alloc
(
tsdn
);
/* Allocate enough space to also carve a node out if necessary. */
nsize
=
(
node
==
NULL
)
?
CACHELINE_CEILING
(
sizeof
(
extent_node_t
))
:
0
;
csize
=
CHUNK_CEILING
(
minsize
+
nsize
);
addr
=
chunk_alloc_base
(
csize
);
if
(
addr
==
NULL
)
{
if
(
node
!=
NULL
)
base_node_dalloc
(
tsdn
,
node
);
return
(
NULL
);
}
base_mapped
+=
csize
;
if
(
node
==
NULL
)
{
node
=
(
extent_node_t
*
)
addr
;
addr
=
(
void
*
)((
uintptr_t
)
addr
+
nsize
);
csize
-=
nsize
;
if
(
config_stats
)
{
base_allocated
+=
nsize
;
base_resident
+=
PAGE_CEILING
(
nsize
);
}
}
base_extent_node_init
(
node
,
addr
,
csize
);
return
(
node
);
}
extent_node_t
*
base_node_alloc
(
void
)
/*
* base_alloc() guarantees demand-zeroed memory, in order to make multi-page
* sparse data structures such as radix tree nodes efficient with respect to
* physical memory usage.
*/
void
*
base_alloc
(
tsdn_t
*
tsdn
,
size_t
size
)
{
extent_node_t
*
ret
;
void
*
ret
;
size_t
csize
,
usize
;
extent_node_t
*
node
;
extent_node_t
key
;
/*
* Round size up to nearest multiple of the cacheline size, so that
* there is no chance of false cache line sharing.
*/
csize
=
CACHELINE_CEILING
(
size
);
malloc_mutex_lock
(
&
base_mtx
);
if
(
base_nodes
!=
NULL
)
{
ret
=
base_nodes
;
base_nodes
=
*
(
extent_node_t
**
)
ret
;
malloc_mutex_unlock
(
&
base_mtx
);
usize
=
s2u
(
csize
);
extent_node_init
(
&
key
,
NULL
,
NULL
,
usize
,
0
,
false
,
false
);
malloc_mutex_lock
(
tsdn
,
&
base_mtx
);
node
=
extent_tree_szsnad_nsearch
(
&
base_avail_szsnad
,
&
key
);
if
(
node
!=
NULL
)
{
/* Use existing space. */
extent_tree_szsnad_remove
(
&
base_avail_szsnad
,
node
);
}
else
{
malloc_mutex_unlock
(
&
base_mtx
);
ret
=
(
extent_node_t
*
)
base_alloc
(
sizeof
(
extent_node_t
));
/* Try to allocate more space. */
node
=
base_chunk_alloc
(
tsdn
,
csize
);
}
if
(
node
==
NULL
)
{
ret
=
NULL
;
goto
label_return
;
}
ret
=
extent_node_addr_get
(
node
);
if
(
extent_node_size_get
(
node
)
>
csize
)
{
extent_node_addr_set
(
node
,
(
void
*
)((
uintptr_t
)
ret
+
csize
));
extent_node_size_set
(
node
,
extent_node_size_get
(
node
)
-
csize
);
extent_tree_szsnad_insert
(
&
base_avail_szsnad
,
node
);
}
else
base_node_dalloc
(
tsdn
,
node
);
if
(
config_stats
)
{
base_allocated
+=
csize
;
/*
* Add one PAGE to base_resident for every page boundary that is
* crossed by the new allocation.
*/
base_resident
+=
PAGE_CEILING
((
uintptr_t
)
ret
+
csize
)
-
PAGE_CEILING
((
uintptr_t
)
ret
);
}
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED
(
ret
,
csize
);
label_return:
malloc_mutex_unlock
(
tsdn
,
&
base_mtx
);
return
(
ret
);
}
void
base_node_dealloc
(
extent_node_t
*
node
)
base_stats_get
(
tsdn_t
*
tsdn
,
size_t
*
allocated
,
size_t
*
resident
,
size_t
*
mapped
)
{
malloc_mutex_lock
(
&
base_mtx
);
*
(
extent_node_t
**
)
node
=
base_nodes
;
base_nodes
=
node
;
malloc_mutex_unlock
(
&
base_mtx
);
malloc_mutex_lock
(
tsdn
,
&
base_mtx
);
assert
(
base_allocated
<=
base_resident
);
assert
(
base_resident
<=
base_mapped
);
*
allocated
=
base_allocated
;
*
resident
=
base_resident
;
*
mapped
=
base_mapped
;
malloc_mutex_unlock
(
tsdn
,
&
base_mtx
);
}
bool
base_boot
(
void
)
{
base_nodes
=
NULL
;
if
(
malloc_mutex_init
(
&
base_mtx
))
if
(
malloc_mutex_init
(
&
base_mtx
,
"base"
,
WITNESS_RANK_BASE
))
return
(
true
);
base_extent_sn_next
=
0
;
extent_tree_szsnad_new
(
&
base_avail_szsnad
);
base_nodes
=
NULL
;
return
(
false
);
}
void
base_prefork
(
void
)
base_prefork
(
tsdn_t
*
tsdn
)
{
malloc_mutex_prefork
(
&
base_mtx
);
malloc_mutex_prefork
(
tsdn
,
&
base_mtx
);
}
void
base_postfork_parent
(
void
)
base_postfork_parent
(
tsdn_t
*
tsdn
)
{
malloc_mutex_postfork_parent
(
&
base_mtx
);
malloc_mutex_postfork_parent
(
tsdn
,
&
base_mtx
);
}
void
base_postfork_child
(
void
)
base_postfork_child
(
tsdn_t
*
tsdn
)
{
malloc_mutex_postfork_child
(
&
base_mtx
);
malloc_mutex_postfork_child
(
tsdn
,
&
base_mtx
);
}
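base_node_try_alloc() and base_node_dalloc() above recycle spare extent_node_t objects through an intrusive free list: the link pointer is stored in the first bytes of the freed node itself, so no side allocation is needed. A minimal stand-alone sketch of that technique with generic names (not jemalloc API):

#include <stddef.h>

/* Any object at least as large as a pointer can be chained this way. */
static void *free_list = NULL;

static void
free_list_push(void *obj)
{
    *(void **)obj = free_list;  /* reuse the object's own storage as the link */
    free_list = obj;
}

static void *
free_list_pop(void)
{
    void *obj = free_list;

    if (obj != NULL)
        free_list = *(void **)obj;
    return (obj);
}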
deps/jemalloc/src/bitmap.c  View file @ 1f72ec7d

#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

#ifdef USE_TREE

void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
...
@@ -31,33 +20,25 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
     * that requires only one group.
     */
    binfo->levels[0].group_offset = 0;
    group_count = BITMAP_BITS2GROUPS(nbits);
    for (i = 1; group_count > 1; i++) {
        assert(i < BITMAP_MAX_LEVELS);
        binfo->levels[i].group_offset =
            binfo->levels[i-1].group_offset + group_count;
        group_count = BITMAP_BITS2GROUPS(group_count);
    }
    binfo->levels[i].group_offset =
        binfo->levels[i-1].group_offset + group_count;
    assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
    binfo->nlevels = i;
    binfo->nbits = nbits;
}

static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo)
{
    return (binfo->levels[binfo->nlevels].group_offset);
}

void
...
@@ -73,8 +54,7 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
     * correspond to the first logical bit in the group, so extra bits
     * are the most significant bits of the last group.
     */
    memset(bitmap, 0xffU, bitmap_size(binfo));
    extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
        & BITMAP_GROUP_NBITS_MASK;
    if (extra != 0)
...
@@ -88,3 +68,44 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
            bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
    }
}

#else /* USE_TREE */

void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
    assert(nbits > 0);
    assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));

    binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
    binfo->nbits = nbits;
}

static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo)
{
    return (binfo->ngroups);
}

void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
    size_t extra;

    memset(bitmap, 0xffU, bitmap_size(binfo));
    extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
        & BITMAP_GROUP_NBITS_MASK;
    if (extra != 0)
        bitmap[binfo->ngroups - 1] >>= extra;
}

#endif /* USE_TREE */

size_t
bitmap_size(const bitmap_info_t *binfo)
{
    return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
}
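BITMAP_BITS2GROUPS() used above is a ceiling division of the bit count by the group width (one machine word): shift down by the log of the group size and add one if any remainder bits are set. A small sketch of the same computation; the 64-bit group width is an assumption for the example, not taken from the diff:

#include <stddef.h>

#define LG_GROUP_NBITS  6                   /* assume 64 bits per group */
#define GROUP_NBITS     (1U << LG_GROUP_NBITS)
#define GROUP_MASK      (GROUP_NBITS - 1)

/* Number of groups needed to hold nbits bits, i.e. ceil(nbits / 64). */
static size_t
bits2groups(size_t nbits)
{
    return ((nbits >> LG_GROUP_NBITS) + !!(nbits & GROUP_MASK));
}
/* e.g. bits2groups(64) == 1, bits2groups(65) == 2 */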
deps/jemalloc/src/chunk.c  View file @ 1f72ec7d
...
@@ -5,139 +5,337 @@
/* Data. */

const char	*opt_dss = DSS_DEFAULT;
size_t		opt_lg_chunk = 0;

/* Used exclusively for gdump triggering. */
static size_t	curchunks;
static size_t	highchunks;

rtree_t		chunks_rtree;

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;

static void	*chunk_alloc_default(void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
static bool	chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind);
static bool	chunk_commit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_decommit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_purge_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_split_default(void *chunk, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind);
static bool	chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
    size_t size_b, bool committed, unsigned arena_ind);

const chunk_hooks_t	chunk_hooks_default = {
    chunk_alloc_default,
    chunk_dalloc_default,
    chunk_commit_default,
    chunk_decommit_default,
    chunk_purge_default,
    chunk_split_default,
    chunk_merge_default
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	chunk_record(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
    bool zeroed, bool committed);

/******************************************************************************/

static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{
    return (arena->chunk_hooks);
}

chunk_hooks_t
chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
{
    chunk_hooks_t chunk_hooks;

    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
    chunk_hooks = chunk_hooks_get_locked(arena);
    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

    return (chunk_hooks);
}

chunk_hooks_t
chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
    chunk_hooks_t old_chunk_hooks;

    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
    old_chunk_hooks = arena->chunk_hooks;
    /*
     * Copy each field atomically so that it is impossible for readers to
     * see partially updated pointers.  There are places where readers only
     * need one hook function pointer (therefore no need to copy the
     * entirety of arena->chunk_hooks), and stale reads do not affect
     * correctness, so they perform unlocked reads.
     */
#define ATOMIC_COPY_HOOK(n) do {					\
	union {								\
		chunk_##n##_t	**n;					\
		void		**v;					\
	} u;								\
	u.n = &arena->chunk_hooks.n;					\
	atomic_write_p(u.v, chunk_hooks->n);				\
} while (0)
    ATOMIC_COPY_HOOK(alloc);
    ATOMIC_COPY_HOOK(dalloc);
    ATOMIC_COPY_HOOK(commit);
    ATOMIC_COPY_HOOK(decommit);
    ATOMIC_COPY_HOOK(purge);
    ATOMIC_COPY_HOOK(split);
    ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

    return (old_chunk_hooks);
}

static void
chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool locked)
{
    static const chunk_hooks_t uninitialized_hooks = CHUNK_HOOKS_INITIALIZER;

    if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) == 0) {
        *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
            chunk_hooks_get(tsdn, arena);
    }
}

static void
chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{
    chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
}

static void
chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{
    chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
}

bool
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
{
    assert(extent_node_addr_get(node) == chunk);

    if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
        return (true);
    if (config_prof && opt_prof) {
        size_t size = extent_node_size_get(node);
        size_t nadd = (size == 0) ? 1 : size / chunksize;
        size_t cur = atomic_add_z(&curchunks, nadd);
        size_t high = atomic_read_z(&highchunks);
        while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
            /*
             * Don't refresh cur, because it may have decreased
             * since this thread lost the highchunks update race.
             */
            high = atomic_read_z(&highchunks);
        }
        if (cur > high && prof_gdump_get_unlocked())
            prof_gdump(tsdn);
    }

    return (false);
}

void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
    bool err;

    err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
    assert(!err);
    if (config_prof && opt_prof) {
        size_t size = extent_node_size_get(node);
        size_t nsub = (size == 0) ? 1 : size / chunksize;
        assert(atomic_read_z(&curchunks) >= nsub);
        atomic_sub_z(&curchunks, nsub);
    }
}

/*
 * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
 * best fits.
 */
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
{
    extent_node_t key;

    assert(size == CHUNK_CEILING(size));

    extent_node_init(&key, arena, NULL, size, 0, false, false);
    return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
}

static void *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit, bool dalloc_node)
{
    void *ret;
    extent_node_t *node;
    size_t alloc_size, leadsize, trailsize;
    bool zeroed, committed;

    assert(CHUNK_CEILING(size) == size);
    assert(alignment > 0);
    assert(new_addr == NULL || alignment == chunksize);
    assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
    /*
     * Cached chunks use the node linkage embedded in their headers, in
     * which case dalloc_node is true, and new_addr is non-NULL because
     * we're operating on a specific chunk.
     */
    assert(dalloc_node || new_addr != NULL);

    alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
    chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
    if (new_addr != NULL) {
        extent_node_t key;
        extent_node_init(&key, arena, new_addr, alloc_size, 0, false, false);
        node = extent_tree_ad_search(chunks_ad, &key);
    } else {
        node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
    }
    if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
        size)) {
        malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
        return (NULL);
    }
    leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
        alignment) - (uintptr_t)extent_node_addr_get(node);
    assert(new_addr == NULL || leadsize == 0);
    assert(extent_node_size_get(node) >= leadsize + size);
    trailsize = extent_node_size_get(node) - leadsize - size;
    ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
    *sn = extent_node_sn_get(node);
    zeroed = extent_node_zeroed_get(node);
    if (zeroed)
        *zero = true;
    committed = extent_node_committed_get(node);
    if (committed)
        *commit = true;
    /* Split the lead. */
    if (leadsize != 0 &&
        chunk_hooks->split(extent_node_addr_get(node),
        extent_node_size_get(node), leadsize, size, false, arena->ind)) {
        malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
        return (NULL);
    }
    /* Remove node from the tree. */
    extent_tree_szsnad_remove(chunks_szsnad, node);
    extent_tree_ad_remove(chunks_ad, node);
    arena_chunk_cache_maybe_remove(arena, node, cache);
    if (leadsize != 0) {
        /* Insert the leading space as a smaller chunk. */
        extent_node_size_set(node, leadsize);
        extent_tree_szsnad_insert(chunks_szsnad, node);
        extent_tree_ad_insert(chunks_ad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
        node = NULL;
    }
    if (trailsize != 0) {
        /* Split the trail. */
        if (chunk_hooks->split(ret, size + trailsize, size, trailsize,
            false, arena->ind)) {
            if (dalloc_node && node != NULL)
                arena_node_dalloc(tsdn, arena, node);
            malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
            chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
                chunks_ad, cache, ret, size + trailsize, *sn, zeroed,
                committed);
            return (NULL);
        }
        /* Insert the trailing space as a smaller chunk. */
        if (node == NULL) {
            node = arena_node_alloc(tsdn, arena);
            if (node == NULL) {
                malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
                chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
                    chunks_ad, cache, ret, size + trailsize, *sn,
                    zeroed, committed);
                return (NULL);
            }
        }
        extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
            trailsize, *sn, zeroed, committed);
        extent_tree_szsnad_insert(chunks_szsnad, node);
        extent_tree_ad_insert(chunks_ad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
        node = NULL;
    }

    if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
        malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
        chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
            cache, ret, size, *sn, zeroed, committed);
        return (NULL);
    }
    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

    assert(dalloc_node || node != NULL);
    if (dalloc_node && node != NULL)
        arena_node_dalloc(tsdn, arena, node);
    if (*zero) {
        if (!zeroed)
            memset(ret, 0, size);
        else if (config_debug) {
            size_t i;
            size_t *p = (size_t *)(uintptr_t)ret;
            for (i = 0; i < size / sizeof(size_t); i++)
                assert(p[i] == 0);
        }
        if (config_valgrind)
            JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
    }
    return (ret);
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
static void *
chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
{
    void *ret;
...
@@ -147,235 +345,451 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
    assert((alignment & chunksize_mask) == 0);

    /* "primary" dss. */
    if (have_dss && dss_prec == dss_prec_primary && (ret =
        chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
        commit)) != NULL)
        return (ret);
    /* mmap. */
    if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
        NULL)
        return (ret);
    /* "secondary" dss. */
    if (have_dss && dss_prec == dss_prec_secondary && (ret =
        chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
        commit)) != NULL)
        return (ret);

    /* All strategies for allocation failed. */
    return (NULL);
}

void *
chunk_alloc_base(size_t size)
{
    void *ret;
    bool zero, commit;

    /*
     * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
     * because it's critical that chunk_alloc_base() return untouched
     * demand-zeroed virtual memory.
     */
    zero = true;
    commit = true;
    ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

    return (ret);
}

void *
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit, bool dalloc_node)
{
    void *ret;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = chunk_recycle(tsdn, arena, chunk_hooks,
        &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
        new_addr, size, alignment, sn, zero, commit, dalloc_node);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
    return (ret);
}

static arena_t *
chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
{
    arena_t *arena;

    arena = arena_get(tsdn, arena_ind, false);
    /*
     * The arena we're allocating on behalf of must have been initialized
     * already.
     */
    assert(arena != NULL);
    return (arena);
}

static void *
chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;

    ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
        commit, arena->dss_prec);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

    return (ret);
}

static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
    tsdn_t *tsdn;
    arena_t *arena;

    tsdn = tsdn_fetch();
    arena = chunk_arena_get(tsdn, arena_ind);

    return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
        zero, commit));
}

static void *
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit)
{
    void *ret;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = chunk_recycle(tsdn, arena, chunk_hooks,
        &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
        new_addr, size, alignment, sn, zero, commit, true);

    if (config_stats && ret != NULL)
        arena->stats.retained -= size;

    return (ret);
}

void *
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit)
{
    void *ret;

    chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);

    ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
        alignment, sn, zero, commit);
    if (ret == NULL) {
        if (chunk_hooks->alloc == chunk_alloc_default) {
            /* Call directly to propagate tsdn. */
            ret = chunk_alloc_default_impl(tsdn, arena, new_addr, size,
                alignment, zero, commit);
        } else {
            ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
                commit, arena->ind);
        }

        if (ret == NULL)
            return (NULL);

        *sn = arena_extent_sn_next(arena);

        if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
            JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
    }

    assert(CHUNK_ADDR2BASE(ret) == ret);
    return (ret);
}
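chunk_alloc_wrapper() above tries progressively more expensive sources: chunks retained from earlier deallocations, then the arena's configured alloc hook (the default of which consults the DSS and mmap). A condensed sketch of that fallback chain with stub source functions; the names are placeholders, not jemalloc API:

#include <stdlib.h>

/* Stub sources; in jemalloc these correspond to retained chunks, DSS, mmap. */
static void *try_reuse_retained(size_t size) { (void)size; return NULL; }
static void *try_primary_source(size_t size) { (void)size; return NULL; }
static void *try_fallback_source(size_t size) { return malloc(size); }

/* Try cheaper sources first, falling back only when each one fails. */
static void *
tiered_alloc(size_t size)
{
    void *p;

    if ((p = try_reuse_retained(size)) != NULL)
        return p;
    if ((p = try_primary_source(size)) != NULL)
        return p;
    return try_fallback_source(size);
}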
static void
chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{
    bool unzeroed;
    extent_node_t *node, *prev;
    extent_node_t key;

    assert(!cache || !zeroed);
    unzeroed = cache || !zeroed;
    JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
    chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
    extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
        false, false);
    node = extent_tree_ad_nsearch(chunks_ad, &key);
    /* Try to coalesce forward. */
    if (node != NULL && extent_node_addr_get(node) ==
        extent_node_addr_get(&key) && extent_node_committed_get(node) ==
        committed && !chunk_hooks->merge(chunk, size,
        extent_node_addr_get(node), extent_node_size_get(node), false,
        arena->ind)) {
        /*
         * Coalesce chunk with the following address range.  This does
         * not change the position within chunks_ad, so only
         * remove/insert from/into chunks_szsnad.
         */
        extent_tree_szsnad_remove(chunks_szsnad, node);
        arena_chunk_cache_maybe_remove(arena, node, cache);
        extent_node_addr_set(node, chunk);
        extent_node_size_set(node, size + extent_node_size_get(node));
        if (sn < extent_node_sn_get(node))
            extent_node_sn_set(node, sn);
        extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
            !unzeroed);
        extent_tree_szsnad_insert(chunks_szsnad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
    } else {
        /* Coalescing forward failed, so insert a new node. */
        node = arena_node_alloc(tsdn, arena);
        if (node == NULL) {
            /*
             * Node allocation failed, which is an exceedingly
             * unlikely failure.  Leak chunk after making sure its
             * pages have already been purged, so that this is only
             * a virtual memory leak.
             */
            if (cache) {
                chunk_purge_wrapper(tsdn, arena, chunk_hooks, chunk,
                    size, 0, size);
            }
            goto label_return;
        }
        extent_node_init(node, arena, chunk, size, sn, !unzeroed, committed);
        extent_tree_ad_insert(chunks_ad, node);
        extent_tree_szsnad_insert(chunks_szsnad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
    }

    /* Try to coalesce backward. */
    prev = extent_tree_ad_prev(chunks_ad, node);
    if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
        extent_node_size_get(prev)) == chunk &&
        extent_node_committed_get(prev) == committed &&
        !chunk_hooks->merge(extent_node_addr_get(prev),
        extent_node_size_get(prev), chunk, size, false, arena->ind)) {
        /*
         * Coalesce chunk with the previous address range.  This does
         * not change the position within chunks_ad, so only
         * remove/insert node from/into chunks_szsnad.
         */
        extent_tree_szsnad_remove(chunks_szsnad, prev);
        extent_tree_ad_remove(chunks_ad, prev);
        arena_chunk_cache_maybe_remove(arena, prev, cache);
        extent_tree_szsnad_remove(chunks_szsnad, node);
        arena_chunk_cache_maybe_remove(arena, node, cache);
        extent_node_addr_set(node, extent_node_addr_get(prev));
        extent_node_size_set(node, extent_node_size_get(prev) +
            extent_node_size_get(node));
        if (extent_node_sn_get(prev) < extent_node_sn_get(node))
            extent_node_sn_set(node, extent_node_sn_get(prev));
        extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
            extent_node_zeroed_get(node));
        extent_tree_szsnad_insert(chunks_szsnad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);

        arena_node_dalloc(tsdn, arena, prev);
    }

label_return:
    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
}
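chunk_record() above coalesces a freed chunk with its address-adjacent neighbors: the successor is found with an nsearch on the chunk's end address and the predecessor with an address-ordered prev lookup, and a merge only changes the node's size, so only the size/sequence-number tree needs re-insertion. A tiny sketch of the adjacency test and merge on plain address/size pairs; locating the neighbors is assumed to be done by the caller:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct span { void *addr; size_t size; };

/* True if b starts exactly where a ends, so the two can be merged. */
static bool
spans_adjacent(const struct span *a, const struct span *b)
{
    return ((uintptr_t)a->addr + a->size == (uintptr_t)b->addr);
}

/* Merge b into a; the caller re-inserts a into any size-ordered index. */
static void
span_merge(struct span *a, const struct span *b)
{
    a->size += b->size;
}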
void
chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t sn, bool committed)
{
    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
        &arena->chunks_ad_cached, true, chunk, size, sn, false, committed);
    arena_maybe_purge(tsdn, arena);
}

static bool
chunk_dalloc_default_impl(void *chunk, size_t size)
{
    if (!have_dss || !chunk_in_dss(chunk))
        return (chunk_dalloc_mmap(chunk, size));
    return (true);
}

static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind)
{
    return (chunk_dalloc_default_impl(chunk, size));
}

void
chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{
    bool err;

    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
    /* Try to deallocate. */
    if (chunk_hooks->dalloc == chunk_dalloc_default) {
        /* Call directly to propagate tsdn. */
        err = chunk_dalloc_default_impl(chunk, size);
    } else
        err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
    if (!err)
        return;
    /* Try to decommit; purge if that fails. */
    if (committed) {
        committed = chunk_hooks->decommit(chunk, size, 0, size,
            arena->ind);
    }
    zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
        arena->ind);
    chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
        &arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
        committed);

    if (config_stats)
        arena->stats.retained += size;
}

static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{
    return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

static bool
chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{
    return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{
    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert((offset & PAGE_MASK) == 0);
    assert(length != 0);
    assert((length & PAGE_MASK) == 0);

    return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

bool
chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t offset, size_t length)
{
    chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
    return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}

static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{
    if (!maps_coalesce)
        return (true);
    return (false);
}

static bool
chunk_merge_default_impl(void *chunk_a, void *chunk_b)
{
    if (!maps_coalesce)
        return (true);
    if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b))
        return (true);

    return (false);
}

static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{
    return (chunk_merge_default_impl(chunk_a, chunk_b));
}

static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{
    return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
        sizeof(rtree_node_elm_t)));
}

bool
chunk_boot(void)
{
#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);

    /*
     * Verify actual page size is equal to or an integral multiple of
     * configured page size.
     */
    if (info.dwPageSize & ((1U << LG_PAGE) - 1))
        return (true);

    /*
     * Configure chunksize (if not set) to match granularity (usually 64K),
     * so pages_map will always take fast path.
     */
    if (!opt_lg_chunk) {
        opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity) - 1;
    }
#else
    if (!opt_lg_chunk)
        opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

    /* Set variables according to the value of opt_lg_chunk. */
    chunksize = (ZU(1) << opt_lg_chunk);
    assert(chunksize >= PAGE);
    chunksize_mask = chunksize - 1;
    chunk_npages = (chunksize >> LG_PAGE);

    if (have_dss)
        chunk_dss_boot();
    if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
        opt_lg_chunk), chunks_rtree_node_alloc, NULL))
        return (true);

    return (false);
}
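chunk_hooks_set() above replaces a whole table of function pointers while readers may load individual entries without taking the lock, so each field is published with an atomic pointer write and no reader can ever observe a torn value. A minimal sketch of the same idea using C11 atomics; the types and names here are generic, not the jemalloc macro:

#include <stdatomic.h>
#include <stddef.h>

typedef void *(*alloc_hook_t)(size_t);
typedef void (*dalloc_hook_t)(void *, size_t);

struct hooks {
    _Atomic(alloc_hook_t)  alloc;
    _Atomic(dalloc_hook_t) dalloc;
};

/* Publish new hooks one field at a time; readers always see whole pointers. */
static void
hooks_set(struct hooks *h, alloc_hook_t a, dalloc_hook_t d)
{
    atomic_store(&h->alloc, a);
    atomic_store(&h->dalloc, d);
}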
deps/jemalloc/src/chunk_dss.c  View file @ 1f72ec7d
...
@@ -10,45 +10,43 @@ const char *dss_prec_names[] = {
	"N/A"
};

/*
 * Current dss precedence default, used when creating new arenas.  NB: This is
 * stored as unsigned rather than dss_prec_t because in principle there's no
 * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
 * atomic operations to synchronize the setting.
 */
static unsigned		dss_prec_default = (unsigned)DSS_PREC_DEFAULT;

/* Base address of the DSS. */
static void		*dss_base;
/* Atomic boolean indicating whether the DSS is exhausted. */
static unsigned		dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static void		*dss_max;

/******************************************************************************/

static void *
chunk_dss_sbrk(intptr_t increment)
{
#ifdef JEMALLOC_DSS
    return (sbrk(increment));
#else
    not_implemented();
    return (NULL);
#endif
}

dss_prec_t
chunk_dss_prec_get(void)
{
    dss_prec_t ret;

    if (!have_dss)
        return (dss_prec_disabled);
    ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
    return (ret);
}
...
@@ -56,20 +54,50 @@ bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{
    if (!have_dss)
        return (dss_prec != dss_prec_disabled);
    atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
    return (false);
}

static void *
chunk_dss_max_update(void *new_addr)
{
    void *max_cur;
    spin_t spinner;

    /*
     * Get the current end of the DSS as max_cur and assure that dss_max is
     * up to date.
     */
    spin_init(&spinner);
    while (true) {
        void *max_prev = atomic_read_p(&dss_max);

        max_cur = chunk_dss_sbrk(0);
        if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
            /*
             * Another thread optimistically updated dss_max.  Wait
             * for it to finish.
             */
            spin_adaptive(&spinner);
            continue;
        }
        if (!atomic_cas_p(&dss_max, max_prev, max_cur))
            break;
    }
    /* Fixed new_addr can only be supported if it is at the edge of DSS. */
    if (new_addr != NULL && max_cur != new_addr)
        return (NULL);

    return (max_cur);
}

void *
chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit)
{
    cassert(have_dss);
    assert(size > 0 && (size & chunksize_mask) == 0);
    assert(alignment > 0 && (alignment & chunksize_mask) == 0);
...
@@ -80,20 +108,21 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
    if ((intptr_t)size < 0)
        return (NULL);

    if (!atomic_read_u(&dss_exhausted)) {
        /*
         * The loop is necessary to recover from races with other
         * threads that are using the DSS for something other than
         * malloc.
         */
        while (true) {
            void *ret, *cpad, *max_cur, *dss_next, *dss_prev;
            size_t gap_size, cpad_size;
            intptr_t incr;

            max_cur = chunk_dss_max_update(new_addr);
            if (max_cur == NULL)
                goto label_oom;

            /*
             * Calculate how much padding is necessary to
             * chunk-align the end of the DSS.
...
@@ -111,87 +140,99 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
            cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
            dss_next = (void *)((uintptr_t)ret + size);
            if ((uintptr_t)ret < (uintptr_t)dss_max ||
                (uintptr_t)dss_next < (uintptr_t)dss_max)
                goto label_oom; /* Wrap-around. */
            incr = gap_size + cpad_size + size;

            /*
             * Optimistically update dss_max, and roll back below if
             * sbrk() fails.  No other thread will try to extend the
             * DSS while dss_max is greater than the current DSS
             * max reported by sbrk(0).
             */
            if (atomic_cas_p(&dss_max, max_cur, dss_next))
                continue;

            /* Try to allocate. */
            dss_prev = chunk_dss_sbrk(incr);
            if (dss_prev == max_cur) {
                /* Success. */
                if (cpad_size != 0) {
                    chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
                    chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks,
                        cpad, cpad_size,
                        arena_extent_sn_next(arena), false, true);
                }
                if (*zero) {
                    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
                    memset(ret, 0, size);
                }
                if (!*commit)
                    *commit = pages_decommit(ret, size);
                return (ret);
            }

            /*
             * Failure, whether due to OOM or a race with a raw
             * sbrk() call from outside the allocator.  Try to roll
             * back optimistic dss_max update; if rollback fails,
             * it's due to another caller of this function having
             * succeeded since this invocation started, in which
             * case rollback is not necessary.
             */
            atomic_cas_p(&dss_max, dss_next, max_cur);
            if (dss_prev == (void *)-1) {
                /* OOM. */
                atomic_write_u(&dss_exhausted, (unsigned)true);
                goto label_oom;
            }
        }
    }
label_oom:
    return (NULL);
}

static bool
chunk_in_dss_helper(void *chunk, void *max)
{
    return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk <
        (uintptr_t)max);
}

bool
chunk_in_dss(void *chunk)
{
    cassert(have_dss);

    return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max)));
}

bool
chunk_dss_mergeable(void *chunk_a, void *chunk_b)
{
    void *max;

    cassert(have_dss);

    max = atomic_read_p(&dss_max);
    return (chunk_in_dss_helper(chunk_a, max) ==
        chunk_in_dss_helper(chunk_b, max));
}

void
chunk_dss_boot(void)
{
    cassert(have_dss);

    dss_base = chunk_dss_sbrk(0);
    dss_exhausted = (unsigned)(dss_base == (void *)-1);
    dss_max = dss_base;
}

/******************************************************************************/
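chunk_alloc_dss() above extends the DSS with an optimistic reserve-then-commit scheme: the hoped-for new dss_max is published with a compare-and-swap before sbrk() is called, and the value is rolled back if sbrk() fails. A small sketch of the same pattern on an atomic end-of-region counter; the names and the stub backing-store call are illustrative only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static _Atomic size_t reserved_end;

/* Pretend backing-store extension that may fail; stands in for sbrk(). */
static bool extend_backing(size_t new_end) { (void)new_end; return true; }

static bool
reserve(size_t old_end, size_t len)
{
    size_t new_end = old_end + len;
    size_t expected = old_end;

    /* Optimistically publish the new end; only one thread wins the CAS. */
    if (!atomic_compare_exchange_strong(&reserved_end, &expected, new_end))
        return false;
    if (extend_backing(new_end))
        return true;
    /* Roll back; harmless if another thread has since moved the end. */
    atomic_compare_exchange_strong(&reserved_end, &new_end, old_end);
    return false;
}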
deps/jemalloc/src/chunk_mmap.c  View file @ 1f72ec7d

#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;
    size_t alloc_size;

    alloc_size = size + alignment - PAGE;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    do {
        void *pages;
        size_t leadsize;
        pages = pages_map(NULL, alloc_size, commit);
        if (pages == NULL)
            return (NULL);
        leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
            (uintptr_t)pages;
        ret = pages_trim(pages, alloc_size, leadsize, size, commit);
    } while (ret == NULL);

    assert(ret != NULL);
...
@@ -164,7 +30,8 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
}

void *
chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit)
{
    void *ret;
    size_t offset;
...
@@ -185,13 +52,14 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = pages_map(new_addr, size, commit);
    if (ret == NULL || ret == new_addr)
        return (ret);
    assert(new_addr == NULL);
    offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
    if (offset != 0) {
        pages_unmap(ret, size);
        return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
    }

    assert(ret != NULL);
...
@@ -200,11 +68,11 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
}

bool
chunk_dalloc_mmap(void *chunk, size_t size)
{
    if (config_munmap)
        pages_unmap(chunk, size);

    return (!config_munmap);
}
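chunk_alloc_mmap_slow() above obtains an aligned region from an interface with no alignment guarantee by over-allocating and then unmapping the misaligned head and tail. A compact sketch of that technique directly on mmap/munmap, assuming a POSIX-like system and a power-of-two, page-multiple alignment; it over-allocates by the full alignment for simplicity:

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Round addr up to a multiple of align (align must be a power of two). */
#define ALIGN_UP(a, align) (((a) + ((align) - 1)) & ~((uintptr_t)(align) - 1))

static void *
map_aligned(size_t size, size_t alignment)
{
    size_t alloc_size = size + alignment;   /* slack so an aligned window fits */
    char *base, *ret;

    base = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED)
        return NULL;
    ret = (char *)ALIGN_UP((uintptr_t)base, alignment);
    if (ret != base)
        munmap(base, (size_t)(ret - base));                 /* trim lead */
    if (ret + size != base + alloc_size)
        munmap(ret + size,
            (size_t)(base + alloc_size - (ret + size)));    /* trim tail */
    return ret;
}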
deps/jemalloc/src/ckh.c  View file @ 1f72ec7d
...
@@ -40,8 +40,8 @@
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool	ckh_grow(tsd_t *tsd, ckh_t *ckh);
static void	ckh_shrink(tsd_t *tsd, ckh_t *ckh);

/******************************************************************************/
...
@@ -49,7 +49,7 @@ static void ckh_shrink(ckh_t *ckh);
 * Search bucket for key and return the cell number if found; SIZE_T_MAX
 * otherwise.
 */
JEMALLOC_INLINE_C size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
{
    ckhc_t *cell;
...
@@ -67,28 +67,28 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
/*
 * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
 */
JEMALLOC_INLINE_C size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
    size_t hashes[2], bucket, cell;

    assert(ckh != NULL);

    ckh->hash(key, hashes);

    /* Search primary bucket. */
    bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
    cell = ckh_bucket_search(ckh, bucket, key);
    if (cell != SIZE_T_MAX)
        return (cell);

    /* Search secondary bucket. */
    bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
    cell = ckh_bucket_search(ckh, bucket, key);
    return (cell);
}
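ckh_isearch() above is the lookup half of cuckoo hashing: every key has exactly two candidate buckets, derived from two independent hashes, so a search probes at most two fixed-size buckets. A toy sketch of that lookup over a flat cell array; the two-hash function and the pointer-identity key comparison are simplifications for illustration:

#include <stddef.h>
#include <stdint.h>

#define CELLS_PER_BUCKET 4

struct cell  { const void *key; };
struct table { struct cell *cells; size_t nbuckets; /* power of two */ };

/* Toy two-hash; a real implementation would use a quality hash function. */
static void
hash2(const void *key, uint64_t h[2])
{
    uintptr_t k = (uintptr_t)key;

    h[0] = k * 0x9E3779B97F4A7C15ULL;
    h[1] = (k ^ (k >> 16)) * 0xC2B2AE3D27D4EB4FULL;
}

static struct cell *
cuckoo_lookup(struct table *t, const void *key)
{
    uint64_t h[2];
    size_t b, i, which;

    hash2(key, h);
    for (which = 0; which < 2; which++) {
        b = (size_t)h[which] & (t->nbuckets - 1);
        for (i = 0; i < CELLS_PER_BUCKET; i++) {
            struct cell *c = &t->cells[b * CELLS_PER_BUCKET + i];
            if (c->key == key)
                return c;
        }
    }
    return NULL;    /* not present in either candidate bucket */
}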
JEMALLOC_INLINE
bool
JEMALLOC_INLINE
_C
bool
ckh_try_bucket_insert
(
ckh_t
*
ckh
,
size_t
bucket
,
const
void
*
key
,
const
void
*
data
)
{
...
...
@@ -99,7 +99,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
prng32
(
offset
,
LG_CKH_BUCKET_CELLS
,
ckh
->
prng_state
,
CKH_A
,
CKH_C
);
offset
=
(
unsigned
)
prng_lg_range_u64
(
&
ckh
->
prng_state
,
LG_CKH_BUCKET_CELLS
);
for
(
i
=
0
;
i
<
(
ZU
(
1
)
<<
LG_CKH_BUCKET_CELLS
);
i
++
)
{
cell
=
&
ckh
->
tab
[(
bucket
<<
LG_CKH_BUCKET_CELLS
)
+
((
i
+
offset
)
&
((
ZU
(
1
)
<<
LG_CKH_BUCKET_CELLS
)
-
1
))];
...
...
@@ -120,13 +121,13 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle.
*/
JEMALLOC_INLINE
bool
JEMALLOC_INLINE
_C
bool
ckh_evict_reloc_insert
(
ckh_t
*
ckh
,
size_t
argbucket
,
void
const
**
argkey
,
void
const
**
argdata
)
{
const
void
*
key
,
*
data
,
*
tkey
,
*
tdata
;
ckhc_t
*
cell
;
size_t
hash
1
,
hash2
,
bucket
,
tbucket
;
size_t
hash
es
[
2
]
,
bucket
,
tbucket
;
unsigned
i
;
bucket
=
argbucket
;
...
...
@@ -141,7 +142,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
* were an item for which both hashes indicated the same
* bucket.
*/
prng32
(
i
,
LG_CKH_BUCKET_CELLS
,
ckh
->
prng_state
,
CKH_A
,
CKH_C
);
i
=
(
unsigned
)
prng_lg_range_u64
(
&
ckh
->
prng_state
,
LG_CKH_BUCKET_CELLS
);
cell
=
&
ckh
->
tab
[(
bucket
<<
LG_CKH_BUCKET_CELLS
)
+
i
];
assert
(
cell
->
key
!=
NULL
);
...
...
@@ -155,10 +157,11 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
#endif
/* Find the alternate bucket for the evicted item. */
ckh
->
hash
(
key
,
ckh
->
lg_curbuckets
,
&
hash1
,
&
hash2
);
tbucket
=
hash
2
&
((
ZU
(
1
)
<<
ckh
->
lg_curbuckets
)
-
1
);
ckh
->
hash
(
key
,
hashes
);
tbucket
=
hash
es
[
1
]
&
((
ZU
(
1
)
<<
ckh
->
lg_curbuckets
)
-
1
);
if
(
tbucket
==
bucket
)
{
tbucket
=
hash1
&
((
ZU
(
1
)
<<
ckh
->
lg_curbuckets
)
-
1
);
tbucket
=
hashes
[
0
]
&
((
ZU
(
1
)
<<
ckh
->
lg_curbuckets
)
-
1
);
/*
* It may be that (tbucket == bucket) still, if the
* item's hashes both indicate this bucket. However,
...
...
@@ -184,28 +187,28 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
}
bucket
=
tbucket
;
if
(
ckh_try_bucket_insert
(
ckh
,
bucket
,
key
,
data
)
==
false
)
if
(
!
ckh_try_bucket_insert
(
ckh
,
bucket
,
key
,
data
))
return
(
false
);
}
}
-JEMALLOC_INLINE bool
+JEMALLOC_INLINE_C bool
 ckh_try_insert(ckh_t *ckh, void const **argkey, void const **argdata)
 {
-	size_t hash1, hash2, bucket;
+	size_t hashes[2], bucket;
 	const void *key = *argkey;
 	const void *data = *argdata;

-	ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+	ckh->hash(key, hashes);
/* Try to insert in primary bucket. */
-	bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
-	if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+	bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
+	if (!ckh_try_bucket_insert(ckh, bucket, key, data))
 		return (false);
/* Try to insert in secondary bucket. */
-	bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
-	if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+	bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
+	if (!ckh_try_bucket_insert(ckh, bucket, key, data))
 		return (false);
/*
...
...
@@ -218,7 +221,7 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
* Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new.
*/
-JEMALLOC_INLINE bool
+JEMALLOC_INLINE_C bool
 ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
 {
 	size_t count, i, nins;
...
...
@@ -242,12 +245,11 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
}
 static bool
-ckh_grow(ckh_t *ckh)
+ckh_grow(tsd_t *tsd, ckh_t *ckh)
 {
 	bool ret;
 	ckhc_t *tab, *ttab;
-	size_t lg_curcells;
-	unsigned lg_prevbuckets;
+	unsigned lg_prevbuckets, lg_curcells;

 #ifdef CKH_COUNT
 	ckh->ngrows++;
...
...
@@ -265,11 +267,12 @@ ckh_grow(ckh_t *ckh)
 		lg_curcells++;
 		usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-		if (usize == 0) {
+		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
 			ret = true;
 			goto label_return;
 		}
-		tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+		tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
+		    true, NULL, true, arena_ichoose(tsd, NULL));
 		if (tab == NULL) {
 			ret = true;
 			goto label_return;
...
...
@@ -280,13 +283,13 @@ ckh_grow(ckh_t *ckh)
 		tab = ttab;
 		ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

-		if (ckh_rebuild(ckh, tab) == false) {
-			idalloc(tab);
+		if (!ckh_rebuild(ckh, tab)) {
+			idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
 			break;
 		}

 		/* Rebuilding failed, so back out partially rebuilt table. */
-		idalloc(ckh->tab);
+		idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
 		ckh->tab = tab;
 		ckh->lg_curbuckets = lg_prevbuckets;
 	}
...
...
@@ -297,11 +300,11 @@ label_return:
}
 static void
-ckh_shrink(ckh_t *ckh)
+ckh_shrink(tsd_t *tsd, ckh_t *ckh)
 {
 	ckhc_t *tab, *ttab;
-	size_t lg_curcells, usize;
-	unsigned lg_prevbuckets;
+	size_t usize;
+	unsigned lg_prevbuckets, lg_curcells;
/*
* It is possible (though unlikely, given well behaved hashes) that the
...
...
@@ -310,9 +313,10 @@ ckh_shrink(ckh_t *ckh)
 	lg_prevbuckets = ckh->lg_curbuckets;
 	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
 	usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-	if (usize == 0)
+	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
 		return;
-	tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+	tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
+	    true, arena_ichoose(tsd, NULL));
 	if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
...
...
@@ -326,8 +330,8 @@ ckh_shrink(ckh_t *ckh)
 	tab = ttab;
 	ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

-	if (ckh_rebuild(ckh, tab) == false) {
-		idalloc(tab);
+	if (!ckh_rebuild(ckh, tab)) {
+		idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
 #ifdef CKH_COUNT
 		ckh->nshrinks++;
 #endif
...
...
@@ -335,7 +339,7 @@ ckh_shrink(ckh_t *ckh)
}
/* Rebuilding failed, so back out partially rebuilt table. */
-	idalloc(ckh->tab);
+	idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
 	ckh->tab = tab;
 	ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
...
...
@@ -344,7 +348,8 @@ ckh_shrink(ckh_t *ckh)
}
 bool
-ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
+ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+    ckh_keycomp_t *keycomp)
 {
 	bool ret;
 	size_t mincells, usize;
...
...
@@ -365,10 +370,10 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
 	ckh->count = 0;

 	/*
-	 * Find the minimum power of 2 that is large enough to fit aBaseCount
+	 * Find the minimum power of 2 that is large enough to fit minitems
 	 * entries.  We are using (2+,2) cuckoo hashing, which has an expected
 	 * maximum load factor of at least ~0.86, so 0.75 is a conservative load
-	 * factor that will typically allow 2^aLgMinItems to fit without ever
+	 * factor that will typically allow mincells items to fit without ever
 	 * growing the table.
 	 */
 	assert(LG_CKH_BUCKET_CELLS > 0);
...
...
@@ -383,11 +388,12 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
 	ckh->keycomp = keycomp;

 	usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
-	if (usize == 0) {
+	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
 		ret = true;
 		goto label_return;
 	}
-	ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+	ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
+	    NULL, true, arena_ichoose(tsd, NULL));
 	if (ckh->tab == NULL) {
 		ret = true;
 		goto label_return;
...
...
@@ -399,16 +405,16 @@ label_return:
}
 void
-ckh_delete(ckh_t *ckh)
+ckh_delete(tsd_t *tsd, ckh_t *ckh)
 {

 	assert(ckh != NULL);

 #ifdef CKH_VERBOSE
 	malloc_printf(
-	    "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
-	    " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
-	    " nrelocs: %"PRIu64"\n", __func__, ckh,
+	    "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
+	    " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
+	    " nrelocs: %"FMTu64"\n", __func__, ckh,
 	    (unsigned long long)ckh->ngrows,
 	    (unsigned long long)ckh->nshrinks,
 	    (unsigned long long)ckh->nshrinkfails,
...
...
@@ -416,10 +422,9 @@ ckh_delete(ckh_t *ckh)
 	    (unsigned long long)ckh->nrelocs);
 #endif

-	idalloc(ckh->tab);
-#ifdef JEMALLOC_DEBUG
-	memset(ckh, 0x5a, sizeof(ckh_t));
-#endif
+	idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+	if (config_debug)
+		memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
 }
size_t
...
...
@@ -452,7 +457,7 @@ ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
}
 bool
-ckh_insert(ckh_t *ckh, const void *key, const void *data)
+ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
 {
 	bool ret;
...
@@ -464,7 +469,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
#endif
 	while (ckh_try_insert(ckh, &key, &data)) {
-		if (ckh_grow(ckh)) {
+		if (ckh_grow(tsd, ckh)) {
 			ret = true;
 			goto label_return;
 		}
...
...
@@ -476,7 +481,8 @@ label_return:
}
 bool
-ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
+ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+    void **data)
 {
 	size_t cell;
...
...
@@ -497,7 +503,7 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
 	    + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
 	    > ckh->lg_minbuckets) {
 		/* Ignore error due to OOM. */
-		ckh_shrink(ckh);
+		ckh_shrink(tsd, ckh);
 	}

 	return (false);
...
...
@@ -526,31 +532,10 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
}
 void
-ckh_string_hash(const void *key, unsigned minbits, size_t *hash1,
-    size_t *hash2)
+ckh_string_hash(const void *key, size_t r_hash[2])
 {
-	size_t ret1, ret2;
-	uint64_t h;
-
-	assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
-	assert(hash1 != NULL);
-	assert(hash2 != NULL);
-
-	h = hash(key, strlen((const char *)key), UINT64_C(0x94122f335b332aea));
-	if (minbits <= 32) {
-		/*
-		 * Avoid doing multiple hashes, since a single hash provides
-		 * enough bits.
-		 */
-		ret1 = h & ZU(0xffffffffU);
-		ret2 = h >> 32;
-	} else {
-		ret1 = h;
-		ret2 = hash(key, strlen((const char *)key),
-		    UINT64_C(0x8432a476666bbc13));
-	}
-
-	*hash1 = ret1;
-	*hash2 = ret2;
+	hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
 }
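Illustrative note (not part of the upstream diff): the new signature fills r_hash[2] from a single hash call instead of returning two separately seeded 64-bit values; callers then mask each half down to a bucket index. A sketch of that caller-side pattern with a toy stand-in hash (names and constants are assumptions):

#include <stddef.h>
#include <string.h>

/* Toy stand-in for the real hash(): mix key bytes into two words. */
static void
demo_hash(const void *key, size_t len, size_t r_hash[2])
{
    const unsigned char *p = key;
    size_t i, h0 = 0x9e3779b9u, h1 = 0x85ebca6bu;

    for (i = 0; i < len; i++) {
        h0 = (h0 ^ p[i]) * 0x01000193u;
        h1 = (h1 + p[i]) * 0x27d4eb2fu;
    }
    r_hash[0] = h0;
    r_hash[1] = h1;
}

/* Derive the two candidate buckets the way the ckh callers now do. */
static void
demo_buckets(const char *key, unsigned lg_curbuckets, size_t buckets[2])
{
    size_t hashes[2];

    demo_hash(key, strlen(key), hashes);
    buckets[0] = hashes[0] & (((size_t)1 << lg_curbuckets) - 1);
    buckets[1] = hashes[1] & (((size_t)1 << lg_curbuckets) - 1);
}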
bool
...
...
@@ -564,41 +549,16 @@ ckh_string_keycomp(const void *k1, const void *k2)
}
 void
-ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
-    size_t *hash2)
+ckh_pointer_hash(const void *key, size_t r_hash[2])
 {
-	size_t ret1, ret2;
-	uint64_t h;
 	union {
 		const void	*v;
-		uint64_t	i;
+		size_t		i;
 	} u;

-	assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
-	assert(hash1 != NULL);
-	assert(hash2 != NULL);
-
 	assert(sizeof(u.v) == sizeof(u.i));
-#if (LG_SIZEOF_PTR != LG_SIZEOF_INT)
-	u.i = 0;
-#endif
 	u.v = key;
-	h = hash(&u.i, sizeof(u.i), UINT64_C(0xd983396e68886082));
-	if (minbits <= 32) {
-		/*
-		 * Avoid doing multiple hashes, since a single hash provides
-		 * enough bits.
-		 */
-		ret1 = h & ZU(0xffffffffU);
-		ret2 = h >> 32;
-	} else {
-		assert(SIZEOF_PTR == 8);
-		ret1 = h;
-		ret2 = hash(&u.i, sizeof(u.i), UINT64_C(0x5e2be9aff8709a5d));
-	}
-
-	*hash1 = ret1;
-	*hash2 = ret2;
+	hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
 }
bool
...
...
deps/jemalloc/src/ctl.c
View file @ 1f72ec7d
...
...
@@ -7,7 +7,6 @@
/*
* ctl_mtx protects the following:
* - ctl_stats.*
* - opt_prof_active
*/
 static malloc_mutex_t	ctl_mtx;
 static bool		ctl_initialized;
...
...
@@ -17,67 +16,68 @@ static ctl_stats_t ctl_stats;
/******************************************************************************/
/* Helpers for named and indexed nodes. */
-static inline const ctl_named_node_t *
+JEMALLOC_INLINE_C const ctl_named_node_t *
 ctl_named_node(const ctl_node_t *node)
 {

 	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
 }

-static inline const ctl_named_node_t *
-ctl_named_children(const ctl_named_node_t *node, int index)
+JEMALLOC_INLINE_C const ctl_named_node_t *
+ctl_named_children(const ctl_named_node_t *node, size_t index)
 {
 	const ctl_named_node_t *children = ctl_named_node(node->children);

 	return (children ? &children[index] : NULL);
 }

-static inline const ctl_indexed_node_t *
+JEMALLOC_INLINE_C const ctl_indexed_node_t *
 ctl_indexed_node(const ctl_node_t *node)
 {

-	return ((node->named == false) ? (const ctl_indexed_node_t *)node :
-	    NULL);
+	return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
 }
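Illustrative note (not part of the upstream diff): every node in the mallctl tree is either named (fixed children found by string) or indexed (children produced on demand from a numeric path component such as an arena index), and the casts above are how traversal code selects the right view. A toy version of the same tagged-node pattern:

#include <stdbool.h>
#include <stddef.h>

typedef struct { bool named; } demo_node_t;

typedef struct {
    demo_node_t node;
    const char *name;
} demo_named_node_t;

typedef struct {
    demo_node_t node;
    const demo_node_t *(*index)(size_t i);
} demo_indexed_node_t;

static const demo_named_node_t *
demo_named(const demo_node_t *n)
{
    return (n->named ? (const demo_named_node_t *)n : NULL);
}

static const demo_indexed_node_t *
demo_indexed(const demo_node_t *n)
{
    return (!n->named ? (const demo_indexed_node_t *)n : NULL);
}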
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
 #define	CTL_PROTO(n)						\
-static int	n##_ctl(const size_t *mib, size_t miblen, void *oldp,	\
-    size_t *oldlenp, void *newp, size_t newlen);
+static int	n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,	\
+    void *oldp, size_t *oldlenp, void *newp, size_t newlen);

 #define	INDEX_PROTO(n)						\
-static const ctl_named_node_t	*n##_index(const size_t *mib,	\
-    size_t miblen, size_t i);
+static const ctl_named_node_t	*n##_index(tsdn_t *tsdn,	\
+    const size_t *mib, size_t miblen, size_t i);
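For reference (hand-expanded here, not emitted by the diff itself): after this change the two macros declare handlers and index callbacks shaped like the following; the added tsd_t */tsdn_t * argument is the point of the refactor.

/* CTL_PROTO(epoch) now declares: */
static int	epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);

/* INDEX_PROTO(arena_i) now declares: */
static const ctl_named_node_t	*arena_i_index(tsdn_t *tsdn,
    const size_t *mib, size_t miblen, size_t i);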
 static bool	ctl_arena_init(ctl_arena_stats_t *astats);
 static void	ctl_arena_clear(ctl_arena_stats_t *astats);
-static void	ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
+static void	ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
     arena_t *arena);
 static void	ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
     ctl_arena_stats_t *astats);
-static void	ctl_arena_refresh(arena_t *arena, unsigned i);
-static bool	ctl_grow(void);
-static void	ctl_refresh(void);
-static bool	ctl_init(void);
-static int	ctl_lookup(const char *name, ctl_node_t const **nodesp,
-    size_t *mibp, size_t *depthp);
+static void	ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i);
+static bool	ctl_grow(tsdn_t *tsdn);
+static void	ctl_refresh(tsdn_t *tsdn);
+static bool	ctl_init(tsdn_t *tsdn);
+static int	ctl_lookup(tsdn_t *tsdn, const char *name,
+    ctl_node_t const **nodesp, size_t *mibp, size_t *depthp);
 CTL_PROTO(version)
 CTL_PROTO(epoch)
 CTL_PROTO(thread_tcache_enabled)
 CTL_PROTO(thread_tcache_flush)
 CTL_PROTO(thread_prof_name)
 CTL_PROTO(thread_prof_active)
 CTL_PROTO(thread_arena)
 CTL_PROTO(thread_allocated)
 CTL_PROTO(thread_allocatedp)
 CTL_PROTO(thread_deallocated)
 CTL_PROTO(thread_deallocatedp)
 CTL_PROTO(config_cache_oblivious)
 CTL_PROTO(config_debug)
 CTL_PROTO(config_dss)
 CTL_PROTO(config_fill)
 CTL_PROTO(config_lazy_lock)
-CTL_PROTO(config_mremap)
+CTL_PROTO(config_malloc_conf)
 CTL_PROTO(config_munmap)
 CTL_PROTO(config_prof)
 CTL_PROTO(config_prof_libgcc)
...
...
@@ -92,29 +92,39 @@ CTL_PROTO(opt_abort)
 CTL_PROTO(opt_dss)
 CTL_PROTO(opt_lg_chunk)
 CTL_PROTO(opt_narenas)
 CTL_PROTO(opt_purge)
 CTL_PROTO(opt_lg_dirty_mult)
 CTL_PROTO(opt_decay_time)
 CTL_PROTO(opt_stats_print)
 CTL_PROTO(opt_junk)
 CTL_PROTO(opt_zero)
 CTL_PROTO(opt_quarantine)
 CTL_PROTO(opt_redzone)
 CTL_PROTO(opt_utrace)
 CTL_PROTO(opt_valgrind)
 CTL_PROTO(opt_xmalloc)
 CTL_PROTO(opt_tcache)
 CTL_PROTO(opt_lg_tcache_max)
 CTL_PROTO(opt_prof)
 CTL_PROTO(opt_prof_prefix)
 CTL_PROTO(opt_prof_active)
 CTL_PROTO(opt_prof_thread_active_init)
 CTL_PROTO(opt_lg_prof_sample)
 CTL_PROTO(opt_lg_prof_interval)
 CTL_PROTO(opt_prof_gdump)
 CTL_PROTO(opt_prof_final)
 CTL_PROTO(opt_prof_leak)
 CTL_PROTO(opt_prof_accum)
 CTL_PROTO(tcache_create)
 CTL_PROTO(tcache_flush)
 CTL_PROTO(tcache_destroy)
 static void	arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all);
 CTL_PROTO(arena_i_purge)
 static void	arena_purge(unsigned arena_ind);
 CTL_PROTO(arena_i_decay)
 CTL_PROTO(arena_i_reset)
 CTL_PROTO(arena_i_dss)
 CTL_PROTO(arena_i_lg_dirty_mult)
 CTL_PROTO(arena_i_decay_time)
 CTL_PROTO(arena_i_chunk_hooks)
 INDEX_PROTO(arena_i)
 CTL_PROTO(arenas_bin_i_size)
 CTL_PROTO(arenas_bin_i_nregs)
...
@@ -122,25 +132,27 @@ CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO
(
arenas_bin_i
)
CTL_PROTO
(
arenas_lrun_i_size
)
INDEX_PROTO
(
arenas_lrun_i
)
CTL_PROTO
(
arenas_hchunk_i_size
)
INDEX_PROTO
(
arenas_hchunk_i
)
CTL_PROTO
(
arenas_narenas
)
CTL_PROTO
(
arenas_initialized
)
CTL_PROTO
(
arenas_lg_dirty_mult
)
CTL_PROTO
(
arenas_decay_time
)
CTL_PROTO
(
arenas_quantum
)
CTL_PROTO
(
arenas_page
)
CTL_PROTO
(
arenas_tcache_max
)
CTL_PROTO
(
arenas_nbins
)
CTL_PROTO
(
arenas_nhbins
)
CTL_PROTO
(
arenas_nlruns
)
CTL_PROTO
(
arenas_
purge
)
CTL_PROTO
(
arenas_
nhchunks
)
CTL_PROTO
(
arenas_extend
)
CTL_PROTO
(
prof_thread_active_init
)
CTL_PROTO
(
prof_active
)
CTL_PROTO
(
prof_dump
)
CTL_PROTO
(
prof_gdump
)
CTL_PROTO
(
prof_reset
)
CTL_PROTO
(
prof_interval
)
CTL_PROTO
(
stats_chunks_current
)
CTL_PROTO
(
stats_chunks_total
)
CTL_PROTO
(
stats_chunks_high
)
CTL_PROTO
(
stats_huge_allocated
)
CTL_PROTO
(
stats_huge_nmalloc
)
CTL_PROTO
(
stats_huge_ndalloc
)
CTL_PROTO
(
lg_prof_sample
)
CTL_PROTO
(
stats_arenas_i_small_allocated
)
CTL_PROTO
(
stats_arenas_i_small_nmalloc
)
CTL_PROTO
(
stats_arenas_i_small_ndalloc
)
...
...
@@ -149,10 +161,14 @@ CTL_PROTO(stats_arenas_i_large_allocated)
 CTL_PROTO(stats_arenas_i_large_nmalloc)
 CTL_PROTO(stats_arenas_i_large_ndalloc)
 CTL_PROTO(stats_arenas_i_large_nrequests)
-CTL_PROTO(stats_arenas_i_bins_j_allocated)
+CTL_PROTO(stats_arenas_i_huge_allocated)
+CTL_PROTO(stats_arenas_i_huge_nmalloc)
+CTL_PROTO(stats_arenas_i_huge_ndalloc)
+CTL_PROTO(stats_arenas_i_huge_nrequests)
 CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
 CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
 CTL_PROTO(stats_arenas_i_bins_j_nrequests)
+CTL_PROTO(stats_arenas_i_bins_j_curregs)
 CTL_PROTO(stats_arenas_i_bins_j_nfills)
 CTL_PROTO(stats_arenas_i_bins_j_nflushes)
 CTL_PROTO(stats_arenas_i_bins_j_nruns)
...
@@ -164,19 +180,32 @@ CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO
(
stats_arenas_i_lruns_j_nrequests
)
CTL_PROTO
(
stats_arenas_i_lruns_j_curruns
)
INDEX_PROTO
(
stats_arenas_i_lruns_j
)
CTL_PROTO
(
stats_arenas_i_hchunks_j_nmalloc
)
CTL_PROTO
(
stats_arenas_i_hchunks_j_ndalloc
)
CTL_PROTO
(
stats_arenas_i_hchunks_j_nrequests
)
CTL_PROTO
(
stats_arenas_i_hchunks_j_curhchunks
)
INDEX_PROTO
(
stats_arenas_i_hchunks_j
)
CTL_PROTO
(
stats_arenas_i_nthreads
)
CTL_PROTO
(
stats_arenas_i_dss
)
CTL_PROTO
(
stats_arenas_i_lg_dirty_mult
)
CTL_PROTO
(
stats_arenas_i_decay_time
)
CTL_PROTO
(
stats_arenas_i_pactive
)
CTL_PROTO
(
stats_arenas_i_pdirty
)
CTL_PROTO
(
stats_arenas_i_mapped
)
CTL_PROTO
(
stats_arenas_i_retained
)
CTL_PROTO
(
stats_arenas_i_npurge
)
CTL_PROTO
(
stats_arenas_i_nmadvise
)
CTL_PROTO
(
stats_arenas_i_purged
)
CTL_PROTO
(
stats_arenas_i_metadata_mapped
)
CTL_PROTO
(
stats_arenas_i_metadata_allocated
)
INDEX_PROTO
(
stats_arenas_i
)
CTL_PROTO
(
stats_cactive
)
CTL_PROTO
(
stats_allocated
)
CTL_PROTO
(
stats_active
)
CTL_PROTO
(
stats_metadata
)
CTL_PROTO
(
stats_resident
)
CTL_PROTO
(
stats_mapped
)
CTL_PROTO
(
stats_retained
)
/******************************************************************************/
/* mallctl tree. */
...
...
@@ -197,71 +226,90 @@ CTL_PROTO(stats_mapped)
*/
#define INDEX(i) {false}, i##_index
-static const ctl_named_node_t	tcache_node[] = {
+static const ctl_named_node_t	thread_tcache_node[] = {
 	{NAME("enabled"),	CTL(thread_tcache_enabled)},
 	{NAME("flush"),		CTL(thread_tcache_flush)}
 };

+static const ctl_named_node_t	thread_prof_node[] = {
+	{NAME("name"),		CTL(thread_prof_name)},
+	{NAME("active"),	CTL(thread_prof_active)}
+};
+
 static const ctl_named_node_t	thread_node[] = {
 	{NAME("arena"),		CTL(thread_arena)},
 	{NAME("allocated"),	CTL(thread_allocated)},
 	{NAME("allocatedp"),	CTL(thread_allocatedp)},
 	{NAME("deallocated"),	CTL(thread_deallocated)},
 	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
-	{NAME("tcache"),	CHILD(named, tcache)}
+	{NAME("tcache"),	CHILD(named, thread_tcache)},
+	{NAME("prof"),		CHILD(named, thread_prof)}
 };

 static const ctl_named_node_t	config_node[] = {
+	{NAME("cache_oblivious"),	CTL(config_cache_oblivious)},
 	{NAME("debug"),			CTL(config_debug)},
-	{NAME("dss"),			CTL(config_dss)},
 	{NAME("fill"),			CTL(config_fill)},
 	{NAME("lazy_lock"),		CTL(config_lazy_lock)},
-	{NAME("mremap"),		CTL(config_mremap)},
+	{NAME("malloc_conf"),		CTL(config_malloc_conf)},
 	{NAME("munmap"),		CTL(config_munmap)},
 	{NAME("prof"),			CTL(config_prof)},
 	{NAME("prof_libgcc"),		CTL(config_prof_libgcc)},
 	{NAME("prof_libunwind"),	CTL(config_prof_libunwind)},
 	{NAME("stats"),			CTL(config_stats)},
 	{NAME("tcache"),		CTL(config_tcache)},
 	{NAME("tls"),			CTL(config_tls)},
 	{NAME("utrace"),		CTL(config_utrace)},
 	{NAME("valgrind"),		CTL(config_valgrind)},
 	{NAME("xmalloc"),		CTL(config_xmalloc)}
 };

 static const ctl_named_node_t opt_node[] = {
 	{NAME("abort"),			CTL(opt_abort)},
 	{NAME("dss"),			CTL(opt_dss)},
 	{NAME("lg_chunk"),		CTL(opt_lg_chunk)},
 	{NAME("narenas"),		CTL(opt_narenas)},
+	{NAME("purge"),			CTL(opt_purge)},
 	{NAME("lg_dirty_mult"),		CTL(opt_lg_dirty_mult)},
+	{NAME("decay_time"),		CTL(opt_decay_time)},
 	{NAME("stats_print"),		CTL(opt_stats_print)},
 	{NAME("junk"),			CTL(opt_junk)},
 	{NAME("zero"),			CTL(opt_zero)},
 	{NAME("quarantine"),		CTL(opt_quarantine)},
 	{NAME("redzone"),		CTL(opt_redzone)},
 	{NAME("utrace"),		CTL(opt_utrace)},
-	{NAME("valgrind"),		CTL(opt_valgrind)},
 	{NAME("xmalloc"),		CTL(opt_xmalloc)},
 	{NAME("tcache"),		CTL(opt_tcache)},
 	{NAME("lg_tcache_max"),		CTL(opt_lg_tcache_max)},
 	{NAME("prof"),			CTL(opt_prof)},
 	{NAME("prof_prefix"),		CTL(opt_prof_prefix)},
 	{NAME("prof_active"),		CTL(opt_prof_active)},
+	{NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
 	{NAME("lg_prof_sample"),	CTL(opt_lg_prof_sample)},
 	{NAME("lg_prof_interval"),	CTL(opt_lg_prof_interval)},
 	{NAME("prof_gdump"),		CTL(opt_prof_gdump)},
 	{NAME("prof_final"),		CTL(opt_prof_final)},
 	{NAME("prof_leak"),		CTL(opt_prof_leak)},
 	{NAME("prof_accum"),		CTL(opt_prof_accum)}
 };

+static const ctl_named_node_t	tcache_node[] = {
+	{NAME("create"),	CTL(tcache_create)},
+	{NAME("flush"),		CTL(tcache_flush)},
+	{NAME("destroy"),	CTL(tcache_destroy)}
+};
+
 static const ctl_named_node_t arena_i_node[] = {
 	{NAME("purge"),			CTL(arena_i_purge)},
-	{NAME("dss"),			CTL(arena_i_dss)}
+	{NAME("decay"),			CTL(arena_i_decay)},
+	{NAME("reset"),			CTL(arena_i_reset)},
+	{NAME("dss"),			CTL(arena_i_dss)},
+	{NAME("lg_dirty_mult"),		CTL(arena_i_lg_dirty_mult)},
+	{NAME("decay_time"),		CTL(arena_i_decay_time)},
+	{NAME("chunk_hooks"),		CTL(arena_i_chunk_hooks)}
 };
 static const ctl_named_node_t super_arena_i_node[] = {
 	{NAME(""),			CHILD(named, arena_i)}
 };

 static const ctl_indexed_node_t arena_node[] = {
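Usage sketch (illustrative, not part of the diff): the node tables above are what the public mallctlnametomib()/mallctlbymib() entry points walk; a reader can resolve a dotted name to a MIB once and then poke the arena-index component directly. Error handling is omitted and arena 0 is assumed:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void)
{
    size_t mib[8], miblen = sizeof(mib) / sizeof(mib[0]);
    size_t allocated, sz = sizeof(allocated);

    mallctlnametomib("stats.arenas.0.small.allocated", mib, &miblen);
    mib[2] = 0;   /* arena index component of the path */
    mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0);
    printf("arena 0 small allocated: %zu\n", allocated);
    return (0);
}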
...
...
@@ -269,12 +317,12 @@ static const ctl_indexed_node_t arena_node[] = {
 };

 static const ctl_named_node_t arenas_bin_i_node[] = {
 	{NAME("size"),			CTL(arenas_bin_i_size)},
 	{NAME("nregs"),			CTL(arenas_bin_i_nregs)},
 	{NAME("run_size"),		CTL(arenas_bin_i_run_size)}
 };
 static const ctl_named_node_t super_arenas_bin_i_node[] = {
 	{NAME(""),			CHILD(named, arenas_bin_i)}
 };

 static const ctl_indexed_node_t arenas_bin_node[] = {
...
...
@@ -282,76 +330,94 @@ static const ctl_indexed_node_t arenas_bin_node[] = {
 };

 static const ctl_named_node_t arenas_lrun_i_node[] = {
 	{NAME("size"),			CTL(arenas_lrun_i_size)}
 };
 static const ctl_named_node_t super_arenas_lrun_i_node[] = {
 	{NAME(""),			CHILD(named, arenas_lrun_i)}
 };

 static const ctl_indexed_node_t arenas_lrun_node[] = {
 	{INDEX(arenas_lrun_i)}
 };

+static const ctl_named_node_t arenas_hchunk_i_node[] = {
+	{NAME("size"),			CTL(arenas_hchunk_i_size)}
+};
+static const ctl_named_node_t super_arenas_hchunk_i_node[] = {
+	{NAME(""),			CHILD(named, arenas_hchunk_i)}
+};
+
+static const ctl_indexed_node_t arenas_hchunk_node[] = {
+	{INDEX(arenas_hchunk_i)}
+};
+
 static const ctl_named_node_t arenas_node[] = {
 	{NAME("narenas"),		CTL(arenas_narenas)},
 	{NAME("initialized"),		CTL(arenas_initialized)},
+	{NAME("lg_dirty_mult"),		CTL(arenas_lg_dirty_mult)},
+	{NAME("decay_time"),		CTL(arenas_decay_time)},
 	{NAME("quantum"),		CTL(arenas_quantum)},
 	{NAME("page"),			CTL(arenas_page)},
 	{NAME("tcache_max"),		CTL(arenas_tcache_max)},
 	{NAME("nbins"),			CTL(arenas_nbins)},
 	{NAME("nhbins"),		CTL(arenas_nhbins)},
 	{NAME("bin"),			CHILD(indexed, arenas_bin)},
 	{NAME("nlruns"),		CTL(arenas_nlruns)},
 	{NAME("lrun"),			CHILD(indexed, arenas_lrun)},
-	{NAME("purge"),			CTL(arenas_purge)},
+	{NAME("nhchunks"),		CTL(arenas_nhchunks)},
+	{NAME("hchunk"),		CHILD(indexed, arenas_hchunk)},
 	{NAME("extend"),		CTL(arenas_extend)}
 };

 static const ctl_named_node_t prof_node[] = {
+	{NAME("thread_active_init"),	CTL(prof_thread_active_init)},
 	{NAME("active"),	CTL(prof_active)},
 	{NAME("dump"),		CTL(prof_dump)},
-	{NAME("interval"),	CTL(prof_interval)}
+	{NAME("gdump"),		CTL(prof_gdump)},
+	{NAME("reset"),		CTL(prof_reset)},
+	{NAME("interval"),	CTL(prof_interval)},
+	{NAME("lg_sample"),	CTL(lg_prof_sample)}
 };

-static const ctl_named_node_t stats_chunks_node[] = {
-	{NAME("current"),	CTL(stats_chunks_current)},
-	{NAME("total"),		CTL(stats_chunks_total)},
-	{NAME("high"),		CTL(stats_chunks_high)}
-};
-
-static const ctl_named_node_t stats_huge_node[] = {
-	{NAME("allocated"),	CTL(stats_huge_allocated)},
-	{NAME("nmalloc"),	CTL(stats_huge_nmalloc)},
-	{NAME("ndalloc"),	CTL(stats_huge_ndalloc)}
-};
-
+static const ctl_named_node_t stats_arenas_i_metadata_node[] = {
+	{NAME("mapped"),	CTL(stats_arenas_i_metadata_mapped)},
+	{NAME("allocated"),	CTL(stats_arenas_i_metadata_allocated)}
+};
+
 static const ctl_named_node_t stats_arenas_i_small_node[] = {
 	{NAME("allocated"),	CTL(stats_arenas_i_small_allocated)},
 	{NAME("nmalloc"),	CTL(stats_arenas_i_small_nmalloc)},
 	{NAME("ndalloc"),	CTL(stats_arenas_i_small_ndalloc)},
 	{NAME("nrequests"),	CTL(stats_arenas_i_small_nrequests)}
 };

 static const ctl_named_node_t stats_arenas_i_large_node[] = {
 	{NAME("allocated"),	CTL(stats_arenas_i_large_allocated)},
 	{NAME("nmalloc"),	CTL(stats_arenas_i_large_nmalloc)},
 	{NAME("ndalloc"),	CTL(stats_arenas_i_large_ndalloc)},
 	{NAME("nrequests"),	CTL(stats_arenas_i_large_nrequests)}
 };

+static const ctl_named_node_t stats_arenas_i_huge_node[] = {
+	{NAME("allocated"),	CTL(stats_arenas_i_huge_allocated)},
+	{NAME("nmalloc"),	CTL(stats_arenas_i_huge_nmalloc)},
+	{NAME("ndalloc"),	CTL(stats_arenas_i_huge_ndalloc)},
+	{NAME("nrequests"),	CTL(stats_arenas_i_huge_nrequests)}
+};
+
 static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
-	{NAME("allocated"),	CTL(stats_arenas_i_bins_j_allocated)},
 	{NAME("nmalloc"),	CTL(stats_arenas_i_bins_j_nmalloc)},
 	{NAME("ndalloc"),	CTL(stats_arenas_i_bins_j_ndalloc)},
 	{NAME("nrequests"),	CTL(stats_arenas_i_bins_j_nrequests)},
+	{NAME("curregs"),	CTL(stats_arenas_i_bins_j_curregs)},
 	{NAME("nfills"),	CTL(stats_arenas_i_bins_j_nfills)},
 	{NAME("nflushes"),	CTL(stats_arenas_i_bins_j_nflushes)},
 	{NAME("nruns"),		CTL(stats_arenas_i_bins_j_nruns)},
 	{NAME("nreruns"),	CTL(stats_arenas_i_bins_j_nreruns)},
 	{NAME("curruns"),	CTL(stats_arenas_i_bins_j_curruns)}
 };
 static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
 	{NAME(""),		CHILD(named, stats_arenas_i_bins_j)}
 };

 static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
...
...
@@ -359,35 +425,55 @@ static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
};
 static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
 	{NAME("nmalloc"),	CTL(stats_arenas_i_lruns_j_nmalloc)},
 	{NAME("ndalloc"),	CTL(stats_arenas_i_lruns_j_ndalloc)},
 	{NAME("nrequests"),	CTL(stats_arenas_i_lruns_j_nrequests)},
 	{NAME("curruns"),	CTL(stats_arenas_i_lruns_j_curruns)}
 };
 static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
 	{NAME(""),		CHILD(named, stats_arenas_i_lruns_j)}
 };

 static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
 	{INDEX(stats_arenas_i_lruns_j)}
 };

+static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = {
+	{NAME("nmalloc"),	CTL(stats_arenas_i_hchunks_j_nmalloc)},
+	{NAME("ndalloc"),	CTL(stats_arenas_i_hchunks_j_ndalloc)},
+	{NAME("nrequests"),	CTL(stats_arenas_i_hchunks_j_nrequests)},
+	{NAME("curhchunks"),	CTL(stats_arenas_i_hchunks_j_curhchunks)}
+};
+static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = {
+	{NAME(""),		CHILD(named, stats_arenas_i_hchunks_j)}
+};
+
+static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = {
+	{INDEX(stats_arenas_i_hchunks_j)}
+};
+
 static const ctl_named_node_t stats_arenas_i_node[] = {
 	{NAME("nthreads"),	CTL(stats_arenas_i_nthreads)},
 	{NAME("dss"),		CTL(stats_arenas_i_dss)},
+	{NAME("lg_dirty_mult"),	CTL(stats_arenas_i_lg_dirty_mult)},
+	{NAME("decay_time"),	CTL(stats_arenas_i_decay_time)},
 	{NAME("pactive"),	CTL(stats_arenas_i_pactive)},
 	{NAME("pdirty"),	CTL(stats_arenas_i_pdirty)},
 	{NAME("mapped"),	CTL(stats_arenas_i_mapped)},
+	{NAME("retained"),	CTL(stats_arenas_i_retained)},
 	{NAME("npurge"),	CTL(stats_arenas_i_npurge)},
 	{NAME("nmadvise"),	CTL(stats_arenas_i_nmadvise)},
 	{NAME("purged"),	CTL(stats_arenas_i_purged)},
+	{NAME("metadata"),	CHILD(named, stats_arenas_i_metadata)},
 	{NAME("small"),		CHILD(named, stats_arenas_i_small)},
 	{NAME("large"),		CHILD(named, stats_arenas_i_large)},
+	{NAME("huge"),		CHILD(named, stats_arenas_i_huge)},
 	{NAME("bins"),		CHILD(indexed, stats_arenas_i_bins)},
-	{NAME("lruns"),		CHILD(indexed, stats_arenas_i_lruns)}
+	{NAME("lruns"),		CHILD(indexed, stats_arenas_i_lruns)},
+	{NAME("hchunks"),	CHILD(indexed, stats_arenas_i_hchunks)}
 };
 static const ctl_named_node_t super_stats_arenas_i_node[] = {
 	{NAME(""),		CHILD(named, stats_arenas_i)}
 };

 static const ctl_indexed_node_t stats_arenas_node[] = {
...
...
@@ -395,13 +481,14 @@ static const ctl_indexed_node_t stats_arenas_node[] = {
};
 static const ctl_named_node_t stats_node[] = {
 	{NAME("cactive"),	CTL(stats_cactive)},
 	{NAME("allocated"),	CTL(stats_allocated)},
 	{NAME("active"),	CTL(stats_active)},
+	{NAME("metadata"),	CTL(stats_metadata)},
+	{NAME("resident"),	CTL(stats_resident)},
 	{NAME("mapped"),	CTL(stats_mapped)},
-	{NAME("chunks"),	CHILD(named, stats_chunks)},
-	{NAME("huge"),		CHILD(named, stats_huge)},
+	{NAME("retained"),	CTL(stats_retained)},
 	{NAME("arenas"),	CHILD(indexed, stats_arenas)}
 };

 static const ctl_named_node_t root_node[] = {
...
...
@@ -410,6 +497,7 @@ static const ctl_named_node_t root_node[] = {
 	{NAME("thread"),	CHILD(named, thread)},
 	{NAME("config"),	CHILD(named, config)},
 	{NAME("opt"),		CHILD(named, opt)},
+	{NAME("tcache"),	CHILD(named, tcache)},
 	{NAME("arena"),		CHILD(indexed, arena)},
 	{NAME("arenas"),	CHILD(named, arenas)},
 	{NAME("prof"),		CHILD(named, prof)},
...
...
@@ -431,12 +519,19 @@ ctl_arena_init(ctl_arena_stats_t *astats)
 {

 	if (astats->lstats == NULL) {
-		astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
+		astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
 		    sizeof(malloc_large_stats_t));
 		if (astats->lstats == NULL)
 			return (true);
 	}

+	if (astats->hstats == NULL) {
+		astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
+		    sizeof(malloc_huge_stats_t));
+		if (astats->hstats == NULL)
+			return (true);
+	}
+
 	return (false);
 }
...
...
@@ -444,7 +539,10 @@ static void
 ctl_arena_clear(ctl_arena_stats_t *astats)
 {

+	astats->nthreads = 0;
 	astats->dss = dss_prec_names[dss_prec_limit];
+	astats->lg_dirty_mult = -1;
+	astats->decay_time = -1;
 	astats->pactive = 0;
 	astats->pdirty = 0;
 	if (config_stats) {
...
...
@@ -456,22 +554,33 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
 		memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
 		memset(astats->lstats, 0, nlclasses *
 		    sizeof(malloc_large_stats_t));
+		memset(astats->hstats, 0, nhclasses *
+		    sizeof(malloc_huge_stats_t));
 	}
 }

 static void
-ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
+ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
 {
 	unsigned i;

-	arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
-	    &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);
-
-	for (i = 0; i < NBINS; i++) {
-		cstats->allocated_small += cstats->bstats[i].allocated;
-		cstats->nmalloc_small += cstats->bstats[i].nmalloc;
-		cstats->ndalloc_small += cstats->bstats[i].ndalloc;
-		cstats->nrequests_small += cstats->bstats[i].nrequests;
+	if (config_stats) {
+		arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
+		    &cstats->lg_dirty_mult, &cstats->decay_time,
+		    &cstats->pactive, &cstats->pdirty, &cstats->astats,
+		    cstats->bstats, cstats->lstats, cstats->hstats);
+
+		for (i = 0; i < NBINS; i++) {
+			cstats->allocated_small += cstats->bstats[i].curregs *
+			    index2size(i);
+			cstats->nmalloc_small += cstats->bstats[i].nmalloc;
+			cstats->ndalloc_small += cstats->bstats[i].ndalloc;
+			cstats->nrequests_small += cstats->bstats[i].nrequests;
+		}
+	} else {
+		arena_basic_stats_merge(tsdn, arena, &cstats->nthreads,
+		    &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time,
+		    &cstats->pactive, &cstats->pdirty);
 	}
 }
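Note (illustrative, not part of the diff): one behavioural change in the merge above is that small allocated bytes are now derived from live regions (curregs) times each bin's region size rather than read from a tracked counter. A tiny worked example of that computation, with made-up bin sizes and counts:

#include <stddef.h>
#include <stdio.h>

int
main(void)
{
    size_t bin_size[] = {8, 16, 32};   /* stand-ins for index2size(i) */
    size_t curregs[]  = {100, 50, 25}; /* live regions per bin */
    size_t allocated_small = 0, i;

    for (i = 0; i < 3; i++)
        allocated_small += curregs[i] * bin_size[i];
    printf("allocated_small = %zu bytes\n", allocated_small);  /* 2400 */
    return (0);
}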
...
...
@@ -480,109 +589,107 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
 {
 	unsigned i;

+	sstats->nthreads += astats->nthreads;
 	sstats->pactive += astats->pactive;
 	sstats->pdirty += astats->pdirty;

-	sstats->astats.mapped += astats->astats.mapped;
-	sstats->astats.npurge += astats->astats.npurge;
-	sstats->astats.nmadvise += astats->astats.nmadvise;
-	sstats->astats.purged += astats->astats.purged;
-
-	sstats->allocated_small += astats->allocated_small;
-	sstats->nmalloc_small += astats->nmalloc_small;
-	sstats->ndalloc_small += astats->ndalloc_small;
-	sstats->nrequests_small += astats->nrequests_small;
-
-	sstats->astats.allocated_large += astats->astats.allocated_large;
-	sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
-	sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
-	sstats->astats.nrequests_large += astats->astats.nrequests_large;
-
-	for (i = 0; i < nlclasses; i++) {
-		sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
-		sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
-		sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
-		sstats->lstats[i].curruns += astats->lstats[i].curruns;
-	}
-
-	for (i = 0; i < NBINS; i++) {
-		sstats->bstats[i].allocated += astats->bstats[i].allocated;
-		sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
-		sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
-		sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
-		if (config_tcache) {
-			sstats->bstats[i].nfills += astats->bstats[i].nfills;
-			sstats->bstats[i].nflushes +=
-			    astats->bstats[i].nflushes;
-		}
-		sstats->bstats[i].nruns += astats->bstats[i].nruns;
-		sstats->bstats[i].reruns += astats->bstats[i].reruns;
-		sstats->bstats[i].curruns += astats->bstats[i].curruns;
-	}
+	if (config_stats) {
+		sstats->astats.mapped += astats->astats.mapped;
+		sstats->astats.retained += astats->astats.retained;
+		sstats->astats.npurge += astats->astats.npurge;
+		sstats->astats.nmadvise += astats->astats.nmadvise;
+		sstats->astats.purged += astats->astats.purged;
+
+		sstats->astats.metadata_mapped +=
+		    astats->astats.metadata_mapped;
+		sstats->astats.metadata_allocated +=
+		    astats->astats.metadata_allocated;
+
+		sstats->allocated_small += astats->allocated_small;
+		sstats->nmalloc_small += astats->nmalloc_small;
+		sstats->ndalloc_small += astats->ndalloc_small;
+		sstats->nrequests_small += astats->nrequests_small;
+
+		sstats->astats.allocated_large +=
+		    astats->astats.allocated_large;
+		sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
+		sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
+		sstats->astats.nrequests_large +=
+		    astats->astats.nrequests_large;
+
+		sstats->astats.allocated_huge += astats->astats.allocated_huge;
+		sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
+		sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
+
+		for (i = 0; i < NBINS; i++) {
+			sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
+			sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
+			sstats->bstats[i].nrequests +=
+			    astats->bstats[i].nrequests;
+			sstats->bstats[i].curregs += astats->bstats[i].curregs;
+			if (config_tcache) {
+				sstats->bstats[i].nfills +=
+				    astats->bstats[i].nfills;
+				sstats->bstats[i].nflushes +=
+				    astats->bstats[i].nflushes;
+			}
+			sstats->bstats[i].nruns += astats->bstats[i].nruns;
+			sstats->bstats[i].reruns += astats->bstats[i].reruns;
+			sstats->bstats[i].curruns += astats->bstats[i].curruns;
+		}
+
+		for (i = 0; i < nlclasses; i++) {
+			sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
+			sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
+			sstats->lstats[i].nrequests +=
+			    astats->lstats[i].nrequests;
+			sstats->lstats[i].curruns += astats->lstats[i].curruns;
+		}
+
+		for (i = 0; i < nhclasses; i++) {
+			sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
+			sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
+			sstats->hstats[i].curhchunks +=
+			    astats->hstats[i].curhchunks;
+		}
+	}
 }

 static void
-ctl_arena_refresh(arena_t *arena, unsigned i)
+ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i)
 {
 	ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
 	ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];

 	ctl_arena_clear(astats);
-
-	sstats->nthreads += astats->nthreads;
-	if (config_stats) {
-		ctl_arena_stats_amerge(astats, arena);
-		/* Merge into sum stats as well. */
-		ctl_arena_stats_smerge(sstats, astats);
-	} else {
-		astats->pactive += arena->nactive;
-		astats->pdirty += arena->ndirty;
-		/* Merge into sum stats as well. */
-		sstats->pactive += arena->nactive;
-		sstats->pdirty += arena->ndirty;
-	}
+	ctl_arena_stats_amerge(tsdn, astats, arena);
+	/* Merge into sum stats as well. */
+	ctl_arena_stats_smerge(sstats, astats);
 }
 static bool
-ctl_grow(void)
+ctl_grow(tsdn_t *tsdn)
 {
-	size_t astats_size;
 	ctl_arena_stats_t *astats;
-	arena_t **tarenas;
-
-	/* Extend arena stats and arenas arrays. */
-	astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t);
-	if (ctl_stats.narenas == narenas_auto) {
-		/* ctl_stats.arenas and arenas came from base_alloc(). */
-		astats = (ctl_arena_stats_t *)imalloc(astats_size);
-		if (astats == NULL)
-			return (true);
-		memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
-		    sizeof(ctl_arena_stats_t));
-
-		tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
-		    sizeof(arena_t *));
-		if (tarenas == NULL) {
-			idalloc(astats);
-			return (true);
-		}
-		memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *));
-	} else {
-		astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas,
-		    astats_size, 0, 0, false, false);
-		if (astats == NULL)
-			return (true);
-
-		tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) *
-		    sizeof(arena_t *), 0, 0, false, false);
-		if (tarenas == NULL)
-			return (true);
-	}
-	/* Initialize the new astats and arenas elements. */
+
+	/* Initialize new arena. */
+	if (arena_init(tsdn, ctl_stats.narenas) == NULL)
+		return (true);
+
+	/* Allocate extended arena stats. */
+	astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) *
+	    sizeof(ctl_arena_stats_t));
+	if (astats == NULL)
+		return (true);
+
+	/* Initialize the new astats element. */
 	memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
 	    sizeof(ctl_arena_stats_t));
 	memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
-	if (ctl_arena_init(&astats[ctl_stats.narenas + 1]))
+	if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
+		a0dalloc(astats);
 		return (true);
-	tarenas[ctl_stats.narenas] = NULL;
+	}
 	/* Swap merged stats to their new location. */
 	{
 		ctl_arena_stats_t tstats;
...
...
@@ -593,89 +700,76 @@ ctl_grow(void)
 		memcpy(&astats[ctl_stats.narenas + 1], &tstats,
 		    sizeof(ctl_arena_stats_t));
 	}
+	a0dalloc(ctl_stats.arenas);
 	ctl_stats.arenas = astats;
 	ctl_stats.narenas++;
-	malloc_mutex_lock(&arenas_lock);
-	arenas = tarenas;
-	narenas_total++;
-	arenas_extend(narenas_total - 1);
-	malloc_mutex_unlock(&arenas_lock);

 	return (false);
 }
 static void
-ctl_refresh(void)
+ctl_refresh(tsdn_t *tsdn)
 {
 	unsigned i;
 	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

-	if (config_stats) {
-		malloc_mutex_lock(&chunks_mtx);
-		ctl_stats.chunks.current = stats_chunks.curchunks;
-		ctl_stats.chunks.total = stats_chunks.nchunks;
-		ctl_stats.chunks.high = stats_chunks.highchunks;
-		malloc_mutex_unlock(&chunks_mtx);
-
-		malloc_mutex_lock(&huge_mtx);
-		ctl_stats.huge.allocated = huge_allocated;
-		ctl_stats.huge.nmalloc = huge_nmalloc;
-		ctl_stats.huge.ndalloc = huge_ndalloc;
-		malloc_mutex_unlock(&huge_mtx);
-	}
-
 	/*
 	 * Clear sum stats, since they will be merged into by
 	 * ctl_arena_refresh().
 	 */
-	ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
 	ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);

-	malloc_mutex_lock(&arenas_lock);
-	memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
-	for (i = 0; i < ctl_stats.narenas; i++) {
-		if (arenas[i] != NULL)
-			ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
-		else
-			ctl_stats.arenas[i].nthreads = 0;
-	}
-	malloc_mutex_unlock(&arenas_lock);
+	for (i = 0; i < ctl_stats.narenas; i++)
+		tarenas[i] = arena_get(tsdn, i, false);
+
 	for (i = 0; i < ctl_stats.narenas; i++) {
 		bool initialized = (tarenas[i] != NULL);

 		ctl_stats.arenas[i].initialized = initialized;
 		if (initialized)
-			ctl_arena_refresh(tarenas[i], i);
+			ctl_arena_refresh(tsdn, tarenas[i], i);
 	}

 	if (config_stats) {
+		size_t base_allocated, base_resident, base_mapped;
+		base_stats_get(tsdn, &base_allocated, &base_resident,
+		    &base_mapped);
 		ctl_stats.allocated =
-		    ctl_stats.arenas[ctl_stats.narenas].allocated_small
-		    + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
-		    + ctl_stats.huge.allocated;
-		ctl_stats.active =
-		    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
-		    + ctl_stats.huge.allocated;
-		ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
+		    ctl_stats.arenas[ctl_stats.narenas].allocated_small +
+		    ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
+		    ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
+		ctl_stats.active =
+		    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
+		ctl_stats.metadata = base_allocated +
+		    ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
+		    ctl_stats.arenas[ctl_stats.narenas].astats
+		    .metadata_allocated;
+		ctl_stats.resident = base_resident +
+		    ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
+		    ((ctl_stats.arenas[ctl_stats.narenas].pactive +
+		    ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
+		ctl_stats.mapped = base_mapped +
+		    ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
+		ctl_stats.retained =
+		    ctl_stats.arenas[ctl_stats.narenas].astats.retained;
 	}

 	ctl_epoch++;
 }
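Worked restatement (assumed numbers, purely illustrative) of how the derived totals at the end of ctl_refresh() compose:

/*
 * Assume 4 KiB pages (LG_PAGE = 12) and this snapshot:
 *   base_allocated = 1 MiB, base_resident = 2 MiB, base_mapped = 4 MiB
 *   summary arena: pactive = 1000, pdirty = 200, astats.mapped = 64 MiB,
 *                  astats.metadata_mapped = 3 MiB,
 *                  astats.metadata_allocated = 1 MiB
 *
 * Following the expressions above:
 *   active   = pactive << LG_PAGE                           = 4,096,000 B
 *   metadata = base_allocated + metadata_mapped
 *              + metadata_allocated                         = 5 MiB
 *   resident = base_resident + metadata_mapped
 *              + ((pactive + pdirty) << LG_PAGE)            = 5 MiB + 4,915,200 B
 *   mapped   = base_mapped + astats.mapped                  = 68 MiB
 */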
 static bool
-ctl_init(void)
+ctl_init(tsdn_t *tsdn)
 {
 	bool ret;

-	malloc_mutex_lock(&ctl_mtx);
-	if (ctl_initialized == false) {
+	malloc_mutex_lock(tsdn, &ctl_mtx);
+	if (!ctl_initialized) {
 		/*
 		 * Allocate space for one extra arena stats element, which
 		 * contains summed stats across all arenas.
 		 */
-		assert(narenas_auto == narenas_total_get());
-		ctl_stats.narenas = narenas_auto;
-		ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
+		ctl_stats.narenas = narenas_total_get();
+		ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc(
 		    (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
 		if (ctl_stats.arenas == NULL) {
 			ret = true;
...
...
@@ -693,6 +787,15 @@ ctl_init(void)
 		unsigned i;
 		for (i = 0; i <= ctl_stats.narenas; i++) {
 			if (ctl_arena_init(&ctl_stats.arenas[i])) {
+				unsigned j;
+
+				for (j = 0; j < i; j++) {
+					a0dalloc(ctl_stats.arenas[j].lstats);
+					a0dalloc(ctl_stats.arenas[j].hstats);
+				}
+				a0dalloc(ctl_stats.arenas);
+				ctl_stats.arenas = NULL;
 				ret = true;
 				goto label_return;
 			}
...
...
@@ -701,19 +804,19 @@ ctl_init(void)
 		ctl_stats.arenas[ctl_stats.narenas].initialized = true;

 		ctl_epoch = 0;
-		ctl_refresh();
+		ctl_refresh(tsdn);
 		ctl_initialized = true;
 	}

 	ret = false;
 label_return:
-	malloc_mutex_unlock(&ctl_mtx);
+	malloc_mutex_unlock(tsdn, &ctl_mtx);
 	return (ret);
 }

 static int
-ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
-    size_t *depthp)
+ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
+    size_t *mibp, size_t *depthp)
 {
 	int ret;
 	const char *elm, *tdot, *dot;
...
...
@@ -765,7 +868,7 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
 			}
 			inode = ctl_indexed_node(node->children);
-			node = inode->index(mibp, *depthp, (size_t)index);
+			node = inode->index(tsdn, mibp, *depthp, (size_t)index);
 			if (node == NULL) {
 				ret = ENOENT;
 				goto label_return;
...
...
@@ -809,8 +912,8 @@ label_return:
}
 int
-ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
-    size_t newlen)
+ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
 {
 	int ret;
 	size_t depth;
...
...
@@ -818,19 +921,19 @@ ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
 	size_t mib[CTL_MAX_DEPTH];
 	const ctl_named_node_t *node;

-	if (ctl_initialized == false && ctl_init()) {
+	if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
 		ret = EAGAIN;
 		goto label_return;
 	}

 	depth = CTL_MAX_DEPTH;
-	ret = ctl_lookup(name, nodes, mib, &depth);
+	ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
 	if (ret != 0)
 		goto label_return;

 	node = ctl_named_node(nodes[depth-1]);
 	if (node != NULL && node->ctl)
-		ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
+		ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
 	else {
 		/* The name refers to a partial path through the ctl tree. */
 		ret = ENOENT
...
...
@@ -841,29 +944,29 @@ label_return:
}
 int
-ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
+ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp)
 {
 	int ret;

-	if (ctl_initialized == false && ctl_init()) {
+	if (!ctl_initialized && ctl_init(tsdn)) {
 		ret = EAGAIN;
 		goto label_return;
 	}

-	ret = ctl_lookup(name, NULL, mibp, miblenp);
+	ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp);
 label_return:
 	return (ret);
 }

 int
-ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
-    void *newp, size_t newlen)
+ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
 {
 	int ret;
 	const ctl_named_node_t *node;
 	size_t i;

-	if (ctl_initialized == false && ctl_init()) {
+	if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
 		ret = EAGAIN;
 		goto label_return;
 	}
...
...
@@ -875,7 +978,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 		assert(node->nchildren > 0);
 		if (ctl_named_node(node->children) != NULL) {
 			/* Children are named. */
-			if (node->nchildren <= mib[i]) {
+			if (node->nchildren <= (unsigned)mib[i]) {
 				ret = ENOENT;
 				goto label_return;
 			}
...
...
@@ -885,7 +988,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 			/* Indexed element. */
 			inode = ctl_indexed_node(node->children);
-			node = inode->index(mib, miblen, mib[i]);
+			node = inode->index(tsd_tsdn(tsd), mib, miblen,
+			    mib[i]);
 			if (node == NULL) {
 				ret = ENOENT;
 				goto label_return;
...
...
@@ -895,7 +998,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 	/* Call the ctl function. */
 	if (node && node->ctl)
-		ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
+		ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
 	else {
 		/* Partial MIB. */
 		ret = ENOENT;
...
...
@@ -909,7 +1012,7 @@ bool
 ctl_boot(void)
 {

-	if (malloc_mutex_init(&ctl_mtx))
+	if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL))
 		return (true);

 	ctl_initialized = false;
...
...
@@ -918,24 +1021,24 @@ ctl_boot(void)
}
 void
-ctl_prefork(void)
+ctl_prefork(tsdn_t *tsdn)
 {

-	malloc_mutex_lock(&ctl_mtx);
+	malloc_mutex_prefork(tsdn, &ctl_mtx);
 }

 void
-ctl_postfork_parent(void)
+ctl_postfork_parent(tsdn_t *tsdn)
 {

-	malloc_mutex_postfork_parent(&ctl_mtx);
+	malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
 }

 void
-ctl_postfork_child(void)
+ctl_postfork_child(tsdn_t *tsdn)
 {

-	malloc_mutex_postfork_child(&ctl_mtx);
+	malloc_mutex_postfork_child(tsdn, &ctl_mtx);
 }
/******************************************************************************/
...
...
@@ -955,16 +1058,24 @@ ctl_postfork_child(void)
 	}								\
 } while (0)

 #define	READ_XOR_WRITE()	do {					\
 	if ((oldp != NULL && oldlenp != NULL) && (newp != NULL ||	\
 	    newlen != 0)) {						\
 		ret = EPERM;						\
 		goto label_return;					\
 	}								\
 } while (0)

 #define	READ(v, t)	do {						\
 	if (oldp != NULL && oldlenp != NULL) {				\
 		if (*oldlenp != sizeof(t)) {				\
 			size_t	copylen = (sizeof(t) <= *oldlenp)	\
 			    ? sizeof(t) : *oldlenp;			\
-			memcpy(oldp, (void *)&v, copylen);		\
+			memcpy(oldp, (void *)&(v), copylen);		\
 			ret = EINVAL;					\
 			goto label_return;				\
-		} else							\
-			*(t *)oldp = v;					\
+		}							\
+		*(t *)oldp = (v);					\
 	}								\
 } while (0)
...
@@ -974,7 +1085,7 @@ ctl_postfork_child(void)
 			ret = EINVAL;					\
 			goto label_return;				\
 		}							\
-		v = *(t *)newp;						\
+		(v) = *(t *)newp;					\
 	}								\
 } while (0)
...
...
@@ -984,64 +1095,64 @@ ctl_postfork_child(void)
*/
 #define	CTL_RO_CLGEN(c, l, n, v, t)					\
 static int								\
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
-    void *newp, size_t newlen)						\
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
+    size_t *oldlenp, void *newp, size_t newlen)				\
 {									\
 	int ret;							\
 	t oldval;							\
									\
-	if ((c) == false)						\
+	if (!(c))							\
 		return (ENOENT);					\
 	if (l)								\
-		malloc_mutex_lock(&ctl_mtx);				\
+		malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);		\
 	READONLY();							\
-	oldval = v;							\
+	oldval = (v);							\
 	READ(oldval, t);						\
									\
 	ret = 0;							\
 label_return:								\
 	if (l)								\
-		malloc_mutex_unlock(&ctl_mtx);				\
+		malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);		\
 	return (ret);							\
 }

 #define	CTL_RO_CGEN(c, n, v, t)						\
 static int								\
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
-    void *newp, size_t newlen)						\
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
+    size_t *oldlenp, void *newp, size_t newlen)				\
 {									\
 	int ret;							\
 	t oldval;							\
									\
-	if ((c) == false)						\
+	if (!(c))							\
 		return (ENOENT);					\
-	malloc_mutex_lock(&ctl_mtx);					\
+	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
 	READONLY();							\
-	oldval = v;							\
+	oldval = (v);							\
 	READ(oldval, t);						\
									\
 	ret = 0;							\
 label_return:								\
-	malloc_mutex_unlock(&ctl_mtx);					\
+	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
 	return (ret);							\
 }

 #define	CTL_RO_GEN(n, v, t)						\
 static int								\
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
-    void *newp, size_t newlen)						\
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
+    size_t *oldlenp, void *newp, size_t newlen)				\
 {									\
 	int ret;							\
 	t oldval;							\
									\
-	malloc_mutex_lock(&ctl_mtx);					\
+	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
 	READONLY();							\
-	oldval = v;							\
+	oldval = (v);							\
 	READ(oldval, t);						\
									\
 	ret = 0;							\
 label_return:								\
-	malloc_mutex_unlock(&ctl_mtx);					\
+	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
 	return (ret);							\
 }
...
...
@@ -1051,16 +1162,16 @@ label_return: \
*/
 #define	CTL_RO_NL_CGEN(c, n, v, t)					\
 static int								\
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
-    void *newp, size_t newlen)						\
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
+    size_t *oldlenp, void *newp, size_t newlen)				\
 {									\
 	int ret;							\
 	t oldval;							\
									\
-	if ((c) == false)						\
+	if (!(c))							\
 		return (ENOENT);					\
 	READONLY();							\
-	oldval = v;							\
+	oldval = (v);							\
 	READ(oldval, t);						\
									\
 	ret = 0;							\
...
...
@@ -1070,14 +1181,33 @@ label_return: \
 #define	CTL_RO_NL_GEN(n, v, t)						\
 static int								\
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
-    void *newp, size_t newlen)						\
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
+    size_t *oldlenp, void *newp, size_t newlen)				\
 {									\
 	int ret;							\
 	t oldval;							\
									\
 	READONLY();							\
 	oldval = (v);							\
 	READ(oldval, t);						\
									\
 	ret = 0;							\
 label_return:								\
 	return (ret);							\
 }

 #define	CTL_TSD_RO_NL_CGEN(c, n, m, t)					\
 static int								\
 n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
     size_t *oldlenp, void *newp, size_t newlen)				\
 {									\
 	int ret;							\
 	t oldval;							\
									\
 	if (!(c))							\
 		return (ENOENT);					\
 	READONLY();							\
-	oldval = v;							\
+	oldval = (m(tsd));						\
 	READ(oldval, t);						\
									\
 	ret = 0;							\
...
...
@@ -1085,52 +1215,161 @@ label_return: \
	return (ret); \
}

-#define	CTL_RO_BOOL_CONFIG_GEN(n) \
+#define	CTL_RO_CONFIG_GEN(n, t) \
static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
-    void *newp, size_t newlen) \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+    size_t *oldlenp, void *newp, size_t newlen) \
{ \
	int ret; \
-	bool oldval; \
+	t oldval; \
\
	READONLY(); \
	oldval = n; \
-	READ(oldval, bool); \
+	READ(oldval, t); \
\
	ret = 0; \
label_return: \
	return (ret); \
}
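As a usage note (not part of this commit): the handlers that CTL_RO_CONFIG_GEN generates are reached from application code through jemalloc's public mallctl() interface under the "config.*" names. The sketch below is an assumption-laden illustration of that caller side; the <jemalloc/jemalloc.h> header and the unprefixed mallctl symbol depend on how jemalloc was built and installed.

/*
 * Caller-side sketch (hypothetical): read two of the read-only "config.*"
 * booleans exposed by the CTL_RO_CONFIG_GEN handlers above.
 */
#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    bool debug, stats;
    size_t sz = sizeof(bool);

    if (mallctl("config.debug", &debug, &sz, NULL, 0) != 0 ||
        mallctl("config.stats", &stats, &sz, NULL, 0) != 0)
        return (1);
    printf("config.debug=%d config.stats=%d\n", debug, stats);
    return (0);
}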
/******************************************************************************/
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)

static int
-epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
-    void *newp, size_t newlen)
+epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
-	uint64_t newval;
+	UNUSED uint64_t newval;

-	malloc_mutex_lock(&ctl_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(newval, uint64_t);
	if (newp != NULL)
-		ctl_refresh();
+		ctl_refresh(tsd_tsdn(tsd));
	READ(ctl_epoch, uint64_t);

	ret = 0;
label_return:
-	malloc_mutex_unlock(&ctl_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return (ret);
}
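For orientation (not part of this diff): epoch_ctl() backs the documented "epoch" mallctl, which refreshes the cached statistics that "stats.*" reads return. A minimal caller-side sketch, assuming the unprefixed public mallctl() entry point:

/*
 * Hypothetical usage sketch: bump "epoch" so that a following "stats.*"
 * read reflects current state rather than a stale snapshot.
 */
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    uint64_t epoch = 1;
    size_t allocated, sz;

    /* Writing any value to "epoch" triggers ctl_refresh(). */
    if (mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)) != 0)
        return (1);

    sz = sizeof(allocated);
    if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0)
        return (1);
    printf("stats.allocated: %zu\n", allocated);
    return (0);
}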
/******************************************************************************/
CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
CTL_RO_CONFIG_GEN(config_debug, bool)
CTL_RO_CONFIG_GEN(config_fill, bool)
CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
CTL_RO_CONFIG_GEN(config_munmap, bool)
CTL_RO_CONFIG_GEN(config_prof, bool)
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
CTL_RO_CONFIG_GEN(config_stats, bool)
CTL_RO_CONFIG_GEN(config_tcache, bool)
CTL_RO_CONFIG_GEN(config_tls, bool)
CTL_RO_CONFIG_GEN(config_utrace, bool)
CTL_RO_CONFIG_GEN(config_valgrind, bool)
CTL_RO_CONFIG_GEN(config_xmalloc, bool)
/******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
    opt_prof_thread_active_init, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
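For orientation (not part of this diff): the "opt.*" handlers generated above are read-only snapshots of the option values fixed at startup. A minimal caller-side sketch, assuming the public unprefixed mallctl() symbol:

/*
 * Hypothetical usage sketch: read two startup options through mallctl().
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    unsigned narenas;
    size_t lg_chunk, sz;

    sz = sizeof(narenas);
    if (mallctl("opt.narenas", &narenas, &sz, NULL, 0) != 0)
        return (1);

    sz = sizeof(lg_chunk);
    if (mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0) != 0)
        return (1);

    printf("opt.narenas=%u opt.lg_chunk=%zu\n", narenas, lg_chunk);
    return (0);
}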
/******************************************************************************/
static int
-thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
+thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	arena_t *oldarena;
	unsigned newind, oldind;

	oldarena = arena_choose(tsd, NULL);
	if (oldarena == NULL)
		return (EAGAIN);

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	newind = oldind = oldarena->ind;
	WRITE(newind, unsigned);
	READ(oldind, unsigned);
	if (newind != oldind) {
		arena_t *newarena;

		if (newind >= ctl_stats.narenas) {
			/* New arena index is out of range. */
			ret = EFAULT;
			goto label_return;
		}

		/* Initialize arena if necessary. */
		newarena = arena_get(tsd_tsdn(tsd), newind, true);
		if (newarena == NULL) {
			ret = EAGAIN;
			goto label_return;
		}
		/* Set new arena/tcache associations. */
		arena_migrate(tsd, oldind, newind);
		if (config_tcache) {
			tcache_t *tcache = tsd_tcache_get(tsd);
			if (tcache != NULL) {
				tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
				    oldarena, newarena);
			}
		}
	}

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return (ret);
}
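As a usage note (not part of this commit): thread_arena_ctl() services the documented "thread.arena" mallctl, which reads and optionally rebinds the calling thread's arena in a single call. A hedged caller-side sketch, assuming the unprefixed mallctl() symbol:

/*
 * Hypothetical usage sketch: query the current arena binding and rebind
 * the calling thread to arena 0 in one read/write mallctl() call.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    unsigned old_arena, new_arena = 0;
    size_t sz = sizeof(unsigned);

    if (mallctl("thread.arena", &old_arena, &sz, &new_arena,
        sizeof(new_arena)) != 0)
        return (1);
    printf("was bound to arena %u, now bound to arena 0\n", old_arena);
    return (0);
}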
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
    uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
    uint64_t *)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated,
    tsd_thread_deallocated_get, uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
    tsd_thread_deallocatedp_get, uint64_t *)

static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	bool oldval;

-	if (config_tcache == false)
+	if (!config_tcache)
		return (ENOENT);

	oldval = tcache_enabled_get();
...
...
@@ -1149,12 +1388,12 @@ label_return:
}
static int
-thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
-    size_t *oldlenp, void *newp, size_t newlen)
+thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;

-	if (config_tcache == false)
+	if (!config_tcache)
		return (ENOENT);

	READONLY();
...
...
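As a usage note (not part of this commit): thread_tcache_flush_ctl() and thread_tcache_enabled_ctl() back the "thread.tcache.flush" and "thread.tcache.enabled" mallctls; both return ENOENT when jemalloc is configured without tcache support. A hedged caller-side sketch:

/*
 * Hypothetical usage sketch: flush the calling thread's tcache, then
 * disable it for the rest of the thread's lifetime.
 */
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    bool enabled = false;

    /* "thread.tcache.flush" takes neither old nor new data. */
    (void)mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);

    /* Disable the per-thread cache from now on. */
    (void)mallctl("thread.tcache.enabled", NULL, NULL, &enabled,
        sizeof(enabled));
    return (0);
}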
@@ -1168,146 +1407,107 @@ label_return:
}
static
int
thread_
arena_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
thread_
prof_name_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
newind
,
oldind
;
malloc_mutex_lock
(
&
ctl_mtx
);
newind
=
oldind
=
choose_arena
(
NULL
)
->
ind
;
WRITE
(
newind
,
unsigned
);
READ
(
oldind
,
unsigned
);
if
(
newind
!=
oldind
)
{
arena_t
*
arena
;
if
(
!
config_prof
)
return
(
ENOENT
);
if
(
newind
>=
ctl_stats
.
narenas
)
{
/* New arena index is out of range. */
ret
=
EFAULT
;
goto
label_return
;
}
READ_XOR_WRITE
();
/* Initialize arena if necessary. */
malloc_mutex_lock
(
&
arenas_lock
);
if
((
arena
=
arenas
[
newind
])
==
NULL
&&
(
arena
=
arenas_extend
(
newind
))
==
NULL
)
{
malloc_mutex_unlock
(
&
arenas_lock
);
ret
=
EAGAIN
;
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
const
char
*
))
{
ret
=
EINVAL
;
goto
label_return
;
}
assert
(
arena
==
arenas
[
newind
]);
arenas
[
oldind
]
->
nthreads
--
;
arenas
[
newind
]
->
nthreads
++
;
malloc_mutex_unlock
(
&
arenas_lock
);
/* Set new arena association. */
if
(
config_tcache
)
{
tcache_t
*
tcache
;
if
((
uintptr_t
)(
tcache
=
*
tcache_tsd_get
())
>
(
uintptr_t
)
TCACHE_STATE_MAX
)
{
tcache_arena_dissociate
(
tcache
);
tcache_arena_associate
(
tcache
,
arena
);
}
}
arenas_tsd_set
(
&
arena
);
if
((
ret
=
prof_thread_name_set
(
tsd
,
*
(
const
char
**
)
newp
))
!=
0
)
goto
label_return
;
}
else
{
const
char
*
oldname
=
prof_thread_name_get
(
tsd
);
READ
(
oldname
,
const
char
*
);
}
ret
=
0
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
return
(
ret
);
}
CTL_RO_NL_CGEN
(
config_stats
,
thread_allocated
,
thread_allocated_tsd_get
()
->
allocated
,
uint64_t
)
CTL_RO_NL_CGEN
(
config_stats
,
thread_allocatedp
,
&
thread_allocated_tsd_get
()
->
allocated
,
uint64_t
*
)
CTL_RO_NL_CGEN
(
config_stats
,
thread_deallocated
,
thread_allocated_tsd_get
()
->
deallocated
,
uint64_t
)
CTL_RO_NL_CGEN
(
config_stats
,
thread_deallocatedp
,
&
thread_allocated_tsd_get
()
->
deallocated
,
uint64_t
*
)
/******************************************************************************/
static
int
thread_prof_active_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
bool
oldval
;
CTL_RO_BOOL_CONFIG_GEN
(
config_debug
)
CTL_RO_BOOL_CONFIG_GEN
(
config_dss
)
CTL_RO_BOOL_CONFIG_GEN
(
config_fill
)
CTL_RO_BOOL_CONFIG_GEN
(
config_lazy_lock
)
CTL_RO_BOOL_CONFIG_GEN
(
config_mremap
)
CTL_RO_BOOL_CONFIG_GEN
(
config_munmap
)
CTL_RO_BOOL_CONFIG_GEN
(
config_prof
)
CTL_RO_BOOL_CONFIG_GEN
(
config_prof_libgcc
)
CTL_RO_BOOL_CONFIG_GEN
(
config_prof_libunwind
)
CTL_RO_BOOL_CONFIG_GEN
(
config_stats
)
CTL_RO_BOOL_CONFIG_GEN
(
config_tcache
)
CTL_RO_BOOL_CONFIG_GEN
(
config_tls
)
CTL_RO_BOOL_CONFIG_GEN
(
config_utrace
)
CTL_RO_BOOL_CONFIG_GEN
(
config_valgrind
)
CTL_RO_BOOL_CONFIG_GEN
(
config_xmalloc
)
if
(
!
config_prof
)
return
(
ENOENT
);
/******************************************************************************/
oldval
=
prof_thread_active_get
(
tsd
);
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
bool
))
{
ret
=
EINVAL
;
goto
label_return
;
}
if
(
prof_thread_active_set
(
tsd
,
*
(
bool
*
)
newp
))
{
ret
=
EAGAIN
;
goto
label_return
;
}
}
READ
(
oldval
,
bool
);
CTL_RO_NL_GEN
(
opt_abort
,
opt_abort
,
bool
)
CTL_RO_NL_GEN
(
opt_dss
,
opt_dss
,
const
char
*
)
CTL_RO_NL_GEN
(
opt_lg_chunk
,
opt_lg_chunk
,
size_t
)
CTL_RO_NL_GEN
(
opt_narenas
,
opt_narenas
,
size_t
)
CTL_RO_NL_GEN
(
opt_lg_dirty_mult
,
opt_lg_dirty_mult
,
ssize_t
)
CTL_RO_NL_GEN
(
opt_stats_print
,
opt_stats_print
,
bool
)
CTL_RO_NL_CGEN
(
config_fill
,
opt_junk
,
opt_junk
,
bool
)
CTL_RO_NL_CGEN
(
config_fill
,
opt_zero
,
opt_zero
,
bool
)
CTL_RO_NL_CGEN
(
config_fill
,
opt_quarantine
,
opt_quarantine
,
size_t
)
CTL_RO_NL_CGEN
(
config_fill
,
opt_redzone
,
opt_redzone
,
bool
)
CTL_RO_NL_CGEN
(
config_utrace
,
opt_utrace
,
opt_utrace
,
bool
)
CTL_RO_NL_CGEN
(
config_valgrind
,
opt_valgrind
,
opt_valgrind
,
bool
)
CTL_RO_NL_CGEN
(
config_xmalloc
,
opt_xmalloc
,
opt_xmalloc
,
bool
)
CTL_RO_NL_CGEN
(
config_tcache
,
opt_tcache
,
opt_tcache
,
bool
)
CTL_RO_NL_CGEN
(
config_tcache
,
opt_lg_tcache_max
,
opt_lg_tcache_max
,
ssize_t
)
CTL_RO_NL_CGEN
(
config_prof
,
opt_prof
,
opt_prof
,
bool
)
CTL_RO_NL_CGEN
(
config_prof
,
opt_prof_prefix
,
opt_prof_prefix
,
const
char
*
)
CTL_RO_CGEN
(
config_prof
,
opt_prof_active
,
opt_prof_active
,
bool
)
/* Mutable. */
CTL_RO_NL_CGEN
(
config_prof
,
opt_lg_prof_sample
,
opt_lg_prof_sample
,
size_t
)
CTL_RO_NL_CGEN
(
config_prof
,
opt_lg_prof_interval
,
opt_lg_prof_interval
,
ssize_t
)
CTL_RO_NL_CGEN
(
config_prof
,
opt_prof_gdump
,
opt_prof_gdump
,
bool
)
CTL_RO_NL_CGEN
(
config_prof
,
opt_prof_final
,
opt_prof_final
,
bool
)
CTL_RO_NL_CGEN
(
config_prof
,
opt_prof_leak
,
opt_prof_leak
,
bool
)
CTL_RO_NL_CGEN
(
config_prof
,
opt_prof_accum
,
opt_prof_accum
,
bool
)
ret
=
0
;
label_return:
return
(
ret
);
}
/******************************************************************************/
/* ctl_mutex must be held during execution of this function. */
static
void
arena_purge
(
unsigned
arena_ind
)
static
int
tcache_create_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
VARIABLE_ARRAY
(
arena_t
*
,
tarenas
,
ctl_stats
.
narenas
);
int
ret
;
unsigned
tcache_ind
;
malloc_mutex_lock
(
&
arenas_lock
);
memcpy
(
tarenas
,
arenas
,
sizeof
(
arena_t
*
)
*
ctl_stats
.
narenas
);
malloc_mutex_unlock
(
&
arenas_lock
);
if
(
!
config_tcache
)
return
(
ENOENT
);
if
(
arena_ind
==
ctl_stats
.
narenas
)
{
unsigned
i
;
for
(
i
=
0
;
i
<
ctl_stats
.
narenas
;
i
++
)
{
if
(
tarenas
[
i
]
!=
NULL
)
arena_purge_all
(
tarenas
[
i
]);
}
}
else
{
assert
(
arena_ind
<
ctl_stats
.
narenas
);
if
(
tarenas
[
arena_ind
]
!=
NULL
)
arena_purge_all
(
tarenas
[
arena_ind
]);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
READONLY
();
if
(
tcaches_create
(
tsd
,
&
tcache_ind
))
{
ret
=
EFAULT
;
goto
label_return
;
}
READ
(
tcache_ind
,
unsigned
);
ret
=
0
;
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
(
ret
);
}
static
int
arena_i_purge_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
tcache_flush_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
tcache_ind
;
if
(
!
config_tcache
)
return
(
ENOENT
);
READONLY
();
WRITEONLY
();
malloc_mutex_lock
(
&
ctl_mtx
);
arena_purge
(
mib
[
1
]);
malloc_mutex_unlock
(
&
ctl_mtx
);
tcache_ind
=
UINT_MAX
;
WRITE
(
tcache_ind
,
unsigned
);
if
(
tcache_ind
==
UINT_MAX
)
{
ret
=
EFAULT
;
goto
label_return
;
}
tcaches_flush
(
tsd
,
tcache_ind
);
ret
=
0
;
label_return:
...
...
@@ -1315,106 +1515,321 @@ label_return:
}
static
int
arena_i_dss_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
tcache_destroy_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
,
i
;
bool
match
,
err
;
const
char
*
dss
;
unsigned
arena_ind
=
mib
[
1
];
dss_prec_t
dss_prec_old
=
dss_prec_limit
;
dss_prec_t
dss_prec
=
dss_prec_limit
;
int
ret
;
unsigned
tcache_ind
;
malloc_mutex_lock
(
&
ctl_mtx
);
WRITE
(
dss
,
const
char
*
);
match
=
false
;
for
(
i
=
0
;
i
<
dss_prec_limit
;
i
++
)
{
if
(
strcmp
(
dss_prec_names
[
i
],
dss
)
==
0
)
{
dss_prec
=
i
;
match
=
true
;
break
;
}
}
if
(
match
==
false
)
{
ret
=
EINVAL
;
goto
label_return
;
}
if
(
!
config_tcache
)
return
(
ENOENT
);
if
(
arena_ind
<
ctl_stats
.
narenas
)
{
arena_t
*
arena
=
arenas
[
arena_ind
];
if
(
arena
!=
NULL
)
{
dss_prec_old
=
arena_dss_prec_get
(
arena
);
arena_dss_prec_set
(
arena
,
dss_prec
);
err
=
false
;
}
else
err
=
true
;
}
else
{
dss_prec_old
=
chunk_dss_prec_get
();
err
=
chunk_dss_prec_set
(
dss_prec
);
}
dss
=
dss_prec_names
[
dss_prec_old
];
READ
(
dss
,
const
char
*
);
if
(
err
)
{
WRITEONLY
();
tcache_ind
=
UINT_MAX
;
WRITE
(
tcache_ind
,
unsigned
);
if
(
tcache_ind
==
UINT_MAX
)
{
ret
=
EFAULT
;
goto
label_return
;
}
tcaches_destroy
(
tsd
,
tcache_ind
);
ret
=
0
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
return
(
ret
);
}
-static const ctl_named_node_t *
-arena_i_index(const size_t *mib, size_t miblen, size_t i)
-{
-	const ctl_named_node_t *ret;
-
-	malloc_mutex_lock(&ctl_mtx);
-	if (i > ctl_stats.narenas) {
-		ret = NULL;

/******************************************************************************/

static void
arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
{

	malloc_mutex_lock(tsdn, &ctl_mtx);
	{
		unsigned narenas = ctl_stats.narenas;

		if (arena_ind == narenas) {
			unsigned i;
			VARIABLE_ARRAY(arena_t *, tarenas, narenas);

			for (i = 0; i < narenas; i++)
				tarenas[i] = arena_get(tsdn, i, false);

			/*
			 * No further need to hold ctl_mtx, since narenas and
			 * tarenas contain everything needed below.
			 */
			malloc_mutex_unlock(tsdn, &ctl_mtx);

			for (i = 0; i < narenas; i++) {
				if (tarenas[i] != NULL)
					arena_purge(tsdn, tarenas[i], all);
			}
		} else {
			arena_t *tarena;

			assert(arena_ind < narenas);

			tarena = arena_get(tsdn, arena_ind, false);

			/* No further need to hold ctl_mtx. */
			malloc_mutex_unlock(tsdn, &ctl_mtx);

			if (tarena != NULL)
				arena_purge(tsdn, tarena, all);
		}
	}
}

static int
arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;

	READONLY();
	WRITEONLY();
	arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true);

	ret = 0;
label_return:
	return (ret);
}

static int
arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;

	READONLY();
	WRITEONLY();
	arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false);

	ret = 0;
label_return:
	return (ret);
}
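As a usage note (not part of this commit): arena_i_purge() implements the "purge all arenas" convention where the index written to "arena.<i>.purge" equals the total arena count. A hedged caller-side sketch, assuming the unprefixed mallctl() symbol:

/*
 * Hypothetical usage sketch: read "arenas.narenas" and then purge dirty
 * pages in every arena via "arena.<narenas>.purge".
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    unsigned narenas;
    size_t sz = sizeof(narenas);
    char name[64];

    if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) != 0)
        return (1);

    /* Using <i> == narenas asks arena_i_purge() to purge all arenas. */
    snprintf(name, sizeof(name), "arena.%u.purge", narenas);
    if (mallctl(name, NULL, NULL, NULL, 0) != 0)
        return (1);
    return (0);
}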
static
int
arena_i_reset_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
;
arena_t
*
arena
;
READONLY
();
WRITEONLY
();
if
((
config_valgrind
&&
unlikely
(
in_valgrind
))
||
(
config_fill
&&
unlikely
(
opt_quarantine
)))
{
ret
=
EFAULT
;
goto
label_return
;
}
ret
=
super_arena_i_node
;
arena_ind
=
(
unsigned
)
mib
[
1
];
if
(
config_debug
)
{
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
assert
(
arena_ind
<
ctl_stats
.
narenas
);
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
}
assert
(
arena_ind
>=
opt_narenas
);
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
);
arena_reset
(
tsd
,
arena
);
ret
=
0
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
return
(
ret
);
}
static
int
arena_i_dss_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
const
char
*
dss
=
NULL
;
unsigned
arena_ind
=
(
unsigned
)
mib
[
1
];
dss_prec_t
dss_prec_old
=
dss_prec_limit
;
dss_prec_t
dss_prec
=
dss_prec_limit
;
/******************************************************************************/
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
WRITE
(
dss
,
const
char
*
);
if
(
dss
!=
NULL
)
{
int
i
;
bool
match
=
false
;
for
(
i
=
0
;
i
<
dss_prec_limit
;
i
++
)
{
if
(
strcmp
(
dss_prec_names
[
i
],
dss
)
==
0
)
{
dss_prec
=
i
;
match
=
true
;
break
;
}
}
CTL_RO_NL_GEN
(
arenas_bin_i_size
,
arena_bin_info
[
mib
[
2
]].
reg_size
,
size_t
)
CTL_RO_NL_GEN
(
arenas_bin_i_nregs
,
arena_bin_info
[
mib
[
2
]].
nregs
,
uint32_t
)
CTL_RO_NL_GEN
(
arenas_bin_i_run_size
,
arena_bin_info
[
mib
[
2
]].
run_size
,
size_t
)
static
const
ctl_named_node_t
*
arenas_bin_i_index
(
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
if
(
!
match
)
{
ret
=
EINVAL
;
goto
label_return
;
}
}
if
(
arena_ind
<
ctl_stats
.
narenas
)
{
arena_t
*
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
);
if
(
arena
==
NULL
||
(
dss_prec
!=
dss_prec_limit
&&
arena_dss_prec_set
(
tsd_tsdn
(
tsd
),
arena
,
dss_prec
)))
{
ret
=
EFAULT
;
goto
label_return
;
}
dss_prec_old
=
arena_dss_prec_get
(
tsd_tsdn
(
tsd
),
arena
);
}
else
{
if
(
dss_prec
!=
dss_prec_limit
&&
chunk_dss_prec_set
(
dss_prec
))
{
ret
=
EFAULT
;
goto
label_return
;
}
dss_prec_old
=
chunk_dss_prec_get
();
}
dss
=
dss_prec_names
[
dss_prec_old
];
READ
(
dss
,
const
char
*
);
ret
=
0
;
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
(
ret
);
}
static
int
arena_i_lg_dirty_mult_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
=
(
unsigned
)
mib
[
1
];
arena_t
*
arena
;
if
(
i
>
NBINS
)
return
(
NULL
);
return
(
super_arenas_bin_i_node
);
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
);
if
(
arena
==
NULL
)
{
ret
=
EFAULT
;
goto
label_return
;
}
if
(
oldp
!=
NULL
&&
oldlenp
!=
NULL
)
{
size_t
oldval
=
arena_lg_dirty_mult_get
(
tsd_tsdn
(
tsd
),
arena
);
READ
(
oldval
,
ssize_t
);
}
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
ssize_t
))
{
ret
=
EINVAL
;
goto
label_return
;
}
if
(
arena_lg_dirty_mult_set
(
tsd_tsdn
(
tsd
),
arena
,
*
(
ssize_t
*
)
newp
))
{
ret
=
EFAULT
;
goto
label_return
;
}
}
ret
=
0
;
label_return:
return
(
ret
);
}
static
int
arena_i_decay_time_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
=
(
unsigned
)
mib
[
1
];
arena_t
*
arena
;
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
);
if
(
arena
==
NULL
)
{
ret
=
EFAULT
;
goto
label_return
;
}
if
(
oldp
!=
NULL
&&
oldlenp
!=
NULL
)
{
size_t
oldval
=
arena_decay_time_get
(
tsd_tsdn
(
tsd
),
arena
);
READ
(
oldval
,
ssize_t
);
}
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
ssize_t
))
{
ret
=
EINVAL
;
goto
label_return
;
}
if
(
arena_decay_time_set
(
tsd_tsdn
(
tsd
),
arena
,
*
(
ssize_t
*
)
newp
))
{
ret
=
EFAULT
;
goto
label_return
;
}
}
ret
=
0
;
label_return:
return
(
ret
);
}
static
int
arena_i_chunk_hooks_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
=
(
unsigned
)
mib
[
1
];
arena_t
*
arena
;
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
if
(
arena_ind
<
narenas_total_get
()
&&
(
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
))
!=
NULL
)
{
if
(
newp
!=
NULL
)
{
chunk_hooks_t
old_chunk_hooks
,
new_chunk_hooks
;
WRITE
(
new_chunk_hooks
,
chunk_hooks_t
);
old_chunk_hooks
=
chunk_hooks_set
(
tsd_tsdn
(
tsd
),
arena
,
&
new_chunk_hooks
);
READ
(
old_chunk_hooks
,
chunk_hooks_t
);
}
else
{
chunk_hooks_t
old_chunk_hooks
=
chunk_hooks_get
(
tsd_tsdn
(
tsd
),
arena
);
READ
(
old_chunk_hooks
,
chunk_hooks_t
);
}
}
else
{
ret
=
EFAULT
;
goto
label_return
;
}
ret
=
0
;
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
(
ret
);
}
CTL_RO_NL_GEN
(
arenas_lrun_i_size
,
((
mib
[
2
]
+
1
)
<<
LG_PAGE
),
size_t
)
static
const
ctl_named_node_t
*
arena
s_lrun
_i_index
(
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
arena_i_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
const
ctl_named_node_t
*
ret
;
if
(
i
>
nlclasses
)
return
(
NULL
);
return
(
super_arenas_lrun_i_node
);
malloc_mutex_lock
(
tsdn
,
&
ctl_mtx
);
if
(
i
>
ctl_stats
.
narenas
)
{
ret
=
NULL
;
goto
label_return
;
}
ret
=
super_arena_i_node
;
label_return:
malloc_mutex_unlock
(
tsdn
,
&
ctl_mtx
);
return
(
ret
);
}
/******************************************************************************/
static
int
arenas_narenas_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
arenas_narenas_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
narenas
;
malloc_mutex_lock
(
&
ctl_mtx
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
READONLY
();
if
(
*
oldlenp
!=
sizeof
(
unsigned
))
{
ret
=
EINVAL
;
...
...
@@ -1425,23 +1840,23 @@ arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
ret
=
0
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
(
ret
);
}
static
int
arenas_initialized_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
arenas_initialized_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
nread
,
i
;
malloc_mutex_lock
(
&
ctl_mtx
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
READONLY
();
if
(
*
oldlenp
!=
ctl_stats
.
narenas
*
sizeof
(
bool
))
{
ret
=
EINVAL
;
nread
=
(
*
oldlenp
<
ctl_stats
.
narenas
*
sizeof
(
bool
))
?
(
*
oldlenp
/
sizeof
(
bool
))
:
ctl_stats
.
narenas
;
?
(
unsigned
)
(
*
oldlenp
/
sizeof
(
bool
))
:
ctl_stats
.
narenas
;
}
else
{
ret
=
0
;
nread
=
ctl_stats
.
narenas
;
...
...
@@ -1451,107 +1866,191 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
((
bool
*
)
oldp
)[
i
]
=
ctl_stats
.
arenas
[
i
].
initialized
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
(
ret
);
}
CTL_RO_NL_GEN
(
arenas_quantum
,
QUANTUM
,
size_t
)
CTL_RO_NL_GEN
(
arenas_page
,
PAGE
,
size_t
)
CTL_RO_NL_CGEN
(
config_tcache
,
arenas_tcache_max
,
tcache_maxclass
,
size_t
)
CTL_RO_NL_GEN
(
arenas_nbins
,
NBINS
,
unsigned
)
CTL_RO_NL_CGEN
(
config_tcache
,
arenas_nhbins
,
nhbins
,
unsigned
)
CTL_RO_NL_GEN
(
arenas_nlruns
,
nlclasses
,
size_t
)
static
int
arenas_lg_dirty_mult_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
if
(
oldp
!=
NULL
&&
oldlenp
!=
NULL
)
{
size_t
oldval
=
arena_lg_dirty_mult_default_get
();
READ
(
oldval
,
ssize_t
);
}
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
ssize_t
))
{
ret
=
EINVAL
;
goto
label_return
;
}
if
(
arena_lg_dirty_mult_default_set
(
*
(
ssize_t
*
)
newp
))
{
ret
=
EFAULT
;
goto
label_return
;
}
}
ret
=
0
;
label_return:
return
(
ret
);
}
static
int
arenas_
purge_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
arenas_
decay_time_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
;
malloc_mutex_lock
(
&
ctl_mtx
);
WRITEONLY
();
arena_ind
=
UINT_MAX
;
WRITE
(
arena_ind
,
unsigned
);
if
(
newp
!=
NULL
&&
arena_ind
>=
ctl_stats
.
narenas
)
ret
=
EFAULT
;
else
{
if
(
arena_ind
==
UINT_MAX
)
arena_ind
=
ctl_stats
.
narenas
;
arena_purge
(
arena_ind
);
ret
=
0
;
if
(
oldp
!=
NULL
&&
oldlenp
!=
NULL
)
{
size_t
oldval
=
arena_decay_time_default_get
();
READ
(
oldval
,
ssize_t
);
}
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
ssize_t
))
{
ret
=
EINVAL
;
goto
label_return
;
}
if
(
arena_decay_time_default_set
(
*
(
ssize_t
*
)
newp
))
{
ret
=
EFAULT
;
goto
label_return
;
}
}
ret
=
0
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
return
(
ret
);
}
CTL_RO_NL_GEN
(
arenas_quantum
,
QUANTUM
,
size_t
)
CTL_RO_NL_GEN
(
arenas_page
,
PAGE
,
size_t
)
CTL_RO_NL_CGEN
(
config_tcache
,
arenas_tcache_max
,
tcache_maxclass
,
size_t
)
CTL_RO_NL_GEN
(
arenas_nbins
,
NBINS
,
unsigned
)
CTL_RO_NL_CGEN
(
config_tcache
,
arenas_nhbins
,
nhbins
,
unsigned
)
CTL_RO_NL_GEN
(
arenas_bin_i_size
,
arena_bin_info
[
mib
[
2
]].
reg_size
,
size_t
)
CTL_RO_NL_GEN
(
arenas_bin_i_nregs
,
arena_bin_info
[
mib
[
2
]].
nregs
,
uint32_t
)
CTL_RO_NL_GEN
(
arenas_bin_i_run_size
,
arena_bin_info
[
mib
[
2
]].
run_size
,
size_t
)
static
const
ctl_named_node_t
*
arenas_bin_i_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
if
(
i
>
NBINS
)
return
(
NULL
);
return
(
super_arenas_bin_i_node
);
}
CTL_RO_NL_GEN
(
arenas_nlruns
,
nlclasses
,
unsigned
)
CTL_RO_NL_GEN
(
arenas_lrun_i_size
,
index2size
(
NBINS
+
(
szind_t
)
mib
[
2
]),
size_t
)
static
const
ctl_named_node_t
*
arenas_lrun_i_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
if
(
i
>
nlclasses
)
return
(
NULL
);
return
(
super_arenas_lrun_i_node
);
}
CTL_RO_NL_GEN
(
arenas_nhchunks
,
nhclasses
,
unsigned
)
CTL_RO_NL_GEN
(
arenas_hchunk_i_size
,
index2size
(
NBINS
+
nlclasses
+
(
szind_t
)
mib
[
2
]),
size_t
)
static
const
ctl_named_node_t
*
arenas_hchunk_i_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
if
(
i
>
nhclasses
)
return
(
NULL
);
return
(
super_arenas_hchunk_i_node
);
}
static
int
arenas_extend_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
arenas_extend_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
narenas
;
malloc_mutex_lock
(
&
ctl_mtx
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
READONLY
();
if
(
ctl_grow
())
{
if
(
ctl_grow
(
tsd_tsdn
(
tsd
)
))
{
ret
=
EAGAIN
;
goto
label_return
;
}
READ
(
ctl_stats
.
narenas
-
1
,
unsigned
);
narenas
=
ctl_stats
.
narenas
-
1
;
READ
(
narenas
,
unsigned
);
ret
=
0
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
(
ret
);
}
/******************************************************************************/
static
int
prof_
active_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
prof_
thread_active_init_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
bool
oldval
;
if
(
config_prof
==
false
)
if
(
!
config_prof
)
return
(
ENOENT
);
malloc_mutex_lock
(
&
ctl_mtx
);
/* Protect opt_prof_active. */
oldval
=
opt_prof_active
;
if
(
newp
!=
NULL
)
{
/*
* The memory barriers will tend to make opt_prof_active
* propagate faster on systems with weak memory ordering.
*/
mb_write
();
WRITE
(
opt_prof_active
,
bool
);
mb_write
();
}
if
(
newlen
!=
sizeof
(
bool
))
{
ret
=
EINVAL
;
goto
label_return
;
}
oldval
=
prof_thread_active_init_set
(
tsd_tsdn
(
tsd
),
*
(
bool
*
)
newp
);
}
else
oldval
=
prof_thread_active_init_get
(
tsd_tsdn
(
tsd
));
READ
(
oldval
,
bool
);
ret
=
0
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
return
(
ret
);
}
static
int
prof_dump_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
prof_active_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
bool
oldval
;
if
(
!
config_prof
)
return
(
ENOENT
);
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
bool
))
{
ret
=
EINVAL
;
goto
label_return
;
}
oldval
=
prof_active_set
(
tsd_tsdn
(
tsd
),
*
(
bool
*
)
newp
);
}
else
oldval
=
prof_active_get
(
tsd_tsdn
(
tsd
));
READ
(
oldval
,
bool
);
ret
=
0
;
label_return:
return
(
ret
);
}
static
int
prof_dump_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
const
char
*
filename
=
NULL
;
if
(
config_prof
==
false
)
if
(
!
config_prof
)
return
(
ENOENT
);
WRITEONLY
();
WRITE
(
filename
,
const
char
*
);
if
(
prof_mdump
(
filename
))
{
if
(
prof_mdump
(
tsd
,
filename
))
{
ret
=
EFAULT
;
goto
label_return
;
}
...
...
@@ -1561,17 +2060,89 @@ label_return:
return
(
ret
);
}
static int
prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	if (!config_prof)
		return (ENOENT);

	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
	} else
		oldval = prof_gdump_get(tsd_tsdn(tsd));
	READ(oldval, bool);

	ret = 0;
label_return:
	return (ret);
}

static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	size_t lg_sample = lg_prof_sample;

	if (!config_prof)
		return (ENOENT);

	WRITEONLY();
	WRITE(lg_sample, size_t);
	if (lg_sample >= (sizeof(uint64_t) << 3))
		lg_sample = (sizeof(uint64_t) << 3) - 1;

	prof_reset(tsd, lg_sample);

	ret = 0;
label_return:
	return (ret);
}

CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
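As a usage note (not part of this commit): these handlers are only meaningful when jemalloc is built with profiling support; otherwise they return ENOENT. A hedged caller-side sketch of the corresponding "prof.gdump" and "prof.reset" mallctls:

/*
 * Hypothetical usage sketch: enable high-water dumping and reset profile
 * data with a new sample rate (2^19 bytes, roughly every 512 KiB).
 */
#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    bool gdump = true;
    size_t lg_sample = 19;

    (void)mallctl("prof.gdump", NULL, NULL, &gdump, sizeof(gdump));
    (void)mallctl("prof.reset", NULL, NULL, &lg_sample,
        sizeof(lg_sample));
    return (0);
}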
/******************************************************************************/
CTL_RO_CGEN
(
config_stats
,
stats_chunks_current
,
ctl_stats
.
chunks
.
current
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_chunks_total
,
ctl_stats
.
chunks
.
total
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_chunks_high
,
ctl_stats
.
chunks
.
high
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_huge_allocated
,
huge_allocated
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_huge_nmalloc
,
huge_nmalloc
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_huge_ndalloc
,
huge_ndalloc
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_cactive
,
&
stats_cactive
,
size_t
*
)
CTL_RO_CGEN
(
config_stats
,
stats_allocated
,
ctl_stats
.
allocated
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_active
,
ctl_stats
.
active
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_metadata
,
ctl_stats
.
metadata
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_resident
,
ctl_stats
.
resident
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_mapped
,
ctl_stats
.
mapped
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_retained
,
ctl_stats
.
retained
,
size_t
)
CTL_RO_GEN
(
stats_arenas_i_dss
,
ctl_stats
.
arenas
[
mib
[
2
]].
dss
,
const
char
*
)
CTL_RO_GEN
(
stats_arenas_i_lg_dirty_mult
,
ctl_stats
.
arenas
[
mib
[
2
]].
lg_dirty_mult
,
ssize_t
)
CTL_RO_GEN
(
stats_arenas_i_decay_time
,
ctl_stats
.
arenas
[
mib
[
2
]].
decay_time
,
ssize_t
)
CTL_RO_GEN
(
stats_arenas_i_nthreads
,
ctl_stats
.
arenas
[
mib
[
2
]].
nthreads
,
unsigned
)
CTL_RO_GEN
(
stats_arenas_i_pactive
,
ctl_stats
.
arenas
[
mib
[
2
]].
pactive
,
size_t
)
CTL_RO_GEN
(
stats_arenas_i_pdirty
,
ctl_stats
.
arenas
[
mib
[
2
]].
pdirty
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_mapped
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
mapped
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_retained
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
retained
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_npurge
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
npurge
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_nmadvise
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
nmadvise
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_purged
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
purged
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_metadata_mapped
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
metadata_mapped
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_metadata_allocated
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
metadata_allocated
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_small_allocated
,
ctl_stats
.
arenas
[
mib
[
2
]].
allocated_small
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_small_nmalloc
,
...
...
@@ -1588,15 +2159,23 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
ndalloc_large
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_large_nrequests
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
nrequests_large
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_huge_allocated
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
allocated_huge
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_huge_nmalloc
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
nmalloc_huge
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_huge_ndalloc
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
ndalloc_huge
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_huge_nrequests
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
nmalloc_huge
,
uint64_t
)
/* Intentional. */
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_bins_j_allocated
,
ctl_stats
.
arenas
[
mib
[
2
]].
bstats
[
mib
[
4
]].
allocated
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_bins_j_nmalloc
,
ctl_stats
.
arenas
[
mib
[
2
]].
bstats
[
mib
[
4
]].
nmalloc
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_bins_j_ndalloc
,
ctl_stats
.
arenas
[
mib
[
2
]].
bstats
[
mib
[
4
]].
ndalloc
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_bins_j_nrequests
,
ctl_stats
.
arenas
[
mib
[
2
]].
bstats
[
mib
[
4
]].
nrequests
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_bins_j_curregs
,
ctl_stats
.
arenas
[
mib
[
2
]].
bstats
[
mib
[
4
]].
curregs
,
size_t
)
CTL_RO_CGEN
(
config_stats
&&
config_tcache
,
stats_arenas_i_bins_j_nfills
,
ctl_stats
.
arenas
[
mib
[
2
]].
bstats
[
mib
[
4
]].
nfills
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
&&
config_tcache
,
stats_arenas_i_bins_j_nflushes
,
...
...
@@ -1609,7 +2188,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
ctl_stats
.
arenas
[
mib
[
2
]].
bstats
[
mib
[
4
]].
curruns
,
size_t
)
static
const
ctl_named_node_t
*
stats_arenas_i_bins_j_index
(
const
size_t
*
mib
,
size_t
miblen
,
size_t
j
)
stats_arenas_i_bins_j_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
j
)
{
if
(
j
>
NBINS
)
...
...
@@ -1627,7 +2207,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats
.
arenas
[
mib
[
2
]].
lstats
[
mib
[
4
]].
curruns
,
size_t
)
static
const
ctl_named_node_t
*
stats_arenas_i_lruns_j_index
(
const
size_t
*
mib
,
size_t
miblen
,
size_t
j
)
stats_arenas_i_lruns_j_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
j
)
{
if
(
j
>
nlclasses
)
...
...
@@ -1635,37 +2216,39 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
return
(
super_stats_arenas_i_lruns_j_node
);
}
CTL_RO_GEN
(
stats_arenas_i_nthreads
,
ctl_stats
.
arenas
[
mib
[
2
]].
nthreads
,
unsigned
)
CTL_RO_GEN
(
stats_arenas_i_dss
,
ctl_stats
.
arenas
[
mib
[
2
]].
dss
,
const
char
*
)
CTL_RO_GEN
(
stats_arenas_i_pactive
,
ctl_stats
.
arenas
[
mib
[
2
]].
pactive
,
size_t
)
CTL_RO_GEN
(
stats_arenas_i_pdirty
,
ctl_stats
.
arenas
[
mib
[
2
]].
pdirty
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_mapped
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
mapped
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_npurge
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
npurge
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_nmadvise
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
nmadvise
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_purged
,
ctl_stats
.
arenas
[
mib
[
2
]].
astats
.
purged
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_hchunks_j_nmalloc
,
ctl_stats
.
arenas
[
mib
[
2
]].
hstats
[
mib
[
4
]].
nmalloc
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_hchunks_j_ndalloc
,
ctl_stats
.
arenas
[
mib
[
2
]].
hstats
[
mib
[
4
]].
ndalloc
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_hchunks_j_nrequests
,
ctl_stats
.
arenas
[
mib
[
2
]].
hstats
[
mib
[
4
]].
nmalloc
,
/* Intentional. */
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_hchunks_j_curhchunks
,
ctl_stats
.
arenas
[
mib
[
2
]].
hstats
[
mib
[
4
]].
curhchunks
,
size_t
)
static
const
ctl_named_node_t
*
stats_arenas_i_index
(
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
stats_arenas_i_hchunks_j_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
j
)
{
if
(
j
>
nhclasses
)
return
(
NULL
);
return
(
super_stats_arenas_i_hchunks_j_node
);
}
static
const
ctl_named_node_t
*
stats_arenas_i_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
const
ctl_named_node_t
*
ret
;
malloc_mutex_lock
(
&
ctl_mtx
);
if
(
i
>
ctl_stats
.
narenas
||
ctl_stats
.
arenas
[
i
].
initialized
==
false
)
{
malloc_mutex_lock
(
tsdn
,
&
ctl_mtx
);
if
(
i
>
ctl_stats
.
narenas
||
!
ctl_stats
.
arenas
[
i
].
initialized
)
{
ret
=
NULL
;
goto
label_return
;
}
ret
=
super_stats_arenas_i_node
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
malloc_mutex_unlock
(
tsdn
,
&
ctl_mtx
);
return
(
ret
);
}
CTL_RO_CGEN
(
config_stats
,
stats_cactive
,
&
stats_cactive
,
size_t
*
)
CTL_RO_CGEN
(
config_stats
,
stats_allocated
,
ctl_stats
.
allocated
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_active
,
ctl_stats
.
active
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_mapped
,
ctl_stats
.
mapped
,
size_t
)
deps/jemalloc/src/extent.c
...
...
@@ -3,37 +3,75 @@
/******************************************************************************/
-static inline int
-extent_szad_comp(extent_node_t *a, extent_node_t *b)
-{
-	int ret;
-	size_t a_size = a->size;
-	size_t b_size = b->size;
-
-	ret = (a_size > b_size) - (a_size < b_size);
-	if (ret == 0) {
-		uintptr_t a_addr = (uintptr_t)a->addr;
-		uintptr_t b_addr = (uintptr_t)b->addr;
-
-		ret = (a_addr > b_addr) - (a_addr < b_addr);
-	}
-	return (ret);
-}
-
-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
-    extent_szad_comp)

/*
 * Round down to the nearest chunk size that can actually be requested during
 * normal huge allocation.
 */
JEMALLOC_INLINE_C size_t
extent_quantize(size_t size)
{
	size_t ret;
	szind_t ind;

	assert(size > 0);

	ind = size2index(size + 1);
	if (ind == 0) {
		/* Avoid underflow. */
		return (index2size(0));
	}
	ret = index2size(ind - 1);
	assert(ret <= size);
	return (ret);
}

JEMALLOC_INLINE_C int
extent_sz_comp(const extent_node_t *a, const extent_node_t *b)
{
	size_t a_qsize = extent_quantize(extent_node_size_get(a));
	size_t b_qsize = extent_quantize(extent_node_size_get(b));

	return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
}

JEMALLOC_INLINE_C int
extent_sn_comp(const extent_node_t *a, const extent_node_t *b)
{
	size_t a_sn = extent_node_sn_get(a);
	size_t b_sn = extent_node_sn_get(b);

	return ((a_sn > b_sn) - (a_sn < b_sn));
}

JEMALLOC_INLINE_C int
extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
{
	uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
	uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);

	return ((a_addr > b_addr) - (a_addr < b_addr));
}

JEMALLOC_INLINE_C int
extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b)
{
	int ret;

	ret = extent_sz_comp(a, b);
	if (ret != 0)
		return (ret);

	ret = extent_sn_comp(a, b);
	if (ret != 0)
		return (ret);

	ret = extent_ad_comp(a, b);
	return (ret);
}

/* Generate red-black tree functions. */
rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link,
    extent_szsnad_comp)

/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link,
    extent_ad_comp)
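To make the quantization idea above concrete: extent_quantize() rounds a size down to the largest size class that does not exceed it. The standalone toy below is an assumption-laden illustration of that behavior using a made-up class table, not jemalloc's internal size2index()/index2size() machinery.

/*
 * Toy illustration (hypothetical, not jemalloc code): round a size down to
 * the largest entry of a small size-class table that is <= the size.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

static const size_t classes[] = {4096, 8192, 12288, 16384, 32768};

static size_t
quantize(size_t size)
{
    size_t i, ret = classes[0];

    assert(size >= classes[0]);
    for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
        if (classes[i] <= size)
            ret = classes[i];	/* Largest class <= size so far. */
    }
    return (ret);
}

int
main(void)
{
    /* 13000 falls between 12288 and 16384, so it quantizes to 12288. */
    printf("%zu -> %zu\n", (size_t)13000, quantize(13000));
    return (0);
}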
deps/jemalloc/src/huge.c
...
...
@@ -2,44 +2,77 @@
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
uint64_t
huge_nmalloc
;
uint64_t
huge_ndalloc
;
size_t
huge_allocated
;
static
extent_node_t
*
huge_node_get
(
const
void
*
ptr
)
{
extent_node_t
*
node
;
malloc_mutex_t
huge_mtx
;
node
=
chunk_lookup
(
ptr
,
true
);
assert
(
!
extent_node_achunk_get
(
node
));
/******************************************************************************/
return
(
node
);
}
static
bool
huge_node_set
(
tsdn_t
*
tsdn
,
const
void
*
ptr
,
extent_node_t
*
node
)
{
assert
(
extent_node_addr_get
(
node
)
==
ptr
);
assert
(
!
extent_node_achunk_get
(
node
));
return
(
chunk_register
(
tsdn
,
ptr
,
node
));
}
/* Tree of chunks that are stand-alone huge allocations. */
static
extent_tree_t
huge
;
static
void
huge_node_reset
(
tsdn_t
*
tsdn
,
const
void
*
ptr
,
extent_node_t
*
node
)
{
bool
err
;
err
=
huge_node_set
(
tsdn
,
ptr
,
node
);
assert
(
!
err
);
}
static
void
huge_node_unset
(
const
void
*
ptr
,
const
extent_node_t
*
node
)
{
chunk_deregister
(
ptr
,
node
);
}
void
*
huge_malloc
(
size_t
size
,
bool
zero
)
huge_malloc
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
size_t
u
size
,
bool
zero
)
{
return
(
huge_palloc
(
size
,
chunksize
,
zero
));
assert
(
usize
==
s2u
(
usize
));
return
(
huge_palloc
(
tsdn
,
arena
,
usize
,
chunksize
,
zero
));
}
void
*
huge_palloc
(
size_t
size
,
size_t
alignment
,
bool
zero
)
huge_palloc
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
size_t
usize
,
size_t
alignment
,
bool
zero
)
{
void
*
ret
;
size_t
csize
;
size_t
ausize
;
arena_t
*
iarena
;
extent_node_t
*
node
;
size_t
sn
;
bool
is_zeroed
;
/* Allocate one or more contiguous chunks for this request. */
csize
=
CHUNK_CEILING
(
size
);
if
(
csize
==
0
)
{
/* size is large enough to cause size_t wrap-around. */
assert
(
!
tsdn_null
(
tsdn
)
||
arena
!=
NULL
);
ausize
=
sa2u
(
usize
,
alignment
);
if
(
unlikely
(
ausize
==
0
||
ausize
>
HUGE_MAXCLASS
))
return
(
NULL
);
}
assert
(
ausize
>=
chunksize
);
/* Allocate an extent node with which to track the chunk. */
node
=
base_node_alloc
();
iarena
=
(
!
tsdn_null
(
tsdn
))
?
arena_ichoose
(
tsdn_tsd
(
tsdn
),
NULL
)
:
a0get
();
node
=
ipallocztm
(
tsdn
,
CACHELINE_CEILING
(
sizeof
(
extent_node_t
)),
CACHELINE
,
false
,
NULL
,
true
,
iarena
);
if
(
node
==
NULL
)
return
(
NULL
);
...
...
@@ -48,266 +81,397 @@ huge_palloc(size_t size, size_t alignment, bool zero)
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed
=
zero
;
ret
=
chunk_alloc
(
csize
,
alignment
,
false
,
&
is_zeroed
,
chunk_dss_prec_get
());
if
(
ret
==
NULL
)
{
base_node_dealloc
(
node
);
if
(
likely
(
!
tsdn_null
(
tsdn
)))
arena
=
arena_choose
(
tsdn_tsd
(
tsdn
),
arena
);
if
(
unlikely
(
arena
==
NULL
)
||
(
ret
=
arena_chunk_alloc_huge
(
tsdn
,
arena
,
usize
,
alignment
,
&
sn
,
&
is_zeroed
))
==
NULL
)
{
idalloctm
(
tsdn
,
node
,
NULL
,
true
,
true
);
return
(
NULL
);
}
/* Insert node into huge. */
node
->
addr
=
ret
;
node
->
size
=
csize
;
malloc_mutex_lock
(
&
huge_mtx
);
extent_tree_ad_insert
(
&
huge
,
node
);
if
(
config_stats
)
{
stats_cactive_add
(
csize
);
huge_nmalloc
++
;
huge_allocated
+=
csize
;
}
malloc_mutex_unlock
(
&
huge_mtx
);
extent_node_init
(
node
,
arena
,
ret
,
usize
,
sn
,
is_zeroed
,
true
);
if
(
config_fill
&&
zero
==
false
)
{
if
(
opt_junk
)
memset
(
ret
,
0xa5
,
csize
);
else
if
(
opt_zero
&&
is_zeroed
==
false
)
memset
(
ret
,
0
,
csize
);
if
(
huge_node_set
(
tsdn
,
ret
,
node
))
{
arena_chunk_dalloc_huge
(
tsdn
,
arena
,
ret
,
usize
,
sn
);
idalloctm
(
tsdn
,
node
,
NULL
,
true
,
true
);
return
(
NULL
);
}
/* Insert node into huge. */
malloc_mutex_lock
(
tsdn
,
&
arena
->
huge_mtx
);
ql_elm_new
(
node
,
ql_link
);
ql_tail_insert
(
&
arena
->
huge
,
node
,
ql_link
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
huge_mtx
);
if
(
zero
||
(
config_fill
&&
unlikely
(
opt_zero
)))
{
if
(
!
is_zeroed
)
memset
(
ret
,
0
,
usize
);
}
else
if
(
config_fill
&&
unlikely
(
opt_junk_alloc
))
memset
(
ret
,
JEMALLOC_ALLOC_JUNK
,
usize
);
arena_decay_tick
(
tsdn
,
arena
);
return
(
ret
);
}
void
*
huge_ralloc_no_move
(
void
*
ptr
,
size_t
oldsize
,
size_t
size
,
size_t
extra
)
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static
void
huge_dalloc_junk
(
void
*
ptr
,
size_t
usize
)
{
/*
* Avoid moving the allocation if the size class can be left the same.
*/
if
(
oldsize
>
arena_maxclass
&&
CHUNK_CEILING
(
oldsize
)
>=
CHUNK_CEILING
(
size
)
&&
CHUNK_CEILING
(
oldsize
)
<=
CHUNK_CEILING
(
size
+
extra
))
{
assert
(
CHUNK_CEILING
(
oldsize
)
==
oldsize
);
if
(
config_fill
&&
opt_junk
&&
size
<
oldsize
)
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
size
),
0x5a
,
oldsize
-
size
);
}
return
(
ptr
);
if
(
config_fill
&&
have_dss
&&
unlikely
(
opt_junk_free
))
{
/*
* Only bother junk filling if the chunk isn't about to be
* unmapped.
*/
if
(
!
config_munmap
||
(
have_dss
&&
chunk_in_dss
(
ptr
)))
memset
(
ptr
,
JEMALLOC_FREE_JUNK
,
usize
);
}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t
*
huge_dalloc_junk
=
JEMALLOC_N
(
huge_dalloc_junk_impl
);
#endif
/* Reallocation would require a move. */
return
(
NULL
);
static
void
huge_ralloc_no_move_similar
(
tsdn_t
*
tsdn
,
void
*
ptr
,
size_t
oldsize
,
size_t
usize_min
,
size_t
usize_max
,
bool
zero
)
{
size_t
usize
,
usize_next
;
extent_node_t
*
node
;
arena_t
*
arena
;
chunk_hooks_t
chunk_hooks
=
CHUNK_HOOKS_INITIALIZER
;
bool
pre_zeroed
,
post_zeroed
;
/* Increase usize to incorporate extra. */
for
(
usize
=
usize_min
;
usize
<
usize_max
&&
(
usize_next
=
s2u
(
usize
+
1
))
<=
oldsize
;
usize
=
usize_next
)
;
/* Do nothing. */
if
(
oldsize
==
usize
)
return
;
node
=
huge_node_get
(
ptr
);
arena
=
extent_node_arena_get
(
node
);
pre_zeroed
=
extent_node_zeroed_get
(
node
);
/* Fill if necessary (shrinking). */
if
(
oldsize
>
usize
)
{
size_t
sdiff
=
oldsize
-
usize
;
if
(
config_fill
&&
unlikely
(
opt_junk_free
))
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
usize
),
JEMALLOC_FREE_JUNK
,
sdiff
);
post_zeroed
=
false
;
}
else
{
post_zeroed
=
!
chunk_purge_wrapper
(
tsdn
,
arena
,
&
chunk_hooks
,
ptr
,
CHUNK_CEILING
(
oldsize
),
usize
,
sdiff
);
}
}
else
post_zeroed
=
pre_zeroed
;
malloc_mutex_lock
(
tsdn
,
&
arena
->
huge_mtx
);
/* Update the size of the huge allocation. */
huge_node_unset
(
ptr
,
node
);
assert
(
extent_node_size_get
(
node
)
!=
usize
);
extent_node_size_set
(
node
,
usize
);
huge_node_reset
(
tsdn
,
ptr
,
node
);
/* Update zeroed. */
extent_node_zeroed_set
(
node
,
post_zeroed
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
huge_mtx
);
arena_chunk_ralloc_huge_similar
(
tsdn
,
arena
,
ptr
,
oldsize
,
usize
);
/* Fill if necessary (growing). */
if
(
oldsize
<
usize
)
{
if
(
zero
||
(
config_fill
&&
unlikely
(
opt_zero
)))
{
if
(
!
pre_zeroed
)
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
oldsize
),
0
,
usize
-
oldsize
);
}
}
else
if
(
config_fill
&&
unlikely
(
opt_junk_alloc
))
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
oldsize
),
JEMALLOC_ALLOC_JUNK
,
usize
-
oldsize
);
}
}
}
void
*
huge_ralloc
(
void
*
ptr
,
size_t
oldsize
,
size_t
size
,
size_t
extra
,
size_t
alignment
,
bool
zero
,
bool
try_tcache_dalloc
)
static
bool
huge_ralloc
_no_move_shrink
(
tsdn_t
*
tsdn
,
void
*
ptr
,
size_t
oldsize
,
size_t
usize
)
{
void
*
ret
;
size_t
copysize
;
extent_node_t
*
node
;
arena_t
*
arena
;
chunk_hooks_t
chunk_hooks
;
size_t
cdiff
;
bool
pre_zeroed
,
post_zeroed
;
node
=
huge_node_get
(
ptr
);
arena
=
extent_node_arena_get
(
node
);
pre_zeroed
=
extent_node_zeroed_get
(
node
);
chunk_hooks
=
chunk_hooks_get
(
tsdn
,
arena
);
assert
(
oldsize
>
usize
);
/* Split excess chunks. */
cdiff
=
CHUNK_CEILING
(
oldsize
)
-
CHUNK_CEILING
(
usize
);
if
(
cdiff
!=
0
&&
chunk_hooks
.
split
(
ptr
,
CHUNK_CEILING
(
oldsize
),
CHUNK_CEILING
(
usize
),
cdiff
,
true
,
arena
->
ind
))
return
(
true
);
/* Try to avoid moving the allocation. */
ret
=
huge_ralloc_no_move
(
ptr
,
oldsize
,
size
,
extra
);
if
(
ret
!=
NULL
)
return
(
ret
);
if
(
oldsize
>
usize
)
{
size_t
sdiff
=
oldsize
-
usize
;
if
(
config_fill
&&
unlikely
(
opt_junk_free
))
{
huge_dalloc_junk
((
void
*
)((
uintptr_t
)
ptr
+
usize
),
sdiff
);
post_zeroed
=
false
;
}
else
{
post_zeroed
=
!
chunk_purge_wrapper
(
tsdn
,
arena
,
&
chunk_hooks
,
CHUNK_ADDR2BASE
((
uintptr_t
)
ptr
+
usize
),
CHUNK_CEILING
(
oldsize
),
CHUNK_ADDR2OFFSET
((
uintptr_t
)
ptr
+
usize
),
sdiff
);
}
}
else
post_zeroed
=
pre_zeroed
;
/*
* size and oldsize are different enough that we need to use a
* different size class. In that case, fall back to allocating new
* space and copying.
*/
if
(
alignment
>
chunksize
)
ret
=
huge_palloc
(
size
+
extra
,
alignment
,
zero
);
else
ret
=
huge_malloc
(
size
+
extra
,
zero
);
if
(
ret
==
NULL
)
{
if
(
extra
==
0
)
return
(
NULL
);
/* Try again, this time without extra. */
if
(
alignment
>
chunksize
)
ret
=
huge_palloc
(
size
,
alignment
,
zero
);
else
ret
=
huge_malloc
(
size
,
zero
);
if
(
ret
==
NULL
)
return
(
NULL
);
}
malloc_mutex_lock
(
tsdn
,
&
arena
->
huge_mtx
);
/* Update the size of the huge allocation. */
huge_node_unset
(
ptr
,
node
);
extent_node_size_set
(
node
,
usize
);
huge_node_reset
(
tsdn
,
ptr
,
node
);
/* Update zeroed. */
extent_node_zeroed_set
(
node
,
post_zeroed
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
huge_mtx
);
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
*/
copysize
=
(
size
<
oldsize
)
?
size
:
oldsize
;
/* Zap the excess chunks. */
arena_chunk_ralloc_huge_shrink
(
tsdn
,
arena
,
ptr
,
oldsize
,
usize
,
extent_node_sn_get
(
node
));
return
(
false
);
}
static
bool
huge_ralloc_no_move_expand
(
tsdn_t
*
tsdn
,
void
*
ptr
,
size_t
oldsize
,
size_t
usize
,
bool
zero
)
{
extent_node_t
*
node
;
arena_t
*
arena
;
bool
is_zeroed_subchunk
,
is_zeroed_chunk
;
node
=
huge_node_get
(
ptr
);
arena
=
extent_node_arena_get
(
node
);
malloc_mutex_lock
(
tsdn
,
&
arena
->
huge_mtx
);
is_zeroed_subchunk
=
extent_node_zeroed_get
(
node
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
huge_mtx
);
#ifdef JEMALLOC_MREMAP
/*
* Use
mremap(2) if this is a huge-->huge reallocation, and neither the
*
source nor the destination are in dss
.
* Use
is_zeroed_chunk to detect whether the trailing memory is zeroed,
*
update extent's zeroed field, and zero as necessary
.
*/
if
(
oldsize
>=
chunksize
&&
(
config_dss
==
false
||
(
chunk_in_dss
(
ptr
)
==
false
&&
chunk_in_dss
(
ret
)
==
false
)))
{
size_t
newsize
=
huge_salloc
(
ret
);
is_zeroed_chunk
=
false
;
if
(
arena_chunk_ralloc_huge_expand
(
tsdn
,
arena
,
ptr
,
oldsize
,
usize
,
&
is_zeroed_chunk
))
return
(
true
);
/*
* Remove ptr from the tree of huge allocations before
* performing the remap operation, in order to avoid the
* possibility of another thread acquiring that mapping before
* this one removes it from the tree.
*/
huge_dalloc
(
ptr
,
false
);
if
(
mremap
(
ptr
,
oldsize
,
newsize
,
MREMAP_MAYMOVE
|
MREMAP_FIXED
,
ret
)
==
MAP_FAILED
)
{
/*
* Assuming no chunk management bugs in the allocator,
* the only documented way an error can occur here is
* if the application changed the map type for a
* portion of the old allocation. This is firmly in
* undefined behavior territory, so write a diagnostic
* message, and optionally abort.
*/
char
buf
[
BUFERROR_BUF
];
buferror
(
buf
,
sizeof
(
buf
));
malloc_printf
(
"<jemalloc>: Error in mremap(): %s
\n
"
,
buf
);
if
(
opt_abort
)
abort
();
memcpy
(
ret
,
ptr
,
copysize
);
chunk_dealloc_mmap
(
ptr
,
oldsize
);
malloc_mutex_lock
(
tsdn
,
&
arena
->
huge_mtx
);
huge_node_unset
(
ptr
,
node
);
extent_node_size_set
(
node
,
usize
);
extent_node_zeroed_set
(
node
,
extent_node_zeroed_get
(
node
)
&&
is_zeroed_chunk
);
huge_node_reset
(
tsdn
,
ptr
,
node
);
malloc_mutex_unlock
(
tsdn
,
&
arena
->
huge_mtx
);
if
(
zero
||
(
config_fill
&&
unlikely
(
opt_zero
)))
{
if
(
!
is_zeroed_subchunk
)
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
oldsize
),
0
,
CHUNK_CEILING
(
oldsize
)
-
oldsize
);
}
}
else
#endif
{
memcpy
(
ret
,
ptr
,
copysize
);
iqallocx
(
ptr
,
try_tcache_dalloc
);
if
(
!
is_zeroed_chunk
)
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
CHUNK_CEILING
(
oldsize
)),
0
,
usize
-
CHUNK_CEILING
(
oldsize
));
}
}
else
if
(
config_fill
&&
unlikely
(
opt_junk_alloc
))
{
memset
((
void
*
)((
uintptr_t
)
ptr
+
oldsize
),
JEMALLOC_ALLOC_JUNK
,
usize
-
oldsize
);
}
return
(
ret
);
return
(
false
);
}
void
huge_dalloc
(
void
*
ptr
,
bool
unmap
)
bool
huge_ralloc_no_move
(
tsdn_t
*
tsdn
,
void
*
ptr
,
size_t
oldsize
,
size_t
usize_min
,
size_t
usize_max
,
bool
zero
)
{
extent_node_t
*
node
,
key
;
malloc_mutex_lock
(
&
huge_mtx
);
assert
(
s2u
(
oldsize
)
==
oldsize
);
/* The following should have been caught by callers. */
assert
(
usize_min
>
0
&&
usize_max
<=
HUGE_MAXCLASS
);
/* Extract from tree of huge allocations. */
key
.
addr
=
ptr
;
node
=
extent_tree_ad_search
(
&
huge
,
&
key
);
assert
(
node
!=
NULL
);
assert
(
node
->
addr
==
ptr
);
extent_tree_ad_remove
(
&
huge
,
node
);
/* Both allocations must be huge to avoid a move. */
if
(
oldsize
<
chunksize
||
usize_max
<
chunksize
)
return
(
true
);
if
(
config_stats
)
{
stats_cactive_sub
(
node
->
size
);
huge_ndalloc
++
;
huge_allocated
-=
node
->
size
;
if
(
CHUNK_CEILING
(
usize_max
)
>
CHUNK_CEILING
(
oldsize
))
{
/* Attempt to expand the allocation in-place. */
if
(
!
huge_ralloc_no_move_expand
(
tsdn
,
ptr
,
oldsize
,
usize_max
,
zero
))
{
arena_decay_tick
(
tsdn
,
huge_aalloc
(
ptr
));
return
(
false
);
}
/* Try again, this time with usize_min. */
if
(
usize_min
<
usize_max
&&
CHUNK_CEILING
(
usize_min
)
>
CHUNK_CEILING
(
oldsize
)
&&
huge_ralloc_no_move_expand
(
tsdn
,
ptr
,
oldsize
,
usize_min
,
zero
))
{
arena_decay_tick
(
tsdn
,
huge_aalloc
(
ptr
));
return
(
false
);
}
}
malloc_mutex_unlock
(
&
huge_mtx
);
if
(
unmap
&&
config_fill
&&
config_dss
&&
opt_junk
)
memset
(
node
->
addr
,
0x5a
,
node
->
size
);
chunk_dealloc
(
node
->
addr
,
node
->
size
,
unmap
);
/*
* Avoid moving the allocation if the existing chunk size accommodates
* the new size.
*/
if
(
CHUNK_CEILING
(
oldsize
)
>=
CHUNK_CEILING
(
usize_min
)
&&
CHUNK_CEILING
(
oldsize
)
<=
CHUNK_CEILING
(
usize_max
))
{
huge_ralloc_no_move_similar
(
tsdn
,
ptr
,
oldsize
,
usize_min
,
usize_max
,
zero
);
arena_decay_tick
(
tsdn
,
huge_aalloc
(
ptr
));
return
(
false
);
}
base_node_dealloc
(
node
);
/* Attempt to shrink the allocation in-place. */
if
(
CHUNK_CEILING
(
oldsize
)
>
CHUNK_CEILING
(
usize_max
))
{
if
(
!
huge_ralloc_no_move_shrink
(
tsdn
,
ptr
,
oldsize
,
usize_max
))
{
arena_decay_tick
(
tsdn
,
huge_aalloc
(
ptr
));
return
(
false
);
}
}
return
(
true
);
}
-size_t
-huge_salloc(const void *ptr)
+static void *
+huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+    size_t alignment, bool zero)
 {
-    size_t ret;
-    extent_node_t *node, key;
-
-    malloc_mutex_lock(&huge_mtx);
-
-    /* Extract from tree of huge allocations. */
-    key.addr = __DECONST(void *, ptr);
-    node = extent_tree_ad_search(&huge, &key);
-    assert(node != NULL);
-
-    ret = node->size;
-
-    malloc_mutex_unlock(&huge_mtx);
-
-    return (ret);
+    if (alignment <= chunksize)
+        return (huge_malloc(tsdn, arena, usize, zero));
+    return (huge_palloc(tsdn, arena, usize, alignment, zero));
 }
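The move helper above takes the plain huge-allocation path unless the caller needs alignment stricter than a chunk. A rough libc analogue of that choice is sketched below; alloc_for_move() and the 2 MiB CHUNK_ALIGN are assumptions for illustration, not jemalloc's API.

/* Sketch only: cheap path for ordinary alignment, aligned path otherwise. */
#define _POSIX_C_SOURCE 200112L
#include <stdlib.h>

#define CHUNK_ALIGN ((size_t)2 << 20)     /* assumed natural chunk alignment */

static void *
alloc_for_move(size_t usize, size_t alignment)
{
    void *p;

    if (alignment <= CHUNK_ALIGN)
        return malloc(usize);
    /* posix_memalign() needs a power-of-two multiple of sizeof(void *). */
    if (posix_memalign(&p, alignment, usize) != 0)
        return NULL;
    return p;
}

int
main(void)
{
    void *a = alloc_for_move(1 << 20, 16);        /* plain path */
    void *b = alloc_for_move(1 << 20, 4 << 20);   /* aligned path */

    free(a);
    free(b);
    return 0;
}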
-prof_ctx_t *
-huge_prof_ctx_get(const void *ptr)
+void *
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
+    size_t usize, size_t alignment, bool zero, tcache_t *tcache)
 {
-    prof_ctx_t *ret;
-    extent_node_t *node, key;
-
-    malloc_mutex_lock(&huge_mtx);
-
-    /* Extract from tree of huge allocations. */
-    key.addr = __DECONST(void *, ptr);
-    node = extent_tree_ad_search(&huge, &key);
-    assert(node != NULL);
-
-    ret = node->prof_ctx;
-
-    malloc_mutex_unlock(&huge_mtx);
-
-    return (ret);
+    void *ret;
+    size_t copysize;
+
+    /* The following should have been caught by callers. */
+    assert(usize > 0 && usize <= HUGE_MAXCLASS);
+
+    /* Try to avoid moving the allocation. */
+    if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize, zero))
+        return (ptr);
+
+    /*
+     * usize and oldsize are different enough that we need to use a
+     * different size class.  In that case, fall back to allocating new
+     * space and copying.
+     */
+    ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
+        zero);
+    if (ret == NULL)
+        return (NULL);
+
+    copysize = (usize < oldsize) ? usize : oldsize;
+    memcpy(ret, ptr, copysize);
+    isqalloc(tsd, ptr, oldsize, tcache, true);
+    return (ret);
 }
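When no in-place option works, huge_ralloc() above allocates new space, copies min(usize, oldsize) bytes, and frees the old allocation. Below is a minimal libc sketch of that fallback; realloc_by_move() is a hypothetical name, not part of the patch.

/* Sketch only: allocate-copy-free fallback for a reallocation that must move. */
#include <stdlib.h>
#include <string.h>

static void *
realloc_by_move(void *ptr, size_t oldsize, size_t usize)
{
    void *ret = malloc(usize);
    size_t copysize;

    if (ret == NULL)
        return NULL;                    /* caller keeps the old allocation */
    copysize = (usize < oldsize) ? usize : oldsize;
    memcpy(ret, ptr, copysize);
    free(ptr);
    return ret;
}

int
main(void)
{
    char *p = malloc(64);
    void *q;

    if (p == NULL)
        return 1;
    memset(p, 'x', 64);
    q = realloc_by_move(p, 64, 128);    /* first 64 bytes carried over */
    free(q != NULL ? q : p);
    return 0;
}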
 void
-huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
+huge_dalloc(tsdn_t *tsdn, void *ptr)
 {
-    extent_node_t *node, key;
-
-    malloc_mutex_lock(&huge_mtx);
-
-    /* Extract from tree of huge allocations. */
-    key.addr = __DECONST(void *, ptr);
-    node = extent_tree_ad_search(&huge, &key);
-    assert(node != NULL);
-
-    node->prof_ctx = ctx;
-
-    malloc_mutex_unlock(&huge_mtx);
+    extent_node_t *node;
+    arena_t *arena;
+
+    node = huge_node_get(ptr);
+    arena = extent_node_arena_get(node);
+    huge_node_unset(ptr, node);
+    malloc_mutex_lock(tsdn, &arena->huge_mtx);
+    ql_remove(&arena->huge, node, ql_link);
+    malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+    huge_dalloc_junk(extent_node_addr_get(node), extent_node_size_get(node));
+    arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
+        extent_node_addr_get(node), extent_node_size_get(node),
+        extent_node_sn_get(node));
+    idalloctm(tsdn, node, NULL, true, true);
+
+    arena_decay_tick(tsdn, arena);
+}
+
+arena_t *
+huge_aalloc(const void *ptr)
+{
+    return (extent_node_arena_get(huge_node_get(ptr)));
 }
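The rewritten huge_dalloc() drops the single global huge tree and mutex: each huge extent sits on its owning arena's list under that arena's huge_mtx, so the unlink only contends with other huge operations on the same arena. The following simplified pthread model illustrates that per-arena unlink; all types and names here are stand-ins, not jemalloc's.

/* Sketch only: per-arena list of huge extents, unlinked under the arena lock. */
#include <pthread.h>
#include <stdlib.h>

typedef struct toy_node_s {
    struct toy_node_s *prev, *next;
    void *addr;
    size_t size;
} toy_node_t;

typedef struct {
    pthread_mutex_t huge_mtx;
    toy_node_t *huge;                  /* head of this arena's huge list */
} toy_arena_t;

static void
arena_huge_remove(toy_arena_t *arena, toy_node_t *node)
{
    pthread_mutex_lock(&arena->huge_mtx);
    if (node->prev != NULL)
        node->prev->next = node->next;
    else
        arena->huge = node->next;
    if (node->next != NULL)
        node->next->prev = node->prev;
    pthread_mutex_unlock(&arena->huge_mtx);
}

int
main(void)
{
    toy_arena_t arena = { PTHREAD_MUTEX_INITIALIZER, NULL };
    toy_node_t n = { NULL, NULL, NULL, 0 };

    arena.huge = &n;                   /* insert a single tracking node */
    arena_huge_remove(&arena, &n);     /* then unlink it, as dalloc would */
    return 0;
}

Splitting the lock per arena is what lets the later hunks replace every malloc_mutex_lock(&huge_mtx) with malloc_mutex_lock(tsdn, &arena->huge_mtx).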
-bool
-huge_boot(void)
+size_t
+huge_salloc(tsdn_t *tsdn, const void *ptr)
 {
-    /* Initialize chunks data. */
-    if (malloc_mutex_init(&huge_mtx))
-        return (true);
-    extent_tree_ad_new(&huge);
-
-    if (config_stats) {
-        huge_nmalloc = 0;
-        huge_ndalloc = 0;
-        huge_allocated = 0;
-    }
-
-    return (false);
+    size_t size;
+    extent_node_t *node;
+    arena_t *arena;
+
+    node = huge_node_get(ptr);
+    arena = extent_node_arena_get(node);
+    malloc_mutex_lock(tsdn, &arena->huge_mtx);
+    size = extent_node_size_get(node);
+    malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+    return (size);
 }
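huge_salloc() above no longer searches a global address tree; it reads the size recorded in the allocation's extent node while holding the owning arena's huge_mtx. A toy version of that locked metadata read follows; the types and names are stand-ins for illustration only.

/* Sketch only: size query as a locked read of per-extent metadata. */
#include <pthread.h>
#include <stddef.h>

typedef struct {
    pthread_mutex_t huge_mtx;
} toy_arena_t;

typedef struct {
    toy_arena_t *arena;
    size_t size;
} toy_extent_t;

static size_t
extent_size(toy_extent_t *node)
{
    size_t size;

    pthread_mutex_lock(&node->arena->huge_mtx);
    size = node->size;                 /* direct metadata read, no tree search */
    pthread_mutex_unlock(&node->arena->huge_mtx);
    return size;
}

int
main(void)
{
    toy_arena_t arena = { PTHREAD_MUTEX_INITIALIZER };
    toy_extent_t node = { &arena, (size_t)4 << 20 };

    return extent_size(&node) == ((size_t)4 << 20) ? 0 : 1;
}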
-void
-huge_prefork(void)
+prof_tctx_t *
+huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
 {
-    malloc_mutex_prefork(&huge_mtx);
+    prof_tctx_t *tctx;
+    extent_node_t *node;
+    arena_t *arena;
+
+    node = huge_node_get(ptr);
+    arena = extent_node_arena_get(node);
+    malloc_mutex_lock(tsdn, &arena->huge_mtx);
+    tctx = extent_node_prof_tctx_get(node);
+    malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+    return (tctx);
 }
 void
-huge_postfork_parent(void)
+huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
 {
-    malloc_mutex_postfork_parent(&huge_mtx);
+    extent_node_t *node;
+    arena_t *arena;
+
+    node = huge_node_get(ptr);
+    arena = extent_node_arena_get(node);
+    malloc_mutex_lock(tsdn, &arena->huge_mtx);
+    extent_node_prof_tctx_set(node, tctx);
+    malloc_mutex_unlock(tsdn, &arena->huge_mtx);
 }
 void
-huge_postfork_child(void)
+huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
 {
-    malloc_mutex_postfork_child(&huge_mtx);
+    huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
 }