ruanhaishen / redis · Commits · 4a884343

Commit 4a884343, authored Oct 10, 2021 by Yoav Steinberg

Delete old jemalloc before pulling in subtree.

parent 7ff7536e
Changes: 169
Too many changes to show. To preserve performance only 169 of 169+ files are displayed.
deps/jemalloc/msvc/test_threads/test_threads.cpp (deleted, 100644 → 0)

// jemalloc C++ threaded test
// Author: Rustam Abdullaev
// Public Domain

#include <atomic>
#include <functional>
#include <future>
#include <random>
#include <thread>
#include <vector>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

using std::vector;
using std::thread;
using std::uniform_int_distribution;
using std::minstd_rand;

int test_threads() {
  je_malloc_conf = "narenas:3";
  int narenas = 0;
  size_t sz = sizeof(narenas);
  je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0);
  if (narenas != 3) {
    printf("Error: unexpected number of arenas: %d\n", narenas);
    return 1;
  }
  static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 };
  static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0]));
  vector<thread> workers;
  static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50;
  je_malloc_stats_print(NULL, NULL, NULL);
  size_t allocated1;
  size_t sz1 = sizeof(allocated1);
  je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0);
  printf("\nPress Enter to start threads...\n");
  getchar();
  printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2);
  for (int i = 0; i < numThreads; i++) {
    workers.emplace_back([tid = i]() {
      uniform_int_distribution<int> sizeDist(0, numSizes - 1);
      minstd_rand rnd(tid * 17);
      uint8_t* ptrs[numAllocsMax];
      int ptrsz[numAllocsMax];
      for (int i = 0; i < numIter1; ++i) {
        thread t([&]() {
          for (int i = 0; i < numIter2; ++i) {
            const int numAllocs = numAllocsMax - sizeDist(rnd);
            for (int j = 0; j < numAllocs; j += 64) {
              const int x = sizeDist(rnd);
              const int sz = sizes[x];
              ptrsz[j] = sz;
              ptrs[j] = (uint8_t*)je_malloc(sz);
              if (!ptrs[j]) {
                printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. %d\n", sz, tid, i, j, x);
                exit(1);
              }
              for (int k = 0; k < sz; k++)
                ptrs[j][k] = tid + k;
            }
            for (int j = 0; j < numAllocs; j += 64) {
              for (int k = 0, sz = ptrsz[j]; k < sz; k++)
                if (ptrs[j][k] != (uint8_t)(tid + k)) {
                  printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k));
                  exit(1);
                }
              je_free(ptrs[j]);
            }
          }
        });
        t.join();
      }
    });
  }
  for (thread& t : workers) {
    t.join();
  }
  je_malloc_stats_print(NULL, NULL, NULL);
  size_t allocated2;
  je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0);
  size_t leaked = allocated2 - allocated1;
  printf("\nDone. Leaked: %zd bytes\n", leaked);
  bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet)
  printf("\nTest %s!\n", (failed ? "FAILED" : "successful"));
  printf("\nPress Enter to continue...\n");
  getchar();
  return failed ? 1 : 0;
}
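The deleted test drives jemalloc through its je_-prefixed control interface (je_mallctl, je_malloc_stats_print). As a minimal standalone sketch of that interface, assuming a jemalloc build that exports the je_ prefix as in the MSVC project above, reading the same "stats.active" statistic looks roughly like this (error handling kept minimal):

/* Sketch only: assumes a jemalloc build exporting the je_ prefix. */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    void *p = je_malloc(1024);           /* force some allocator activity */
    size_t active, sz = sizeof(active);
    /* "stats.active" is the same mallctl name the deleted test reads. */
    if (je_mallctl("stats.active", (void *)&active, &sz, NULL, 0) == 0) {
        printf("stats.active = %zu bytes\n", active);
    }
    je_free(p);
    return 0;
}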
deps/jemalloc/msvc/test_threads/test_threads.h (deleted, 100644 → 0)

#pragma once

int test_threads();
deps/jemalloc/msvc/test_threads/test_threads_main.cpp (deleted, 100644 → 0)

#include "test_threads.h"
#include <future>
#include <functional>
#include <chrono>

using namespace std::chrono_literals;

int main(int argc, char** argv) {
  int rc = test_threads();
  return rc;
}
deps/jemalloc/run_tests.sh (deleted, 100755 → 0)

$(dirname "$0")/scripts/gen_run_tests.py | bash
deps/jemalloc/scripts/gen_run_tests.py (deleted, 100755 → 0)

#!/usr/bin/env python

import sys
from itertools import combinations
from os import uname
from multiprocessing import cpu_count

# Later, we want to test extended vaddr support.  Apparently, the "real" way of
# checking this is flaky on OS X.
bits_64 = sys.maxsize > 2**32

nparallel = cpu_count() * 2

uname = uname()[0]

def powerset(items):
    result = []
    for i in xrange(len(items) + 1):
        result += combinations(items, i)
    return result

possible_compilers = [('gcc', 'g++'), ('clang', 'clang++')]
possible_compiler_opts = [
    '-m32',
]
possible_config_opts = [
    '--enable-debug',
    '--enable-prof',
    '--disable-stats',
]
if bits_64:
    possible_config_opts.append('--with-lg-vaddr=56')

possible_malloc_conf_opts = [
    'tcache:false',
    'dss:primary',
    'percpu_arena:percpu',
    'background_thread:true',
]

print 'set -e'
print 'if [ -f Makefile ] ; then make relclean ; fi'
print 'autoconf'
print 'rm -rf run_tests.out'
print 'mkdir run_tests.out'
print 'cd run_tests.out'

ind = 0
for cc, cxx in possible_compilers:
    for compiler_opts in powerset(possible_compiler_opts):
        for config_opts in powerset(possible_config_opts):
            for malloc_conf_opts in powerset(possible_malloc_conf_opts):
                if cc is 'clang' \
                  and '-m32' in possible_compiler_opts \
                  and '--enable-prof' in config_opts:
                    continue
                config_line = (
                    'EXTRA_CFLAGS=-Werror EXTRA_CXXFLAGS=-Werror '
                    + 'CC="{} {}" '.format(cc, " ".join(compiler_opts))
                    + 'CXX="{} {}" '.format(cxx, " ".join(compiler_opts))
                    + '../../configure '
                    + " ".join(config_opts)
                    + (' --with-malloc-conf=' + ",".join(malloc_conf_opts)
                       if len(malloc_conf_opts) > 0 else '')
                )

                # We don't want to test large vaddr spaces in 32-bit mode.
                if ('-m32' in compiler_opts
                        and '--with-lg-vaddr=56' in config_opts):
                    continue

                # Per CPU arenas are only supported on Linux.
                linux_supported = ('percpu_arena:percpu' in malloc_conf_opts \
                    or 'background_thread:true' in malloc_conf_opts)
                # Heap profiling and dss are not supported on OS X.
                darwin_unsupported = ('--enable-prof' in config_opts or \
                    'dss:primary' in malloc_conf_opts)
                if (uname == 'Linux' and linux_supported) \
                  or (not linux_supported and (uname != 'Darwin' or \
                  not darwin_unsupported)):
                    print """cat <<EOF > run_test_%(ind)d.sh
#!/bin/sh

set -e

abort() {
    echo "==> Error" >> run_test.log
    echo "Error; see run_tests.out/run_test_%(ind)d.out/run_test.log"
    exit 255 # Special exit code tells xargs to terminate.
}

# Environment variables are not supported.
run_cmd() {
    echo "==> \$@" >> run_test.log
    \$@ >> run_test.log 2>&1 || abort
}

echo "=> run_test_%(ind)d: %(config_line)s"
mkdir run_test_%(ind)d.out
cd run_test_%(ind)d.out

echo "==> %(config_line)s" >> run_test.log
%(config_line)s >> run_test.log 2>&1 || abort

run_cmd make all tests
run_cmd make check
run_cmd make distclean
EOF
chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line}
                    ind += 1

print 'for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs -P %(nparallel)d -n 1 sh' % {'last_ind': ind - 1, 'nparallel': nparallel}
deps/jemalloc/scripts/gen_travis.py (deleted, 100755 → 0)

#!/usr/bin/env python

from itertools import combinations

travis_template = """\
language: generic

matrix:
  include:
%s

before_script:
  - autoconf
  - ./configure ${COMPILER_FLAGS:+ \
      CC="$CC $COMPILER_FLAGS" \
      CXX="$CXX $COMPILER_FLAGS" } \
      $CONFIGURE_FLAGS
  - make -j3
  - make -j3 tests

script:
  - make check
"""

# The 'default' configuration is gcc, on linux, with no compiler or configure
# flags.  We also test with clang, -m32, --enable-debug, --enable-prof,
# --disable-stats, and --with-malloc-conf=tcache:false.  To avoid abusing
# travis though, we don't test all 2**7 = 128 possible combinations of these;
# instead, we only test combinations of up to 2 'unusual' settings, under the
# hope that bugs involving interactions of such settings are rare.
# Things at once, for C(7, 0) + C(7, 1) + C(7, 2) = 29
MAX_UNUSUAL_OPTIONS = 2

os_default = 'linux'
os_unusual = 'osx'

compilers_default = 'CC=gcc CXX=g++'
compilers_unusual = 'CC=clang CXX=clang++'

compiler_flag_unusuals = ['-m32']

configure_flag_unusuals = [
    '--enable-debug',
    '--enable-prof',
    '--disable-stats',
]

malloc_conf_unusuals = [
    'tcache:false',
    'dss:primary',
    'percpu_arena:percpu',
    'background_thread:true',
]

all_unusuals = (
    [os_unusual] + [compilers_unusual] + compiler_flag_unusuals
    + configure_flag_unusuals + malloc_conf_unusuals
)

unusual_combinations_to_test = []
for i in xrange(MAX_UNUSUAL_OPTIONS + 1):
    unusual_combinations_to_test += combinations(all_unusuals, i)

include_rows = ""
for unusual_combination in unusual_combinations_to_test:
    os = os_default
    if os_unusual in unusual_combination:
        os = os_unusual

    compilers = compilers_default
    if compilers_unusual in unusual_combination:
        compilers = compilers_unusual

    compiler_flags = [x for x in unusual_combination if x in compiler_flag_unusuals]

    configure_flags = [x for x in unusual_combination if x in configure_flag_unusuals]

    malloc_conf = [x for x in unusual_combination if x in malloc_conf_unusuals]
    # Filter out unsupported configurations on OS X.
    if os == 'osx' and ('dss:primary' in malloc_conf or \
      'percpu_arena:percpu' in malloc_conf or 'background_thread:true' \
      in malloc_conf):
        continue
    if len(malloc_conf) > 0:
        configure_flags.append('--with-malloc-conf=' + ",".join(malloc_conf))

    # Filter out an unsupported configuration - heap profiling on OS X.
    if os == 'osx' and '--enable-prof' in configure_flags:
        continue

    # We get some spurious errors when -Warray-bounds is enabled.
    env_string = ('{} COMPILER_FLAGS="{}" CONFIGURE_FLAGS="{}" '
        'EXTRA_CFLAGS="-Werror -Wno-array-bounds"').format(
        compilers, " ".join(compiler_flags), " ".join(configure_flags))

    include_rows += '    - os: %s\n' % os
    include_rows += '      env: %s\n' % env_string
    if '-m32' in unusual_combination and os == 'linux':
        include_rows += '      addons:\n'
        include_rows += '        apt:\n'
        include_rows += '          packages:\n'
        include_rows += '            - gcc-multilib\n'

print travis_template % include_rows
deps/jemalloc/src/arena.c (deleted, 100644 → 0)

#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/* Data. */

/*
 * Define names for both unininitialized and initialized phases, so that
 * options and mallctl processing are straightforward.
 */
const char *percpu_arena_mode_names[] = {
    "percpu",
    "phycpu",
    "disabled",
    "percpu",
    "phycpu"
};
percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;

ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;

static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;

const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y) \
    h,
    SMOOTHSTEP
#undef STEP
};

static div_info_t arena_binind_div_info[NBINS];

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
    size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
    bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin);

/******************************************************************************/

void
arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
    *nthreads += arena_nthreads_get(arena, false);
    *dss = dss_prec_names[arena_dss_prec_get(arena)];
    *dirty_decay_ms = arena_dirty_decay_ms_get(arena);
    *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
    *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
    *ndirty += extents_npages_get(&arena->extents_dirty);
    *nmuzzy += extents_npages_get(&arena->extents_muzzy);
}

void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    bin_stats_t *bstats, arena_stats_large_t *lstats) {
    cassert(config_stats);

    arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
        muzzy_decay_ms, nactive, ndirty, nmuzzy);

    size_t base_allocated, base_resident, base_mapped, metadata_thp;
    base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
        &base_mapped, &metadata_thp);

    arena_stats_lock(tsdn, &arena->stats);

    arena_stats_accum_zu(&astats->mapped, base_mapped
        + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
    arena_stats_accum_zu(&astats->retained,
        extents_npages_get(&arena->extents_retained) << LG_PAGE);

    arena_stats_accum_u64(&astats->decay_dirty.npurge,
        arena_stats_read_u64(tsdn, &arena->stats,
        &arena->stats.decay_dirty.npurge));
    arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
        arena_stats_read_u64(tsdn, &arena->stats,
        &arena->stats.decay_dirty.nmadvise));
    arena_stats_accum_u64(&astats->decay_dirty.purged,
        arena_stats_read_u64(tsdn, &arena->stats,
        &arena->stats.decay_dirty.purged));

    arena_stats_accum_u64(&astats->decay_muzzy.npurge,
        arena_stats_read_u64(tsdn, &arena->stats,
        &arena->stats.decay_muzzy.npurge));
    arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
        arena_stats_read_u64(tsdn, &arena->stats,
        &arena->stats.decay_muzzy.nmadvise));
    arena_stats_accum_u64(&astats->decay_muzzy.purged,
        arena_stats_read_u64(tsdn, &arena->stats,
        &arena->stats.decay_muzzy.purged));

    arena_stats_accum_zu(&astats->base, base_allocated);
    arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
    arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
    arena_stats_accum_zu(&astats->resident, base_resident +
        (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
        extents_npages_get(&arena->extents_dirty) +
        extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));

    for (szind_t i = 0; i < NSIZES - NBINS; i++) {
        uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
            &arena->stats.lstats[i].nmalloc);
        arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
        arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);

        uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
            &arena->stats.lstats[i].ndalloc);
        arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
        arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);

        uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
            &arena->stats.lstats[i].nrequests);
        arena_stats_accum_u64(&lstats[i].nrequests,
            nmalloc + nrequests);
        arena_stats_accum_u64(&astats->nrequests_large,
            nmalloc + nrequests);

        assert(nmalloc >= ndalloc);
        assert(nmalloc - ndalloc <= SIZE_T_MAX);
        size_t curlextents = (size_t)(nmalloc - ndalloc);
        lstats[i].curlextents += curlextents;
        arena_stats_accum_zu(&astats->allocated_large,
            curlextents * sz_index2size(NBINS + i));
    }

    arena_stats_unlock(tsdn, &arena->stats);

    /* tcache_bytes counts currently cached bytes. */
    atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
    malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
    cache_bin_array_descriptor_t *descriptor;
    ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
        szind_t i = 0;
        for (; i < NBINS; i++) {
            cache_bin_t *tbin = &descriptor->bins_small[i];
            arena_stats_accum_zu(&astats->tcache_bytes,
                tbin->ncached * sz_index2size(i));
        }
        for (; i < nhbins; i++) {
            cache_bin_t *tbin = &descriptor->bins_large[i];
            arena_stats_accum_zu(&astats->tcache_bytes,
                tbin->ncached * sz_index2size(i));
        }
    }
    malloc_mutex_prof_read(tsdn,
        &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
        &arena->tcache_ql_mtx);
    malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);

#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \
    malloc_mutex_lock(tsdn, &arena->mtx); \
    malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \
        &arena->mtx); \
    malloc_mutex_unlock(tsdn, &arena->mtx);

    /* Gather per arena mutex profiling data. */
    READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
    READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
        arena_prof_mutex_extent_avail)
    READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
        arena_prof_mutex_extents_dirty)
    READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
        arena_prof_mutex_extents_muzzy)
    READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
        arena_prof_mutex_extents_retained)
    READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
        arena_prof_mutex_decay_dirty)
    READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
        arena_prof_mutex_decay_muzzy)
    READ_ARENA_MUTEX_PROF_DATA(base->mtx, arena_prof_mutex_base)
#undef READ_ARENA_MUTEX_PROF_DATA

    nstime_copy(&astats->uptime, &arena->create_time);
    nstime_update(&astats->uptime);
    nstime_subtract(&astats->uptime, &arena->create_time);

    for (szind_t i = 0; i < NBINS; i++) {
        bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
    }
}

void
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
        extent);
    if (arena_dirty_decay_ms_get(arena) == 0) {
        arena_decay_dirty(tsdn, arena, false, true);
    } else {
        arena_background_thread_inactivity_check(tsdn, arena, false);
    }
}

static void *
arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
    void *ret;
    arena_slab_data_t *slab_data = extent_slab_data_get(slab);
    size_t regind;

    assert(extent_nfree_get(slab) > 0);
    assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

    regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
    ret = (void *)((uintptr_t)extent_addr_get(slab) +
        (uintptr_t)(bin_info->reg_size * regind));
    extent_nfree_dec(slab);
    return ret;
}

#ifndef JEMALLOC_JET
static
#endif
size_t
arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
    size_t diff, regind;

    /* Freeing a pointer outside the slab can cause assertion failure. */
    assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
    assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
    /* Freeing an interior pointer can cause assertion failure. */
    assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
        (uintptr_t)bin_infos[binind].reg_size == 0);

    diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));

    /* Avoid doing division with a variable divisor. */
    regind = div_compute(&arena_binind_div_info[binind], diff);

    assert(regind < bin_infos[binind].nregs);

    return regind;
}
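/*
 * Illustration (not part of the original file): div_compute() above replaces a
 * plain division by the region size.  A straightforward version of
 * arena_slab_regind() would express the same computation as below; the helper
 * name is hypothetical and exists only to show the equivalence.
 */
static size_t
arena_slab_regind_by_division(extent_t *slab, szind_t binind, const void *ptr) {
    size_t diff = (size_t)((uintptr_t)ptr -
        (uintptr_t)extent_addr_get(slab));
    /* Same result as div_compute(&arena_binind_div_info[binind], diff). */
    return diff / bin_infos[binind].reg_size;
}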
static void
arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
    szind_t binind = extent_szind_get(slab);
    const bin_info_t *bin_info = &bin_infos[binind];
    size_t regind = arena_slab_regind(slab, binind, ptr);

    assert(extent_nfree_get(slab) < bin_info->nregs);
    /* Freeing an unallocated pointer can cause assertion failure. */
    assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));

    bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
    extent_nfree_inc(slab);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages) {
    atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages) {
    assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
    atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
}

static void
arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
    szind_t index, hindex;

    cassert(config_stats);

    if (usize < LARGE_MINCLASS) {
        usize = LARGE_MINCLASS;
    }
    index = sz_size2index(usize);
    hindex = (index >= NBINS) ? index - NBINS : 0;

    arena_stats_add_u64(tsdn, &arena->stats,
        &arena->stats.lstats[hindex].nmalloc, 1);
}

static void
arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
    szind_t index, hindex;

    cassert(config_stats);

    if (usize < LARGE_MINCLASS) {
        usize = LARGE_MINCLASS;
    }
    index = sz_size2index(usize);
    hindex = (index >= NBINS) ? index - NBINS : 0;

    arena_stats_add_u64(tsdn, &arena->stats,
        &arena->stats.lstats[hindex].ndalloc, 1);
}

static void
arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
    size_t usize) {
    arena_large_dalloc_stats_update(tsdn, arena, oldusize);
    arena_large_malloc_stats_update(tsdn, arena, usize);
}

extent_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero) {
    extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    szind_t szind = sz_size2index(usize);
    size_t mapped_add;
    bool commit = true;
    extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
        &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
        szind, zero, &commit);
    if (extent == NULL) {
        extent = extents_alloc(tsdn, arena, &extent_hooks,
            &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
            false, szind, zero, &commit);
    }
    size_t size = usize + sz_large_pad;
    if (extent == NULL) {
        extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
            usize, sz_large_pad, alignment, false, szind, zero, &commit);
        if (config_stats) {
            /*
             * extent may be NULL on OOM, but in that case
             * mapped_add isn't used below, so there's no need to
             * conditionlly set it to 0 here.
             */
            mapped_add = size;
        }
    } else if (config_stats) {
        mapped_add = 0;
    }

    if (extent != NULL) {
        if (config_stats) {
            arena_stats_lock(tsdn, &arena->stats);
            arena_large_malloc_stats_update(tsdn, arena, usize);
            if (mapped_add != 0) {
                arena_stats_add_zu(tsdn, &arena->stats,
                    &arena->stats.mapped, mapped_add);
            }
            arena_stats_unlock(tsdn, &arena->stats);
        }
        arena_nactive_add(arena, size >> LG_PAGE);
    }

    return extent;
}

void
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
    if (config_stats) {
        arena_stats_lock(tsdn, &arena->stats);
        arena_large_dalloc_stats_update(tsdn, arena,
            extent_usize_get(extent));
        arena_stats_unlock(tsdn, &arena->stats);
    }
    arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}

void
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
    size_t usize = extent_usize_get(extent);
    size_t udiff = oldusize - usize;

    if (config_stats) {
        arena_stats_lock(tsdn, &arena->stats);
        arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
        arena_stats_unlock(tsdn, &arena->stats);
    }
    arena_nactive_sub(arena, udiff >> LG_PAGE);
}

void
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
    size_t usize = extent_usize_get(extent);
    size_t udiff = usize - oldusize;

    if (config_stats) {
        arena_stats_lock(tsdn, &arena->stats);
        arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
        arena_stats_unlock(tsdn, &arena->stats);
    }
    arena_nactive_add(arena, udiff >> LG_PAGE);
}

static ssize_t
arena_decay_ms_read(arena_decay_t *decay) {
    return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}

static void
arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
    atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
}

static void
arena_decay_deadline_init(arena_decay_t *decay) {
    /*
     * Generate a new deadline that is uniformly random within the next
     * epoch after the current one.
     */
    nstime_copy(&decay->deadline, &decay->epoch);
    nstime_add(&decay->deadline, &decay->interval);
    if (arena_decay_ms_read(decay) > 0) {
        nstime_t jitter;

        nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
            nstime_ns(&decay->interval)));
        nstime_add(&decay->deadline, &jitter);
    }
}

static bool
arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
    return (nstime_compare(&decay->deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
    uint64_t sum;
    size_t npages_limit_backlog;
    unsigned i;

    /*
     * For each element of decay_backlog, multiply by the corresponding
     * fixed-point smoothstep decay factor.  Sum the products, then divide
     * to round down to the nearest whole number of pages.
     */
    sum = 0;
    for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
        sum += decay->backlog[i] * h_steps[i];
    }
    npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

    return npages_limit_backlog;
}
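/*
 * Worked illustration (not part of the original file, and the constants are
 * hypothetical): with SMOOTHSTEP_NSTEPS == 3, SMOOTHSTEP_BFP == 24 and
 * factors h_steps[] == {0x400000, 0x800000, 0xc00000} (0.25, 0.5 and 0.75 in
 * 24-bit fixed point), a backlog of {16, 16, 16} pages yields
 * 16*0.25 + 16*0.5 + 16*0.75 == 24 pages allowed to remain unpurged:
 *
 *     uint64_t sum = 16 * 0x400000ULL + 16 * 0x800000ULL + 16 * 0xc00000ULL;
 *     size_t limit = (size_t)(sum >> 24);    // == 24
 *
 * The real h_steps[] values come from the generated SMOOTHSTEP table, so the
 * per-epoch allowance follows a smooth sigmoidal decay curve rather than the
 * linear factors used in this sketch.
 */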
static void
arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
    size_t npages_delta = (current_npages > decay->nunpurged) ?
        current_npages - decay->nunpurged : 0;
    decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;

    if (config_debug) {
        if (current_npages > decay->ceil_npages) {
            decay->ceil_npages = current_npages;
        }
        size_t npages_limit = arena_decay_backlog_npages_limit(decay);
        assert(decay->ceil_npages >= npages_limit);
        if (decay->ceil_npages > npages_limit) {
            decay->ceil_npages = npages_limit;
        }
    }
}

static void
arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
    size_t current_npages) {
    if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
        memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
            sizeof(size_t));
    } else {
        size_t nadvance_z = (size_t)nadvance_u64;

        assert((uint64_t)nadvance_z == nadvance_u64);

        memmove(decay->backlog, &decay->backlog[nadvance_z],
            (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
        if (nadvance_z > 1) {
            memset(&decay->backlog[SMOOTHSTEP_NSTEPS - nadvance_z], 0,
                (nadvance_z-1) * sizeof(size_t));
        }
    }

    arena_decay_backlog_update_last(decay, current_npages);
}

static void
arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, size_t current_npages, size_t npages_limit,
    bool is_background_thread) {
    if (current_npages > npages_limit) {
        arena_decay_to_limit(tsdn, arena, decay, extents, false,
            npages_limit, current_npages - npages_limit,
            is_background_thread);
    }
}

static void
arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
    size_t current_npages) {
    assert(arena_decay_deadline_reached(decay, time));

    nstime_t delta;
    nstime_copy(&delta, time);
    nstime_subtract(&delta, &decay->epoch);

    uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
    assert(nadvance_u64 > 0);

    /* Add nadvance_u64 decay intervals to epoch. */
    nstime_copy(&delta, &decay->interval);
    nstime_imultiply(&delta, nadvance_u64);
    nstime_add(&decay->epoch, &delta);

    /* Set a new deadline. */
    arena_decay_deadline_init(decay);

    /* Update the backlog. */
    arena_decay_backlog_update(decay, nadvance_u64, current_npages);
}

static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, const nstime_t *time, bool is_background_thread) {
    size_t current_npages = extents_npages_get(extents);
    arena_decay_epoch_advance_helper(decay, time, current_npages);

    size_t npages_limit = arena_decay_backlog_npages_limit(decay);
    /* We may unlock decay->mtx when try_purge(). Finish logging first. */
    decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
        current_npages;

    if (!background_thread_enabled() || is_background_thread) {
        arena_decay_try_purge(tsdn, arena, decay, extents,
            current_npages, npages_limit, is_background_thread);
    }
}

static void
arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
    arena_decay_ms_write(decay, decay_ms);
    if (decay_ms > 0) {
        nstime_init(&decay->interval, (uint64_t)decay_ms *
            KQU(1000000));
        nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
    }

    nstime_init(&decay->epoch, 0);
    nstime_update(&decay->epoch);
    decay->jitter_state = (uint64_t)(uintptr_t)decay;
    arena_decay_deadline_init(decay);
    decay->nunpurged = 0;
    memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
    arena_stats_decay_t *stats) {
    if (config_debug) {
        for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
            assert(((char *)decay)[i] == 0);
        }
        decay->ceil_npages = 0;
    }
    if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
        malloc_mutex_rank_exclusive)) {
        return true;
    }
    decay->purging = false;
    arena_decay_reinit(decay, decay_ms);
    /* Memory is zeroed, so there is no need to clear stats. */
    if (config_stats) {
        decay->stats = stats;
    }
    return false;
}

static bool
arena_decay_ms_valid(ssize_t decay_ms) {
    if (decay_ms < -1) {
        return false;
    }
    if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
        KQU(1000)) {
        return true;
    }
    return false;
}

static bool
arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread) {
    malloc_mutex_assert_owner(tsdn, &decay->mtx);

    /* Purge all or nothing if the option is disabled. */
    ssize_t decay_ms = arena_decay_ms_read(decay);
    if (decay_ms <= 0) {
        if (decay_ms == 0) {
            arena_decay_to_limit(tsdn, arena, decay, extents, false,
                0, extents_npages_get(extents),
                is_background_thread);
        }
        return false;
    }

    nstime_t time;
    nstime_init(&time, 0);
    nstime_update(&time);
    if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
        > 0)) {
        /*
         * Time went backwards.  Move the epoch back in time and
         * generate a new deadline, with the expectation that time
         * typically flows forward for long enough periods of time that
         * epochs complete.  Unfortunately, this strategy is susceptible
         * to clock jitter triggering premature epoch advances, but
         * clock jitter estimation and compensation isn't feasible here
         * because calls into this code are event-driven.
         */
        nstime_copy(&decay->epoch, &time);
        arena_decay_deadline_init(decay);
    } else {
        /* Verify that time does not go backwards. */
        assert(nstime_compare(&decay->epoch, &time) <= 0);
    }

    /*
     * If the deadline has been reached, advance to the current epoch and
     * purge to the new limit if necessary.  Note that dirty pages created
     * during the current epoch are not subject to purge until a future
     * epoch, so as a result purging only happens during epoch advances, or
     * being triggered by background threads (scheduled event).
     */
    bool advance_epoch = arena_decay_deadline_reached(decay, &time);
    if (advance_epoch) {
        arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
            is_background_thread);
    } else if (is_background_thread) {
        arena_decay_try_purge(tsdn, arena, decay, extents,
            extents_npages_get(extents),
            arena_decay_backlog_npages_limit(decay),
            is_background_thread);
    }

    return advance_epoch;
}

static ssize_t
arena_decay_ms_get(arena_decay_t *decay) {
    return arena_decay_ms_read(decay);
}

ssize_t
arena_dirty_decay_ms_get(arena_t *arena) {
    return arena_decay_ms_get(&arena->decay_dirty);
}

ssize_t
arena_muzzy_decay_ms_get(arena_t *arena) {
    return arena_decay_ms_get(&arena->decay_muzzy);
}

static bool
arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, ssize_t decay_ms) {
    if (!arena_decay_ms_valid(decay_ms)) {
        return true;
    }

    malloc_mutex_lock(tsdn, &decay->mtx);
    /*
     * Restart decay backlog from scratch, which may cause many dirty pages
     * to be immediately purged.  It would conceptually be possible to map
     * the old backlog onto the new backlog, but there is no justification
     * for such complexity since decay_ms changes are intended to be
     * infrequent, either between the {-1, 0, >0} states, or a one-time
     * arbitrary change during initial arena configuration.
     */
    arena_decay_reinit(decay, decay_ms);
    arena_maybe_decay(tsdn, arena, decay, extents, false);
    malloc_mutex_unlock(tsdn, &decay->mtx);

    return false;
}

bool
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms) {
    return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
        &arena->extents_dirty, decay_ms);
}

bool
arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms) {
    return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
        &arena->extents_muzzy, decay_ms);
}
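/*
 * Illustration (not part of the original file): in jemalloc 5.x these setters
 * are reachable from the application via the "arena.<i>.dirty_decay_ms" and
 * "arena.<i>.muzzy_decay_ms" mallctl names, so the decay time can be adjusted
 * at run time.  Sketch only, je_ prefix assumed and error handling omitted:
 *
 *     ssize_t decay_ms = 5000;    // purge dirty pages over roughly 5 seconds
 *     je_mallctl("arena.0.dirty_decay_ms", NULL, NULL,
 *         (void *)&decay_ms, sizeof(decay_ms));
 *
 * A value of 0 purges immediately and -1 disables purging, matching
 * arena_decay_ms_valid() and arena_maybe_decay() above.
 */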
static
size_t
arena_stash_decayed
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_hooks_t
**
r_extent_hooks
,
extents_t
*
extents
,
size_t
npages_limit
,
size_t
npages_decay_max
,
extent_list_t
*
decay_extents
)
{
witness_assert_depth_to_rank
(
tsdn_witness_tsdp_get
(
tsdn
),
WITNESS_RANK_CORE
,
0
);
/* Stash extents according to npages_limit. */
size_t
nstashed
=
0
;
extent_t
*
extent
;
while
(
nstashed
<
npages_decay_max
&&
(
extent
=
extents_evict
(
tsdn
,
arena
,
r_extent_hooks
,
extents
,
npages_limit
))
!=
NULL
)
{
extent_list_append
(
decay_extents
,
extent
);
nstashed
+=
extent_size_get
(
extent
)
>>
LG_PAGE
;
}
return
nstashed
;
}
static
size_t
arena_decay_stashed
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_hooks_t
**
r_extent_hooks
,
arena_decay_t
*
decay
,
extents_t
*
extents
,
bool
all
,
extent_list_t
*
decay_extents
,
bool
is_background_thread
)
{
UNUSED
size_t
nmadvise
,
nunmapped
;
size_t
npurged
;
if
(
config_stats
)
{
nmadvise
=
0
;
nunmapped
=
0
;
}
npurged
=
0
;
ssize_t
muzzy_decay_ms
=
arena_muzzy_decay_ms_get
(
arena
);
for
(
extent_t
*
extent
=
extent_list_first
(
decay_extents
);
extent
!=
NULL
;
extent
=
extent_list_first
(
decay_extents
))
{
if
(
config_stats
)
{
nmadvise
++
;
}
size_t
npages
=
extent_size_get
(
extent
)
>>
LG_PAGE
;
npurged
+=
npages
;
extent_list_remove
(
decay_extents
,
extent
);
switch
(
extents_state_get
(
extents
))
{
case
extent_state_active
:
not_reached
();
case
extent_state_dirty
:
if
(
!
all
&&
muzzy_decay_ms
!=
0
&&
!
extent_purge_lazy_wrapper
(
tsdn
,
arena
,
r_extent_hooks
,
extent
,
0
,
extent_size_get
(
extent
)))
{
extents_dalloc
(
tsdn
,
arena
,
r_extent_hooks
,
&
arena
->
extents_muzzy
,
extent
);
arena_background_thread_inactivity_check
(
tsdn
,
arena
,
is_background_thread
);
break
;
}
/* Fall through. */
case
extent_state_muzzy
:
extent_dalloc_wrapper
(
tsdn
,
arena
,
r_extent_hooks
,
extent
);
if
(
config_stats
)
{
nunmapped
+=
npages
;
}
break
;
case
extent_state_retained
:
default:
not_reached
();
}
}
if
(
config_stats
)
{
arena_stats_lock
(
tsdn
,
&
arena
->
stats
);
arena_stats_add_u64
(
tsdn
,
&
arena
->
stats
,
&
decay
->
stats
->
npurge
,
1
);
arena_stats_add_u64
(
tsdn
,
&
arena
->
stats
,
&
decay
->
stats
->
nmadvise
,
nmadvise
);
arena_stats_add_u64
(
tsdn
,
&
arena
->
stats
,
&
decay
->
stats
->
purged
,
npurged
);
arena_stats_sub_zu
(
tsdn
,
&
arena
->
stats
,
&
arena
->
stats
.
mapped
,
nunmapped
<<
LG_PAGE
);
arena_stats_unlock
(
tsdn
,
&
arena
->
stats
);
}
return
npurged
;
}
/*
* npages_limit: Decay at most npages_decay_max pages without violating the
* invariant: (extents_npages_get(extents) >= npages_limit). We need an upper
* bound on number of pages in order to prevent unbounded growth (namely in
* stashed), otherwise unbounded new pages could be added to extents during the
* current decay run, so that the purging thread never finishes.
*/
static
void
arena_decay_to_limit
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_decay_t
*
decay
,
extents_t
*
extents
,
bool
all
,
size_t
npages_limit
,
size_t
npages_decay_max
,
bool
is_background_thread
)
{
witness_assert_depth_to_rank
(
tsdn_witness_tsdp_get
(
tsdn
),
WITNESS_RANK_CORE
,
1
);
malloc_mutex_assert_owner
(
tsdn
,
&
decay
->
mtx
);
if
(
decay
->
purging
)
{
return
;
}
decay
->
purging
=
true
;
malloc_mutex_unlock
(
tsdn
,
&
decay
->
mtx
);
extent_hooks_t
*
extent_hooks
=
extent_hooks_get
(
arena
);
extent_list_t
decay_extents
;
extent_list_init
(
&
decay_extents
);
size_t
npurge
=
arena_stash_decayed
(
tsdn
,
arena
,
&
extent_hooks
,
extents
,
npages_limit
,
npages_decay_max
,
&
decay_extents
);
if
(
npurge
!=
0
)
{
UNUSED
size_t
npurged
=
arena_decay_stashed
(
tsdn
,
arena
,
&
extent_hooks
,
decay
,
extents
,
all
,
&
decay_extents
,
is_background_thread
);
assert
(
npurged
==
npurge
);
}
malloc_mutex_lock
(
tsdn
,
&
decay
->
mtx
);
decay
->
purging
=
false
;
}
static
bool
arena_decay_impl
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_decay_t
*
decay
,
extents_t
*
extents
,
bool
is_background_thread
,
bool
all
)
{
if
(
all
)
{
malloc_mutex_lock
(
tsdn
,
&
decay
->
mtx
);
arena_decay_to_limit
(
tsdn
,
arena
,
decay
,
extents
,
all
,
0
,
extents_npages_get
(
extents
),
is_background_thread
);
malloc_mutex_unlock
(
tsdn
,
&
decay
->
mtx
);
return
false
;
}
if
(
malloc_mutex_trylock
(
tsdn
,
&
decay
->
mtx
))
{
/* No need to wait if another thread is in progress. */
return
true
;
}
bool
epoch_advanced
=
arena_maybe_decay
(
tsdn
,
arena
,
decay
,
extents
,
is_background_thread
);
UNUSED
size_t
npages_new
;
if
(
epoch_advanced
)
{
/* Backlog is updated on epoch advance. */
npages_new
=
decay
->
backlog
[
SMOOTHSTEP_NSTEPS
-
1
];
}
malloc_mutex_unlock
(
tsdn
,
&
decay
->
mtx
);
if
(
have_background_thread
&&
background_thread_enabled
()
&&
epoch_advanced
&&
!
is_background_thread
)
{
background_thread_interval_check
(
tsdn
,
arena
,
decay
,
npages_new
);
}
return
false
;
}
static
bool
arena_decay_dirty
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
bool
is_background_thread
,
bool
all
)
{
return
arena_decay_impl
(
tsdn
,
arena
,
&
arena
->
decay_dirty
,
&
arena
->
extents_dirty
,
is_background_thread
,
all
);
}
static
bool
arena_decay_muzzy
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
bool
is_background_thread
,
bool
all
)
{
return
arena_decay_impl
(
tsdn
,
arena
,
&
arena
->
decay_muzzy
,
&
arena
->
extents_muzzy
,
is_background_thread
,
all
);
}
void
arena_decay
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
bool
is_background_thread
,
bool
all
)
{
if
(
arena_decay_dirty
(
tsdn
,
arena
,
is_background_thread
,
all
))
{
return
;
}
arena_decay_muzzy
(
tsdn
,
arena
,
is_background_thread
,
all
);
}
static
void
arena_slab_dalloc
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_t
*
slab
)
{
arena_nactive_sub
(
arena
,
extent_size_get
(
slab
)
>>
LG_PAGE
);
extent_hooks_t
*
extent_hooks
=
EXTENT_HOOKS_INITIALIZER
;
arena_extents_dirty_dalloc
(
tsdn
,
arena
,
&
extent_hooks
,
slab
);
}
static
void
arena_bin_slabs_nonfull_insert
(
bin_t
*
bin
,
extent_t
*
slab
)
{
assert
(
extent_nfree_get
(
slab
)
>
0
);
extent_heap_insert
(
&
bin
->
slabs_nonfull
,
slab
);
}
static
void
arena_bin_slabs_nonfull_remove
(
bin_t
*
bin
,
extent_t
*
slab
)
{
extent_heap_remove
(
&
bin
->
slabs_nonfull
,
slab
);
}
static
extent_t
*
arena_bin_slabs_nonfull_tryget
(
bin_t
*
bin
)
{
extent_t
*
slab
=
extent_heap_remove_first
(
&
bin
->
slabs_nonfull
);
if
(
slab
==
NULL
)
{
return
NULL
;
}
if
(
config_stats
)
{
bin
->
stats
.
reslabs
++
;
}
return
slab
;
}
static
void
arena_bin_slabs_full_insert
(
arena_t
*
arena
,
bin_t
*
bin
,
extent_t
*
slab
)
{
assert
(
extent_nfree_get
(
slab
)
==
0
);
/*
* Tracking extents is required by arena_reset, which is not allowed
* for auto arenas. Bypass this step to avoid touching the extent
* linkage (often results in cache misses) for auto arenas.
*/
if
(
arena_is_auto
(
arena
))
{
return
;
}
extent_list_append
(
&
bin
->
slabs_full
,
slab
);
}
static
void
arena_bin_slabs_full_remove
(
arena_t
*
arena
,
bin_t
*
bin
,
extent_t
*
slab
)
{
if
(
arena_is_auto
(
arena
))
{
return
;
}
extent_list_remove
(
&
bin
->
slabs_full
,
slab
);
}
void
arena_reset
(
tsd_t
*
tsd
,
arena_t
*
arena
)
{
/*
* Locking in this function is unintuitive. The caller guarantees that
* no concurrent operations are happening in this arena, but there are
* still reasons that some locking is necessary:
*
* - Some of the functions in the transitive closure of calls assume
* appropriate locks are held, and in some cases these locks are
* temporarily dropped to avoid lock order reversal or deadlock due to
* reentry.
* - mallctl("epoch", ...) may concurrently refresh stats. While
* strictly speaking this is a "concurrent operation", disallowing
* stats refreshes would impose an inconvenient burden.
*/
/* Large allocations. */
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
arena
->
large_mtx
);
for
(
extent_t
*
extent
=
extent_list_first
(
&
arena
->
large
);
extent
!=
NULL
;
extent
=
extent_list_first
(
&
arena
->
large
))
{
void
*
ptr
=
extent_base_get
(
extent
);
size_t
usize
;
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
arena
->
large_mtx
);
alloc_ctx_t
alloc_ctx
;
rtree_ctx_t
*
rtree_ctx
=
tsd_rtree_ctx
(
tsd
);
rtree_szind_slab_read
(
tsd_tsdn
(
tsd
),
&
extents_rtree
,
rtree_ctx
,
(
uintptr_t
)
ptr
,
true
,
&
alloc_ctx
.
szind
,
&
alloc_ctx
.
slab
);
assert
(
alloc_ctx
.
szind
!=
NSIZES
);
if
(
config_stats
||
(
config_prof
&&
opt_prof
))
{
usize
=
sz_index2size
(
alloc_ctx
.
szind
);
assert
(
usize
==
isalloc
(
tsd_tsdn
(
tsd
),
ptr
));
}
/* Remove large allocation from prof sample set. */
if
(
config_prof
&&
opt_prof
)
{
prof_free
(
tsd
,
ptr
,
usize
,
&
alloc_ctx
);
}
large_dalloc
(
tsd_tsdn
(
tsd
),
extent
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
arena
->
large_mtx
);
}
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
arena
->
large_mtx
);
/* Bins. */
for
(
unsigned
i
=
0
;
i
<
NBINS
;
i
++
)
{
extent_t
*
slab
;
bin_t
*
bin
=
&
arena
->
bins
[
i
];
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
bin
->
lock
);
if
(
bin
->
slabcur
!=
NULL
)
{
slab
=
bin
->
slabcur
;
bin
->
slabcur
=
NULL
;
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
bin
->
lock
);
arena_slab_dalloc
(
tsd_tsdn
(
tsd
),
arena
,
slab
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
bin
->
lock
);
}
while
((
slab
=
extent_heap_remove_first
(
&
bin
->
slabs_nonfull
))
!=
NULL
)
{
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
bin
->
lock
);
arena_slab_dalloc
(
tsd_tsdn
(
tsd
),
arena
,
slab
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
bin
->
lock
);
}
for
(
slab
=
extent_list_first
(
&
bin
->
slabs_full
);
slab
!=
NULL
;
slab
=
extent_list_first
(
&
bin
->
slabs_full
))
{
arena_bin_slabs_full_remove
(
arena
,
bin
,
slab
);
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
bin
->
lock
);
arena_slab_dalloc
(
tsd_tsdn
(
tsd
),
arena
,
slab
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
bin
->
lock
);
}
if
(
config_stats
)
{
bin
->
stats
.
curregs
=
0
;
bin
->
stats
.
curslabs
=
0
;
}
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
bin
->
lock
);
}
atomic_store_zu
(
&
arena
->
nactive
,
0
,
ATOMIC_RELAXED
);
}
static
void
arena_destroy_retained
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
/*
* Iterate over the retained extents and destroy them. This gives the
* extent allocator underlying the extent hooks an opportunity to unmap
* all retained memory without having to keep its own metadata
* structures. In practice, virtual memory for dss-allocated extents is
* leaked here, so best practice is to avoid dss for arenas to be
* destroyed, or provide custom extent hooks that track retained
* dss-based extents for later reuse.
*/
extent_hooks_t
*
extent_hooks
=
extent_hooks_get
(
arena
);
extent_t
*
extent
;
while
((
extent
=
extents_evict
(
tsdn
,
arena
,
&
extent_hooks
,
&
arena
->
extents_retained
,
0
))
!=
NULL
)
{
extent_destroy_wrapper
(
tsdn
,
arena
,
&
extent_hooks
,
extent
);
}
}
void
arena_destroy
(
tsd_t
*
tsd
,
arena_t
*
arena
)
{
assert
(
base_ind_get
(
arena
->
base
)
>=
narenas_auto
);
assert
(
arena_nthreads_get
(
arena
,
false
)
==
0
);
assert
(
arena_nthreads_get
(
arena
,
true
)
==
0
);
/*
* No allocations have occurred since arena_reset() was called.
* Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
* extents, so only retained extents may remain.
*/
assert
(
extents_npages_get
(
&
arena
->
extents_dirty
)
==
0
);
assert
(
extents_npages_get
(
&
arena
->
extents_muzzy
)
==
0
);
/* Deallocate retained memory. */
arena_destroy_retained
(
tsd_tsdn
(
tsd
),
arena
);
/*
* Remove the arena pointer from the arenas array. We rely on the fact
* that there is no way for the application to get a dirty read from the
* arenas array unless there is an inherent race in the application
* involving access of an arena being concurrently destroyed. The
* application must synchronize knowledge of the arena's validity, so as
* long as we use an atomic write to update the arenas array, the
* application will get a clean read any time after it synchronizes
* knowledge that the arena is no longer valid.
*/
arena_set
(
base_ind_get
(
arena
->
base
),
NULL
);
/*
* Destroy the base allocator, which manages all metadata ever mapped by
* this arena.
*/
base_delete
(
tsd_tsdn
(
tsd
),
arena
->
base
);
}
static
extent_t
*
arena_slab_alloc_hard
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_hooks_t
**
r_extent_hooks
,
const
bin_info_t
*
bin_info
,
szind_t
szind
)
{
extent_t
*
slab
;
bool
zero
,
commit
;
witness_assert_depth_to_rank
(
tsdn_witness_tsdp_get
(
tsdn
),
WITNESS_RANK_CORE
,
0
);
zero
=
false
;
commit
=
true
;
slab
=
extent_alloc_wrapper
(
tsdn
,
arena
,
r_extent_hooks
,
NULL
,
bin_info
->
slab_size
,
0
,
PAGE
,
true
,
szind
,
&
zero
,
&
commit
);
if
(
config_stats
&&
slab
!=
NULL
)
{
arena_stats_mapped_add
(
tsdn
,
&
arena
->
stats
,
bin_info
->
slab_size
);
}
return
slab
;
}
static
extent_t
*
arena_slab_alloc
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
szind_t
binind
,
const
bin_info_t
*
bin_info
)
{
witness_assert_depth_to_rank
(
tsdn_witness_tsdp_get
(
tsdn
),
WITNESS_RANK_CORE
,
0
);
extent_hooks_t
*
extent_hooks
=
EXTENT_HOOKS_INITIALIZER
;
szind_t
szind
=
sz_size2index
(
bin_info
->
reg_size
);
bool
zero
=
false
;
bool
commit
=
true
;
extent_t
*
slab
=
extents_alloc
(
tsdn
,
arena
,
&
extent_hooks
,
&
arena
->
extents_dirty
,
NULL
,
bin_info
->
slab_size
,
0
,
PAGE
,
true
,
binind
,
&
zero
,
&
commit
);
if
(
slab
==
NULL
)
{
slab
=
extents_alloc
(
tsdn
,
arena
,
&
extent_hooks
,
&
arena
->
extents_muzzy
,
NULL
,
bin_info
->
slab_size
,
0
,
PAGE
,
true
,
binind
,
&
zero
,
&
commit
);
}
if
(
slab
==
NULL
)
{
slab
=
arena_slab_alloc_hard
(
tsdn
,
arena
,
&
extent_hooks
,
bin_info
,
szind
);
if
(
slab
==
NULL
)
{
return
NULL
;
}
}
assert
(
extent_slab_get
(
slab
));
/* Initialize slab internals. */
arena_slab_data_t
*
slab_data
=
extent_slab_data_get
(
slab
);
extent_nfree_set
(
slab
,
bin_info
->
nregs
);
bitmap_init
(
slab_data
->
bitmap
,
&
bin_info
->
bitmap_info
,
false
);
arena_nactive_add
(
arena
,
extent_size_get
(
slab
)
>>
LG_PAGE
);
return
slab
;
}
static
extent_t
*
arena_bin_nonfull_slab_get
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
bin_t
*
bin
,
szind_t
binind
)
{
extent_t
*
slab
;
const
bin_info_t
*
bin_info
;
/* Look for a usable slab. */
slab
=
arena_bin_slabs_nonfull_tryget
(
bin
);
if
(
slab
!=
NULL
)
{
return
slab
;
}
/* No existing slabs have any space available. */
bin_info
=
&
bin_infos
[
binind
];
/* Allocate a new slab. */
malloc_mutex_unlock
(
tsdn
,
&
bin
->
lock
);
/******************************/
slab
=
arena_slab_alloc
(
tsdn
,
arena
,
binind
,
bin_info
);
/********************************/
malloc_mutex_lock
(
tsdn
,
&
bin
->
lock
);
if
(
slab
!=
NULL
)
{
if
(
config_stats
)
{
bin
->
stats
.
nslabs
++
;
bin
->
stats
.
curslabs
++
;
}
return
slab
;
}
/*
* arena_slab_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped bin->lock above,
* so search one more time.
*/
slab
=
arena_bin_slabs_nonfull_tryget
(
bin
);
if
(
slab
!=
NULL
)
{
return
slab
;
}
return
NULL
;
}
/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static
void
*
arena_bin_malloc_hard
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
bin_t
*
bin
,
szind_t
binind
)
{
const
bin_info_t
*
bin_info
;
extent_t
*
slab
;
bin_info
=
&
bin_infos
[
binind
];
if
(
!
arena_is_auto
(
arena
)
&&
bin
->
slabcur
!=
NULL
)
{
arena_bin_slabs_full_insert
(
arena
,
bin
,
bin
->
slabcur
);
bin
->
slabcur
=
NULL
;
}
slab
=
arena_bin_nonfull_slab_get
(
tsdn
,
arena
,
bin
,
binind
);
if
(
bin
->
slabcur
!=
NULL
)
{
/*
* Another thread updated slabcur while this one ran without the
* bin lock in arena_bin_nonfull_slab_get().
*/
if
(
extent_nfree_get
(
bin
->
slabcur
)
>
0
)
{
void
*
ret
=
arena_slab_reg_alloc
(
bin
->
slabcur
,
bin_info
);
if
(
slab
!=
NULL
)
{
/*
* arena_slab_alloc() may have allocated slab,
* or it may have been pulled from
* slabs_nonfull. Therefore it is unsafe to
* make any assumptions about how slab has
* previously been used, and
* arena_bin_lower_slab() must be called, as if
* a region were just deallocated from the slab.
*/
if
(
extent_nfree_get
(
slab
)
==
bin_info
->
nregs
)
{
arena_dalloc_bin_slab
(
tsdn
,
arena
,
slab
,
bin
);
}
else
{
arena_bin_lower_slab
(
tsdn
,
arena
,
slab
,
bin
);
}
}
return
ret
;
}
arena_bin_slabs_full_insert
(
arena
,
bin
,
bin
->
slabcur
);
bin
->
slabcur
=
NULL
;
}
if
(
slab
==
NULL
)
{
return
NULL
;
}
bin
->
slabcur
=
slab
;
assert
(
extent_nfree_get
(
bin
->
slabcur
)
>
0
);
return
arena_slab_reg_alloc
(
slab
,
bin_info
);
}
void
arena_tcache_fill_small
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
tcache_t
*
tcache
,
cache_bin_t
*
tbin
,
szind_t
binind
,
uint64_t
prof_accumbytes
)
{
unsigned
i
,
nfill
;
bin_t
*
bin
;
assert
(
tbin
->
ncached
==
0
);
if
(
config_prof
&&
arena_prof_accum
(
tsdn
,
arena
,
prof_accumbytes
))
{
prof_idump
(
tsdn
);
}
bin
=
&
arena
->
bins
[
binind
];
malloc_mutex_lock
(
tsdn
,
&
bin
->
lock
);
for
(
i
=
0
,
nfill
=
(
tcache_bin_info
[
binind
].
ncached_max
>>
tcache
->
lg_fill_div
[
binind
]);
i
<
nfill
;
i
++
)
{
extent_t
*
slab
;
void
*
ptr
;
if
((
slab
=
bin
->
slabcur
)
!=
NULL
&&
extent_nfree_get
(
slab
)
>
0
)
{
ptr
=
arena_slab_reg_alloc
(
slab
,
&
bin_infos
[
binind
]);
}
else
{
ptr
=
arena_bin_malloc_hard
(
tsdn
,
arena
,
bin
,
binind
);
}
if
(
ptr
==
NULL
)
{
/*
* OOM. tbin->avail isn't yet filled down to its first
* element, so the successful allocations (if any) must
* be moved just before tbin->avail before bailing out.
*/
if
(
i
>
0
)
{
memmove
(
tbin
->
avail
-
i
,
tbin
->
avail
-
nfill
,
i
*
sizeof
(
void
*
));
}
break
;
}
if
(
config_fill
&&
unlikely
(
opt_junk_alloc
))
{
arena_alloc_junk_small
(
ptr
,
&
bin_infos
[
binind
],
true
);
}
/* Insert such that low regions get used first. */
*
(
tbin
->
avail
-
nfill
+
i
)
=
ptr
;
}
if
(
config_stats
)
{
bin
->
stats
.
nmalloc
+=
i
;
bin
->
stats
.
nrequests
+=
tbin
->
tstats
.
nrequests
;
bin
->
stats
.
curregs
+=
i
;
bin
->
stats
.
nfills
++
;
tbin
->
tstats
.
nrequests
=
0
;
}
malloc_mutex_unlock
(
tsdn
,
&
bin
->
lock
);
tbin
->
ncached
=
i
;
arena_decay_tick
(
tsdn
,
arena
);
}
void
arena_alloc_junk_small
(
void
*
ptr
,
const
bin_info_t
*
bin_info
,
bool
zero
)
{
if
(
!
zero
)
{
memset
(
ptr
,
JEMALLOC_ALLOC_JUNK
,
bin_info
->
reg_size
);
}
}
static
void
arena_dalloc_junk_small_impl
(
void
*
ptr
,
const
bin_info_t
*
bin_info
)
{
memset
(
ptr
,
JEMALLOC_FREE_JUNK
,
bin_info
->
reg_size
);
}
arena_dalloc_junk_small_t
*
JET_MUTABLE
arena_dalloc_junk_small
=
arena_dalloc_junk_small_impl
;
static
void
*
arena_malloc_small
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
szind_t
binind
,
bool
zero
)
{
void
*
ret
;
bin_t
*
bin
;
size_t
usize
;
extent_t
*
slab
;
assert
(
binind
<
NBINS
);
bin
=
&
arena
->
bins
[
binind
];
usize
=
sz_index2size
(
binind
);
malloc_mutex_lock
(
tsdn
,
&
bin
->
lock
);
if
((
slab
=
bin
->
slabcur
)
!=
NULL
&&
extent_nfree_get
(
slab
)
>
0
)
{
ret
=
arena_slab_reg_alloc
(
slab
,
&
bin_infos
[
binind
]);
}
else
{
ret
=
arena_bin_malloc_hard
(
tsdn
,
arena
,
bin
,
binind
);
}
if
(
ret
==
NULL
)
{
malloc_mutex_unlock
(
tsdn
,
&
bin
->
lock
);
return
NULL
;
}
if
(
config_stats
)
{
bin
->
stats
.
nmalloc
++
;
bin
->
stats
.
nrequests
++
;
bin
->
stats
.
curregs
++
;
}
malloc_mutex_unlock
(
tsdn
,
&
bin
->
lock
);
if
(
config_prof
&&
arena_prof_accum
(
tsdn
,
arena
,
usize
))
{
prof_idump
(
tsdn
);
}
if
(
!
zero
)
{
if
(
config_fill
)
{
if
(
unlikely
(
opt_junk_alloc
))
{
arena_alloc_junk_small
(
ret
,
&
bin_infos
[
binind
],
false
);
}
else
if
(
unlikely
(
opt_zero
))
{
memset
(
ret
,
0
,
usize
);
}
}
}
else
{
if
(
config_fill
&&
unlikely
(
opt_junk_alloc
))
{
arena_alloc_junk_small
(
ret
,
&
bin_infos
[
binind
],
true
);
}
memset
(
ret
,
0
,
usize
);
}
arena_decay_tick
(
tsdn
,
arena
);
return
ret
;
}
void
*
arena_malloc_hard
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
size_t
size
,
szind_t
ind
,
bool
zero
)
{
assert
(
!
tsdn_null
(
tsdn
)
||
arena
!=
NULL
);
if
(
likely
(
!
tsdn_null
(
tsdn
)))
{
arena
=
arena_choose
(
tsdn_tsd
(
tsdn
),
arena
);
}
if
(
unlikely
(
arena
==
NULL
))
{
return
NULL
;
}
if
(
likely
(
size
<=
SMALL_MAXCLASS
))
{
return
arena_malloc_small
(
tsdn
,
arena
,
ind
,
zero
);
}
return
large_malloc
(
tsdn
,
arena
,
sz_index2size
(
ind
),
zero
);
}
void
*
arena_palloc
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
size_t
usize
,
size_t
alignment
,
bool
zero
,
tcache_t
*
tcache
)
{
void
*
ret
;
if
(
usize
<=
SMALL_MAXCLASS
&&
(
alignment
<
PAGE
||
(
alignment
==
PAGE
&&
(
usize
&
PAGE_MASK
)
==
0
)))
{
/* Small; alignment doesn't require special slab placement. */
ret
=
arena_malloc
(
tsdn
,
arena
,
usize
,
sz_size2index
(
usize
),
zero
,
tcache
,
true
);
}
else
{
if
(
likely
(
alignment
<=
CACHELINE
))
{
ret
=
large_malloc
(
tsdn
,
arena
,
usize
,
zero
);
}
else
{
ret
=
large_palloc
(
tsdn
,
arena
,
usize
,
alignment
,
zero
);
}
}
return
ret
;
}
void
arena_prof_promote
(
tsdn_t
*
tsdn
,
const
void
*
ptr
,
size_t
usize
)
{
cassert
(
config_prof
);
assert
(
ptr
!=
NULL
);
assert
(
isalloc
(
tsdn
,
ptr
)
==
LARGE_MINCLASS
);
assert
(
usize
<=
SMALL_MAXCLASS
);
rtree_ctx_t
rtree_ctx_fallback
;
rtree_ctx_t
*
rtree_ctx
=
tsdn_rtree_ctx
(
tsdn
,
&
rtree_ctx_fallback
);
extent_t
*
extent
=
rtree_extent_read
(
tsdn
,
&
extents_rtree
,
rtree_ctx
,
(
uintptr_t
)
ptr
,
true
);
arena_t
*
arena
=
extent_arena_get
(
extent
);
szind_t
szind
=
sz_size2index
(
usize
);
extent_szind_set
(
extent
,
szind
);
rtree_szind_slab_update
(
tsdn
,
&
extents_rtree
,
rtree_ctx
,
(
uintptr_t
)
ptr
,
szind
,
false
);
prof_accum_cancel
(
tsdn
,
&
arena
->
prof_accum
,
usize
);
assert
(
isalloc
(
tsdn
,
ptr
)
==
usize
);
}
static
size_t
arena_prof_demote
(
tsdn_t
*
tsdn
,
extent_t
*
extent
,
const
void
*
ptr
)
{
cassert
(
config_prof
);
assert
(
ptr
!=
NULL
);
extent_szind_set
(
extent
,
NBINS
);
rtree_ctx_t
rtree_ctx_fallback
;
rtree_ctx_t
*
rtree_ctx
=
tsdn_rtree_ctx
(
tsdn
,
&
rtree_ctx_fallback
);
rtree_szind_slab_update
(
tsdn
,
&
extents_rtree
,
rtree_ctx
,
(
uintptr_t
)
ptr
,
NBINS
,
false
);
assert
(
isalloc
(
tsdn
,
ptr
)
==
LARGE_MINCLASS
);
return
LARGE_MINCLASS
;
}
void
arena_dalloc_promoted
(
tsdn_t
*
tsdn
,
void
*
ptr
,
tcache_t
*
tcache
,
bool
slow_path
)
{
cassert
(
config_prof
);
assert
(
opt_prof
);
extent_t
*
extent
=
iealloc
(
tsdn
,
ptr
);
size_t
usize
=
arena_prof_demote
(
tsdn
,
extent
,
ptr
);
if
(
usize
<=
tcache_maxclass
)
{
tcache_dalloc_large
(
tsdn_tsd
(
tsdn
),
tcache
,
ptr
,
sz_size2index
(
usize
),
slow_path
);
}
else
{
large_dalloc
(
tsdn
,
extent
);
}
}
static void
arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
	/* Dissociate slab from bin. */
	if (slab == bin->slabcur) {
		bin->slabcur = NULL;
	} else {
		szind_t binind = extent_szind_get(slab);
		const bin_info_t *bin_info = &bin_infos[binind];

		/*
		 * The following block's conditional is necessary because if the
		 * slab only contains one region, then it never gets inserted
		 * into the non-full slabs heap.
		 */
		if (bin_info->nregs == 1) {
			arena_bin_slabs_full_remove(arena, bin, slab);
		} else {
			arena_bin_slabs_nonfull_remove(bin, slab);
		}
	}
}

static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin) {
	assert(slab != bin->slabcur);

	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	arena_slab_dalloc(tsdn, arena, slab);
	/****************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats) {
		bin->stats.curslabs--;
	}
}

static void
arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin) {
	assert(extent_nfree_get(slab) > 0);

	/*
	 * Make sure that if bin->slabcur is non-NULL, it refers to the
	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
	 * than proactively keeping it pointing at the oldest/lowest non-full
	 * slab.
	 */
	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
		/* Switch slabcur. */
		if (extent_nfree_get(bin->slabcur) > 0) {
			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
		} else {
			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		}
		bin->slabcur = slab;
		if (config_stats) {
			bin->stats.reslabs++;
		}
	} else {
		arena_bin_slabs_nonfull_insert(bin, slab);
	}
}
static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    void *ptr, bool junked) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	szind_t binind = extent_szind_get(slab);
	bin_t *bin = &arena->bins[binind];
	const bin_info_t *bin_info = &bin_infos[binind];

	if (!junked && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, bin_info);
	}

	arena_slab_reg_dalloc(slab, slab_data, ptr);
	unsigned nfree = extent_nfree_get(slab);
	if (nfree == bin_info->nregs) {
		arena_dissociate_bin_slab(arena, slab, bin);
		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
	} else if (nfree == 1 && slab != bin->slabcur) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		arena_bin_lower_slab(tsdn, arena, slab, bin);
	}

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}

void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    void *ptr) {
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
}

static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
	szind_t binind = extent_szind_get(extent);
	bin_t *bin = &arena->bins[binind];

	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
	malloc_mutex_unlock(tsdn, &bin->lock);
}

void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
	extent_t *extent = iealloc(tsdn, ptr);
	arena_t *arena = extent_arena_get(extent);

	arena_dalloc_bin(tsdn, arena, extent, ptr);
	arena_decay_tick(tsdn, arena);
}
bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero) {
	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= LARGE_MAXCLASS);

	if (unlikely(size > LARGE_MAXCLASS)) {
		return true;
	}

	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize_min = sz_s2u(size);
	size_t usize_max = sz_s2u(size + extra);
	if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
		assert(bin_infos[sz_size2index(oldsize)].reg_size == oldsize);
		if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
		    sz_size2index(oldsize)) && (size > oldsize || usize_max <
		    oldsize)) {
			return true;
		}

		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
		return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
		    zero);
	}

	return true;
}
static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	if (alignment == 0) {
		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	}
	usize = sz_sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
		return NULL;
	}
	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}

void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache) {
	size_t usize = sz_s2u(size);
	if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (likely(usize <= SMALL_MAXCLASS)) {
		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
			return ptr;
		}
	}

	if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
		return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
		    alignment, zero, tcache);
	}

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and copying.
	 */
	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero, tcache);
	if (ret == NULL) {
		return NULL;
	}

	/*
	 * Junk/zero-filling were already done by
	 * ipalloc()/arena_malloc().
	 */
	size_t copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return ret;
}
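The move path above is the classic allocate-copy-free fallback: when the size class cannot be preserved in place, new space is allocated, the smaller of the two sizes is copied, and the old object is released. A simplified, library-agnostic sketch of the same idea, using only libc and a hypothetical helper name:

/* Hypothetical illustration of the move path; not a jemalloc API. */
#include <stdlib.h>
#include <string.h>

static void *realloc_by_move(void *ptr, size_t oldsize, size_t newsize) {
    void *ret = malloc(newsize);
    if (ret == NULL) {
        return NULL;
    }
    /* Copy only the bytes that fit in both the old and new objects. */
    size_t copysize = (newsize < oldsize) ? newsize : oldsize;
    memcpy(ret, ptr, copysize);
    free(ptr);
    return ret;
}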
dss_prec_t
arena_dss_prec_get(arena_t *arena) {
	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
}

bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}

ssize_t
arena_dirty_decay_ms_default_get(void) {
	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

ssize_t
arena_muzzy_decay_ms_default_get(void) {
	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}
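The defaults set above back the decay options that applications normally reach through the public mallctl interface. A hedged usage sketch follows; it assumes a jemalloc 5.x build where the control is named "arenas.dirty_decay_ms", takes a ssize_t (-1 disables decay), and the je_ symbol prefix is in use (use plain mallctl() in an unprefixed build).

/* Hedged usage sketch: read and lower the default dirty decay time. */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    ssize_t decay_ms;
    size_t sz = sizeof(decay_ms);
    if (je_mallctl("arenas.dirty_decay_ms", &decay_ms, &sz, NULL, 0) == 0) {
        printf("default dirty_decay_ms: %zd\n", decay_ms);
    }
    ssize_t new_decay_ms = 1000; /* purge dirty pages after roughly 1 second */
    je_mallctl("arenas.dirty_decay_ms", NULL, NULL, &new_decay_ms,
        sizeof(new_decay_ms));
    return 0;
}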
bool
arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
    size_t *new_limit) {
	assert(opt_retain);

	pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
	if (new_limit != NULL) {
		size_t limit = *new_limit;
		/* Grow no more than the new limit. */
		if ((new_ind = sz_psz2ind(limit + 1) - 1) >
		    EXTENT_GROW_MAX_PIND) {
			return true;
		}
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
	if (old_limit != NULL) {
		*old_limit = sz_pind2sz(arena->retain_grow_limit);
	}
	if (new_limit != NULL) {
		arena->retain_grow_limit = new_ind;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);

	return false;
}

unsigned
arena_nthreads_get(arena_t *arena, bool internal) {
	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}

void
arena_nthreads_inc(arena_t *arena, bool internal) {
	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

void
arena_nthreads_dec(arena_t *arena, bool internal) {
	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

size_t
arena_extent_sn_next(arena_t *arena) {
	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}
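arena_extent_sn_next() hands out extent serial numbers with a lock-free relaxed fetch-add. The same pattern can be written with portable C11 atomics; the sketch below is standalone and uses illustrative names, not jemalloc's atomic wrappers.

/* Standalone sketch of the serial-number counter, using C11 atomics. */
#include <stdatomic.h>
#include <stddef.h>

static _Atomic size_t extent_sn_counter = 0;

static size_t sn_next(void) {
    /* Relaxed ordering is enough: only uniqueness of the values matters,
     * not synchronization with other memory. */
    return atomic_fetch_add_explicit(&extent_sn_counter, 1,
        memory_order_relaxed);
}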
arena_t *
arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;
	base_t *base;
	unsigned i;

	if (ind == 0) {
		base = b0get();
	} else {
		base = base_new(tsdn, ind, extent_hooks);
		if (base == NULL) {
			return NULL;
		}
	}

	arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
	if (arena == NULL) {
		goto label_error;
	}

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	arena->last_thd = NULL;

	if (config_stats) {
		if (arena_stats_init(tsdn, &arena->stats)) {
			goto label_error;
		}

		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
			goto label_error;
		}
	}

	if (config_prof) {
		if (prof_accum_init(tsdn, &arena->prof_accum)) {
			goto label_error;
		}
	}

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena reduces
		 * the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		atomic_store_zu(&arena->offset_state, config_debug ? ind :
		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
	}

	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);

	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
	    ATOMIC_RELAXED);

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);

	extent_list_init(&arena->large);
	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/*
	 * Delay coalescing for dirty extents despite the disruptive effect on
	 * memory layout for best-fit extent allocation, since cached extents
	 * are likely to be reused soon after deallocation, and the cost of
	 * merging/splitting extents is non-trivial.
	 */
	if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
	    true)) {
		goto label_error;
	}
	/*
	 * Coalesce muzzy extents immediately, because operations on them are in
	 * the critical path much less often than for dirty extents.
	 */
	if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
	    false)) {
		goto label_error;
	}
	/*
	 * Coalesce retained extents immediately, in part because they will
	 * never be evicted (and therefore there's no opportunity for delayed
	 * coalescing), but also because operations on retained extents are not
	 * in the critical path.
	 */
	if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
	    false)) {
		goto label_error;
	}

	if (arena_decay_init(&arena->decay_dirty,
	    arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
		goto label_error;
	}
	if (arena_decay_init(&arena->decay_muzzy,
	    arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
		goto label_error;
	}

	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
	arena->retain_grow_limit = EXTENT_GROW_MAX_PIND;
	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	extent_avail_new(&arena->extent_avail);
	if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
	    WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bool err = bin_init(&arena->bins[i]);
		if (err) {
			goto label_error;
		}
	}

	arena->base = base;
	/* Set arena before creating background threads. */
	arena_set(ind, arena);

	nstime_init(&arena->create_time, 0);
	nstime_update(&arena->create_time);

	/* We don't support reentrancy for arena 0 bootstrapping. */
	if (ind != 0) {
		/*
		 * If we're here, then arena 0 already exists, so bootstrapping
		 * is done enough that we should have tsd.
		 */
		assert(!tsdn_null(tsdn));
		pre_reentrancy(tsdn_tsd(tsdn), arena);
		if (hooks_arena_new_hook) {
			hooks_arena_new_hook();
		}
		post_reentrancy(tsdn_tsd(tsdn));
	}

	return arena;
label_error:
	if (ind != 0) {
		base_delete(tsdn, base);
	}
	return NULL;
}
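Applications do not call arena_new() directly; additional arenas are normally requested through the mallctl interface. The sketch below is a hedged usage example: it assumes the jemalloc 5.x "arenas.create" control (which returns the new arena index) and a je_-prefixed build.

/* Hedged usage sketch: create an extra arena from application code. */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    unsigned arena_ind;
    size_t sz = sizeof(arena_ind);
    if (je_mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
        return 1;
    }
    printf("created arena %u\n", arena_ind);
    return 0;
}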
void
arena_boot(void) {
	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
#define REGIND_bin_yes(index, reg_size) \
div_init(&arena_binind_div_info[(index)], (reg_size));
#define REGIND_bin_no(index, reg_size)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
lg_delta_lookup) \
REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta))
SIZE_CLASSES
#undef REGIND_bin_yes
#undef REGIND_bin_no
#undef SC
}
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
	if (config_stats) {
		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
	extents_prefork(tsdn, &arena->extents_dirty);
	extents_prefork(tsdn, &arena->extents_muzzy);
	extents_prefork(tsdn, &arena->extents_retained);
}

void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
}

void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
	base_prefork(tsdn, arena->base);
}

void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->large_mtx);
}

void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
	for (unsigned i = 0; i < NBINS; i++) {
		bin_prefork(tsdn, &arena->bins[i]);
	}
}
void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	for (i = 0; i < NBINS; i++) {
		bin_postfork_parent(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
	base_postfork_parent(tsdn, arena->base);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
	extents_postfork_parent(tsdn, &arena->extents_dirty);
	extents_postfork_parent(tsdn, &arena->extents_muzzy);
	extents_postfork_parent(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, false);
	}
	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, true);
	}
	if (config_stats) {
		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
		if (tcache != NULL && tcache->arena == arena) {
			ql_elm_new(tcache, link);
			ql_tail_insert(&arena->tcache_ql, tcache, link);
			cache_bin_array_descriptor_init(
			    &tcache->cache_bin_array_descriptor,
			    tcache->bins_small, tcache->bins_large);
			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
			    &tcache->cache_bin_array_descriptor, link);
		}
	}

	for (i = 0; i < NBINS; i++) {
		bin_postfork_child(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
	base_postfork_child(tsdn, arena->base);
	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
	extents_postfork_child(tsdn, &arena->extents_dirty);
	extents_postfork_child(tsdn, &arena->extents_muzzy);
	extents_postfork_child(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
	}
}
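The prefork/postfork hooks above exist so the allocator can be registered with pthread_atfork(): every mutex is acquired before fork() and released or re-initialized afterwards, which keeps the child from inheriting a lock held by a thread that no longer exists there. A minimal standalone sketch of the same pattern, with a single hypothetical lock:

/* Standalone sketch of the fork-safety pattern behind the hooks above. */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void prefork(void)         { pthread_mutex_lock(&lock); }
static void postfork_parent(void) { pthread_mutex_unlock(&lock); }
static void postfork_child(void)  { pthread_mutex_unlock(&lock); }

static void install_fork_handlers(void) {
    /* prefork runs in the caller of fork(); the other two run in the
     * parent and the child respectively, right after the fork. */
    pthread_atfork(prefork, postfork_parent, postfork_child);
}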
deps/jemalloc/src/background_thread.c
deleted
100644 → 0
View file @
7ff7536e
#define JEMALLOC_BACKGROUND_THREAD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
/******************************************************************************/
/* Data. */
/* This option should be opt-in only. */
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT;

/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state.  Atomic because decay reads this w/o locking. */
atomic_b_t background_thread_enabled_state;
size_t n_background_threads;
size_t max_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;
/* False if no necessary runtime support. */
bool can_enable_background_thread;
/******************************************************************************/
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
#include <dlfcn.h>
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);

static void
pthread_create_wrapper_init(void) {
#ifdef JEMALLOC_LAZY_LOCK
	if (!isthreaded) {
		isthreaded = true;
	}
#endif
}

int
pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *__restrict arg) {
	pthread_create_wrapper_init();

	return pthread_create_fptr(thread, attr, start_routine, arg);
}
#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */

#ifndef JEMALLOC_BACKGROUND_THREAD
#define NOT_REACHED { not_reached(); }
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
bool background_thread_stats_read(tsdn_t *tsdn,
    background_thread_stats_t *stats) NOT_REACHED
void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
#undef NOT_REACHED
#else
static bool background_thread_enabled_at_fork;

static void
background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
	background_thread_wakeup_time_set(tsdn, info, 0);
	info->npages_to_purge_new = 0;
	if (config_stats) {
		info->tot_n_runs = 0;
		nstime_init(&info->tot_sleep_time, 0);
	}
}

static inline bool
set_current_thread_affinity(UNUSED int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
	cpu_set_t cpuset;
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);

	return (ret != 0);
#else
	return false;
#endif
}
/* Threshold for determining when to wake up the background thread. */
#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
#define BILLION UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
static inline size_t
decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
	size_t i;
	uint64_t sum = 0;
	for (i = 0; i < interval; i++) {
		sum += decay->backlog[i] * h_steps[i];
	}
	for (; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
	}

	return (size_t)(sum >> SMOOTHSTEP_BFP);
}
static uint64_t
arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
    extents_t *extents) {
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* Use minimal interval if decay is contended. */
		return BACKGROUND_THREAD_MIN_INTERVAL_NS;
	}

	uint64_t interval;
	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
	if (decay_time <= 0) {
		/* Purging is eagerly done or disabled currently. */
		interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
		goto label_done;
	}

	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
	assert(decay_interval_ns > 0);
	size_t npages = extents_npages_get(extents);
	if (npages == 0) {
		unsigned i;
		for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
			if (decay->backlog[i] > 0) {
				break;
			}
		}
		if (i == SMOOTHSTEP_NSTEPS) {
			/* No dirty pages recorded.  Sleep indefinitely. */
			interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
			goto label_done;
		}
	}
	if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		/* Use max interval. */
		interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
		goto label_done;
	}

	size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
	size_t ub = SMOOTHSTEP_NSTEPS;
	/* Minimal 2 intervals to ensure reaching next epoch deadline. */
	lb = (lb < 2) ? 2 : lb;
	if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
	    (lb + 2 > ub)) {
		interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
		goto label_done;
	}

	assert(lb + 2 <= ub);
	size_t npurge_lb, npurge_ub;
	npurge_lb = decay_npurge_after_interval(decay, lb);
	if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		interval = decay_interval_ns * lb;
		goto label_done;
	}
	npurge_ub = decay_npurge_after_interval(decay, ub);
	if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		interval = decay_interval_ns * ub;
		goto label_done;
	}

	unsigned n_search = 0;
	size_t target, npurge;
	while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
	    && (lb + 2 < ub)) {
		target = (lb + ub) / 2;
		npurge = decay_npurge_after_interval(decay, target);
		if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
			ub = target;
			npurge_ub = npurge;
		} else {
			lb = target;
			npurge_lb = npurge;
		}
		assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
	}
	interval = decay_interval_ns * (ub + lb) / 2;
label_done:
	interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
	    BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return interval;
}
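The loop above is a plain binary search over a monotonically non-decreasing function (pages that would be purged after a candidate interval), narrowing [lb, ub] until the purge counts at the bounds are within the threshold. A standalone sketch of the same shape follows; npurge_after() is a hypothetical stand-in for decay_npurge_after_interval().

/* Standalone sketch of the bounded binary search used above. */
#include <stddef.h>

/* Hypothetical monotone cost function. */
static size_t npurge_after(size_t interval) {
    return interval * 100;
}

static size_t purge_interval_search(size_t lb, size_t ub, size_t threshold) {
    size_t npurge_lb = npurge_after(lb);
    size_t npurge_ub = npurge_after(ub);
    while (npurge_lb + threshold < npurge_ub && lb + 2 < ub) {
        size_t mid = (lb + ub) / 2;
        size_t npurge = npurge_after(mid);
        if (npurge > threshold) {
            ub = mid;
            npurge_ub = npurge;
        } else {
            lb = mid;
            npurge_lb = npurge;
        }
    }
    /* Return the midpoint of the narrowed range, as the code above does. */
    return (lb + ub) / 2;
}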
/* Compute purge interval for background threads. */
static uint64_t
arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
	uint64_t i1, i2;
	i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
	    &arena->extents_dirty);
	if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
		return i1;
	}
	i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
	    &arena->extents_muzzy);

	return i1 < i2 ? i1 : i2;
}
static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
    uint64_t interval) {
	if (config_stats) {
		info->tot_n_runs++;
	}
	info->npages_to_purge_new = 0;

	struct timeval tv;
	/* Specific clock required by timedwait. */
	gettimeofday(&tv, NULL);
	nstime_t before_sleep;
	nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);

	int ret;
	if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
		assert(background_thread_indefinite_sleep(info));
		ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
		assert(ret == 0);
	} else {
		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
		    interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
		/* We need malloc clock (can be different from tv). */
		nstime_t next_wakeup;
		nstime_init(&next_wakeup, 0);
		nstime_update(&next_wakeup);
		nstime_iadd(&next_wakeup, interval);
		assert(nstime_ns(&next_wakeup) <
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
		background_thread_wakeup_time_set(tsdn, info,
		    nstime_ns(&next_wakeup));

		nstime_t ts_wakeup;
		nstime_copy(&ts_wakeup, &before_sleep);
		nstime_iadd(&ts_wakeup, interval);
		struct timespec ts;
		ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
		ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);

		assert(!background_thread_indefinite_sleep(info));
		ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
		assert(ret == ETIMEDOUT || ret == 0);
		background_thread_wakeup_time_set(tsdn, info,
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	}
	if (config_stats) {
		gettimeofday(&tv, NULL);
		nstime_t after_sleep;
		nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
		if (nstime_compare(&after_sleep, &before_sleep) > 0) {
			nstime_subtract(&after_sleep, &before_sleep);
			nstime_add(&info->tot_sleep_time, &after_sleep);
		}
	}
}

static bool
background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
	if (unlikely(info->state == background_thread_paused)) {
		malloc_mutex_unlock(tsdn, &info->mtx);
		/* Wait on global lock to update status. */
		malloc_mutex_lock(tsdn, &background_thread_lock);
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		malloc_mutex_lock(tsdn, &info->mtx);
		return true;
	}

	return false;
}

static inline void
background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
    unsigned ind) {
	uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
	unsigned narenas = narenas_total_get();

	for (unsigned i = ind; i < narenas; i += max_background_threads) {
		arena_t *arena = arena_get(tsdn, i, false);
		if (!arena) {
			continue;
		}
		arena_decay(tsdn, arena, true, false);
		if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
			/* Min interval will be used. */
			continue;
		}
		uint64_t interval = arena_decay_compute_purge_interval(tsdn,
		    arena);
		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
		if (min_interval > interval) {
			min_interval = interval;
		}
	}
	background_thread_sleep(tsdn, info, min_interval);
}
static bool
background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
	if (info == &background_thread_info[0]) {
		malloc_mutex_assert_owner(tsd_tsdn(tsd),
		    &background_thread_lock);
	} else {
		malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
		    &background_thread_lock);
	}

	pre_reentrancy(tsd, NULL);
	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	bool has_thread;
	assert(info->state != background_thread_paused);
	if (info->state == background_thread_started) {
		has_thread = true;
		info->state = background_thread_stopped;
		pthread_cond_signal(&info->cond);
	} else {
		has_thread = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

	if (!has_thread) {
		post_reentrancy(tsd);
		return false;
	}
	void *ret;
	if (pthread_join(info->thread, &ret)) {
		post_reentrancy(tsd);
		return true;
	}
	assert(ret == NULL);
	n_background_threads--;
	post_reentrancy(tsd);

	return false;
}

static void *background_thread_entry(void *ind_arg);
static int
background_thread_create_signals_masked(pthread_t *thread,
    const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
	/*
	 * Mask signals during thread creation so that the thread inherits
	 * an empty signal set.
	 */
	sigset_t set;
	sigfillset(&set);
	sigset_t oldset;
	int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
	if (mask_err != 0) {
		return mask_err;
	}
	int create_err = pthread_create_wrapper(thread, attr, start_routine,
	    arg);
	/*
	 * Restore the signal mask.  Failure to restore the signal mask here
	 * changes program behavior.
	 */
	int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
	if (restore_err != 0) {
		malloc_printf("<jemalloc>: background thread creation "
		    "failed (%d), and signal mask restoration failed "
		    "(%d)\n", create_err, restore_err);
		if (opt_abort) {
			abort();
		}
	}
	return create_err;
}
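Blocking every signal around thread creation, as above, is a standard way to make the new thread start with all signals masked; the caller's mask is restored immediately afterwards. A standalone sketch of the same idiom using only POSIX calls (the helper name is illustrative):

/* Standalone sketch: create a thread that inherits a fully blocked mask. */
#include <pthread.h>
#include <signal.h>

static int create_thread_signals_masked(pthread_t *thread,
    void *(*start_routine)(void *), void *arg) {
    sigset_t all, old;
    sigfillset(&all);
    if (pthread_sigmask(SIG_SETMASK, &all, &old) != 0) {
        return -1;
    }
    int err = pthread_create(thread, NULL, start_routine, arg);
    /* Restore the caller's mask regardless of whether creation worked. */
    pthread_sigmask(SIG_SETMASK, &old, NULL);
    return err;
}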
static bool
check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
    bool *created_threads) {
	bool ret = false;
	if (likely(*n_created == n_background_threads)) {
		return ret;
	}

	tsdn_t *tsdn = tsd_tsdn(tsd);
	malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
	for (unsigned i = 1; i < max_background_threads; i++) {
		if (created_threads[i]) {
			continue;
		}
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		/*
		 * In case of the background_thread_paused state because of
		 * arena reset, delay the creation.
		 */
		bool create = (info->state == background_thread_started);
		malloc_mutex_unlock(tsdn, &info->mtx);
		if (!create) {
			continue;
		}

		pre_reentrancy(tsd, NULL);
		int err = background_thread_create_signals_masked(&info->thread,
		    NULL, background_thread_entry, (void *)(uintptr_t)i);
		post_reentrancy(tsd);

		if (err == 0) {
			(*n_created)++;
			created_threads[i] = true;
		} else {
			malloc_printf("<jemalloc>: background thread "
			    "creation failed (%d)\n", err);
			if (opt_abort) {
				abort();
			}
		}
		/* Return to restart the loop since we unlocked. */
		ret = true;
		break;
	}
	malloc_mutex_lock(tsdn, &background_thread_info[0].mtx);

	return ret;
}
static void
background_thread0_work(tsd_t *tsd) {
	/* Thread0 is also responsible for launching / terminating threads. */
	VARIABLE_ARRAY(bool, created_threads, max_background_threads);
	unsigned i;
	for (i = 1; i < max_background_threads; i++) {
		created_threads[i] = false;
	}
	/* Start working, and create more threads when asked. */
	unsigned n_created = 1;
	while (background_thread_info[0].state != background_thread_stopped) {
		if (background_thread_pause_check(tsd_tsdn(tsd),
		    &background_thread_info[0])) {
			continue;
		}
		if (check_background_thread_creation(tsd, &n_created,
		    (bool *)&created_threads)) {
			continue;
		}
		background_work_sleep_once(tsd_tsdn(tsd),
		    &background_thread_info[0], 0);
	}

	/*
	 * Shut down other threads at exit.  Note that the ctl thread is holding
	 * the global background_thread mutex (and is waiting) for us.
	 */
	assert(!background_thread_enabled());
	for (i = 1; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		assert(info->state != background_thread_paused);
		if (created_threads[i]) {
			background_threads_disable_single(tsd, info);
		} else {
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			if (info->state != background_thread_stopped) {
				/* The thread was not created. */
				assert(info->state ==
				    background_thread_started);
				n_background_threads--;
				info->state = background_thread_stopped;
			}
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
	}
	background_thread_info[0].state = background_thread_stopped;
	assert(n_background_threads == 1);
}

static void
background_work(tsd_t *tsd, unsigned ind) {
	background_thread_info_t *info = &background_thread_info[ind];

	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
	    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	if (ind == 0) {
		background_thread0_work(tsd);
	} else {
		while (info->state != background_thread_stopped) {
			if (background_thread_pause_check(tsd_tsdn(tsd),
			    info)) {
				continue;
			}
			background_work_sleep_once(tsd_tsdn(tsd), info, ind);
		}
	}
	assert(info->state == background_thread_stopped);
	background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
static void *
background_thread_entry(void *ind_arg) {
	unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
	assert(thread_ind < max_background_threads);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
	pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#endif
	if (opt_percpu_arena != percpu_arena_disabled) {
		set_current_thread_affinity((int)thread_ind);
	}
	/*
	 * Start periodic background work.  We use internal tsd which avoids
	 * side effects, for example triggering new arena creation (which in
	 * turn triggers another background thread creation).
	 */
	background_work(tsd_internal_fetch(), thread_ind);
	assert(pthread_equal(pthread_self(),
	    background_thread_info[thread_ind].thread));

	return NULL;
}

static void
background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
	info->state = background_thread_started;
	background_thread_info_init(tsd_tsdn(tsd), info);
	n_background_threads++;
}
/* Create a new background thread if needed. */
bool
background_thread_create(tsd_t *tsd, unsigned arena_ind) {
	assert(have_background_thread);
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	/* We create at most NCPUs threads. */
	size_t thread_ind = arena_ind % max_background_threads;
	background_thread_info_t *info = &background_thread_info[thread_ind];

	bool need_new_thread;
	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	need_new_thread = background_thread_enabled() &&
	    (info->state == background_thread_stopped);
	if (need_new_thread) {
		background_thread_init(tsd, info);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	if (!need_new_thread) {
		return false;
	}
	if (arena_ind != 0) {
		/* Threads are created asynchronously by Thread 0. */
		background_thread_info_t *t0 = &background_thread_info[0];
		malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
		assert(t0->state == background_thread_started);
		pthread_cond_signal(&t0->cond);
		malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);

		return false;
	}

	pre_reentrancy(tsd, NULL);
	/*
	 * To avoid complications (besides reentrancy), create internal
	 * background threads with the underlying pthread_create.
	 */
	int err = background_thread_create_signals_masked(&info->thread, NULL,
	    background_thread_entry, (void *)thread_ind);
	post_reentrancy(tsd);

	if (err != 0) {
		malloc_printf("<jemalloc>: arena 0 background thread creation "
		    "failed (%d)\n", err);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		info->state = background_thread_stopped;
		n_background_threads--;
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

		return true;
	}

	return false;
}
bool
background_threads_enable(tsd_t *tsd) {
	assert(n_background_threads == 0);
	assert(background_thread_enabled());
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	VARIABLE_ARRAY(bool, marked, max_background_threads);
	unsigned i, nmarked;
	for (i = 0; i < max_background_threads; i++) {
		marked[i] = false;
	}
	nmarked = 0;
	/* Thread 0 is required and created at the end. */
	marked[0] = true;
	/* Mark the threads we need to create for thread 0. */
	unsigned n = narenas_total_get();
	for (i = 1; i < n; i++) {
		if (marked[i % max_background_threads] ||
		    arena_get(tsd_tsdn(tsd), i, false) == NULL) {
			continue;
		}
		background_thread_info_t *info = &background_thread_info[
		    i % max_background_threads];
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		assert(info->state == background_thread_stopped);
		background_thread_init(tsd, info);
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		marked[i % max_background_threads] = true;
		if (++nmarked == max_background_threads) {
			break;
		}
	}

	return background_thread_create(tsd, 0);
}

bool
background_threads_disable(tsd_t *tsd) {
	assert(!background_thread_enabled());
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	/* Thread 0 will be responsible for terminating other threads. */
	if (background_threads_disable_single(tsd,
	    &background_thread_info[0])) {
		return true;
	}
	assert(n_background_threads == 0);

	return false;
}
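The enable/disable paths above are normally reached through the public mallctl interface rather than called directly. The following is a hedged usage sketch: it assumes the jemalloc 5.1+ control names "max_background_threads" and "background_thread" and a je_-prefixed build (plain mallctl() otherwise).

/* Hedged usage sketch: cap and then enable background purging threads. */
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

static int enable_background_threads(size_t max_threads) {
    if (je_mallctl("max_background_threads", NULL, NULL, &max_threads,
        sizeof(max_threads)) != 0) {
        return -1;
    }
    bool enable = true;
    return je_mallctl("background_thread", NULL, NULL, &enable,
        sizeof(enable));
}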
/* Check if we need to signal the background thread early. */
void
background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) {
	background_thread_info_t *info = arena_background_thread_info_get(
	    arena);
	if (malloc_mutex_trylock(tsdn, &info->mtx)) {
		/*
		 * Background thread may hold the mutex for a long period of
		 * time.  We'd like to avoid the variance on application
		 * threads.  So keep this non-blocking, and leave the work to a
		 * future epoch.
		 */
		return;
	}

	if (info->state != background_thread_started) {
		goto label_done;
	}
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		goto label_done;
	}

	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
	if (decay_time <= 0) {
		/* Purging is eagerly done or disabled currently. */
		goto label_done_unlock2;
	}
	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
	assert(decay_interval_ns > 0);

	nstime_t diff;
	nstime_init(&diff, background_thread_wakeup_time_get(info));
	if (nstime_compare(&diff, &decay->epoch) <= 0) {
		goto label_done_unlock2;
	}
	nstime_subtract(&diff, &decay->epoch);
	if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
		goto label_done_unlock2;
	}

	if (npages_new > 0) {
		size_t n_epoch = (size_t)(nstime_ns(&diff) /
		    decay_interval_ns);
		/*
		 * Compute how many new pages we would need to purge by the next
		 * wakeup, which is used to determine if we should signal the
		 * background thread.
		 */
		uint64_t npurge_new;
		if (n_epoch >= SMOOTHSTEP_NSTEPS) {
			npurge_new = npages_new;
		} else {
			uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
			assert(h_steps_max >=
			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
			npurge_new = npages_new * (h_steps_max -
			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
			npurge_new >>= SMOOTHSTEP_BFP;
		}
		info->npages_to_purge_new += npurge_new;
	}

	bool should_signal;
	if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		should_signal = true;
	} else if (unlikely(background_thread_indefinite_sleep(info)) &&
	    (extents_npages_get(&arena->extents_dirty) > 0 ||
	    extents_npages_get(&arena->extents_muzzy) > 0 ||
	    info->npages_to_purge_new > 0)) {
		should_signal = true;
	} else {
		should_signal = false;
	}

	if (should_signal) {
		info->npages_to_purge_new = 0;
		pthread_cond_signal(&info->cond);
	}
label_done_unlock2:
	malloc_mutex_unlock(tsdn, &decay->mtx);
label_done:
	malloc_mutex_unlock(tsdn, &info->mtx);
}
void
background_thread_prefork0(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &background_thread_lock);
	background_thread_enabled_at_fork = background_thread_enabled();
}

void
background_thread_prefork1(tsdn_t *tsdn) {
	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
	}
}

void
background_thread_postfork_parent(tsdn_t *tsdn) {
	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_postfork_parent(tsdn,
		    &background_thread_info[i].mtx);
	}
	malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
}

void
background_thread_postfork_child(tsdn_t *tsdn) {
	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_postfork_child(tsdn,
		    &background_thread_info[i].mtx);
	}
	malloc_mutex_postfork_child(tsdn, &background_thread_lock);
	if (!background_thread_enabled_at_fork) {
		return;
	}

	/* Clear background_thread state (reset to disabled for child). */
	malloc_mutex_lock(tsdn, &background_thread_lock);
	n_background_threads = 0;
	background_thread_enabled_set(tsdn, false);
	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		info->state = background_thread_stopped;
		int ret = pthread_cond_init(&info->cond, NULL);
		assert(ret == 0);
		background_thread_info_init(tsdn, info);
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
	malloc_mutex_unlock(tsdn, &background_thread_lock);
}
bool
background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
	assert(config_stats);
	malloc_mutex_lock(tsdn, &background_thread_lock);
	if (!background_thread_enabled()) {
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		return true;
	}

	stats->num_threads = n_background_threads;
	uint64_t num_runs = 0;
	nstime_init(&stats->run_interval, 0);
	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		if (malloc_mutex_trylock(tsdn, &info->mtx)) {
			/*
			 * Each background thread run may take a long time;
			 * avoid waiting on the stats if the thread is active.
			 */
			continue;
		}
		if (info->state != background_thread_stopped) {
			num_runs += info->tot_n_runs;
			nstime_add(&stats->run_interval, &info->tot_sleep_time);
		}
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
	stats->num_runs = num_runs;
	if (num_runs > 0) {
		nstime_idivide(&stats->run_interval, num_runs);
	}
	malloc_mutex_unlock(tsdn, &background_thread_lock);

	return false;
}
#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
#undef BILLION
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
static bool
pthread_create_fptr_init(void) {
	if (pthread_create_fptr != NULL) {
		return false;
	}
	pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
	if (pthread_create_fptr == NULL) {
		can_enable_background_thread = false;
		if (config_lazy_lock || opt_background_thread) {
			malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
			    "\"pthread_create\")\n");
			abort();
		}
	} else {
		can_enable_background_thread = true;
	}

	return false;
}
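Looking up the real pthread_create with dlsym(RTLD_NEXT, ...), as above, is the standard interposition idiom when a library also provides a wrapper for the same symbol. A standalone sketch of that lookup (GNU/Linux, link with -ldl on older glibc; the helper name is illustrative):

/* Standalone sketch: resolve the next pthread_create in the link order. */
#define _GNU_SOURCE /* for RTLD_NEXT */
#include <dlfcn.h>
#include <pthread.h>
#include <stdio.h>

static int (*real_pthread_create)(pthread_t *, const pthread_attr_t *,
    void *(*)(void *), void *);

static int resolve_real_pthread_create(void) {
    if (real_pthread_create != NULL) {
        return 0;
    }
    real_pthread_create = (int (*)(pthread_t *, const pthread_attr_t *,
        void *(*)(void *), void *))dlsym(RTLD_NEXT, "pthread_create");
    if (real_pthread_create == NULL) {
        fprintf(stderr, "dlsym(RTLD_NEXT, \"pthread_create\") failed\n");
        return -1;
    }
    return 0;
}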
/*
* When lazy lock is enabled, we need to make sure setting isthreaded before
* taking any background_thread locks. This is called early in ctl (instead of
* wait for the pthread_create calls to trigger) because the mutex is required
* before creating background threads.
*/
void
background_thread_ctl_init(tsdn_t *tsdn) {
	malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
	pthread_create_fptr_init();
	pthread_create_wrapper_init();
#endif
}

#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */
bool
background_thread_boot0(void) {
	if (!have_background_thread && opt_background_thread) {
		malloc_printf("<jemalloc>: option background_thread currently "
		    "supports pthread only\n");
		return true;
	}
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
	if ((config_lazy_lock || opt_background_thread) &&
	    pthread_create_fptr_init()) {
		return true;
	}
#endif
	return false;
}
bool
background_thread_boot1(tsdn_t *tsdn) {
#ifdef JEMALLOC_BACKGROUND_THREAD
	assert(have_background_thread);
	assert(narenas_total_get() > 0);

	if (opt_max_background_threads == MAX_BACKGROUND_THREAD_LIMIT &&
	    ncpus < MAX_BACKGROUND_THREAD_LIMIT) {
		opt_max_background_threads = ncpus;
	}
	max_background_threads = opt_max_background_threads;

	background_thread_enabled_set(tsdn, opt_background_thread);
	if (malloc_mutex_init(&background_thread_lock,
	    "background_thread_global",
	    WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
	    b0get(), opt_max_background_threads *
	    sizeof(background_thread_info_t), CACHELINE);
	if (background_thread_info == NULL) {
		return true;
	}

	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		/* Thread mutex is rank_inclusive because of thread0. */
		if (malloc_mutex_init(&info->mtx, "background_thread",
		    WITNESS_RANK_BACKGROUND_THREAD,
		    malloc_mutex_address_ordered)) {
			return true;
		}
		if (pthread_cond_init(&info->cond, NULL)) {
			return true;
		}
		malloc_mutex_lock(tsdn, &info->mtx);
		info->state = background_thread_stopped;
		background_thread_info_init(tsdn, info);
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
#endif

	return false;
}
deps/jemalloc/src/base.c
deleted
100644 → 0
View file @
7ff7536e
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"
/******************************************************************************/
/* Data. */
static base_t *b0;

metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;

const char *metadata_thp_mode_names[] = {
	"disabled",
	"auto",
	"always"
};
/******************************************************************************/
static inline bool
metadata_thp_madvise(void) {
	return (metadata_thp_enabled() &&
	    (init_system_thp_mode == thp_mode_default));
}

static void *
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind,
    size_t size) {
	void *addr;
	bool zero = true;
	bool commit = true;

	/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
	assert(size == HUGEPAGE_CEILING(size));
	size_t alignment = HUGEPAGE;
	if (extent_hooks == &extent_hooks_default) {
		addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
	} else {
		/* No arena context as we are creating new arenas. */
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
		    &zero, &commit, ind);
		post_reentrancy(tsd);
	}

	return addr;
}
static void
base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
    size_t size) {
	/*
	 * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
	 * stopping at first success.  This cascade is performed for consistency
	 * with the cascade in extent_dalloc_wrapper() because an application's
	 * custom hooks may not support e.g. dalloc.  This function is only ever
	 * called as a side effect of arena destruction, so although it might
	 * seem pointless to do anything besides dalloc here, the application
	 * may in fact want the end state of all associated virtual memory to be
	 * in some consistent-but-allocated state.
	 */
	if (extent_hooks == &extent_hooks_default) {
		if (!extent_dalloc_mmap(addr, size)) {
			goto label_done;
		}
		if (!pages_decommit(addr, size)) {
			goto label_done;
		}
		if (!pages_purge_forced(addr, size)) {
			goto label_done;
		}
		if (!pages_purge_lazy(addr, size)) {
			goto label_done;
		}
		/* Nothing worked.  This should never happen. */
		not_reached();
	} else {
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		if (extent_hooks->dalloc != NULL &&
		    !extent_hooks->dalloc(extent_hooks, addr, size, true,
		    ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->decommit != NULL &&
		    !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->purge_forced != NULL &&
		    !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
		    size, ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->purge_lazy != NULL &&
		    !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_post_reentrancy;
		}
		/* Nothing worked.  That's the application's problem. */
	label_post_reentrancy:
		post_reentrancy(tsd);
	}
label_done:
	if (metadata_thp_madvise()) {
		/* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
		    (size & HUGEPAGE_MASK) == 0);
		pages_nohuge(addr, size);
	}
}
static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
    size_t size) {
	size_t sn;

	sn = *extent_sn_next;
	(*extent_sn_next)++;

	extent_binit(extent, addr, size, sn);
}

static size_t
base_get_num_blocks(base_t *base, bool with_new_block) {
	base_block_t *b = base->blocks;
	assert(b != NULL);

	size_t n_blocks = with_new_block ? 2 : 1;
	while (b->next != NULL) {
		n_blocks++;
		b = b->next;
	}

	return n_blocks;
}
static void
base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
	assert(opt_metadata_thp == metadata_thp_auto);
	malloc_mutex_assert_owner(tsdn, &base->mtx);
	if (base->auto_thp_switched) {
		return;
	}
	/* Called when adding a new block. */
	bool should_switch;
	if (base_ind_get(base) != 0) {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD);
	} else {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD_A0);
	}
	if (!should_switch) {
		return;
	}

	base->auto_thp_switched = true;
	assert(!config_stats || base->n_thp == 0);
	/* Make the initial blocks THP lazily. */
	base_block_t *block = base->blocks;
	while (block != NULL) {
		assert((block->size & HUGEPAGE_MASK) == 0);
		pages_huge(block, block->size);
		if (config_stats) {
			base->n_thp += HUGEPAGE_CEILING(block->size -
			    extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
		}
		block = block->next;
		assert(block == NULL || (base_ind_get(base) == 0));
	}
}
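pages_huge() and pages_nohuge() above are jemalloc's internal wrappers; on Linux the underlying mechanism is typically madvise() with MADV_HUGEPAGE or MADV_NOHUGEPAGE on a hugepage-aligned range. A hedged standalone sketch of that assumption (Linux-specific; the helper name is illustrative):

/* Standalone sketch: hint the kernel about THP backing for a range. */
#include <sys/mman.h>

static int range_thp_hint(void *addr, size_t size, int enable) {
    /* Returns 0 on success, -1 with errno set on failure. */
    return madvise(addr, size, enable ? MADV_HUGEPAGE : MADV_NOHUGEPAGE);
}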
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
    size_t alignment) {
	void *ret;

	assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
	assert(size == ALIGNMENT_CEILING(size, alignment));

	*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
	    alignment) - (uintptr_t)extent_addr_get(extent);
	ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
	assert(extent_bsize_get(extent) >= *gap_size + size);
	extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
	    *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
	    extent_sn_get(extent));
	return ret;
}

static void
base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
    void *addr, size_t size) {
	if (extent_bsize_get(extent) > 0) {
		/*
		 * Compute the index for the largest size class that does not
		 * exceed extent's size.
		 */
		szind_t index_floor =
		    sz_size2index(extent_bsize_get(extent) + 1) - 1;
		extent_heap_insert(&base->avail[index_floor], extent);
	}

	if (config_stats) {
		base->allocated += size;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation.  Adjust n_thp similarly when
		 * metadata_thp is enabled.
		 */
		base->resident += PAGE_CEILING((uintptr_t)addr + size) -
		    PAGE_CEILING((uintptr_t)addr - gap_size);
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		if (metadata_thp_madvise() && (opt_metadata_thp ==
		    metadata_thp_always || base->auto_thp_switched)) {
			base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
			    - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
			    LG_HUGEPAGE;
			assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
		}
	}
}

static void *
base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
    size_t alignment) {
	void *ret;
	size_t gap_size;

	ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
	base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
	return ret;
}
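base_extent_bump_alloc_helper() is a bump allocator: it rounds the extent's cursor up to the requested alignment, hands out [aligned, aligned + size), and shrinks the remaining extent to what is left past the allocation. A standalone sketch of the same arithmetic over a plain address range, assuming a power-of-two alignment (bump_t and bump_alloc are illustrative names):

/* Standalone sketch of aligned bump allocation. */
#include <stdint.h>
#include <stddef.h>

typedef struct {
    uintptr_t cur;  /* first free byte */
    uintptr_t end;  /* one past the last usable byte */
} bump_t;

static void *bump_alloc(bump_t *b, size_t size, size_t alignment) {
    /* Round cur up to a multiple of alignment (power of two assumed). */
    uintptr_t aligned = (b->cur + (alignment - 1)) &
        ~(uintptr_t)(alignment - 1);
    if (aligned + size < aligned || aligned + size > b->end) {
        return NULL; /* overflow or out of space */
    }
    b->cur = aligned + size;
    return (void *)aligned;
}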
/*
* Allocate a block of virtual memory that is large enough to start with a
* base_block_t header, followed by an object of specified size and alignment.
* On success a pointer to the initialized base_block_t header is returned.
*/
static base_block_t *
base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
    unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
    size_t alignment) {
	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t header_size = sizeof(base_block_t);
	size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
	    header_size;
	/*
	 * Create increasingly larger blocks in order to limit the total number
	 * of disjoint virtual memory ranges.  Choose the next size in the page
	 * size class series (skipping size classes that are not a multiple of
	 * HUGEPAGE), or a size large enough to satisfy the requested size and
	 * alignment, whichever is larger.
	 */
	size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
	    + usize));
	pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
	    *pind_last;
	size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
	size_t block_size = (min_block_size > next_block_size) ? min_block_size
	    : next_block_size;
	base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
	    block_size);
	if (block == NULL) {
		return NULL;
	}

	if (metadata_thp_madvise()) {
		void *addr = (void *)block;
		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
		    (block_size & HUGEPAGE_MASK) == 0);
		if (opt_metadata_thp == metadata_thp_always) {
			pages_huge(addr, block_size);
		} else if (opt_metadata_thp == metadata_thp_auto &&
		    base != NULL) {
			/* base != NULL indicates this is not a new base. */
			malloc_mutex_lock(tsdn, &base->mtx);
			base_auto_thp_switch(tsdn, base);
			if (base->auto_thp_switched) {
				pages_huge(addr, block_size);
			}
			malloc_mutex_unlock(tsdn, &base->mtx);
		}
	}

	*pind_last = sz_psz2ind(block_size);
	block->size = block_size;
	block->next = NULL;
	assert(block_size >= header_size);
	base_extent_init(extent_sn_next, &block->extent,
	    (void *)((uintptr_t)block + header_size), block_size - header_size);
	return block;
}
/*
* Allocate an extent that is at least as large as specified size, with
* specified alignment.
*/
static extent_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &base->mtx);

	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	/*
	 * Drop mutex during base_block_alloc(), because an extent hook will be
	 * called.
	 */
	malloc_mutex_unlock(tsdn, &base->mtx);
	base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
	    base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
	    alignment);
	malloc_mutex_lock(tsdn, &base->mtx);
	if (block == NULL) {
		return NULL;
	}
	block->next = base->blocks;
	base->blocks = block;
	if (config_stats) {
		base->allocated += sizeof(base_block_t);
		base->resident += PAGE_CEILING(sizeof(base_block_t));
		base->mapped += block->size;
		if (metadata_thp_madvise() &&
		    !(opt_metadata_thp == metadata_thp_auto
		      && !base->auto_thp_switched)) {
			assert(base->n_thp > 0);
			base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
			    LG_HUGEPAGE;
		}
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	return &block->extent;
}

base_t *
b0get(void) {
	return b0;
}
base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	pszind_t pind_last = 0;
	size_t extent_sn_next = 0;
	base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
	    &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
	if (block == NULL) {
		return NULL;
	}

	size_t gap_size;
	size_t base_alignment = CACHELINE;
	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
	    &gap_size, base_size, base_alignment);
	base->ind = ind;
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
	    malloc_mutex_rank_exclusive)) {
		base_unmap(tsdn, extent_hooks, ind, block, block->size);
		return NULL;
	}
	base->pind_last = pind_last;
	base->extent_sn_next = extent_sn_next;
	base->blocks = block;
	base->auto_thp_switched = false;
	for (szind_t i = 0; i < NSIZES; i++) {
		extent_heap_new(&base->avail[i]);
	}
	if (config_stats) {
		base->allocated = sizeof(base_block_t);
		base->resident = PAGE_CEILING(sizeof(base_block_t));
		base->mapped = block->size;
		base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
		    metadata_thp_madvise() ? HUGEPAGE_CEILING(
		    sizeof(base_block_t)) >> LG_HUGEPAGE : 0;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
	    base_size);

	return base;
}
void
base_delete(tsdn_t *tsdn, base_t *base) {
    extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
    base_block_t *next = base->blocks;
    do {
        base_block_t *block = next;
        next = block->next;
        base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
            block->size);
    } while (next != NULL);
}
extent_hooks_t *
base_extent_hooks_get(base_t *base) {
    return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
        ATOMIC_ACQUIRE);
}

extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
    extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
    atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
    return old_extent_hooks;
}
static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
    size_t *esn) {
    alignment = QUANTUM_CEILING(alignment);
    size_t usize = ALIGNMENT_CEILING(size, alignment);
    size_t asize = usize + alignment - QUANTUM;

    extent_t *extent = NULL;
    malloc_mutex_lock(tsdn, &base->mtx);
    for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
        extent = extent_heap_remove_first(&base->avail[i]);
        if (extent != NULL) {
            /* Use existing space. */
            break;
        }
    }
    if (extent == NULL) {
        /* Try to allocate more space. */
        extent = base_extent_alloc(tsdn, base, usize, alignment);
    }
    void *ret;
    if (extent == NULL) {
        ret = NULL;
        goto label_return;
    }

    ret = base_extent_bump_alloc(base, extent, usize, alignment);
    if (esn != NULL) {
        *esn = extent_sn_get(extent);
    }
label_return:
    malloc_mutex_unlock(tsdn, &base->mtx);
    return ret;
}
/*
 * base_alloc() returns zeroed memory, which is always demand-zeroed for the
 * auto arenas, in order to make multi-page sparse data structures such as radix
 * tree nodes efficient with respect to physical memory usage.  Upon success a
 * pointer to at least size bytes with specified alignment is returned.  Note
 * that size is rounded up to the nearest multiple of alignment to avoid false
 * sharing.
 */
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
    return base_alloc_impl(tsdn, base, size, alignment, NULL);
}

extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
    size_t esn;
    extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
        CACHELINE, &esn);
    if (extent == NULL) {
        return NULL;
    }
    extent_esn_set(extent, esn);
    return extent;
}
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
    size_t *mapped, size_t *n_thp) {
    cassert(config_stats);

    malloc_mutex_lock(tsdn, &base->mtx);
    assert(base->allocated <= base->resident);
    assert(base->resident <= base->mapped);
    *allocated = base->allocated;
    *resident = base->resident;
    *mapped = base->mapped;
    *n_thp = base->n_thp;
    malloc_mutex_unlock(tsdn, &base->mtx);
}
void
base_prefork(tsdn_t *tsdn, base_t *base) {
    malloc_mutex_prefork(tsdn, &base->mtx);
}

void
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
    malloc_mutex_postfork_parent(tsdn, &base->mtx);
}

void
base_postfork_child(tsdn_t *tsdn, base_t *base) {
    malloc_mutex_postfork_child(tsdn, &base->mtx);
}
bool
base_boot(tsdn_t *tsdn) {
    b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
    return (b0 == NULL);
}
deps/jemalloc/src/bin.c deleted 100644 → 0
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/witness.h"
const bin_info_t bin_infos[NBINS] = {
#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
    {reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
    lg_delta_lookup) \
    BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \
        (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \
        (ndelta<<lg_delta)))
    SIZE_CLASSES
#undef BIN_INFO_bin_yes
#undef BIN_INFO_bin_no
#undef SC
};
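/*
 * Worked example (added for illustration; the concrete numbers come from the
 * generated size_classes.h, so treat them as an assumption): a small size
 * class defined as SC(.., lg_grp=3, lg_delta=3, ndelta=1, .., bin=yes,
 * pgs=1, ..) expands through BIN_INFO_bin_yes to
 *
 *     {16, 1 << LG_PAGE, (1 << LG_PAGE) / 16, BITMAP_INFO_INITIALIZER(...)},
 *
 * i.e. reg_size = (1U<<3) + (1<<3) = 16 bytes, slab_size = one page, and
 * nregs = slab_size / reg_size regions per slab.  Classes with bin=no expand
 * to nothing and therefore contribute no bin_infos entry.
 */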
bool
bin_init(bin_t *bin) {
    if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
        malloc_mutex_rank_exclusive)) {
        return true;
    }
    bin->slabcur = NULL;
    extent_heap_new(&bin->slabs_nonfull);
    extent_list_init(&bin->slabs_full);
    if (config_stats) {
        memset(&bin->stats, 0, sizeof(bin_stats_t));
    }
    return false;
}

void
bin_prefork(tsdn_t *tsdn, bin_t *bin) {
    malloc_mutex_prefork(tsdn, &bin->lock);
}

void
bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) {
    malloc_mutex_postfork_parent(tsdn, &bin->lock);
}

void
bin_postfork_child(tsdn_t *tsdn, bin_t *bin) {
    malloc_mutex_postfork_child(tsdn, &bin->lock);
}
deps/jemalloc/src/bitmap.c deleted 100644 → 0
#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
/******************************************************************************/
#ifdef BITMAP_USE_TREE
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
    unsigned i;
    size_t group_count;

    assert(nbits > 0);
    assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));

    /*
     * Compute the number of groups necessary to store nbits bits, and
     * progressively work upward through the levels until reaching a level
     * that requires only one group.
     */
    binfo->levels[0].group_offset = 0;
    group_count = BITMAP_BITS2GROUPS(nbits);
    for (i = 1; group_count > 1; i++) {
        assert(i < BITMAP_MAX_LEVELS);
        binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
            + group_count;
        group_count = BITMAP_BITS2GROUPS(group_count);
    }
    binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
        + group_count;
    assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
    binfo->nlevels = i;
    binfo->nbits = nbits;
}
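/*
 * Worked example (illustrative, assuming 64-bit groups so that
 * BITMAP_BITS2GROUPS(n) == ceil(n / 64)): for nbits = 10000, level 0 needs
 * ceil(10000/64) = 157 groups, level 1 needs ceil(157/64) = 3 groups, and
 * level 2 needs ceil(3/64) = 1 group, so nlevels = 3, the per-level
 * group_offsets are 0, 157 and 160, and the total group count recorded in
 * levels[nlevels].group_offset is 161.
 */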
static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
    return binfo->levels[binfo->nlevels].group_offset;
}
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
    size_t extra;
    unsigned i;

    /*
     * Bits are actually inverted with regard to the external bitmap
     * interface.
     */

    if (fill) {
        /* The "filled" bitmap starts out with all 0 bits. */
        memset(bitmap, 0, bitmap_size(binfo));
        return;
    }

    /*
     * The "empty" bitmap starts out with all 1 bits, except for trailing
     * unused bits (if any).  Note that each group uses bit 0 to correspond
     * to the first logical bit in the group, so extra bits are the most
     * significant bits of the last group.
     */
    memset(bitmap, 0xffU, bitmap_size(binfo));
    extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
        & BITMAP_GROUP_NBITS_MASK;
    if (extra != 0) {
        bitmap[binfo->levels[1].group_offset - 1] >>= extra;
    }
    for (i = 1; i < binfo->nlevels; i++) {
        size_t group_count = binfo->levels[i].group_offset -
            binfo->levels[i-1].group_offset;
        extra = (BITMAP_GROUP_NBITS - (group_count &
            BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
        if (extra != 0) {
            bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
        }
    }
}
#else
/* BITMAP_USE_TREE */
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
    assert(nbits > 0);
    assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));

    binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
    binfo->nbits = nbits;
}

static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
    return binfo->ngroups;
}
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
    size_t extra;

    if (fill) {
        memset(bitmap, 0, bitmap_size(binfo));
        return;
    }
    memset(bitmap, 0xffU, bitmap_size(binfo));
    extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
        & BITMAP_GROUP_NBITS_MASK;
    if (extra != 0) {
        bitmap[binfo->ngroups - 1] >>= extra;
    }
}
#endif
/* BITMAP_USE_TREE */
size_t
bitmap_size(const bitmap_info_t *binfo) {
    return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
}
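/*
 * Illustrative usage sketch (not part of the original file): size a bitmap
 * for a fixed number of bits with bitmap_info_init()/bitmap_size(), back it
 * with zeroed base_alloc() storage, and initialize it "empty".  The
 * example_* name and the choice of 512 bits are assumptions for the sketch.
 */
static bitmap_t *
example_bitmap_new(tsdn_t *tsdn, base_t *base, bitmap_info_t *binfo) {
    bitmap_info_init(binfo, 512);
    bitmap_t *bitmap = (bitmap_t *)base_alloc(tsdn, base,
        bitmap_size(binfo), CACHELINE);
    if (bitmap == NULL) {
        return NULL;
    }
    /* fill=false: every bit starts out available ("empty" bitmap). */
    bitmap_init(bitmap, binfo, false);
    return bitmap;
}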
deps/jemalloc/src/ckh.c deleted 100644 → 0
/*
*******************************************************************************
* Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
* hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
* functions are employed. The original cuckoo hashing algorithm was described
* in:
*
* Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms
* 51(2):122-144.
*
* Generalization of cuckoo hashing was discussed in:
*
* Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
* alternative to traditional hash tables. In Proceedings of the 7th
* Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
* January 2006.
*
* This implementation uses precisely two hash functions because that is the
* fewest that can work, and supporting multiple hashes is an implementation
* burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
* that shows approximate expected maximum load factors for various
* configurations:
*
* | #cells/bucket |
* #hashes | 1 | 2 | 4 | 8 |
* --------+-------+-------+-------+-------+
* 1 | 0.006 | 0.006 | 0.03 | 0.12 |
* 2 | 0.49 | 0.86 |>0.93< |>0.96< |
* 3 | 0.91 | 0.97 | 0.98 | 0.999 |
* 4 | 0.97 | 0.99 | 0.999 | |
*
* The number of cells per bucket is chosen such that a bucket fits in one cache
* line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
* respectively.
*
******************************************************************************/
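/*
 * Illustrative arithmetic (added note, not part of the original table): with
 * the 64-bit (4,2) configuration the expected maximum load factor is ~0.93,
 * so a table with 2^10 buckets holds 4 * 1024 = 4096 cells and can be
 * expected to accommodate roughly 0.93 * 4096 ~= 3809 items before an
 * insertion fails and the table has to be doubled.
 */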
#define JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
/******************************************************************************/
/*
* Search bucket for key and return the cell number if found; SIZE_T_MAX
* otherwise.
*/
static size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
    ckhc_t *cell;
    unsigned i;

    for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
        cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
        if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
            return (bucket << LG_CKH_BUCKET_CELLS) + i;
        }
    }
    return SIZE_T_MAX;
}
/*
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/
static size_t
ckh_isearch(ckh_t *ckh, const void *key) {
    size_t hashes[2], bucket, cell;

    assert(ckh != NULL);

    ckh->hash(key, hashes);

    /* Search primary bucket. */
    bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
    cell = ckh_bucket_search(ckh, bucket, key);
    if (cell != SIZE_T_MAX) {
        return cell;
    }

    /* Search secondary bucket. */
    bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
    cell = ckh_bucket_search(ckh, bucket, key);
    return cell;
}
static bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
    const void *data) {
    ckhc_t *cell;
    unsigned offset, i;

    /*
     * Cycle through the cells in the bucket, starting at a random position.
     * The randomness avoids worst-case search overhead as buckets fill up.
     */
    offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
        LG_CKH_BUCKET_CELLS);
    for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
        cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
            ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
        if (cell->key == NULL) {
            cell->key = key;
            cell->data = data;
            ckh->count++;
            return false;
        }
    }

    return true;
}
/*
* No space is available in bucket. Randomly evict an item, then try to find an
* alternate location for that item. Iteratively repeat this
* eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle.
*/
static bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
    void const **argdata) {
    const void *key, *data, *tkey, *tdata;
    ckhc_t *cell;
    size_t hashes[2], bucket, tbucket;
    unsigned i;

    bucket = argbucket;
    key = *argkey;
    data = *argdata;
    while (true) {
        /*
         * Choose a random item within the bucket to evict.  This is
         * critical to correct function, because without (eventually)
         * evicting all items within a bucket during iteration, it
         * would be possible to get stuck in an infinite loop if there
         * were an item for which both hashes indicated the same
         * bucket.
         */
        i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
            LG_CKH_BUCKET_CELLS);
        cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
        assert(cell->key != NULL);

        /* Swap cell->{key,data} and {key,data} (evict). */
        tkey = cell->key;
        tdata = cell->data;
        cell->key = key;
        cell->data = data;
        key = tkey;
        data = tdata;

#ifdef CKH_COUNT
        ckh->nrelocs++;
#endif

        /* Find the alternate bucket for the evicted item. */
        ckh->hash(key, hashes);
        tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
        if (tbucket == bucket) {
            tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
                - 1);
            /*
             * It may be that (tbucket == bucket) still, if the
             * item's hashes both indicate this bucket.  However,
             * we are guaranteed to eventually escape this bucket
             * during iteration, assuming pseudo-random item
             * selection (true randomness would make infinite
             * looping a remote possibility).  The reason we can
             * never get trapped forever is that there are two
             * cases:
             *
             * 1) This bucket == argbucket, so we will quickly
             *    detect an eviction cycle and terminate.
             * 2) An item was evicted to this bucket from another,
             *    which means that at least one item in this bucket
             *    has hashes that indicate distinct buckets.
             */
        }
        /* Check for a cycle. */
        if (tbucket == argbucket) {
            *argkey = key;
            *argdata = data;
            return true;
        }

        bucket = tbucket;
        if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
            return false;
        }
    }
}
static bool
ckh_try_insert(ckh_t *ckh, void const **argkey, void const **argdata) {
    size_t hashes[2], bucket;
    const void *key = *argkey;
    const void *data = *argdata;

    ckh->hash(key, hashes);

    /* Try to insert in primary bucket. */
    bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
    if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
        return false;
    }

    /* Try to insert in secondary bucket. */
    bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
    if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
        return false;
    }

    /*
     * Try to find a place for this item via iterative eviction/relocation.
     */
    return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
}
/*
* Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new.
*/
static bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
    size_t count, i, nins;
    const void *key, *data;

    count = ckh->count;
    ckh->count = 0;
    for (i = nins = 0; nins < count; i++) {
        if (aTab[i].key != NULL) {
            key = aTab[i].key;
            data = aTab[i].data;
            if (ckh_try_insert(ckh, &key, &data)) {
                ckh->count = count;
                return true;
            }
            nins++;
        }
    }

    return false;
}
static bool
ckh_grow(tsd_t *tsd, ckh_t *ckh) {
    bool ret;
    ckhc_t *tab, *ttab;
    unsigned lg_prevbuckets, lg_curcells;

#ifdef CKH_COUNT
    ckh->ngrows++;
#endif

    /*
     * It is possible (though unlikely, given well behaved hashes) that the
     * table will have to be doubled more than once in order to create a
     * usable table.
     */
    lg_prevbuckets = ckh->lg_curbuckets;
    lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
    while (true) {
        size_t usize;

        lg_curcells++;
        usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
        if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
            ret = true;
            goto label_return;
        }
        tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
            true, NULL, true, arena_ichoose(tsd, NULL));
        if (tab == NULL) {
            ret = true;
            goto label_return;
        }
        /* Swap in new table. */
        ttab = ckh->tab;
        ckh->tab = tab;
        tab = ttab;
        ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

        if (!ckh_rebuild(ckh, tab)) {
            idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
            break;
        }

        /* Rebuilding failed, so back out partially rebuilt table. */
        idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
        ckh->tab = tab;
        ckh->lg_curbuckets = lg_prevbuckets;
    }

    ret = false;
label_return:
    return ret;
}
static void
ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
    ckhc_t *tab, *ttab;
    size_t usize;
    unsigned lg_prevbuckets, lg_curcells;

    /*
     * It is possible (though unlikely, given well behaved hashes) that the
     * table rebuild will fail.
     */
    lg_prevbuckets = ckh->lg_curbuckets;
    lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
    usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
    if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
        return;
    }
    tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
        true, arena_ichoose(tsd, NULL));
    if (tab == NULL) {
        /*
         * An OOM error isn't worth propagating, since it doesn't
         * prevent this or future operations from proceeding.
         */
        return;
    }
    /* Swap in new table. */
    ttab = ckh->tab;
    ckh->tab = tab;
    tab = ttab;
    ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

    if (!ckh_rebuild(ckh, tab)) {
        idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
#ifdef CKH_COUNT
        ckh->nshrinks++;
#endif
        return;
    }

    /* Rebuilding failed, so back out partially rebuilt table. */
    idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
    ckh->tab = tab;
    ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
    ckh->nshrinkfails++;
#endif
}
bool
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
    ckh_keycomp_t *keycomp) {
    bool ret;
    size_t mincells, usize;
    unsigned lg_mincells;

    assert(minitems > 0);
    assert(hash != NULL);
    assert(keycomp != NULL);

#ifdef CKH_COUNT
    ckh->ngrows = 0;
    ckh->nshrinks = 0;
    ckh->nshrinkfails = 0;
    ckh->ninserts = 0;
    ckh->nrelocs = 0;
#endif
    ckh->prng_state = 42; /* Value doesn't really matter. */
    ckh->count = 0;

    /*
     * Find the minimum power of 2 that is large enough to fit minitems
     * entries.  We are using (2+,2) cuckoo hashing, which has an expected
     * maximum load factor of at least ~0.86, so 0.75 is a conservative load
     * factor that will typically allow mincells items to fit without ever
     * growing the table.
     */
    assert(LG_CKH_BUCKET_CELLS > 0);
    mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
    for (lg_mincells = LG_CKH_BUCKET_CELLS;
        (ZU(1) << lg_mincells) < mincells;
        lg_mincells++) {
        /* Do nothing. */
    }
    ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
    ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
    ckh->hash = hash;
    ckh->keycomp = keycomp;

    usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
    if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
        ret = true;
        goto label_return;
    }
    ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
        NULL, true, arena_ichoose(tsd, NULL));
    if (ckh->tab == NULL) {
        ret = true;
        goto label_return;
    }

    ret = false;
label_return:
    return ret;
}
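/*
 * Worked example for the sizing above (illustrative numbers): with
 * minitems = 10, mincells = ((10 + (3 - 10 % 3)) / 3) << 2 =
 * ((10 + 2) / 3) << 2 = 4 << 2 = 16, i.e. room for ~0.75 * 16 = 12 items at
 * the conservative load factor.  Assuming LG_CKH_BUCKET_CELLS = 2 (the
 * 64-bit (4,2) configuration), the loop then settles on lg_mincells = 4
 * (16 cells), giving lg_curbuckets = 2, i.e. four buckets of four cells.
 */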
void
ckh_delete(tsd_t *tsd, ckh_t *ckh) {
    assert(ckh != NULL);

#ifdef CKH_VERBOSE
    malloc_printf(
        "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
        " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
        " nrelocs: %"FMTu64"\n", __func__, ckh,
        (unsigned long long)ckh->ngrows,
        (unsigned long long)ckh->nshrinks,
        (unsigned long long)ckh->nshrinkfails,
        (unsigned long long)ckh->ninserts,
        (unsigned long long)ckh->nrelocs);
#endif

    idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
    if (config_debug) {
        memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
    }
}

size_t
ckh_count(ckh_t *ckh) {
    assert(ckh != NULL);

    return ckh->count;
}
bool
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
    size_t i, ncells;

    for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
        LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
        if (ckh->tab[i].key != NULL) {
            if (key != NULL) {
                *key = (void *)ckh->tab[i].key;
            }
            if (data != NULL) {
                *data = (void *)ckh->tab[i].data;
            }
            *tabind = i + 1;
            return false;
        }
    }

    return true;
}
bool
ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
    bool ret;

    assert(ckh != NULL);
    assert(ckh_search(ckh, key, NULL, NULL));

#ifdef CKH_COUNT
    ckh->ninserts++;
#endif

    while (ckh_try_insert(ckh, &key, &data)) {
        if (ckh_grow(tsd, ckh)) {
            ret = true;
            goto label_return;
        }
    }

    ret = false;
label_return:
    return ret;
}
bool
ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
    void **data) {
    size_t cell;

    assert(ckh != NULL);

    cell = ckh_isearch(ckh, searchkey);
    if (cell != SIZE_T_MAX) {
        if (key != NULL) {
            *key = (void *)ckh->tab[cell].key;
        }
        if (data != NULL) {
            *data = (void *)ckh->tab[cell].data;
        }
        ckh->tab[cell].key = NULL;
        ckh->tab[cell].data = NULL; /* Not necessary. */

        ckh->count--;
        /* Try to halve the table if it is less than 1/4 full. */
        if (ckh->count < (ZU(1) << (ckh->lg_curbuckets +
            LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets >
            ckh->lg_minbuckets) {
            /* Ignore error due to OOM. */
            ckh_shrink(tsd, ckh);
        }

        return false;
    }

    return true;
}
bool
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
    size_t cell;

    assert(ckh != NULL);

    cell = ckh_isearch(ckh, searchkey);
    if (cell != SIZE_T_MAX) {
        if (key != NULL) {
            *key = (void *)ckh->tab[cell].key;
        }
        if (data != NULL) {
            *data = (void *)ckh->tab[cell].data;
        }
        return false;
    }

    return true;
}
void
ckh_string_hash(const void *key, size_t r_hash[2]) {
    hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}

bool
ckh_string_keycomp(const void *k1, const void *k2) {
    assert(k1 != NULL);
    assert(k2 != NULL);

    return !strcmp((char *)k1, (char *)k2);
}

void
ckh_pointer_hash(const void *key, size_t r_hash[2]) {
    union {
        const void *v;
        size_t i;
    } u;

    assert(sizeof(u.v) == sizeof(u.i));
    u.v = key;
    hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
}

bool
ckh_pointer_keycomp(const void *k1, const void *k2) {
    return (k1 == k2);
}
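/*
 * Illustrative usage sketch (not part of the original file): a string-keyed
 * set built on the ckh API, using the string hash/keycomp helpers defined
 * above.  Return-value convention: ckh_new(), ckh_insert() and ckh_search()
 * all return false on success.  The example_* name is hypothetical.
 */
static bool
example_ckh_contains(tsd_t *tsd, const char *word) {
    ckh_t set;
    if (ckh_new(tsd, &set, 16, ckh_string_hash, ckh_string_keycomp)) {
        return false; /* OOM while creating the table. */
    }
    bool found = false;
    if (!ckh_insert(tsd, &set, "hello", NULL)) {
        found = !ckh_search(&set, word, NULL, NULL);
    }
    ckh_delete(tsd, &set);
    return found;
}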
deps/jemalloc/src/ctl.c deleted 100644 → 0
#define JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/
/* Data. */
/*
* ctl_mtx protects the following:
* - ctl_stats->*
*/
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
static ctl_stats_t *ctl_stats;
static ctl_arenas_t *ctl_arenas;
/******************************************************************************/
/* Helpers for named and indexed nodes. */
static const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node) {
    return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}

static const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, size_t index) {
    const ctl_named_node_t *children = ctl_named_node(node->children);

    return (children ? &children[index] : NULL);
}

static const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node) {
    return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
}
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
#define CTL_PROTO(n) \
static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \
static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
const size_t *mib, size_t miblen, size_t i);
CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(background_thread)
CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_prof_name)
CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_utrace)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_abort_conf)
CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_background_thread)
CTL_PROTO(opt_max_background_threads)
CTL_PROTO(opt_dirty_decay_ms)
CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_stats_print_opts)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_thp)
CTL_PROTO(opt_lg_extent_max_active_fit)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_prof_thread_active_init)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
CTL_PROTO(arena_i_initialized)
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_dirty_decay_ms)
CTL_PROTO(arena_i_muzzy_decay_ms)
CTL_PROTO(arena_i_extent_hooks)
CTL_PROTO(arena_i_retain_grow_limit)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_slab_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lextent_i_size)
INDEX_PROTO(arenas_lextent_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_dirty_decay_ms)
CTL_PROTO(arenas_muzzy_decay_ms)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlextents)
CTL_PROTO(arenas_create)
CTL_PROTO(arenas_lookup)
CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_gdump)
CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
CTL_PROTO(lg_prof_sample)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nslabs)
CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
CTL_PROTO(stats_arenas_i_bins_j_curslabs)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
INDEX_PROTO(stats_arenas_i_lextents_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_dirty_decay_ms)
CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_pmuzzy)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_dirty_npurge)
CTL_PROTO(stats_arenas_i_dirty_nmadvise)
CTL_PROTO(stats_arenas_i_dirty_purged)
CTL_PROTO(stats_arenas_i_muzzy_npurge)
CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
CTL_PROTO(stats_arenas_i_muzzy_purged)
CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes)
CTL_PROTO(stats_arenas_i_resident)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_background_thread_num_threads)
CTL_PROTO(stats_background_thread_num_runs)
CTL_PROTO(stats_background_thread_run_interval)
CTL_PROTO(stats_metadata)
CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
#define MUTEX_STATS_CTL_PROTO_GEN(n) \
CTL_PROTO(stats_##n##_num_ops) \
CTL_PROTO(stats_##n##_num_wait) \
CTL_PROTO(stats_##n##_num_spin_acq) \
CTL_PROTO(stats_##n##_num_owner_switch) \
CTL_PROTO(stats_##n##_total_wait_time) \
CTL_PROTO(stats_##n##_max_wait_time) \
CTL_PROTO(stats_##n##_max_num_thds)
/* Global mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
/* Per arena mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP
/* Arena bin mutexes. */
MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
#undef MUTEX_STATS_CTL_PROTO_GEN
CTL_PROTO(stats_mutexes_reset)
/******************************************************************************/
/* mallctl tree. */
#define NAME(n) {true}, n
#define CHILD(t, c) \
sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
(ctl_node_t *)c##_node, \
NULL
#define CTL(c) 0, NULL, c##_ctl
/*
* Only handles internal indexed nodes, since there are currently no external
* ones.
*/
#define INDEX(i) {false}, i##_index
static const ctl_named_node_t thread_tcache_node[] = {
    {NAME("enabled"), CTL(thread_tcache_enabled)},
    {NAME("flush"), CTL(thread_tcache_flush)}
};
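/*
 * Illustrative note (added): with the NAME/CTL helpers above, an entry such
 * as {NAME("enabled"), CTL(thread_tcache_enabled)} expands roughly to
 *
 *     {{true}, "enabled", 0, NULL, thread_tcache_enabled_ctl},
 *
 * i.e. a named leaf node whose handler is the thread_tcache_enabled_ctl()
 * function declared via CTL_PROTO earlier in this file.
 */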
static const ctl_named_node_t thread_prof_node[] = {
    {NAME("name"), CTL(thread_prof_name)},
    {NAME("active"), CTL(thread_prof_active)}
};
static const ctl_named_node_t thread_node[] = {
    {NAME("arena"), CTL(thread_arena)},
    {NAME("allocated"), CTL(thread_allocated)},
    {NAME("allocatedp"), CTL(thread_allocatedp)},
    {NAME("deallocated"), CTL(thread_deallocated)},
    {NAME("deallocatedp"), CTL(thread_deallocatedp)},
    {NAME("tcache"), CHILD(named, thread_tcache)},
    {NAME("prof"), CHILD(named, thread_prof)}
};
static const ctl_named_node_t config_node[] = {
    {NAME("cache_oblivious"), CTL(config_cache_oblivious)},
    {NAME("debug"), CTL(config_debug)},
    {NAME("fill"), CTL(config_fill)},
    {NAME("lazy_lock"), CTL(config_lazy_lock)},
    {NAME("malloc_conf"), CTL(config_malloc_conf)},
    {NAME("prof"), CTL(config_prof)},
    {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
    {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
    {NAME("stats"), CTL(config_stats)},
    {NAME("utrace"), CTL(config_utrace)},
    {NAME("xmalloc"), CTL(config_xmalloc)}
};
static const ctl_named_node_t opt_node[] = {
    {NAME("abort"), CTL(opt_abort)},
    {NAME("abort_conf"), CTL(opt_abort_conf)},
    {NAME("metadata_thp"), CTL(opt_metadata_thp)},
    {NAME("retain"), CTL(opt_retain)},
    {NAME("dss"), CTL(opt_dss)},
    {NAME("narenas"), CTL(opt_narenas)},
    {NAME("percpu_arena"), CTL(opt_percpu_arena)},
    {NAME("background_thread"), CTL(opt_background_thread)},
    {NAME("max_background_threads"), CTL(opt_max_background_threads)},
    {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
    {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
    {NAME("stats_print"), CTL(opt_stats_print)},
    {NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
    {NAME("junk"), CTL(opt_junk)},
    {NAME("zero"), CTL(opt_zero)},
    {NAME("utrace"), CTL(opt_utrace)},
    {NAME("xmalloc"), CTL(opt_xmalloc)},
    {NAME("tcache"), CTL(opt_tcache)},
    {NAME("thp"), CTL(opt_thp)},
    {NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
    {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
    {NAME("prof"), CTL(opt_prof)},
    {NAME("prof_prefix"), CTL(opt_prof_prefix)},
    {NAME("prof_active"), CTL(opt_prof_active)},
    {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
    {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
    {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
    {NAME("prof_gdump"), CTL(opt_prof_gdump)},
    {NAME("prof_final"), CTL(opt_prof_final)},
    {NAME("prof_leak"), CTL(opt_prof_leak)},
    {NAME("prof_accum"), CTL(opt_prof_accum)}
};
static const ctl_named_node_t tcache_node[] = {
    {NAME("create"), CTL(tcache_create)},
    {NAME("flush"), CTL(tcache_flush)},
    {NAME("destroy"), CTL(tcache_destroy)}
};
static const ctl_named_node_t arena_i_node[] = {
    {NAME("initialized"), CTL(arena_i_initialized)},
    {NAME("decay"), CTL(arena_i_decay)},
    {NAME("purge"), CTL(arena_i_purge)},
    {NAME("reset"), CTL(arena_i_reset)},
    {NAME("destroy"), CTL(arena_i_destroy)},
    {NAME("dss"), CTL(arena_i_dss)},
    {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
    {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
    {NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
    {NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)}
};
static const ctl_named_node_t super_arena_i_node[] = {
    {NAME(""), CHILD(named, arena_i)}
};
static const ctl_indexed_node_t arena_node[] = {
    {INDEX(arena_i)}
};
static const ctl_named_node_t arenas_bin_i_node[] = {
    {NAME("size"), CTL(arenas_bin_i_size)},
    {NAME("nregs"), CTL(arenas_bin_i_nregs)},
    {NAME("slab_size"), CTL(arenas_bin_i_slab_size)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
    {NAME(""), CHILD(named, arenas_bin_i)}
};
static const ctl_indexed_node_t arenas_bin_node[] = {
    {INDEX(arenas_bin_i)}
};
static const ctl_named_node_t arenas_lextent_i_node[] = {
    {NAME("size"), CTL(arenas_lextent_i_size)}
};
static const ctl_named_node_t super_arenas_lextent_i_node[] = {
    {NAME(""), CHILD(named, arenas_lextent_i)}
};
static const ctl_indexed_node_t arenas_lextent_node[] = {
    {INDEX(arenas_lextent_i)}
};
static const ctl_named_node_t arenas_node[] = {
    {NAME("narenas"), CTL(arenas_narenas)},
    {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
    {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
    {NAME("quantum"), CTL(arenas_quantum)},
    {NAME("page"), CTL(arenas_page)},
    {NAME("tcache_max"), CTL(arenas_tcache_max)},
    {NAME("nbins"), CTL(arenas_nbins)},
    {NAME("nhbins"), CTL(arenas_nhbins)},
    {NAME("bin"), CHILD(indexed, arenas_bin)},
    {NAME("nlextents"), CTL(arenas_nlextents)},
    {NAME("lextent"), CHILD(indexed, arenas_lextent)},
    {NAME("create"), CTL(arenas_create)},
    {NAME("lookup"), CTL(arenas_lookup)}
};
static const ctl_named_node_t prof_node[] = {
    {NAME("thread_active_init"), CTL(prof_thread_active_init)},
    {NAME("active"), CTL(prof_active)},
    {NAME("dump"), CTL(prof_dump)},
    {NAME("gdump"), CTL(prof_gdump)},
    {NAME("reset"), CTL(prof_reset)},
    {NAME("interval"), CTL(prof_interval)},
    {NAME("lg_sample"), CTL(lg_prof_sample)}
};
static const ctl_named_node_t stats_arenas_i_small_node[] = {
    {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
    {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
    {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
    {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
};
static const ctl_named_node_t stats_arenas_i_large_node[] = {
    {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
    {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
    {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
    {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
};
#define MUTEX_PROF_DATA_NODE(prefix) \
static const ctl_named_node_t stats_##prefix##_node[] = { \
{NAME("num_ops"), \
CTL(stats_##prefix##_num_ops)}, \
{NAME("num_wait"), \
CTL(stats_##prefix##_num_wait)}, \
{NAME("num_spin_acq"), \
CTL(stats_##prefix##_num_spin_acq)}, \
{NAME("num_owner_switch"), \
CTL(stats_##prefix##_num_owner_switch)}, \
{NAME("total_wait_time"), \
CTL(stats_##prefix##_total_wait_time)}, \
{NAME("max_wait_time"), \
CTL(stats_##prefix##_max_wait_time)}, \
{NAME("max_num_thds"), \
CTL(stats_##prefix##_max_num_thds)} \
/* Note that # of current waiting thread not provided. */
\
};
MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
    {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
    {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
    {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
    {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
    {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
    {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
    {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)},
    {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)},
    {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)},
    {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)}
};
static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
    {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
};
static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
    {INDEX(stats_arenas_i_bins_j)}
};
static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
    {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)},
    {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)},
    {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)},
    {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)}
};
static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
    {NAME(""), CHILD(named, stats_arenas_i_lextents_j)}
};
static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
    {INDEX(stats_arenas_i_lextents_j)}
};
#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP
static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
MUTEX_PROF_ARENA_MUTEXES
#undef OP
};
static const ctl_named_node_t stats_arenas_i_node[] = {
    {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
    {NAME("uptime"), CTL(stats_arenas_i_uptime)},
    {NAME("dss"), CTL(stats_arenas_i_dss)},
    {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
    {NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
    {NAME("pactive"), CTL(stats_arenas_i_pactive)},
    {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
    {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)},
    {NAME("mapped"), CTL(stats_arenas_i_mapped)},
    {NAME("retained"), CTL(stats_arenas_i_retained)},
    {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)},
    {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
    {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)},
    {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)},
    {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
    {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)},
    {NAME("base"), CTL(stats_arenas_i_base)},
    {NAME("internal"), CTL(stats_arenas_i_internal)},
    {NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
    {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
    {NAME("resident"), CTL(stats_arenas_i_resident)},
    {NAME("small"), CHILD(named, stats_arenas_i_small)},
    {NAME("large"), CHILD(named, stats_arenas_i_large)},
    {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
    {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
    {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
    {NAME(""), CHILD(named, stats_arenas_i)}
};
static const ctl_indexed_node_t stats_arenas_node[] = {
    {INDEX(stats_arenas_i)}
};
static const ctl_named_node_t stats_background_thread_node[] = {
    {NAME("num_threads"), CTL(stats_background_thread_num_threads)},
    {NAME("num_runs"), CTL(stats_background_thread_num_runs)},
    {NAME("run_interval"), CTL(stats_background_thread_run_interval)}
};
#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
static const ctl_named_node_t stats_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
    {NAME("reset"), CTL(stats_mutexes_reset)}
};
#undef MUTEX_PROF_DATA_NODE
static const ctl_named_node_t stats_node[] = {
    {NAME("allocated"), CTL(stats_allocated)},
    {NAME("active"), CTL(stats_active)},
    {NAME("metadata"), CTL(stats_metadata)},
    {NAME("metadata_thp"), CTL(stats_metadata_thp)},
    {NAME("resident"), CTL(stats_resident)},
    {NAME("mapped"), CTL(stats_mapped)},
    {NAME("retained"), CTL(stats_retained)},
    {NAME("background_thread"), CHILD(named, stats_background_thread)},
    {NAME("mutexes"), CHILD(named, stats_mutexes)},
    {NAME("arenas"), CHILD(indexed, stats_arenas)}
};
static const ctl_named_node_t root_node[] = {
    {NAME("version"), CTL(version)},
    {NAME("epoch"), CTL(epoch)},
    {NAME("background_thread"), CTL(background_thread)},
    {NAME("max_background_threads"), CTL(max_background_threads)},
    {NAME("thread"), CHILD(named, thread)},
    {NAME("config"), CHILD(named, config)},
    {NAME("opt"), CHILD(named, opt)},
    {NAME("tcache"), CHILD(named, tcache)},
    {NAME("arena"), CHILD(indexed, arena)},
    {NAME("arenas"), CHILD(named, arenas)},
    {NAME("prof"), CHILD(named, prof)},
    {NAME("stats"), CHILD(named, stats)}
};
static const ctl_named_node_t super_root_node[] = {
    {NAME(""), CHILD(named, root)}
};
#undef NAME
#undef CHILD
#undef CTL
#undef INDEX
/******************************************************************************/
/*
* Sets *dst + *src non-atomically. This is safe, since everything is
* synchronized by the ctl mutex.
*/
static void
ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
#ifdef JEMALLOC_ATOMIC_U64
    uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
    uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
    atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
#else
    *dst += *src;
#endif
}
/* Likewise: with ctl mutex synchronization, reading is simple. */
static uint64_t
ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
    return atomic_load_u64(p, ATOMIC_RELAXED);
#else
    return *p;
#endif
}
static void
accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
    size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
    size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
    atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
}
/******************************************************************************/
static unsigned
arenas_i2a_impl(size_t i, bool compat, bool validate) {
    unsigned a;

    switch (i) {
    case MALLCTL_ARENAS_ALL:
        a = 0;
        break;
    case MALLCTL_ARENAS_DESTROYED:
        a = 1;
        break;
    default:
        if (compat && i == ctl_arenas->narenas) {
            /*
             * Provide deprecated backward compatibility for
             * accessing the merged stats at index narenas rather
             * than via MALLCTL_ARENAS_ALL.  This is scheduled for
             * removal in 6.0.0.
             */
            a = 0;
        } else if (validate && i >= ctl_arenas->narenas) {
            a = UINT_MAX;
        } else {
            /*
             * This function should never be called for an index
             * more than one past the range of indices that have
             * initialized ctl data.
             */
            assert(i < ctl_arenas->narenas || (!validate &&
                i == ctl_arenas->narenas));
            a = (unsigned)i + 2;
        }
        break;
    }

    return a;
}
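/*
 * Worked example of the index mapping (added for illustration, assuming
 * ctl_arenas->narenas == 4): MALLCTL_ARENAS_ALL -> slot 0,
 * MALLCTL_ARENAS_DESTROYED -> slot 1, and real arena indices 0..3 -> slots
 * 2..5.  With compat=true the deprecated index 4 (== narenas) also maps to
 * slot 0; otherwise, with validate=true, any i >= 4 yields UINT_MAX.
 */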
static unsigned
arenas_i2a(size_t i) {
    return arenas_i2a_impl(i, true, false);
}
static ctl_arena_t *
arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
    ctl_arena_t *ret;

    assert(!compat || !init);

    ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
    if (init && ret == NULL) {
        if (config_stats) {
            struct container_s {
                ctl_arena_t ctl_arena;
                ctl_arena_stats_t astats;
            };
            struct container_s *cont =
                (struct container_s *)base_alloc(tsd_tsdn(tsd),
                b0get(), sizeof(struct container_s), QUANTUM);
            if (cont == NULL) {
                return NULL;
            }
            ret = &cont->ctl_arena;
            ret->astats = &cont->astats;
        } else {
            ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
                sizeof(ctl_arena_t), QUANTUM);
            if (ret == NULL) {
                return NULL;
            }
        }
        ret->arena_ind = (unsigned)i;
        ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
    }

    assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
    return ret;
}
static ctl_arena_t *
arenas_i(size_t i) {
    ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
    assert(ret != NULL);
    return ret;
}
static void
ctl_arena_clear(ctl_arena_t *ctl_arena) {
    ctl_arena->nthreads = 0;
    ctl_arena->dss = dss_prec_names[dss_prec_limit];
    ctl_arena->dirty_decay_ms = -1;
    ctl_arena->muzzy_decay_ms = -1;
    ctl_arena->pactive = 0;
    ctl_arena->pdirty = 0;
    ctl_arena->pmuzzy = 0;
    if (config_stats) {
        memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
        ctl_arena->astats->allocated_small = 0;
        ctl_arena->astats->nmalloc_small = 0;
        ctl_arena->astats->ndalloc_small = 0;
        ctl_arena->astats->nrequests_small = 0;
        memset(ctl_arena->astats->bstats, 0, NBINS *
            sizeof(bin_stats_t));
        memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) *
            sizeof(arena_stats_large_t));
    }
}
static void
ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
    unsigned i;

    if (config_stats) {
        arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
            &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
            &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
            &ctl_arena->pdirty, &ctl_arena->pmuzzy,
            &ctl_arena->astats->astats, ctl_arena->astats->bstats,
            ctl_arena->astats->lstats);

        for (i = 0; i < NBINS; i++) {
            ctl_arena->astats->allocated_small +=
                ctl_arena->astats->bstats[i].curregs *
                sz_index2size(i);
            ctl_arena->astats->nmalloc_small +=
                ctl_arena->astats->bstats[i].nmalloc;
            ctl_arena->astats->ndalloc_small +=
                ctl_arena->astats->bstats[i].ndalloc;
            ctl_arena->astats->nrequests_small +=
                ctl_arena->astats->bstats[i].nrequests;
        }
    } else {
        arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
            &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
            &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
            &ctl_arena->pdirty, &ctl_arena->pmuzzy);
    }
}
static void
ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
    bool destroyed) {
    unsigned i;

    if (!destroyed) {
        ctl_sdarena->nthreads += ctl_arena->nthreads;
        ctl_sdarena->pactive += ctl_arena->pactive;
        ctl_sdarena->pdirty += ctl_arena->pdirty;
        ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
    } else {
        assert(ctl_arena->nthreads == 0);
        assert(ctl_arena->pactive == 0);
        assert(ctl_arena->pdirty == 0);
        assert(ctl_arena->pmuzzy == 0);
    }

    if (config_stats) {
        ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
        ctl_arena_stats_t *astats = ctl_arena->astats;

        if (!destroyed) {
            accum_atomic_zu(&sdstats->astats.mapped,
                &astats->astats.mapped);
            accum_atomic_zu(&sdstats->astats.retained,
                &astats->astats.retained);
        }

        ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
            &astats->astats.decay_dirty.npurge);
        ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
            &astats->astats.decay_dirty.nmadvise);
        ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
            &astats->astats.decay_dirty.purged);

        ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
            &astats->astats.decay_muzzy.npurge);
        ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
            &astats->astats.decay_muzzy.nmadvise);
        ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
            &astats->astats.decay_muzzy.purged);

#define OP(mtx) malloc_mutex_prof_merge( \
    &(sdstats->astats.mutex_prof_data[ \
        arena_prof_mutex_##mtx]), \
    &(astats->astats.mutex_prof_data[ \
        arena_prof_mutex_##mtx]));
MUTEX_PROF_ARENA_MUTEXES
#undef OP

        if (!destroyed) {
            accum_atomic_zu(&sdstats->astats.base,
                &astats->astats.base);
            accum_atomic_zu(&sdstats->astats.internal,
                &astats->astats.internal);
            accum_atomic_zu(&sdstats->astats.resident,
                &astats->astats.resident);
            accum_atomic_zu(&sdstats->astats.metadata_thp,
                &astats->astats.metadata_thp);
        } else {
            assert(atomic_load_zu(&astats->astats.internal,
                ATOMIC_RELAXED) == 0);
        }

        if (!destroyed) {
            sdstats->allocated_small += astats->allocated_small;
        } else {
            assert(astats->allocated_small == 0);
        }
        sdstats->nmalloc_small += astats->nmalloc_small;
        sdstats->ndalloc_small += astats->ndalloc_small;
        sdstats->nrequests_small += astats->nrequests_small;

        if (!destroyed) {
            accum_atomic_zu(&sdstats->astats.allocated_large,
                &astats->astats.allocated_large);
        } else {
            assert(atomic_load_zu(&astats->astats.allocated_large,
                ATOMIC_RELAXED) == 0);
        }
        ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
            &astats->astats.nmalloc_large);
        ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
            &astats->astats.ndalloc_large);
        ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
            &astats->astats.nrequests_large);

        accum_atomic_zu(&sdstats->astats.tcache_bytes,
            &astats->astats.tcache_bytes);

        if (ctl_arena->arena_ind == 0) {
            sdstats->astats.uptime = astats->astats.uptime;
        }

        for (i = 0; i < NBINS; i++) {
            sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
            sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
            sdstats->bstats[i].nrequests +=
                astats->bstats[i].nrequests;
            if (!destroyed) {
                sdstats->bstats[i].curregs +=
                    astats->bstats[i].curregs;
            } else {
                assert(astats->bstats[i].curregs == 0);
            }
            sdstats->bstats[i].nfills += astats->bstats[i].nfills;
            sdstats->bstats[i].nflushes +=
                astats->bstats[i].nflushes;
            sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
            sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
            if (!destroyed) {
                sdstats->bstats[i].curslabs +=
                    astats->bstats[i].curslabs;
            } else {
                assert(astats->bstats[i].curslabs == 0);
            }
            malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
                &astats->bstats[i].mutex_data);
        }

        for (i = 0; i < NSIZES - NBINS; i++) {
            ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
                &astats->lstats[i].nmalloc);
            ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
                &astats->lstats[i].ndalloc);
            ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
                &astats->lstats[i].nrequests);
            if (!destroyed) {
                sdstats->lstats[i].curlextents +=
                    astats->lstats[i].curlextents;
            } else {
                assert(astats->lstats[i].curlextents == 0);
            }
        }
    }
}
static void
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
    unsigned i, bool destroyed) {
    ctl_arena_t *ctl_arena = arenas_i(i);

    ctl_arena_clear(ctl_arena);
    ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
    /* Merge into sum stats as well. */
    ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
}
static unsigned
ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
    unsigned arena_ind;
    ctl_arena_t *ctl_arena;

    if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
        NULL) {
        ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
        arena_ind = ctl_arena->arena_ind;
    } else {
        arena_ind = ctl_arenas->narenas;
    }

    /* Trigger stats allocation. */
    if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
        return UINT_MAX;
    }

    /* Initialize new arena. */
    if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
        return UINT_MAX;
    }

    if (arena_ind == ctl_arenas->narenas) {
        ctl_arenas->narenas++;
    }

    return arena_ind;
}
static void
ctl_background_thread_stats_read(tsdn_t *tsdn) {
    background_thread_stats_t *stats = &ctl_stats->background_thread;
    if (!have_background_thread ||
        background_thread_stats_read(tsdn, stats)) {
        memset(stats, 0, sizeof(background_thread_stats_t));
        nstime_init(&stats->run_interval, 0);
    }
}
static void
ctl_refresh(tsdn_t *tsdn) {
    unsigned i;
    ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
    VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);

    /*
     * Clear sum stats, since they will be merged into by
     * ctl_arena_refresh().
     */
    ctl_arena_clear(ctl_sarena);

    for (i = 0; i < ctl_arenas->narenas; i++) {
        tarenas[i] = arena_get(tsdn, i, false);
    }

    for (i = 0; i < ctl_arenas->narenas; i++) {
        ctl_arena_t *ctl_arena = arenas_i(i);
        bool initialized = (tarenas[i] != NULL);

        ctl_arena->initialized = initialized;
        if (initialized) {
            ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
                false);
        }
    }

    if (config_stats) {
        ctl_stats->allocated = ctl_sarena->astats->allocated_small +
            atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
                ATOMIC_RELAXED);
        ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
        ctl_stats->metadata = atomic_load_zu(
            &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
            atomic_load_zu(&ctl_sarena->astats->astats.internal,
                ATOMIC_RELAXED);
        ctl_stats->metadata_thp = atomic_load_zu(
            &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
        ctl_stats->resident = atomic_load_zu(
            &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
        ctl_stats->mapped = atomic_load_zu(
            &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
        ctl_stats->retained = atomic_load_zu(
            &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);

        ctl_background_thread_stats_read(tsdn);

#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \
    malloc_mutex_lock(tsdn, &mtx); \
    malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \
    malloc_mutex_unlock(tsdn, &mtx);
        if (config_prof && opt_prof) {
            READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
                bt2gctx_mtx);
        }
        if (have_background_thread) {
            READ_GLOBAL_MUTEX_PROF_DATA(
                global_prof_mutex_background_thread,
                background_thread_lock);
        } else {
            memset(&ctl_stats->mutex_prof_data[
                global_prof_mutex_background_thread], 0,
                sizeof(mutex_prof_data_t));
        }
        /* We own ctl mutex already. */
        malloc_mutex_prof_read(tsdn,
            &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
            &ctl_mtx);
#undef READ_GLOBAL_MUTEX_PROF_DATA
    }
    ctl_arenas->epoch++;
}
static bool
ctl_init(tsd_t *tsd) {
    bool ret;
    tsdn_t *tsdn = tsd_tsdn(tsd);

    malloc_mutex_lock(tsdn, &ctl_mtx);
    if (!ctl_initialized) {
        ctl_arena_t *ctl_sarena, *ctl_darena;
        unsigned i;

        /*
         * Allocate demand-zeroed space for pointers to the full
         * range of supported arena indices.
         */
        if (ctl_arenas == NULL) {
            ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn, b0get(),
                sizeof(ctl_arenas_t), QUANTUM);
            if (ctl_arenas == NULL) {
                ret = true;
                goto label_return;
            }
        }

        if (config_stats && ctl_stats == NULL) {
            ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
                sizeof(ctl_stats_t), QUANTUM);
            if (ctl_stats == NULL) {
                ret = true;
                goto label_return;
            }
        }

        /*
         * Allocate space for the current full range of arenas
         * here rather than doing it lazily elsewhere, in order
         * to limit when OOM-caused errors can occur.
         */
        if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
            true)) == NULL) {
            ret = true;
            goto label_return;
        }
        ctl_sarena->initialized = true;

        if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
            false, true)) == NULL) {
            ret = true;
            goto label_return;
        }
        ctl_arena_clear(ctl_darena);
        /*
         * Don't toggle ctl_darena to initialized until an arena is
         * actually destroyed, so that arena.<i>.initialized can be used
         * to query whether the stats are relevant.
         */

        ctl_arenas->narenas = narenas_total_get();
        for (i = 0; i < ctl_arenas->narenas; i++) {
            if (arenas_i_impl(tsd, i, false, true) == NULL) {
                ret = true;
                goto label_return;
            }
        }

        ql_new(&ctl_arenas->destroyed);
        ctl_refresh(tsdn);

        ctl_initialized = true;
    }

    ret = false;
label_return:
    malloc_mutex_unlock(tsdn, &ctl_mtx);
    return ret;
}
static int
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
    size_t *mibp, size_t *depthp) {
    int ret;
    const char *elm, *tdot, *dot;
    size_t elen, i, j;
    const ctl_named_node_t *node;

    elm = name;
    /* Equivalent to strchrnul(). */
    dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
    elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
    if (elen == 0) {
        ret = ENOENT;
        goto label_return;
    }
    node = super_root_node;
    for (i = 0; i < *depthp; i++) {
        assert(node);
        assert(node->nchildren > 0);
        if (ctl_named_node(node->children) != NULL) {
            const ctl_named_node_t *pnode = node;

            /* Children are named. */
            for (j = 0; j < node->nchildren; j++) {
                const ctl_named_node_t *child = ctl_named_children(node, j);
                if (strlen(child->name) == elen &&
                    strncmp(elm, child->name, elen) == 0) {
                    node = child;
                    if (nodesp != NULL) {
                        nodesp[i] = (const ctl_node_t *)node;
                    }
                    mibp[i] = j;
                    break;
                }
            }
            if (node == pnode) {
                ret = ENOENT;
                goto label_return;
            }
        } else {
            uintmax_t index;
            const ctl_indexed_node_t *inode;

            /* Children are indexed. */
            index = malloc_strtoumax(elm, NULL, 10);
            if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
                ret = ENOENT;
                goto label_return;
            }

            inode = ctl_indexed_node(node->children);
            node = inode->index(tsdn, mibp, *depthp, (size_t)index);
            if (node == NULL) {
                ret = ENOENT;
                goto label_return;
            }

            if (nodesp != NULL) {
                nodesp[i] = (const ctl_node_t *)node;
            }
            mibp[i] = (size_t)index;
        }

        if (node->ctl != NULL) {
            /* Terminal node. */
            if (*dot != '\0') {
                /*
                 * The name contains more elements than are
                 * in this path through the tree.
                 */
                ret = ENOENT;
                goto label_return;
            }
            /* Complete lookup successful. */
            *depthp = i + 1;
            break;
        }

        /* Update elm. */
        if (*dot == '\0') {
            /* No more elements. */
            ret = ENOENT;
            goto label_return;
        }
        elm = &dot[1];
        dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
            strchr(elm, '\0');
        elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
    }

    ret = 0;
label_return:
    return ret;
}
int
ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
    int ret;
    size_t depth;
    ctl_node_t const *nodes[CTL_MAX_DEPTH];
    size_t mib[CTL_MAX_DEPTH];
    const ctl_named_node_t *node;

    if (!ctl_initialized && ctl_init(tsd)) {
        ret = EAGAIN;
        goto label_return;
    }

    depth = CTL_MAX_DEPTH;
    ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
    if (ret != 0) {
        goto label_return;
    }

    node = ctl_named_node(nodes[depth - 1]);
    if (node != NULL && node->ctl) {
        ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
    } else {
        /* The name refers to a partial path through the ctl tree. */
        ret = ENOENT;
    }

label_return:
    return (ret);
}
int
ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
    int ret;

    if (!ctl_initialized && ctl_init(tsd)) {
        ret = EAGAIN;
        goto label_return;
    }

    ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
label_return:
    return (ret);
}
int
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
    int ret;
    const ctl_named_node_t *node;
    size_t i;

    if (!ctl_initialized && ctl_init(tsd)) {
        ret = EAGAIN;
        goto label_return;
    }

    /* Iterate down the tree. */
    node = super_root_node;
    for (i = 0; i < miblen; i++) {
        assert(node);
        assert(node->nchildren > 0);
        if (ctl_named_node(node->children) != NULL) {
            /* Children are named. */
            if (node->nchildren <= mib[i]) {
                ret = ENOENT;
                goto label_return;
            }
            node = ctl_named_children(node, mib[i]);
        } else {
            const ctl_indexed_node_t *inode;

            /* Indexed element. */
            inode = ctl_indexed_node(node->children);
            node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
            if (node == NULL) {
                ret = ENOENT;
                goto label_return;
            }
        }
    }

    /* Call the ctl function. */
    if (node && node->ctl) {
        ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
    } else {
        /* Partial MIB. */
        ret = ENOENT;
    }

label_return:
    return (ret);
}
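/*
 * Illustrative caller-side sketch (not part of this file): ctl_byname(),
 * ctl_nametomib() and ctl_bymib() back the public mallctl*() entry points,
 * so a consumer resolves a dotted name to a MIB once and then reads it
 * cheaply by MIB.  The "je_" prefix below is an assumption about how this
 * copy of jemalloc is built; a stock build uses the unprefixed names.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
print_narenas_by_mib(void) {
    size_t mib[2];
    size_t miblen = sizeof(mib) / sizeof(mib[0]);

    /* One name lookup (ctl_nametomib() under the hood)... */
    if (je_mallctlnametomib("arenas.narenas", mib, &miblen) != 0) {
        return 1;
    }

    /* ...then repeated reads go through ctl_bymib(). */
    unsigned narenas;
    size_t sz = sizeof(narenas);
    if (je_mallctlbymib(mib, miblen, (void *)&narenas, &sz, NULL, 0) != 0) {
        return 1;
    }
    printf("narenas: %u\n", narenas);
    return 0;
}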
bool
ctl_boot(void) {
    if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
        malloc_mutex_rank_exclusive)) {
        return true;
    }

    ctl_initialized = false;

    return false;
}

void
ctl_prefork(tsdn_t *tsdn) {
    malloc_mutex_prefork(tsdn, &ctl_mtx);
}

void
ctl_postfork_parent(tsdn_t *tsdn) {
    malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}

void
ctl_postfork_child(tsdn_t *tsdn) {
    malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
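/*
 * Minimal sketch of the fork-safety pattern these hooks follow (illustrative,
 * not from this file): acquire ctl_mtx before fork() and release it in both
 * parent and child, in the style of pthread_atfork(), so a forked child never
 * inherits a locked control mutex.  The example_* names are hypothetical
 * stand-ins for the allocator-wide prefork/postfork wrappers.
 */
#include <pthread.h>

static void example_prefork(void) { /* ctl_prefork(tsdn) would run here. */ }
static void example_postfork_parent(void) { /* ctl_postfork_parent(tsdn) */ }
static void example_postfork_child(void) { /* ctl_postfork_child(tsdn) */ }

static void
example_register_fork_hooks(void) {
    pthread_atfork(example_prefork, example_postfork_parent,
        example_postfork_child);
}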
/******************************************************************************/
/* *_ctl() functions. */
#define READONLY() do { \
if (newp != NULL || newlen != 0) { \
ret = EPERM; \
goto label_return; \
} \
} while (0)
#define WRITEONLY() do { \
if (oldp != NULL || oldlenp != NULL) { \
ret = EPERM; \
goto label_return; \
} \
} while (0)
#define READ_XOR_WRITE() do { \
if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
newlen != 0)) { \
ret = EPERM; \
goto label_return; \
} \
} while (0)
#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
? sizeof(t) : *oldlenp; \
memcpy(oldp, (void *)&(v), copylen); \
ret = EINVAL; \
goto label_return; \
} \
*(t *)oldp = (v); \
} \
} while (0)
#define WRITE(v, t) do { \
if (newp != NULL) { \
if (newlen != sizeof(t)) { \
ret = EINVAL; \
goto label_return; \
} \
(v) = *(t *)newp; \
} \
} while (0)
#define MIB_UNSIGNED(v, i) do { \
if (mib[i] > UINT_MAX) { \
ret = EFAULT; \
goto label_return; \
} \
v = (unsigned)mib[i]; \
} while (0)
/*
* There's a lot of code duplication in the following macros due to limitations
* in how nested cpp macros are expanded.
*/
#define CTL_RO_CLGEN(c, l, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
if (!(c)) { \
return ENOENT; \
} \
if (l) { \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
} \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
if (l) { \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
} \
return ret; \
}
#define CTL_RO_CGEN(c, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
if (!(c)) { \
return ENOENT; \
} \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return ret; \
}
#define CTL_RO_GEN(n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return ret; \
}
/*
* ctl_mtx is not acquired, under the assumption that no pertinent data will
* mutate during the call.
*/
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
if (!(c)) { \
return ENOENT; \
} \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
return ret; \
}
#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
return ret; \
}
#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
if (!(c)) { \
return ENOENT; \
} \
READONLY(); \
oldval = (m(tsd)); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
return ret; \
}
#define CTL_RO_CONFIG_GEN(n, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
READONLY(); \
oldval = n; \
READ(oldval, t); \
\
ret = 0; \
label_return: \
return ret; \
}
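/*
 * For reference, a hand expansion of one generator above (illustrative only;
 * the real function is produced by the preprocessor and is named
 * opt_narenas_ctl).  CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
 * becomes, roughly:
 */
static int
opt_narenas_ctl_expanded_example(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
    int ret;
    unsigned oldval;

    READONLY();              /* Any write attempt fails with EPERM. */
    oldval = (opt_narenas);  /* Snapshot the value... */
    READ(oldval, unsigned);  /* ...and copy it to the caller's oldp. */

    ret = 0;
label_return:
    return ret;
}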
/******************************************************************************/
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)

static int
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
    int ret;
    UNUSED uint64_t newval;

    malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
    WRITE(newval, uint64_t);
    if (newp != NULL) {
        ctl_refresh(tsd_tsdn(tsd));
    }
    READ(ctl_arenas->epoch, uint64_t);

    ret = 0;
label_return:
    malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
    return ret;
}
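/*
 * Illustrative consumer of the "epoch" node above (not part of this file):
 * writing any uint64_t to "epoch" runs ctl_refresh(), so subsequent stats.*
 * reads observe one consistent snapshot.  As before, the "je_" prefix is an
 * assumption about the build configuration.
 */
#include <jemalloc/jemalloc.h>

size_t
snapshot_active_bytes(void) {
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);
    /* Bump the epoch: refreshes the cached stats. */
    je_mallctl("epoch", (void *)&epoch, &sz, (void *)&epoch, sizeof(epoch));

    size_t active = 0;
    sz = sizeof(active);
    /* Read from the freshly merged snapshot. */
    je_mallctl("stats.active", (void *)&active, &sz, NULL, 0);
    return active;
}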
static
int
background_thread_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
bool
oldval
;
if
(
!
have_background_thread
)
{
return
ENOENT
;
}
background_thread_ctl_init
(
tsd_tsdn
(
tsd
));
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
background_thread_lock
);
if
(
newp
==
NULL
)
{
oldval
=
background_thread_enabled
();
READ
(
oldval
,
bool
);
}
else
{
if
(
newlen
!=
sizeof
(
bool
))
{
ret
=
EINVAL
;
goto
label_return
;
}
oldval
=
background_thread_enabled
();
READ
(
oldval
,
bool
);
bool
newval
=
*
(
bool
*
)
newp
;
if
(
newval
==
oldval
)
{
ret
=
0
;
goto
label_return
;
}
background_thread_enabled_set
(
tsd_tsdn
(
tsd
),
newval
);
if
(
newval
)
{
if
(
!
can_enable_background_thread
)
{
malloc_printf
(
"<jemalloc>: Error in dlsym("
"RTLD_NEXT,
\"
pthread_create
\"
). Cannot "
"enable background_thread
\n
"
);
ret
=
EFAULT
;
goto
label_return
;
}
if
(
background_threads_enable
(
tsd
))
{
ret
=
EFAULT
;
goto
label_return
;
}
}
else
{
if
(
background_threads_disable
(
tsd
))
{
ret
=
EFAULT
;
goto
label_return
;
}
}
}
ret
=
0
;
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
background_thread_lock
);
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
static
int
max_background_threads_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
size_t
oldval
;
if
(
!
have_background_thread
)
{
return
ENOENT
;
}
background_thread_ctl_init
(
tsd_tsdn
(
tsd
));
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
background_thread_lock
);
if
(
newp
==
NULL
)
{
oldval
=
max_background_threads
;
READ
(
oldval
,
size_t
);
}
else
{
if
(
newlen
!=
sizeof
(
size_t
))
{
ret
=
EINVAL
;
goto
label_return
;
}
oldval
=
max_background_threads
;
READ
(
oldval
,
size_t
);
size_t
newval
=
*
(
size_t
*
)
newp
;
if
(
newval
==
oldval
)
{
ret
=
0
;
goto
label_return
;
}
if
(
newval
>
opt_max_background_threads
)
{
ret
=
EINVAL
;
goto
label_return
;
}
if
(
background_thread_enabled
())
{
if
(
!
can_enable_background_thread
)
{
malloc_printf
(
"<jemalloc>: Error in dlsym("
"RTLD_NEXT,
\"
pthread_create
\"
). Cannot "
"enable background_thread
\n
"
);
ret
=
EFAULT
;
goto
label_return
;
}
background_thread_enabled_set
(
tsd_tsdn
(
tsd
),
false
);
if
(
background_threads_disable
(
tsd
))
{
ret
=
EFAULT
;
goto
label_return
;
}
max_background_threads
=
newval
;
background_thread_enabled_set
(
tsd_tsdn
(
tsd
),
true
);
if
(
background_threads_enable
(
tsd
))
{
ret
=
EFAULT
;
goto
label_return
;
}
}
else
{
max_background_threads
=
newval
;
}
}
ret
=
0
;
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
background_thread_lock
);
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
/******************************************************************************/
CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
CTL_RO_CONFIG_GEN(config_debug, bool)
CTL_RO_CONFIG_GEN(config_fill, bool)
CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
CTL_RO_CONFIG_GEN(config_prof, bool)
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
CTL_RO_CONFIG_GEN(config_stats, bool)
CTL_RO_CONFIG_GEN(config_utrace, bool)
CTL_RO_CONFIG_GEN(config_xmalloc, bool)
/******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
    const char *)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
    const char *)
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
    size_t)
CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
    opt_prof_thread_active_init, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval,
    ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
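/*
 * Illustrative read of one of the string-valued option nodes generated above
 * (not part of this file).  For "const char *" nodes the ctl layer copies the
 * pointer itself, so the caller receives a pointer to jemalloc's static
 * string rather than a private copy.  The "je_" prefix is again an assumption
 * about the build.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_dss_setting(void) {
    const char *dss;
    size_t sz = sizeof(dss);
    if (je_mallctl("opt.dss", (void *)&dss, &sz, NULL, 0) == 0) {
        printf("opt.dss: %s\n", dss);
    }
}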
/******************************************************************************/
static
int
thread_arena_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
arena_t
*
oldarena
;
unsigned
newind
,
oldind
;
oldarena
=
arena_choose
(
tsd
,
NULL
);
if
(
oldarena
==
NULL
)
{
return
EAGAIN
;
}
newind
=
oldind
=
arena_ind_get
(
oldarena
);
WRITE
(
newind
,
unsigned
);
READ
(
oldind
,
unsigned
);
if
(
newind
!=
oldind
)
{
arena_t
*
newarena
;
if
(
newind
>=
narenas_total_get
())
{
/* New arena index is out of range. */
ret
=
EFAULT
;
goto
label_return
;
}
if
(
have_percpu_arena
&&
PERCPU_ARENA_ENABLED
(
opt_percpu_arena
))
{
if
(
newind
<
percpu_arena_ind_limit
(
opt_percpu_arena
))
{
/*
* If perCPU arena is enabled, thread_arena
* control is not allowed for the auto arena
* range.
*/
ret
=
EPERM
;
goto
label_return
;
}
}
/* Initialize arena if necessary. */
newarena
=
arena_get
(
tsd_tsdn
(
tsd
),
newind
,
true
);
if
(
newarena
==
NULL
)
{
ret
=
EAGAIN
;
goto
label_return
;
}
/* Set new arena/tcache associations. */
arena_migrate
(
tsd
,
oldind
,
newind
);
if
(
tcache_available
(
tsd
))
{
tcache_arena_reassociate
(
tsd_tsdn
(
tsd
),
tsd_tcachep_get
(
tsd
),
newarena
);
}
}
ret
=
0
;
label_return:
return
ret
;
}
CTL_TSD_RO_NL_CGEN
(
config_stats
,
thread_allocated
,
tsd_thread_allocated_get
,
uint64_t
)
CTL_TSD_RO_NL_CGEN
(
config_stats
,
thread_allocatedp
,
tsd_thread_allocatedp_get
,
uint64_t
*
)
CTL_TSD_RO_NL_CGEN
(
config_stats
,
thread_deallocated
,
tsd_thread_deallocated_get
,
uint64_t
)
CTL_TSD_RO_NL_CGEN
(
config_stats
,
thread_deallocatedp
,
tsd_thread_deallocatedp_get
,
uint64_t
*
)
static
int
thread_tcache_enabled_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
bool
oldval
;
oldval
=
tcache_enabled_get
(
tsd
);
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
bool
))
{
ret
=
EINVAL
;
goto
label_return
;
}
tcache_enabled_set
(
tsd
,
*
(
bool
*
)
newp
);
}
READ
(
oldval
,
bool
);
ret
=
0
;
label_return:
return
ret
;
}
static
int
thread_tcache_flush_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
if
(
!
tcache_available
(
tsd
))
{
ret
=
EFAULT
;
goto
label_return
;
}
READONLY
();
WRITEONLY
();
tcache_flush
(
tsd
);
ret
=
0
;
label_return:
return
ret
;
}
static
int
thread_prof_name_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
if
(
!
config_prof
)
{
return
ENOENT
;
}
READ_XOR_WRITE
();
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
const
char
*
))
{
ret
=
EINVAL
;
goto
label_return
;
}
if
((
ret
=
prof_thread_name_set
(
tsd
,
*
(
const
char
**
)
newp
))
!=
0
)
{
goto
label_return
;
}
}
else
{
const
char
*
oldname
=
prof_thread_name_get
(
tsd
);
READ
(
oldname
,
const
char
*
);
}
ret
=
0
;
label_return:
return
ret
;
}
static
int
thread_prof_active_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
bool
oldval
;
if
(
!
config_prof
)
{
return
ENOENT
;
}
oldval
=
prof_thread_active_get
(
tsd
);
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
bool
))
{
ret
=
EINVAL
;
goto
label_return
;
}
if
(
prof_thread_active_set
(
tsd
,
*
(
bool
*
)
newp
))
{
ret
=
EAGAIN
;
goto
label_return
;
}
}
READ
(
oldval
,
bool
);
ret
=
0
;
label_return:
return
ret
;
}
/******************************************************************************/
static
int
tcache_create_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
tcache_ind
;
READONLY
();
if
(
tcaches_create
(
tsd
,
&
tcache_ind
))
{
ret
=
EFAULT
;
goto
label_return
;
}
READ
(
tcache_ind
,
unsigned
);
ret
=
0
;
label_return:
return
ret
;
}
static
int
tcache_flush_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
tcache_ind
;
WRITEONLY
();
tcache_ind
=
UINT_MAX
;
WRITE
(
tcache_ind
,
unsigned
);
if
(
tcache_ind
==
UINT_MAX
)
{
ret
=
EFAULT
;
goto
label_return
;
}
tcaches_flush
(
tsd
,
tcache_ind
);
ret
=
0
;
label_return:
return
ret
;
}
static
int
tcache_destroy_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
tcache_ind
;
WRITEONLY
();
tcache_ind
=
UINT_MAX
;
WRITE
(
tcache_ind
,
unsigned
);
if
(
tcache_ind
==
UINT_MAX
)
{
ret
=
EFAULT
;
goto
label_return
;
}
tcaches_destroy
(
tsd
,
tcache_ind
);
ret
=
0
;
label_return:
return
ret
;
}
/******************************************************************************/
static
int
arena_i_initialized_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
tsdn_t
*
tsdn
=
tsd_tsdn
(
tsd
);
unsigned
arena_ind
;
bool
initialized
;
READONLY
();
MIB_UNSIGNED
(
arena_ind
,
1
);
malloc_mutex_lock
(
tsdn
,
&
ctl_mtx
);
initialized
=
arenas_i
(
arena_ind
)
->
initialized
;
malloc_mutex_unlock
(
tsdn
,
&
ctl_mtx
);
READ
(
initialized
,
bool
);
ret
=
0
;
label_return:
return
ret
;
}
static
void
arena_i_decay
(
tsdn_t
*
tsdn
,
unsigned
arena_ind
,
bool
all
)
{
malloc_mutex_lock
(
tsdn
,
&
ctl_mtx
);
{
unsigned
narenas
=
ctl_arenas
->
narenas
;
/*
* Access via index narenas is deprecated, and scheduled for
* removal in 6.0.0.
*/
if
(
arena_ind
==
MALLCTL_ARENAS_ALL
||
arena_ind
==
narenas
)
{
unsigned
i
;
VARIABLE_ARRAY
(
arena_t
*
,
tarenas
,
narenas
);
for
(
i
=
0
;
i
<
narenas
;
i
++
)
{
tarenas
[
i
]
=
arena_get
(
tsdn
,
i
,
false
);
}
/*
* No further need to hold ctl_mtx, since narenas and
* tarenas contain everything needed below.
*/
malloc_mutex_unlock
(
tsdn
,
&
ctl_mtx
);
for
(
i
=
0
;
i
<
narenas
;
i
++
)
{
if
(
tarenas
[
i
]
!=
NULL
)
{
arena_decay
(
tsdn
,
tarenas
[
i
],
false
,
all
);
}
}
}
else
{
arena_t
*
tarena
;
assert
(
arena_ind
<
narenas
);
tarena
=
arena_get
(
tsdn
,
arena_ind
,
false
);
/* No further need to hold ctl_mtx. */
malloc_mutex_unlock
(
tsdn
,
&
ctl_mtx
);
if
(
tarena
!=
NULL
)
{
arena_decay
(
tsdn
,
tarena
,
false
,
all
);
}
}
}
}
static
int
arena_i_decay_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
;
READONLY
();
WRITEONLY
();
MIB_UNSIGNED
(
arena_ind
,
1
);
arena_i_decay
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
);
ret
=
0
;
label_return:
return
ret
;
}
static
int
arena_i_purge_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
;
READONLY
();
WRITEONLY
();
MIB_UNSIGNED
(
arena_ind
,
1
);
arena_i_decay
(
tsd_tsdn
(
tsd
),
arena_ind
,
true
);
ret
=
0
;
label_return:
return
ret
;
}
static
int
arena_i_reset_destroy_helper
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
,
unsigned
*
arena_ind
,
arena_t
**
arena
)
{
int
ret
;
READONLY
();
WRITEONLY
();
MIB_UNSIGNED
(
*
arena_ind
,
1
);
*
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
*
arena_ind
,
false
);
if
(
*
arena
==
NULL
||
arena_is_auto
(
*
arena
))
{
ret
=
EFAULT
;
goto
label_return
;
}
ret
=
0
;
label_return:
return
ret
;
}
static
void
arena_reset_prepare_background_thread
(
tsd_t
*
tsd
,
unsigned
arena_ind
)
{
/* Temporarily disable the background thread during arena reset. */
if
(
have_background_thread
)
{
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
background_thread_lock
);
if
(
background_thread_enabled
())
{
unsigned
ind
=
arena_ind
%
ncpus
;
background_thread_info_t
*
info
=
&
background_thread_info
[
ind
];
assert
(
info
->
state
==
background_thread_started
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
info
->
mtx
);
info
->
state
=
background_thread_paused
;
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
info
->
mtx
);
}
}
}
static
void
arena_reset_finish_background_thread
(
tsd_t
*
tsd
,
unsigned
arena_ind
)
{
if
(
have_background_thread
)
{
if
(
background_thread_enabled
())
{
unsigned
ind
=
arena_ind
%
ncpus
;
background_thread_info_t
*
info
=
&
background_thread_info
[
ind
];
assert
(
info
->
state
==
background_thread_paused
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
info
->
mtx
);
info
->
state
=
background_thread_started
;
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
info
->
mtx
);
}
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
background_thread_lock
);
}
}
static
int
arena_i_reset_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
;
arena_t
*
arena
;
ret
=
arena_i_reset_destroy_helper
(
tsd
,
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
,
&
arena_ind
,
&
arena
);
if
(
ret
!=
0
)
{
return
ret
;
}
arena_reset_prepare_background_thread
(
tsd
,
arena_ind
);
arena_reset
(
tsd
,
arena
);
arena_reset_finish_background_thread
(
tsd
,
arena_ind
);
return
ret
;
}
static
int
arena_i_destroy_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
;
arena_t
*
arena
;
ctl_arena_t
*
ctl_darena
,
*
ctl_arena
;
ret
=
arena_i_reset_destroy_helper
(
tsd
,
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
,
&
arena_ind
,
&
arena
);
if
(
ret
!=
0
)
{
goto
label_return
;
}
if
(
arena_nthreads_get
(
arena
,
false
)
!=
0
||
arena_nthreads_get
(
arena
,
true
)
!=
0
)
{
ret
=
EFAULT
;
goto
label_return
;
}
arena_reset_prepare_background_thread
(
tsd
,
arena_ind
);
/* Merge stats after resetting and purging arena. */
arena_reset
(
tsd
,
arena
);
arena_decay
(
tsd_tsdn
(
tsd
),
arena
,
false
,
true
);
ctl_darena
=
arenas_i
(
MALLCTL_ARENAS_DESTROYED
);
ctl_darena
->
initialized
=
true
;
ctl_arena_refresh
(
tsd_tsdn
(
tsd
),
arena
,
ctl_darena
,
arena_ind
,
true
);
/* Destroy arena. */
arena_destroy
(
tsd
,
arena
);
ctl_arena
=
arenas_i
(
arena_ind
);
ctl_arena
->
initialized
=
false
;
/* Record arena index for later recycling via arenas.create. */
ql_elm_new
(
ctl_arena
,
destroyed_link
);
ql_tail_insert
(
&
ctl_arenas
->
destroyed
,
ctl_arena
,
destroyed_link
);
arena_reset_finish_background_thread
(
tsd
,
arena_ind
);
assert
(
ret
==
0
);
label_return:
return
ret
;
}
static
int
arena_i_dss_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
const
char
*
dss
=
NULL
;
unsigned
arena_ind
;
dss_prec_t
dss_prec_old
=
dss_prec_limit
;
dss_prec_t
dss_prec
=
dss_prec_limit
;
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
WRITE
(
dss
,
const
char
*
);
MIB_UNSIGNED
(
arena_ind
,
1
);
if
(
dss
!=
NULL
)
{
int
i
;
bool
match
=
false
;
for
(
i
=
0
;
i
<
dss_prec_limit
;
i
++
)
{
if
(
strcmp
(
dss_prec_names
[
i
],
dss
)
==
0
)
{
dss_prec
=
i
;
match
=
true
;
break
;
}
}
if
(
!
match
)
{
ret
=
EINVAL
;
goto
label_return
;
}
}
/*
* Access via index narenas is deprecated, and scheduled for removal in
* 6.0.0.
*/
if
(
arena_ind
==
MALLCTL_ARENAS_ALL
||
arena_ind
==
ctl_arenas
->
narenas
)
{
if
(
dss_prec
!=
dss_prec_limit
&&
extent_dss_prec_set
(
dss_prec
))
{
ret
=
EFAULT
;
goto
label_return
;
}
dss_prec_old
=
extent_dss_prec_get
();
}
else
{
arena_t
*
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
);
if
(
arena
==
NULL
||
(
dss_prec
!=
dss_prec_limit
&&
arena_dss_prec_set
(
arena
,
dss_prec
)))
{
ret
=
EFAULT
;
goto
label_return
;
}
dss_prec_old
=
arena_dss_prec_get
(
arena
);
}
dss
=
dss_prec_names
[
dss_prec_old
];
READ
(
dss
,
const
char
*
);
ret
=
0
;
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
static
int
arena_i_decay_ms_ctl_impl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
,
bool
dirty
)
{
int
ret
;
unsigned
arena_ind
;
arena_t
*
arena
;
MIB_UNSIGNED
(
arena_ind
,
1
);
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
);
if
(
arena
==
NULL
)
{
ret
=
EFAULT
;
goto
label_return
;
}
if
(
oldp
!=
NULL
&&
oldlenp
!=
NULL
)
{
size_t
oldval
=
dirty
?
arena_dirty_decay_ms_get
(
arena
)
:
arena_muzzy_decay_ms_get
(
arena
);
READ
(
oldval
,
ssize_t
);
}
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
ssize_t
))
{
ret
=
EINVAL
;
goto
label_return
;
}
if
(
dirty
?
arena_dirty_decay_ms_set
(
tsd_tsdn
(
tsd
),
arena
,
*
(
ssize_t
*
)
newp
)
:
arena_muzzy_decay_ms_set
(
tsd_tsdn
(
tsd
),
arena
,
*
(
ssize_t
*
)
newp
))
{
ret
=
EFAULT
;
goto
label_return
;
}
}
ret
=
0
;
label_return:
return
ret
;
}
static
int
arena_i_dirty_decay_ms_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
return
arena_i_decay_ms_ctl_impl
(
tsd
,
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
,
true
);
}
static
int
arena_i_muzzy_decay_ms_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
return
arena_i_decay_ms_ctl_impl
(
tsd
,
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
,
false
);
}
static
int
arena_i_extent_hooks_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
;
arena_t
*
arena
;
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
MIB_UNSIGNED
(
arena_ind
,
1
);
if
(
arena_ind
<
narenas_total_get
())
{
extent_hooks_t
*
old_extent_hooks
;
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
);
if
(
arena
==
NULL
)
{
if
(
arena_ind
>=
narenas_auto
)
{
ret
=
EFAULT
;
goto
label_return
;
}
old_extent_hooks
=
(
extent_hooks_t
*
)
&
extent_hooks_default
;
READ
(
old_extent_hooks
,
extent_hooks_t
*
);
if
(
newp
!=
NULL
)
{
/* Initialize a new arena as a side effect. */
extent_hooks_t
*
new_extent_hooks
JEMALLOC_CC_SILENCE_INIT
(
NULL
);
WRITE
(
new_extent_hooks
,
extent_hooks_t
*
);
arena
=
arena_init
(
tsd_tsdn
(
tsd
),
arena_ind
,
new_extent_hooks
);
if
(
arena
==
NULL
)
{
ret
=
EFAULT
;
goto
label_return
;
}
}
}
else
{
if
(
newp
!=
NULL
)
{
extent_hooks_t
*
new_extent_hooks
JEMALLOC_CC_SILENCE_INIT
(
NULL
);
WRITE
(
new_extent_hooks
,
extent_hooks_t
*
);
old_extent_hooks
=
extent_hooks_set
(
tsd
,
arena
,
new_extent_hooks
);
READ
(
old_extent_hooks
,
extent_hooks_t
*
);
}
else
{
old_extent_hooks
=
extent_hooks_get
(
arena
);
READ
(
old_extent_hooks
,
extent_hooks_t
*
);
}
}
}
else
{
ret
=
EFAULT
;
goto
label_return
;
}
ret
=
0
;
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
static
int
arena_i_retain_grow_limit_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
;
arena_t
*
arena
;
if
(
!
opt_retain
)
{
/* Only relevant when retain is enabled. */
return
ENOENT
;
}
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
MIB_UNSIGNED
(
arena_ind
,
1
);
if
(
arena_ind
<
narenas_total_get
()
&&
(
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
))
!=
NULL
)
{
size_t
old_limit
,
new_limit
;
if
(
newp
!=
NULL
)
{
WRITE
(
new_limit
,
size_t
);
}
bool
err
=
arena_retain_grow_limit_get_set
(
tsd
,
arena
,
&
old_limit
,
newp
!=
NULL
?
&
new_limit
:
NULL
);
if
(
!
err
)
{
READ
(
old_limit
,
size_t
);
ret
=
0
;
}
else
{
ret
=
EFAULT
;
}
}
else
{
ret
=
EFAULT
;
}
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
static
const
ctl_named_node_t
*
arena_i_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
const
ctl_named_node_t
*
ret
;
malloc_mutex_lock
(
tsdn
,
&
ctl_mtx
);
switch
(
i
)
{
case
MALLCTL_ARENAS_ALL
:
case
MALLCTL_ARENAS_DESTROYED
:
break
;
default:
if
(
i
>
ctl_arenas
->
narenas
)
{
ret
=
NULL
;
goto
label_return
;
}
break
;
}
ret
=
super_arena_i_node
;
label_return:
malloc_mutex_unlock
(
tsdn
,
&
ctl_mtx
);
return
ret
;
}
/******************************************************************************/
static
int
arenas_narenas_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
narenas
;
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
READONLY
();
if
(
*
oldlenp
!=
sizeof
(
unsigned
))
{
ret
=
EINVAL
;
goto
label_return
;
}
narenas
=
ctl_arenas
->
narenas
;
READ
(
narenas
,
unsigned
);
ret
=
0
;
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
static
int
arenas_decay_ms_ctl_impl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
,
bool
dirty
)
{
int
ret
;
if
(
oldp
!=
NULL
&&
oldlenp
!=
NULL
)
{
size_t
oldval
=
(
dirty
?
arena_dirty_decay_ms_default_get
()
:
arena_muzzy_decay_ms_default_get
());
READ
(
oldval
,
ssize_t
);
}
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
ssize_t
))
{
ret
=
EINVAL
;
goto
label_return
;
}
if
(
dirty
?
arena_dirty_decay_ms_default_set
(
*
(
ssize_t
*
)
newp
)
:
arena_muzzy_decay_ms_default_set
(
*
(
ssize_t
*
)
newp
))
{
ret
=
EFAULT
;
goto
label_return
;
}
}
ret
=
0
;
label_return:
return
ret
;
}
static
int
arenas_dirty_decay_ms_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
return
arenas_decay_ms_ctl_impl
(
tsd
,
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
,
true
);
}
static
int
arenas_muzzy_decay_ms_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
return
arenas_decay_ms_ctl_impl
(
tsd
,
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
,
false
);
}
CTL_RO_NL_GEN
(
arenas_quantum
,
QUANTUM
,
size_t
)
CTL_RO_NL_GEN
(
arenas_page
,
PAGE
,
size_t
)
CTL_RO_NL_GEN
(
arenas_tcache_max
,
tcache_maxclass
,
size_t
)
CTL_RO_NL_GEN
(
arenas_nbins
,
NBINS
,
unsigned
)
CTL_RO_NL_GEN
(
arenas_nhbins
,
nhbins
,
unsigned
)
CTL_RO_NL_GEN
(
arenas_bin_i_size
,
bin_infos
[
mib
[
2
]].
reg_size
,
size_t
)
CTL_RO_NL_GEN
(
arenas_bin_i_nregs
,
bin_infos
[
mib
[
2
]].
nregs
,
uint32_t
)
CTL_RO_NL_GEN
(
arenas_bin_i_slab_size
,
bin_infos
[
mib
[
2
]].
slab_size
,
size_t
)
static
const
ctl_named_node_t
*
arenas_bin_i_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
if
(
i
>
NBINS
)
{
return
NULL
;
}
return
super_arenas_bin_i_node
;
}
CTL_RO_NL_GEN
(
arenas_nlextents
,
NSIZES
-
NBINS
,
unsigned
)
CTL_RO_NL_GEN
(
arenas_lextent_i_size
,
sz_index2size
(
NBINS
+
(
szind_t
)
mib
[
2
]),
size_t
)
static
const
ctl_named_node_t
*
arenas_lextent_i_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
if
(
i
>
NSIZES
-
NBINS
)
{
return
NULL
;
}
return
super_arenas_lextent_i_node
;
}
static
int
arenas_create_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
extent_hooks_t
*
extent_hooks
;
unsigned
arena_ind
;
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
extent_hooks
=
(
extent_hooks_t
*
)
&
extent_hooks_default
;
WRITE
(
extent_hooks
,
extent_hooks_t
*
);
if
((
arena_ind
=
ctl_arena_init
(
tsd
,
extent_hooks
))
==
UINT_MAX
)
{
ret
=
EAGAIN
;
goto
label_return
;
}
READ
(
arena_ind
,
unsigned
);
ret
=
0
;
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
static
int
arenas_lookup_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
;
void
*
ptr
;
extent_t
*
extent
;
arena_t
*
arena
;
ptr
=
NULL
;
ret
=
EINVAL
;
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
WRITE
(
ptr
,
void
*
);
extent
=
iealloc
(
tsd_tsdn
(
tsd
),
ptr
);
if
(
extent
==
NULL
)
goto
label_return
;
arena
=
extent_arena_get
(
extent
);
if
(
arena
==
NULL
)
goto
label_return
;
arena_ind
=
arena_ind_get
(
arena
);
READ
(
arena_ind
,
unsigned
);
ret
=
0
;
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
/******************************************************************************/
static
int
prof_thread_active_init_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
bool
oldval
;
if
(
!
config_prof
)
{
return
ENOENT
;
}
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
bool
))
{
ret
=
EINVAL
;
goto
label_return
;
}
oldval
=
prof_thread_active_init_set
(
tsd_tsdn
(
tsd
),
*
(
bool
*
)
newp
);
}
else
{
oldval
=
prof_thread_active_init_get
(
tsd_tsdn
(
tsd
));
}
READ
(
oldval
,
bool
);
ret
=
0
;
label_return:
return
ret
;
}
static
int
prof_active_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
bool
oldval
;
if
(
!
config_prof
)
{
return
ENOENT
;
}
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
bool
))
{
ret
=
EINVAL
;
goto
label_return
;
}
oldval
=
prof_active_set
(
tsd_tsdn
(
tsd
),
*
(
bool
*
)
newp
);
}
else
{
oldval
=
prof_active_get
(
tsd_tsdn
(
tsd
));
}
READ
(
oldval
,
bool
);
ret
=
0
;
label_return:
return
ret
;
}
static
int
prof_dump_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
const
char
*
filename
=
NULL
;
if
(
!
config_prof
)
{
return
ENOENT
;
}
WRITEONLY
();
WRITE
(
filename
,
const
char
*
);
if
(
prof_mdump
(
tsd
,
filename
))
{
ret
=
EFAULT
;
goto
label_return
;
}
ret
=
0
;
label_return:
return
ret
;
}
static
int
prof_gdump_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
bool
oldval
;
if
(
!
config_prof
)
{
return
ENOENT
;
}
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
bool
))
{
ret
=
EINVAL
;
goto
label_return
;
}
oldval
=
prof_gdump_set
(
tsd_tsdn
(
tsd
),
*
(
bool
*
)
newp
);
}
else
{
oldval
=
prof_gdump_get
(
tsd_tsdn
(
tsd
));
}
READ
(
oldval
,
bool
);
ret
=
0
;
label_return:
return
ret
;
}
static
int
prof_reset_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
size_t
lg_sample
=
lg_prof_sample
;
if
(
!
config_prof
)
{
return
ENOENT
;
}
WRITEONLY
();
WRITE
(
lg_sample
,
size_t
);
if
(
lg_sample
>=
(
sizeof
(
uint64_t
)
<<
3
))
{
lg_sample
=
(
sizeof
(
uint64_t
)
<<
3
)
-
1
;
}
prof_reset
(
tsd
,
lg_sample
);
ret
=
0
;
label_return:
return
ret
;
}
CTL_RO_NL_CGEN
(
config_prof
,
prof_interval
,
prof_interval
,
uint64_t
)
CTL_RO_NL_CGEN
(
config_prof
,
lg_prof_sample
,
lg_prof_sample
,
size_t
)
/******************************************************************************/
CTL_RO_CGEN
(
config_stats
,
stats_allocated
,
ctl_stats
->
allocated
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_active
,
ctl_stats
->
active
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_metadata
,
ctl_stats
->
metadata
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_metadata_thp
,
ctl_stats
->
metadata_thp
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_resident
,
ctl_stats
->
resident
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_mapped
,
ctl_stats
->
mapped
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_retained
,
ctl_stats
->
retained
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_background_thread_num_threads
,
ctl_stats
->
background_thread
.
num_threads
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_background_thread_num_runs
,
ctl_stats
->
background_thread
.
num_runs
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_background_thread_run_interval
,
nstime_ns
(
&
ctl_stats
->
background_thread
.
run_interval
),
uint64_t
)
CTL_RO_GEN
(
stats_arenas_i_dss
,
arenas_i
(
mib
[
2
])
->
dss
,
const
char
*
)
CTL_RO_GEN
(
stats_arenas_i_dirty_decay_ms
,
arenas_i
(
mib
[
2
])
->
dirty_decay_ms
,
ssize_t
)
CTL_RO_GEN
(
stats_arenas_i_muzzy_decay_ms
,
arenas_i
(
mib
[
2
])
->
muzzy_decay_ms
,
ssize_t
)
CTL_RO_GEN
(
stats_arenas_i_nthreads
,
arenas_i
(
mib
[
2
])
->
nthreads
,
unsigned
)
CTL_RO_GEN
(
stats_arenas_i_uptime
,
nstime_ns
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
uptime
),
uint64_t
)
CTL_RO_GEN
(
stats_arenas_i_pactive
,
arenas_i
(
mib
[
2
])
->
pactive
,
size_t
)
CTL_RO_GEN
(
stats_arenas_i_pdirty
,
arenas_i
(
mib
[
2
])
->
pdirty
,
size_t
)
CTL_RO_GEN
(
stats_arenas_i_pmuzzy
,
arenas_i
(
mib
[
2
])
->
pmuzzy
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_mapped
,
atomic_load_zu
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
mapped
,
ATOMIC_RELAXED
),
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_retained
,
atomic_load_zu
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
retained
,
ATOMIC_RELAXED
),
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_dirty_npurge
,
ctl_arena_stats_read_u64
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
decay_dirty
.
npurge
),
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_dirty_nmadvise
,
ctl_arena_stats_read_u64
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
decay_dirty
.
nmadvise
),
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_dirty_purged
,
ctl_arena_stats_read_u64
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
decay_dirty
.
purged
),
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_muzzy_npurge
,
ctl_arena_stats_read_u64
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
decay_muzzy
.
npurge
),
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_muzzy_nmadvise
,
ctl_arena_stats_read_u64
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
decay_muzzy
.
nmadvise
),
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_muzzy_purged
,
ctl_arena_stats_read_u64
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
decay_muzzy
.
purged
),
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_base
,
atomic_load_zu
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
base
,
ATOMIC_RELAXED
),
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_internal
,
atomic_load_zu
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
internal
,
ATOMIC_RELAXED
),
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_metadata_thp
,
atomic_load_zu
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
metadata_thp
,
ATOMIC_RELAXED
),
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_tcache_bytes
,
atomic_load_zu
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
tcache_bytes
,
ATOMIC_RELAXED
),
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_resident
,
atomic_load_zu
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
resident
,
ATOMIC_RELAXED
),
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_small_allocated
,
arenas_i
(
mib
[
2
])
->
astats
->
allocated_small
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_small_nmalloc
,
arenas_i
(
mib
[
2
])
->
astats
->
nmalloc_small
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_small_ndalloc
,
arenas_i
(
mib
[
2
])
->
astats
->
ndalloc_small
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_small_nrequests
,
arenas_i
(
mib
[
2
])
->
astats
->
nrequests_small
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_large_allocated
,
atomic_load_zu
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
allocated_large
,
ATOMIC_RELAXED
),
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_large_nmalloc
,
ctl_arena_stats_read_u64
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
nmalloc_large
),
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_large_ndalloc
,
ctl_arena_stats_read_u64
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
ndalloc_large
),
uint64_t
)
/*
* Note: "nmalloc" here instead of "nrequests" in the read. This is intentional.
*/
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_large_nrequests
,
ctl_arena_stats_read_u64
(
&
arenas_i
(
mib
[
2
])
->
astats
->
astats
.
nmalloc_large
),
uint64_t
)
/* Intentional. */
/* Lock profiling related APIs below. */
#define RO_MUTEX_CTL_GEN(n, l) \
CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \
l.n_lock_ops, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \
l.n_wait_times, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \
l.n_spin_acquired, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \
l.n_owner_switches, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \
nstime_ns(&l.tot_wait_time), uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \
nstime_ns(&l.max_wait_time), uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \
l.max_n_thds, uint32_t)
/* Global mutexes. */
#define OP(mtx) \
RO_MUTEX_CTL_GEN(mutexes_##mtx, \
ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
/* Per arena mutexes */
#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \
arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
MUTEX_PROF_ARENA_MUTEXES
#undef OP
/* tcache bin mutex */
RO_MUTEX_CTL_GEN
(
arenas_i_bins_j_mutex
,
arenas_i
(
mib
[
2
])
->
astats
->
bstats
[
mib
[
4
]].
mutex_data
)
#undef RO_MUTEX_CTL_GEN
/* Resets all mutex stats, including global, arena and bin mutexes. */
static
int
stats_mutexes_reset_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
if
(
!
config_stats
)
{
return
ENOENT
;
}
tsdn_t
*
tsdn
=
tsd_tsdn
(
tsd
);
#define MUTEX_PROF_RESET(mtx) \
malloc_mutex_lock(tsdn, &mtx); \
malloc_mutex_prof_data_reset(tsdn, &mtx); \
malloc_mutex_unlock(tsdn, &mtx);
/* Global mutexes: ctl and prof. */
MUTEX_PROF_RESET
(
ctl_mtx
);
if
(
have_background_thread
)
{
MUTEX_PROF_RESET
(
background_thread_lock
);
}
if
(
config_prof
&&
opt_prof
)
{
MUTEX_PROF_RESET
(
bt2gctx_mtx
);
}
/* Per arena mutexes. */
unsigned
n
=
narenas_total_get
();
for
(
unsigned
i
=
0
;
i
<
n
;
i
++
)
{
arena_t
*
arena
=
arena_get
(
tsdn
,
i
,
false
);
if
(
!
arena
)
{
continue
;
}
MUTEX_PROF_RESET
(
arena
->
large_mtx
);
MUTEX_PROF_RESET
(
arena
->
extent_avail_mtx
);
MUTEX_PROF_RESET
(
arena
->
extents_dirty
.
mtx
);
MUTEX_PROF_RESET
(
arena
->
extents_muzzy
.
mtx
);
MUTEX_PROF_RESET
(
arena
->
extents_retained
.
mtx
);
MUTEX_PROF_RESET
(
arena
->
decay_dirty
.
mtx
);
MUTEX_PROF_RESET
(
arena
->
decay_muzzy
.
mtx
);
MUTEX_PROF_RESET
(
arena
->
tcache_ql_mtx
);
MUTEX_PROF_RESET
(
arena
->
base
->
mtx
);
for
(
szind_t
i
=
0
;
i
<
NBINS
;
i
++
)
{
bin_t
*
bin
=
&
arena
->
bins
[
i
];
MUTEX_PROF_RESET
(
bin
->
lock
);
}
}
#undef MUTEX_PROF_RESET
return
0
;
}
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_bins_j_nmalloc
,
arenas_i
(
mib
[
2
])
->
astats
->
bstats
[
mib
[
4
]].
nmalloc
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_bins_j_ndalloc
,
arenas_i
(
mib
[
2
])
->
astats
->
bstats
[
mib
[
4
]].
ndalloc
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_bins_j_nrequests
,
arenas_i
(
mib
[
2
])
->
astats
->
bstats
[
mib
[
4
]].
nrequests
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_bins_j_curregs
,
arenas_i
(
mib
[
2
])
->
astats
->
bstats
[
mib
[
4
]].
curregs
,
size_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_bins_j_nfills
,
arenas_i
(
mib
[
2
])
->
astats
->
bstats
[
mib
[
4
]].
nfills
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_bins_j_nflushes
,
arenas_i
(
mib
[
2
])
->
astats
->
bstats
[
mib
[
4
]].
nflushes
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_bins_j_nslabs
,
arenas_i
(
mib
[
2
])
->
astats
->
bstats
[
mib
[
4
]].
nslabs
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_bins_j_nreslabs
,
arenas_i
(
mib
[
2
])
->
astats
->
bstats
[
mib
[
4
]].
reslabs
,
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_bins_j_curslabs
,
arenas_i
(
mib
[
2
])
->
astats
->
bstats
[
mib
[
4
]].
curslabs
,
size_t
)
static
const
ctl_named_node_t
*
stats_arenas_i_bins_j_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
j
)
{
if
(
j
>
NBINS
)
{
return
NULL
;
}
return
super_stats_arenas_i_bins_j_node
;
}
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_lextents_j_nmalloc
,
ctl_arena_stats_read_u64
(
&
arenas_i
(
mib
[
2
])
->
astats
->
lstats
[
mib
[
4
]].
nmalloc
),
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_lextents_j_ndalloc
,
ctl_arena_stats_read_u64
(
&
arenas_i
(
mib
[
2
])
->
astats
->
lstats
[
mib
[
4
]].
ndalloc
),
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_lextents_j_nrequests
,
ctl_arena_stats_read_u64
(
&
arenas_i
(
mib
[
2
])
->
astats
->
lstats
[
mib
[
4
]].
nrequests
),
uint64_t
)
CTL_RO_CGEN
(
config_stats
,
stats_arenas_i_lextents_j_curlextents
,
arenas_i
(
mib
[
2
])
->
astats
->
lstats
[
mib
[
4
]].
curlextents
,
size_t
)
static
const
ctl_named_node_t
*
stats_arenas_i_lextents_j_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
j
)
{
if
(
j
>
NSIZES
-
NBINS
)
{
return
NULL
;
}
return
super_stats_arenas_i_lextents_j_node
;
}
static
const
ctl_named_node_t
*
stats_arenas_i_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
const
ctl_named_node_t
*
ret
;
size_t
a
;
malloc_mutex_lock
(
tsdn
,
&
ctl_mtx
);
a
=
arenas_i2a_impl
(
i
,
true
,
true
);
if
(
a
==
UINT_MAX
||
!
ctl_arenas
->
arenas
[
a
]
->
initialized
)
{
ret
=
NULL
;
goto
label_return
;
}
ret
=
super_stats_arenas_i_node
;
label_return:
malloc_mutex_unlock
(
tsdn
,
&
ctl_mtx
);
return
ret
;
}
deps/jemalloc/src/div.c
deleted
100644 → 0
View file @
7ff7536e
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/assert.h"
/*
* Suppose we have n = q * d, all integers. We know n and d, and want q = n / d.
*
* For any k, we have (here, all division is exact; not C-style rounding):
* floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where
* r = (-2^k) mod d.
*
* Expanding this out:
* ... = floor(2^k / d * n / 2^k + r / d * n / 2^k)
* = floor(n / d + (r / d) * (n / 2^k)).
*
* The fractional part of n / d is 0 (because of the assumption that d divides n
* exactly), so we have:
* ... = n / d + floor((r / d) * (n / 2^k))
*
* So that our initial expression is equal to the quantity we seek, so long as
* (r / d) * (n / 2^k) < 1.
*
* r is a remainder mod d, so r < d and r / d < 1 always. We can make
* n / 2 ^ k < 1 by setting k = 32. This gets us a value of magic that works.
*/
void
div_init(div_info_t *div_info, size_t d) {
    /* Nonsensical. */
    assert(d != 0);
    /*
     * This would make the value of magic too high to fit into a uint32_t
     * (we would want magic = 2^32 exactly). This would mess with code gen
     * on 32-bit machines.
     */
    assert(d != 1);

    uint64_t two_to_k = ((uint64_t)1 << 32);
    uint32_t magic = (uint32_t)(two_to_k / d);

    /*
     * We want magic = ceil(2^k / d), but C gives us floor. We have to
     * increment it unless the result was exact (i.e. unless d is a power of
     * two).
     */
    if (two_to_k % d != 0) {
        magic++;
    }
    div_info->magic = magic;
#ifdef JEMALLOC_DEBUG
    div_info->d = d;
#endif
}
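/*
 * Worked example of the identity derived in the comment above (illustrative,
 * not part of div.c): with d = 24 and k = 32, magic = ceil(2^32 / 24)
 * = 178956971, and for any n < 2^32 that is an exact multiple of 24,
 *     n / 24 == (uint32_t)(((uint64_t)magic * n) >> 32).
 * The helper names below are hypothetical.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t
div_by_magic_example(uint32_t n, uint32_t magic) {
    /* Multiply by ceil(2^32 / d), then discard the low 32 bits. */
    return (uint32_t)(((uint64_t)magic * n) >> 32);
}

void
div_magic_check(void) {
    const uint32_t d = 24;
    /* ceil(2^32 / d), computed the same way div_init() does. */
    uint32_t magic = (uint32_t)((((uint64_t)1 << 32) + d - 1) / d);
    assert(div_by_magic_example(24 * 1000003u, magic) == 1000003u);
}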
deps/jemalloc/src/extent.c
deleted
100644 → 0
View file @
7ff7536e
#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
/******************************************************************************/
/* Data. */
rtree_t
extents_rtree
;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t
extent_mutex_pool
;
size_t
opt_lg_extent_max_active_fit
=
LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT
;
static
const
bitmap_info_t
extents_bitmap_info
=
BITMAP_INFO_INITIALIZER
(
NPSIZES
+
1
);
static
void
*
extent_alloc_default
(
extent_hooks_t
*
extent_hooks
,
void
*
new_addr
,
size_t
size
,
size_t
alignment
,
bool
*
zero
,
bool
*
commit
,
unsigned
arena_ind
);
static
bool
extent_dalloc_default
(
extent_hooks_t
*
extent_hooks
,
void
*
addr
,
size_t
size
,
bool
committed
,
unsigned
arena_ind
);
static
void
extent_destroy_default
(
extent_hooks_t
*
extent_hooks
,
void
*
addr
,
size_t
size
,
bool
committed
,
unsigned
arena_ind
);
static
bool
extent_commit_default
(
extent_hooks_t
*
extent_hooks
,
void
*
addr
,
size_t
size
,
size_t
offset
,
size_t
length
,
unsigned
arena_ind
);
static
bool
extent_commit_impl
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_hooks_t
**
r_extent_hooks
,
extent_t
*
extent
,
size_t
offset
,
size_t
length
,
bool
growing_retained
);
static
bool
extent_decommit_default
(
extent_hooks_t
*
extent_hooks
,
void
*
addr
,
size_t
size
,
size_t
offset
,
size_t
length
,
unsigned
arena_ind
);
#ifdef PAGES_CAN_PURGE_LAZY
static
bool
extent_purge_lazy_default
(
extent_hooks_t
*
extent_hooks
,
void
*
addr
,
size_t
size
,
size_t
offset
,
size_t
length
,
unsigned
arena_ind
);
#endif
static
bool
extent_purge_lazy_impl
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_hooks_t
**
r_extent_hooks
,
extent_t
*
extent
,
size_t
offset
,
size_t
length
,
bool
growing_retained
);
#ifdef PAGES_CAN_PURGE_FORCED
static
bool
extent_purge_forced_default
(
extent_hooks_t
*
extent_hooks
,
void
*
addr
,
size_t
size
,
size_t
offset
,
size_t
length
,
unsigned
arena_ind
);
#endif
static
bool
extent_purge_forced_impl
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_hooks_t
**
r_extent_hooks
,
extent_t
*
extent
,
size_t
offset
,
size_t
length
,
bool
growing_retained
);
#ifdef JEMALLOC_MAPS_COALESCE
static
bool
extent_split_default
(
extent_hooks_t
*
extent_hooks
,
void
*
addr
,
size_t
size
,
size_t
size_a
,
size_t
size_b
,
bool
committed
,
unsigned
arena_ind
);
#endif
static
extent_t
*
extent_split_impl
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_hooks_t
**
r_extent_hooks
,
extent_t
*
extent
,
size_t
size_a
,
szind_t
szind_a
,
bool
slab_a
,
size_t
size_b
,
szind_t
szind_b
,
bool
slab_b
,
bool
growing_retained
);
#ifdef JEMALLOC_MAPS_COALESCE
static
bool
extent_merge_default
(
extent_hooks_t
*
extent_hooks
,
void
*
addr_a
,
size_t
size_a
,
void
*
addr_b
,
size_t
size_b
,
bool
committed
,
unsigned
arena_ind
);
#endif
static
bool
extent_merge_impl
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_hooks_t
**
r_extent_hooks
,
extent_t
*
a
,
extent_t
*
b
,
bool
growing_retained
);
const
extent_hooks_t
extent_hooks_default
=
{
extent_alloc_default
,
extent_dalloc_default
,
extent_destroy_default
,
extent_commit_default
,
extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
,
extent_purge_lazy_default
#else
,
NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
,
extent_purge_forced_default
#else
,
NULL
#endif
#ifdef JEMALLOC_MAPS_COALESCE
,
extent_split_default
,
extent_merge_default
#endif
};
/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit, bool growing_retained);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
    bool growing_retained);

/******************************************************************************/

ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link,
    extent_esnead_comp)

typedef enum {
	lock_result_success,
	lock_result_failure,
	lock_result_no_extent
} lock_result_t;

static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
    extent_t **result) {
	extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
	    elm, true);

	if (extent1 == NULL) {
		return lock_result_no_extent;
	}
	/*
	 * It's possible that the extent changed out from under us, and with it
	 * the leaf->extent mapping.  We have to recheck while holding the lock.
	 */
	extent_lock(tsdn, extent1);
	extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
	    elm, true);

	if (extent1 == extent2) {
		*result = extent1;
		return lock_result_success;
	} else {
		extent_unlock(tsdn, extent1);
		return lock_result_failure;
	}
}

/*
 * Returns a pool-locked extent_t * if there's one associated with the given
 * address, and NULL otherwise.
 */
static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
	extent_t *ret = NULL;
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
	    rtree_ctx, (uintptr_t)addr, false, false);
	if (elm == NULL) {
		return NULL;
	}
	lock_result_t lock_result;
	do {
		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
	} while (lock_result == lock_result_failure);
	return ret;
}

extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_t *extent = extent_avail_first(&arena->extent_avail);
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
		return base_alloc_extent(tsdn, arena->base);
	}
	extent_avail_remove(&arena->extent_avail, extent);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
	return extent;
}

void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_avail_insert(&arena->extent_avail, extent);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
}
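/*
 * extent_alloc() and extent_dalloc() above manage only the extent_t metadata
 * structs themselves, via a per-arena free list (arena->extent_avail)
 * protected by extent_avail_mtx; they do not map or unmap any pages.  New
 * structs fall back to base_alloc_extent() when the free list is empty.
 */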
extent_hooks_t *
extent_hooks_get(arena_t *arena) {
	return base_extent_hooks_get(arena->base);
}

extent_hooks_t *
extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
	background_thread_info_t *info;
	if (have_background_thread) {
		info = arena_background_thread_info_get(arena);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	}
	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
	if (have_background_thread) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	}

	return ret;
}

static void
extent_hooks_assure_initialized(arena_t *arena,
    extent_hooks_t **r_extent_hooks) {
	if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
		*r_extent_hooks = extent_hooks_get(arena);
	}
}

#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert((size & PAGE_MASK) == 0);

	pind = sz_psz2ind(size - sz_large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow.  This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return size;
	}
	ret = sz_pind2sz(pind - 1) + sz_large_pad;
	assert(ret <= size);
	return ret;
}

#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_ceil(size_t size) {
	size_t ret;

	assert(size > 0);
	assert(size - sz_large_pad <= LARGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = extent_size_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large extent,
		 * because under-sized extents may be mixed in.  This only
		 * happens when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
		    sz_large_pad;
	}
	return ret;
}
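/*
 * Worked example (an editorial sketch of the behavior above): a size that is
 * itself a page size class is returned unchanged by both functions.  For a
 * page-aligned size that falls between two classes,
 * extent_size_quantize_floor() rounds down to the nearest class below it and
 * extent_size_quantize_ceil() rounds up to the nearest class above it, so the
 * pair brackets the request from both sides.
 */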
/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)

bool
extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
    bool delay_coalesce) {
	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	for (unsigned i = 0; i < NPSIZES + 1; i++) {
		extent_heap_new(&extents->heaps[i]);
	}
	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
	extent_list_init(&extents->lru);
	atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
	extents->state = state;
	extents->delay_coalesce = delay_coalesce;
	return false;
}

extent_state_t
extents_state_get(const extents_t *extents) {
	return extents->state;
}

size_t
extents_npages_get(extents_t *extents) {
	return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
}

static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_unset(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_heap_insert(&extents->heaps[pind], extent);
	extent_list_append(&extents->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * All modifications to npages hold the mutex (as asserted above), so we
	 * don't need an atomic fetch-add; we can get by with a load followed by
	 * a store.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	atomic_store_zu(&extents->npages, cur_extents_npages + npages,
	    ATOMIC_RELAXED);
}

static void
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	extent_heap_remove(&extents->heaps[pind], extent);
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_set(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_list_remove(&extents->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * As in extents_insert_locked, we hold extents->mtx and so don't need
	 * atomic operations for updating extents->npages.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	assert(cur_extents_npages >= npages);
	atomic_store_zu(&extents->npages,
	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}

/*
 * Find an extent with size [min_size, max_size) to satisfy the alignment
 * requirement.  For each size, try only the first extent in the heap.
 */
static extent_t *
extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
    size_t alignment) {
	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
	pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));

	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind); i < pind_max; i =
	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i + 1)) {
		assert(i < NPSIZES);
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		uintptr_t base = (uintptr_t)extent_base_get(extent);
		size_t candidate_size = extent_size_get(extent);
		assert(candidate_size >= min_size);

		uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
		    PAGE_CEILING(alignment));
		if (base > next_align || base + candidate_size <= next_align) {
			/* Overflow or not crossing the next alignment. */
			continue;
		}

		size_t leadsize = next_align - base;
		if (candidate_size - leadsize >= min_size) {
			return extent;
		}
	}

	return NULL;
}

/* Do any-best-fit extent selection, i.e. select any extent that best fits. */
static extent_t *
extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
	pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)pind);
	if (i < NPSIZES + 1) {
		/*
		 * In order to reduce fragmentation, avoid reusing and splitting
		 * large extents for much smaller sizes.
		 */
		if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
			return NULL;
		}
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		assert(extent_size_get(extent) >= size);
		return extent;
	}

	return NULL;
}

/*
 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
 * large enough.
 */
static extent_t *
extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	extent_t *ret = NULL;

	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind); i < NPSIZES + 1; i =
	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i + 1)) {
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		assert(extent_size_get(extent) >= size);
		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
			ret = extent;
		}
		if (i == NPSIZES) {
			break;
		}
		assert(i < NPSIZES);
	}

	return ret;
}

/*
 * Do {best,first}-fit extent selection, where the selection policy choice is
 * based on extents->delay_coalesce.  Best-fit selection requires less
 * searching, but its layout policy is less stable and may cause higher virtual
 * memory fragmentation as a side effect.
 */
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t esize, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);

	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (max_size < esize) {
		return NULL;
	}

	extent_t *extent = extents->delay_coalesce ?
	    extents_best_fit_locked(tsdn, arena, extents, max_size) :
	    extents_first_fit_locked(tsdn, arena, extents, max_size);

	if (alignment > PAGE && extent == NULL) {
		/*
		 * max_size guarantees the alignment requirement but is rather
		 * pessimistic.  Next we try to satisfy the aligned allocation
		 * with sizes in [esize, max_size).
		 */
		extent = extents_fit_alignment(extents, esize, max_size,
		    alignment);
	}

	return extent;
}
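/*
 * Sizing note (editorial sketch, assuming 4 KiB pages): max_size = esize +
 * PAGE_CEILING(alignment) - PAGE above is the smallest size for which any
 * page-aligned extent is guaranteed to contain an alignment-aligned address
 * with at least esize bytes after it.  For example, esize = 16 KiB with
 * alignment = 64 KiB gives max_size = 76 KiB: the first 64 KiB boundary
 * inside a 76 KiB run starts at most 60 KiB in, leaving >= 16 KiB beyond it.
 */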
static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent) {
	extent_state_set(extent, extent_state_active);
	bool coalesced;
	extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, extent, &coalesced, false);
	extent_state_set(extent, extents_state_get(extents));

	if (!coalesced) {
		return true;
	}
	extents_insert_locked(tsdn, extents, extent);
	return false;
}

extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size + pad != 0);
	assert(alignment != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
	    new_addr, size, pad, alignment, slab, szind, zero, commit, false);
	assert(extent == NULL || extent_dumpable_get(extent));
	return extent;
}

void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));
	extent_zeroed_set(extent, false);

	extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
}

extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, size_t npages_min) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	malloc_mutex_lock(tsdn, &extents->mtx);

	/*
	 * Get the LRU coalesced extent, if any.  If coalescing was delayed,
	 * the loop will iterate until the LRU extent is fully coalesced.
	 */
	extent_t *extent;
	while (true) {
		/* Get the LRU extent, if any. */
		extent = extent_list_first(&extents->lru);
		if (extent == NULL) {
			goto label_return;
		}
		/* Check the eviction limit. */
		size_t extents_npages = atomic_load_zu(&extents->npages,
		    ATOMIC_RELAXED);
		if (extents_npages <= npages_min) {
			extent = NULL;
			goto label_return;
		}
		extents_remove_locked(tsdn, extents, extent);
		if (!extents->delay_coalesce) {
			break;
		}
		/* Try to coalesce. */
		if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent)) {
			break;
		}
		/*
		 * The LRU extent was just coalesced and the result placed in
		 * the LRU at its neighbor's position.  Start over.
		 */
	}

	/*
	 * Either mark the extent active or deregister it to protect against
	 * concurrent operations.
	 */
	switch (extents_state_get(extents)) {
	case extent_state_active:
		not_reached();
	case extent_state_dirty:
	case extent_state_muzzy:
		extent_state_set(extent, extent_state_active);
		break;
	case extent_state_retained:
		extent_deregister(tsdn, extent);
		break;
	default:
		not_reached();
	}

label_return:
	malloc_mutex_unlock(tsdn, &extents->mtx);
	return extent;
}
static void
extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent, bool growing_retained) {
	/*
	 * Leak extent after making sure its pages have already been purged, so
	 * that this is only a virtual memory leak.
	 */
	if (extents_state_get(extents) == extent_state_dirty) {
		if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
		    extent, 0, extent_size_get(extent), growing_retained)) {
			extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
			    extent, 0, extent_size_get(extent),
			    growing_retained);
		}
	}
	extent_dalloc(tsdn, arena, extent);
}

void
extents_prefork(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_prefork(tsdn, &extents->mtx);
}

void
extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_parent(tsdn, &extents->mtx);
}

void
extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_child(tsdn, &extents->mtx);
}

static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extent_state_active);

	extent_state_set(extent, extents_state_get(extents));
	extents_insert_locked(tsdn, extents, extent);
}

static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_deactivate_locked(tsdn, arena, extents, extent);
	malloc_mutex_unlock(tsdn, &extents->mtx);
}

static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extents_state_get(extents));

	extents_remove_locked(tsdn, extents, extent);
	extent_state_set(extent, extent_state_active);
}

static bool
extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    const extent_t *extent, bool dependent, bool init_missing,
    rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_a == NULL) {
		return true;
	}
	assert(*r_elm_a != NULL);

	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_last_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_b == NULL) {
		return true;
	}
	assert(*r_elm_b != NULL);

	return false;
}

static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
    rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
	rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
	if (elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
		    slab);
	}
}

static void
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
    szind_t szind) {
	assert(extent_slab_get(extent));

	/* Register interior. */
	for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_write(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE), extent, szind, true);
	}
}

static void
extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);
	/* prof_gdump() requirement. */
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nadd = extent_size_get(extent) >> LG_PAGE;
		size_t cur = atomic_fetch_add_zu(&curpages, nadd,
		    ATOMIC_RELAXED) + nadd;
		size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
		while (cur > high && !atomic_compare_exchange_weak_zu(
		    &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highpages update race.
			 * Note that high is updated in case of CAS failure.
			 */
		}
		if (cur > high && prof_gdump_get_unlocked()) {
			prof_gdump(tsdn);
		}
	}
}

static void
extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nsub = extent_size_get(extent) >> LG_PAGE;
		assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
		atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
	}
}
static bool
extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;

	/*
	 * We need to hold the lock to protect against a concurrent coalesce
	 * operation that sees us in a partial state.
	 */
	extent_lock(tsdn, extent);

	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
	    &elm_a, &elm_b)) {
		return true;
	}

	szind_t szind = extent_szind_get_maybe_invalid(extent);
	bool slab = extent_slab_get(extent);
	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
	if (slab) {
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}

	extent_unlock(tsdn, extent);

	if (config_prof && gdump_add) {
		extent_gdump_add(tsdn, extent);
	}

	return false;
}

static bool
extent_register(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, true);
}

static bool
extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, false);
}

static void
extent_reregister(tsdn_t *tsdn, extent_t *extent) {
	bool err = extent_register(tsdn, extent);
	assert(!err);
}

/*
 * Removes all pointers to the given extent from the global rtree indices for
 * its interior.  This is relevant for slab extents, for which we need to do
 * metadata lookups at places other than the head of the extent.  We deregister
 * on the interior, then, when an extent moves from being an active slab to an
 * inactive state.
 */
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    extent_t *extent) {
	size_t i;

	assert(extent_slab_get(extent));

	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE));
	}
}

/*
 * Removes all pointers to the given extent from the global rtree.
 */
static void
extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
	    &elm_a, &elm_b);

	extent_lock(tsdn, extent);

	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}

	extent_unlock(tsdn, extent);

	if (config_prof && gdump) {
		extent_gdump_sub(tsdn, extent);
	}
}

static void
extent_deregister(tsdn_t *tsdn, extent_t *extent) {
	extent_deregister_impl(tsdn, extent, true);
}

static void
extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
	extent_deregister_impl(tsdn, extent, false);
}

/*
 * Tries to find and remove an extent from extents that can be used for the
 * given allocation request.
 */
static extent_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(alignment > 0);
	if (config_debug && new_addr != NULL) {
		/*
		 * Non-NULL new_addr has two use cases:
		 *
		 *   1) Recycle a known-extant extent, e.g. during purging.
		 *   2) Perform in-place expanding reallocation.
		 *
		 * Regardless of use case, new_addr must either refer to a
		 * non-existing extent, or to the base of an extant extent,
		 * since only active slabs support interior lookups (which of
		 * course cannot be recycled).
		 */
		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
		assert(pad == 0);
		assert(alignment <= PAGE);
	}

	size_t esize = size + pad;
	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_hooks_assure_initialized(arena, r_extent_hooks);
	extent_t *extent;
	if (new_addr != NULL) {
		extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
		if (extent != NULL) {
			/*
			 * We might null-out extent to report an error, but we
			 * still need to unlock the associated mutex after.
			 */
			extent_t *unlock_extent = extent;
			assert(extent_base_get(extent) == new_addr);
			if (extent_arena_get(extent) != arena ||
			    extent_size_get(extent) < esize ||
			    extent_state_get(extent) !=
			    extents_state_get(extents)) {
				extent = NULL;
			}
			extent_unlock(tsdn, unlock_extent);
		}
	} else {
		extent = extents_fit_locked(tsdn, arena, extents, esize,
		    alignment);
	}
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &extents->mtx);
		return NULL;
	}

	extent_activate_locked(tsdn, arena, extents, extent);
	malloc_mutex_unlock(tsdn, &extents->mtx);

	return extent;
}

/*
 * Given an allocation request and an extent guaranteed to be able to satisfy
 * it, this splits off lead and trail extents, leaving extent pointing to an
 * extent satisfying the allocation.
 * This function doesn't put lead or trail into any extents_t; it's the caller's
 * job to ensure that they can be reused.
 */
typedef enum {
	/*
	 * Split successfully.  lead, extent, and trail, are modified to extents
	 * describing the ranges before, in, and after the given allocation.
	 */
	extent_split_interior_ok,
	/*
	 * The extent can't satisfy the given allocation request.  None of the
	 * input extent_t *s are touched.
	 */
	extent_split_interior_cant_alloc,
	/*
	 * In a potentially invalid state.  Must leak (if *to_leak is non-NULL),
	 * and salvage what's still salvageable (if *to_salvage is non-NULL).
	 * None of lead, extent, or trail are valid.
	 */
	extent_split_interior_error
} extent_split_interior_result_t;
static extent_split_interior_result_t
extent_split_interior(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
    /* The result of splitting, in case of success. */
    extent_t **extent, extent_t **lead, extent_t **trail,
    /* The mess to clean up, in case of error. */
    extent_t **to_leak, extent_t **to_salvage,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    szind_t szind, bool growing_retained) {
	size_t esize = size + pad;
	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
	assert(new_addr == NULL || leadsize == 0);
	if (extent_size_get(*extent) < leadsize + esize) {
		return extent_split_interior_cant_alloc;
	}
	size_t trailsize = extent_size_get(*extent) - leadsize - esize;

	*lead = NULL;
	*trail = NULL;
	*to_leak = NULL;
	*to_salvage = NULL;

	/* Split the lead. */
	if (leadsize != 0) {
		*lead = *extent;
		*extent = extent_split_impl(tsdn, arena, r_extent_hooks,
		    *lead, leadsize, NSIZES, false, esize + trailsize, szind,
		    slab, growing_retained);
		if (*extent == NULL) {
			*to_leak = *lead;
			*lead = NULL;
			return extent_split_interior_error;
		}
	}

	/* Split the trail. */
	if (trailsize != 0) {
		*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
		    esize, szind, slab, trailsize, NSIZES, false,
		    growing_retained);
		if (*trail == NULL) {
			*to_leak = *extent;
			*to_salvage = *lead;
			*lead = NULL;
			*extent = NULL;
			return extent_split_interior_error;
		}
	}

	if (leadsize == 0 && trailsize == 0) {
		/*
		 * Splitting causes szind to be set as a side effect, but no
		 * splitting occurred.
		 */
		extent_szind_set(*extent, szind);
		if (szind != NSIZES) {
			rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
			    (uintptr_t)extent_addr_get(*extent), szind, slab);
			if (slab && extent_size_get(*extent) > PAGE) {
				rtree_szind_slab_update(tsdn, &extents_rtree,
				    rtree_ctx,
				    (uintptr_t)extent_past_get(*extent) -
				    (uintptr_t)PAGE, szind, slab);
			}
		}
	}

	return extent_split_interior_ok;
}

/*
 * This fulfills the indicated allocation request out of the given extent (which
 * the caller should have ensured was big enough).  If there's any unused space
 * before or after the resulting allocation, that space is given its own extent
 * and put back into extents.
 */
static extent_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    szind_t szind, extent_t *extent, bool growing_retained) {
	extent_t *lead;
	extent_t *trail;
	extent_t *to_leak;
	extent_t *to_salvage;

	extent_split_interior_result_t result = extent_split_interior(
	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
	    &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
	    growing_retained);

	if (result == extent_split_interior_ok) {
		if (lead != NULL) {
			extent_deactivate(tsdn, arena, extents, lead);
		}
		if (trail != NULL) {
			extent_deactivate(tsdn, arena, extents, trail);
		}
		return extent;
	} else {
		/*
		 * We should have picked an extent that was large enough to
		 * fulfill our allocation request.
		 */
		assert(result == extent_split_interior_error);
		if (to_salvage != NULL) {
			extent_deregister(tsdn, to_salvage);
		}
		if (to_leak != NULL) {
			void *leak = extent_base_get(to_leak);
			extent_deregister_no_gdump_sub(tsdn, to_leak);
			extents_leak(tsdn, arena, r_extent_hooks, extents,
			    to_leak, growing_retained);
			assert(extent_lock_from_addr(tsdn, rtree_ctx, leak) ==
			    NULL);
		}
		return NULL;
	}
	unreachable();
}

/*
 * Tries to satisfy the given allocation request by reusing one of the extents
 * in the given extents_t.
 */
static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(new_addr == NULL || !slab);
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
	    rtree_ctx, extents, new_addr, size, pad, alignment, slab,
	    growing_retained);
	if (extent == NULL) {
		return NULL;
	}

	extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, new_addr, size, pad, alignment, slab, szind, extent,
	    growing_retained);
	if (extent == NULL) {
		return NULL;
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
		    0, extent_size_get(extent), growing_retained)) {
			extent_record(tsdn, arena, r_extent_hooks, extents,
			    extent, growing_retained);
			return NULL;
		}
		extent_zeroed_set(extent, true);
	}

	if (extent_committed_get(extent)) {
		*commit = true;
	}
	if (extent_zeroed_get(extent)) {
		*zero = true;
	}

	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	assert(extent_state_get(extent) == extent_state_active);
	if (slab) {
		extent_slab_set(extent, slab);
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}

	if (*zero) {
		void *addr = extent_base_get(extent);
		size_t size = extent_size_get(extent);
		if (!extent_zeroed_get(extent)) {
			if (pages_purge_forced(addr, size)) {
				memset(addr, 0, size);
			}
		} else if (config_debug) {
			size_t *p = (size_t *)(uintptr_t)addr;
			for (size_t i = 0; i < size / sizeof(size_t); i++) {
				assert(p[i] == 0);
			}
		}
	}
	return extent;
}
/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
 * advantage of this to avoid demanding zeroed extents, but taking advantage of
 * them if they are returned.
 */
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
	void *ret;

	assert(size != 0);
	assert(alignment != 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}
	/* mmap. */
	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
	    != NULL) {
		return ret;
	}
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}

	/* All strategies for allocation failed. */
	return NULL;
}

static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit) {
	void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment,
	    zero, commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
	    ATOMIC_RELAXED));
	if (have_madvise_huge && ret) {
		pages_set_thp_state(ret, size);
	}
	return ret;
}

static void *
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	tsdn_t *tsdn;
	arena_t *arena;

	tsdn = tsdn_fetch();
	arena = arena_get(tsdn, arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);

	return extent_alloc_default_impl(tsdn, arena, new_addr, size,
	    alignment, zero, commit);
}

static void
extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
	if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
		/*
		 * The only legitimate case of customized extent hooks for a0 is
		 * hooks with no allocation activities.  One such example is to
		 * place metadata on pre-allocated resources such as huge pages.
		 * In that case, rely on reentrancy_level checks to catch
		 * infinite recursions.
		 */
		pre_reentrancy(tsd, NULL);
	} else {
		pre_reentrancy(tsd, arena);
	}
}

static void
extent_hook_post_reentrancy(tsdn_t *tsdn) {
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
	post_reentrancy(tsd);
}

/*
 * If virtual memory is retained, create increasingly larger extents from which
 * to split requested extents in order to limit the total number of disjoint
 * virtual memory ranges retained by each arena.
 */
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
    bool slab, szind_t szind, bool *zero, bool *commit) {
	malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	size_t esize = size + pad;
	size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size_min < esize) {
		goto label_err;
	}
	/*
	 * Find the next extent size in the series that would be large enough to
	 * satisfy this request.
	 */
	pszind_t egn_skip = 0;
	size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
	while (alloc_size < alloc_size_min) {
		egn_skip++;
		if (arena->extent_grow_next + egn_skip == NPSIZES) {
			/* Outside legal range. */
			goto label_err;
		}
		assert(arena->extent_grow_next + egn_skip < NPSIZES);
		alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
	}

	extent_t *extent = extent_alloc(tsdn, arena);
	if (extent == NULL) {
		goto label_err;
	}
	bool zeroed = false;
	bool committed = false;

	void *ptr;
	if (*r_extent_hooks == &extent_hooks_default) {
		ptr = extent_alloc_default_impl(tsdn, arena, NULL,
		    alloc_size, PAGE, &zeroed, &committed);
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
		    alloc_size, PAGE, &zeroed, &committed,
		    arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
	    arena_extent_sn_next(arena), extent_state_active, zeroed,
	    committed, true);
	if (ptr == NULL) {
		extent_dalloc(tsdn, arena, extent);
		goto label_err;
	}

	if (extent_register_no_gdump_add(tsdn, extent)) {
		extents_leak(tsdn, arena, r_extent_hooks,
		    &arena->extents_retained, extent, true);
		goto label_err;
	}

	if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
		*zero = true;
	}
	if (extent_committed_get(extent)) {
		*commit = true;
	}

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *lead;
	extent_t *trail;
	extent_t *to_leak;
	extent_t *to_salvage;
	extent_split_interior_result_t result = extent_split_interior(
	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
	    &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
	    true);

	if (result == extent_split_interior_ok) {
		if (lead != NULL) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, lead, true);
		}
		if (trail != NULL) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, trail, true);
		}
	} else {
		/*
		 * We should have allocated a sufficiently large extent; the
		 * cant_alloc case should not occur.
		 */
		assert(result == extent_split_interior_error);
		if (to_salvage != NULL) {
			if (config_prof) {
				extent_gdump_add(tsdn, to_salvage);
			}
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, to_salvage, true);
		}
		if (to_leak != NULL) {
			extent_deregister_no_gdump_sub(tsdn, to_leak);
			extents_leak(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, to_leak, true);
		}
		goto label_err;
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
		    extent_size_get(extent), true)) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, extent, true);
			goto label_err;
		}
		extent_zeroed_set(extent, true);
	}

	/*
	 * Increment extent_grow_next if doing so wouldn't exceed the allowed
	 * range.
	 */
	if (arena->extent_grow_next + egn_skip + 1 <=
	    arena->retain_grow_limit) {
		arena->extent_grow_next += egn_skip + 1;
	} else {
		arena->extent_grow_next = arena->retain_grow_limit;
	}
	/* All opportunities for failure are past. */
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);

	if (config_prof) {
		/* Adjust gdump stats now that extent is final size. */
		extent_gdump_add(tsdn, extent);
	}
	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	if (slab) {
		rtree_ctx_t rtree_ctx_fallback;
		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
		    &rtree_ctx_fallback);

		extent_slab_set(extent, true);
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}
	if (*zero && !extent_zeroed_get(extent)) {
		void *addr = extent_base_get(extent);
		size_t size = extent_size_get(extent);
		if (pages_purge_forced(addr, size)) {
			memset(addr, 0, size);
		}
	}

	return extent;
label_err:
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
	return NULL;
}
static extent_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size != 0);
	assert(alignment != 0);

	malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);

	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
	    &arena->extents_retained, new_addr, size, pad, alignment, slab,
	    szind, zero, commit, true);
	if (extent != NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
		if (config_prof) {
			extent_gdump_add(tsdn, extent);
		}
	} else if (opt_retain && new_addr == NULL) {
		extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
		    pad, alignment, slab, szind, zero, commit);
		/* extent_grow_retained() always releases extent_grow_mtx. */
	} else {
		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
	}
	malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);

	return extent;
}

static extent_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	size_t esize = size + pad;
	extent_t *extent = extent_alloc(tsdn, arena);
	if (extent == NULL) {
		return NULL;
	}
	void *addr;
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
		    alignment, zero, commit);
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
		    esize, alignment, zero, commit, arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}
	if (addr == NULL) {
		extent_dalloc(tsdn, arena, extent);
		return NULL;
	}
	extent_init(extent, arena, addr, esize, slab, szind,
	    arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
	    true);
	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	if (extent_register(tsdn, extent)) {
		extents_leak(tsdn, arena, r_extent_hooks,
		    &arena->extents_retained, extent, false);
		return NULL;
	}

	return extent;
}

extent_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
	    new_addr, size, pad, alignment, slab, szind, zero, commit);
	if (extent == NULL) {
		if (opt_retain && new_addr != NULL) {
			/*
			 * When retain is enabled and new_addr is set, we do not
			 * attempt extent_alloc_wrapper_hard which does mmap
			 * that is very unlikely to succeed (unless it happens
			 * to be at the end).
			 */
			return NULL;
		}
		extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
		    new_addr, size, pad, alignment, slab, szind, zero, commit);
	}

	assert(extent == NULL || extent_dumpable_get(extent));
	return extent;
}
static bool
extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
    const extent_t *outer) {
	assert(extent_arena_get(inner) == arena);
	if (extent_arena_get(outer) != arena) {
		return false;
	}

	assert(extent_state_get(inner) == extent_state_active);
	if (extent_state_get(outer) != extents->state) {
		return false;
	}

	if (extent_committed_get(inner) != extent_committed_get(outer)) {
		return false;
	}

	return true;
}

static bool
extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
    bool growing_retained) {
	assert(extent_can_coalesce(arena, extents, inner, outer));

	extent_activate_locked(tsdn, arena, extents, outer);

	malloc_mutex_unlock(tsdn, &extents->mtx);
	bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
	    forward ? inner : outer, forward ? outer : inner, growing_retained);
	malloc_mutex_lock(tsdn, &extents->mtx);

	if (err) {
		extent_deactivate_locked(tsdn, arena, extents, outer);
	}

	return err;
}

static extent_t *
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained) {
	/*
	 * Continue attempting to coalesce until failure, to protect against
	 * races with other threads that are thwarted by this one.
	 */
	bool again;
	do {
		again = false;

		/* Try to coalesce forward. */
		extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
		    extent_past_get(extent));
		if (next != NULL) {
			/*
			 * extents->mtx only protects against races for
			 * like-state extents, so call extent_can_coalesce()
			 * before releasing next's pool lock.
			 */
			bool can_coalesce = extent_can_coalesce(arena, extents,
			    extent, next);
			extent_unlock(tsdn, next);

			if (can_coalesce && !extent_coalesce(tsdn, arena,
			    r_extent_hooks, extents, extent, next, true,
			    growing_retained)) {
				if (extents->delay_coalesce) {
					/* Do minimal coalescing. */
					*coalesced = true;
					return extent;
				}
				again = true;
			}
		}

		/* Try to coalesce backward. */
		extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
		    extent_before_get(extent));
		if (prev != NULL) {
			bool can_coalesce = extent_can_coalesce(arena, extents,
			    extent, prev);
			extent_unlock(tsdn, prev);

			if (can_coalesce && !extent_coalesce(tsdn, arena,
			    r_extent_hooks, extents, extent, prev, false,
			    growing_retained)) {
				extent = prev;
				if (extents->delay_coalesce) {
					/* Do minimal coalescing. */
					*coalesced = true;
					return extent;
				}
				again = true;
			}
		}
	} while (again);

	if (extents->delay_coalesce) {
		*coalesced = false;
	}
	return extent;
}

/*
 * Does the metadata management portions of putting an unused extent into the
 * given extents_t (coalesces, deregisters slab interiors, the heap operations).
 */
static void
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent, bool growing_retained) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	assert((extents_state_get(extents) != extent_state_dirty &&
	    extents_state_get(extents) != extent_state_muzzy) ||
	    !extent_zeroed_get(extent));

	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_hooks_assure_initialized(arena, r_extent_hooks);

	extent_szind_set(extent, NSIZES);
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}

	assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), true) == extent);

	if (!extents->delay_coalesce) {
		extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent, NULL, growing_retained);
	} else if (extent_size_get(extent) >= LARGE_MINCLASS) {
		/* Always coalesce large extents eagerly. */
		bool coalesced;
		size_t prev_size;
		do {
			prev_size = extent_size_get(extent);
			assert(extent_state_get(extent) ==
			    extent_state_active);
			extent = extent_try_coalesce(tsdn, arena,
			    r_extent_hooks, rtree_ctx, extents, extent,
			    &coalesced, growing_retained);
		} while (coalesced &&
		    extent_size_get(extent) >= prev_size + LARGE_MINCLASS);
	}
	extent_deactivate_locked(tsdn, arena, extents, extent);

	malloc_mutex_unlock(tsdn, &extents->mtx);
}
void
extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (extent_register(tsdn, extent)) {
		extents_leak(tsdn, arena, &extent_hooks,
		    &arena->extents_retained, extent, false);
		return;
	}
	extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
}

static bool
extent_dalloc_default_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		return extent_dalloc_mmap(addr, size);
	}
	return true;
}

static bool
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	return extent_dalloc_default_impl(addr, size);
}

static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	bool err;

	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/* Try to deallocate. */
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_dalloc_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		err = ((*r_extent_hooks)->dalloc == NULL ||
		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena)));
		extent_hook_post_reentrancy(tsdn);
	}

	if (!err) {
		extent_dalloc(tsdn, arena, extent);
	}

	return err;
}

void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/*
	 * Deregister first to avoid a race with other allocating threads, and
	 * reregister if deallocation fails.
	 */
	extent_deregister(tsdn, extent);
	if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
		return;
	}

	extent_reregister(tsdn, extent);
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	/* Try to decommit; purge if that fails. */
	bool zeroed;
	if (!extent_committed_get(extent)) {
		zeroed = true;
	} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks,
	    extent, 0, extent_size_get(extent))) {
		zeroed = true;
	} else if ((*r_extent_hooks)->purge_forced != NULL &&
	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena))) {
		zeroed = true;
	} else if (extent_state_get(extent) == extent_state_muzzy ||
	    ((*r_extent_hooks)->purge_lazy != NULL &&
	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena)))) {
		zeroed = false;
	} else {
		zeroed = false;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_zeroed_set(extent, zeroed);

	if (config_prof) {
		extent_gdump_sub(tsdn, extent);
	}

	extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
	    extent, false);
}
static void
extent_destroy_default_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		pages_unmap(addr, size);
	}
}

static void
extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	extent_destroy_default_impl(addr, size);
}

void
extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Deregister first to avoid a race with other allocating threads. */
	extent_deregister(tsdn, extent);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/* Try to destroy; silently fail otherwise. */
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		extent_destroy_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else if ((*r_extent_hooks)->destroy != NULL) {
		extent_hook_pre_reentrancy(tsdn, arena);
		(*r_extent_hooks)->destroy(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	extent_dalloc(tsdn, arena, extent);
}

static bool
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

static bool
extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = ((*r_extent_hooks)->commit == NULL ||
	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_committed_set(extent, extent_committed_get(extent) || !err);
	return err;
}

bool
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
	    length, false);
}

static bool
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = ((*r_extent_hooks)->decommit == NULL ||
	    (*r_extent_hooks)->decommit(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_committed_set(extent, extent_committed_get(extent) && err);
	return err;
}
#ifdef PAGES_CAN_PURGE_LAZY
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}
#endif

static bool
extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->purge_lazy == NULL) {
		return true;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}

	return err;
}

bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
	    offset, length, false);
}

#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_forced((void *)((uintptr_t)addr +
	    (uintptr_t)offset), length);
}
#endif

static bool
extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->purge_forced == NULL) {
		return true;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	return err;
}

bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
	    offset, length, false);
}
#ifdef JEMALLOC_MAPS_COALESCE
static
bool
extent_split_default
(
extent_hooks_t
*
extent_hooks
,
void
*
addr
,
size_t
size
,
size_t
size_a
,
size_t
size_b
,
bool
committed
,
unsigned
arena_ind
)
{
return
!
maps_coalesce
;
}
#endif
/*
* Accepts the extent to split, and the characteristics of each side of the
* split. The 'a' parameters go with the 'lead' of the resulting pair of
* extents (the lower addressed portion of the split), and the 'b' parameters go
* with the trail (the higher addressed portion). This makes 'extent' the lead,
* and returns the trail (except in case of error).
*/
static extent_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained) {
	assert(extent_size_get(extent) == size_a + size_b);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->split == NULL) {
		return NULL;
	}

	extent_t *trail = extent_alloc(tsdn, arena);
	if (trail == NULL) {
		goto label_error_a;
	}

	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
	    extent_state_get(extent), extent_zeroed_get(extent),
	    extent_committed_get(extent), extent_dumpable_get(extent));

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
	{
		extent_t lead;

		extent_init(&lead, arena, extent_addr_get(extent), size_a,
		    slab_a, szind_a, extent_sn_get(extent),
		    extent_state_get(extent), extent_zeroed_get(extent),
		    extent_committed_get(extent), extent_dumpable_get(extent));

		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
		    true, &lead_elm_a, &lead_elm_b);
	}
	rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
	    &trail_elm_a, &trail_elm_b);

	if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
	    || trail_elm_b == NULL) {
		goto label_error_b;
	}

	extent_lock2(tsdn, extent, trail);

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->split(*r_extent_hooks,
	    extent_base_get(extent), size_a + size_b, size_a, size_b,
	    extent_committed_get(extent), arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	if (err) {
		goto label_error_c;
	}

	extent_size_set(extent, size_a);
	extent_szind_set(extent, szind_a);

	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
	    szind_a, slab_a);
	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
	    szind_b, slab_b);

	extent_unlock2(tsdn, extent, trail);

	return trail;
label_error_c:
	extent_unlock2(tsdn, extent, trail);
label_error_b:
	extent_dalloc(tsdn, arena, trail);
label_error_a:
	return NULL;
}

extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b,
    bool slab_b) {
	return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
	    szind_a, slab_a, size_b, szind_b, slab_b, false);
}

static bool
extent_merge_default_impl(void *addr_a, void *addr_b) {
	if (!maps_coalesce) {
		return true;
	}
	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
		return true;
	}

	return false;
}

#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	return extent_merge_default_impl(addr_a, addr_b);
}
#endif

static bool
extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->merge == NULL) {
		return true;
	}

	bool err;
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_merge_default_impl(extent_base_get(a),
		    extent_base_get(b));
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		err = (*r_extent_hooks)->merge(*r_extent_hooks,
		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
		    extent_size_get(b), extent_committed_get(a),
		    arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	if (err) {
		return true;
	}

	/*
	 * The rtree writes must happen while all the relevant elements are
	 * owned, so the following code uses decomposed helper functions rather
	 * than extent_{,de}register() to do things in the right order.
	 */
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false,
	    &a_elm_a, &a_elm_b);
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false,
	    &b_elm_a, &b_elm_b);

	extent_lock2(tsdn, a, b);

	if (a_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
		    NSIZES, false);
	}
	if (b_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
		    NSIZES, false);
	} else {
		b_elm_b = b_elm_a;
	}

	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
	extent_szind_set(a, NSIZES);
	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
	    extent_sn_get(a) : extent_sn_get(b));
	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));

	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);

	extent_unlock2(tsdn, a, b);

	extent_dalloc(tsdn, extent_arena_get(b), b);

	return false;
}

bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
	return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
}

bool
extent_boot(void) {
	if (rtree_new(&extents_rtree, true)) {
		return true;
	}

	if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
	    WITNESS_RANK_EXTENT_POOL)) {
		return true;
	}

	if (have_dss) {
		extent_dss_boot();
	}

	return false;
}
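In the same spirit, a minimal sketch of custom split and merge callbacks with the argument order extent_split_impl() and extent_merge_impl() dispatch to; both simply opt out (a true return makes the split return NULL and the merge report failure). The names are hypothetical and the snippet is illustrative only, not part of this file.

#include <stdbool.h>
#include <stddef.h>

typedef struct extent_hooks_s extent_hooks_t;	/* opaque placeholder */

static bool
my_split(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind) {
	/* Refuse to split the mapping. */
	(void)extent_hooks; (void)addr; (void)size; (void)size_a;
	(void)size_b; (void)committed; (void)arena_ind;
	return true;
}

static bool
my_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	/* Refuse to merge the two mappings. */
	(void)extent_hooks; (void)addr_a; (void)size_a; (void)addr_b;
	(void)size_b; (void)committed; (void)arena_ind;
	return true;
}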
deps/jemalloc/src/extent_dss.c deleted 100644 → 0 View file @ 7ff7536e
#define JEMALLOC_EXTENT_DSS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/spin.h"
/******************************************************************************/
/* Data. */
const char	*opt_dss = DSS_DEFAULT;

const char	*dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};
/*
* Current dss precedence default, used when creating new arenas. NB: This is
* stored as unsigned rather than dss_prec_t because in principle there's no
* guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
* atomic operations to synchronize the setting.
*/
static atomic_u_t	dss_prec_default = ATOMIC_INIT(
    (unsigned)DSS_PREC_DEFAULT);

/* Base address of the DSS. */
static void		*dss_base;
/* Atomic boolean indicating whether a thread is currently extending DSS. */
static atomic_b_t	dss_extending;
/* Atomic boolean indicating whether the DSS is exhausted. */
static atomic_b_t	dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static atomic_p_t	dss_max;
/******************************************************************************/
static void *
extent_dss_sbrk(intptr_t increment) {
#ifdef JEMALLOC_DSS
	return sbrk(increment);
#else
	not_implemented();
	return NULL;
#endif
}

dss_prec_t
extent_dss_prec_get(void) {
	dss_prec_t ret;

	if (!have_dss) {
		return dss_prec_disabled;
	}
	ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
	return ret;
}

bool
extent_dss_prec_set(dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}

static void
extent_dss_extending_start(void) {
	spin_t spinner = SPIN_INITIALIZER;
	while (true) {
		bool expected = false;
		if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
		    true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
			break;
		}
		spin_adaptive(&spinner);
	}
}

static void
extent_dss_extending_finish(void) {
	assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));

	atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
}

static void *
extent_dss_max_update(void *new_addr) {
	/*
	 * Get the current end of the DSS as max_cur and assure that dss_max is
	 * up to date.
	 */
	void *max_cur = extent_dss_sbrk(0);
	if (max_cur == (void *)-1) {
		return NULL;
	}
	atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
	/* Fixed new_addr can only be supported if it is at the edge of DSS. */
	if (new_addr != NULL && max_cur != new_addr) {
		return NULL;
	}
	return max_cur;
}

void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit) {
	extent_t *gap;

	cassert(have_dss);
	assert(size > 0);
	assert(alignment > 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a large allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0) {
		return NULL;
	}

	gap = extent_alloc(tsdn, arena);
	if (gap == NULL) {
		return NULL;
	}

	extent_dss_extending_start();
	if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		while (true) {
			void *max_cur = extent_dss_max_update(new_addr);
			if (max_cur == NULL) {
				goto label_oom;
			}

			/*
			 * Compute how much page-aligned gap space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			void *gap_addr_page = (void *)(PAGE_CEILING(
			    (uintptr_t)max_cur));
			void *ret = (void *)ALIGNMENT_CEILING(
			    (uintptr_t)gap_addr_page, alignment);
			size_t gap_size_page = (uintptr_t)ret -
			    (uintptr_t)gap_addr_page;
			if (gap_size_page != 0) {
				extent_init(gap, arena, gap_addr_page,
				    gap_size_page, false, NSIZES,
				    arena_extent_sn_next(arena),
				    extent_state_active, false, true, true);
			}
			/*
			 * Compute the address just past the end of the desired
			 * allocation space.
			 */
			void *dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)max_cur ||
			    (uintptr_t)dss_next < (uintptr_t)max_cur) {
				goto label_oom; /* Wrap-around. */
			}
			/* Compute the increment, including subpage bytes. */
			void *gap_addr_subpage = max_cur;
			size_t gap_size_subpage = (uintptr_t)ret -
			    (uintptr_t)gap_addr_subpage;
			intptr_t incr = gap_size_subpage + size;

			assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
			    size);

			/* Try to allocate. */
			void *dss_prev = extent_dss_sbrk(incr);
			if (dss_prev == max_cur) {
				/* Success. */
				atomic_store_p(&dss_max, dss_next,
				    ATOMIC_RELEASE);
				extent_dss_extending_finish();

				if (gap_size_page != 0) {
					extent_dalloc_gap(tsdn, arena, gap);
				} else {
					extent_dalloc(tsdn, arena, gap);
				}
				if (!*commit) {
					*commit = pages_decommit(ret, size);
				}
				if (*zero && *commit) {
					extent_hooks_t *extent_hooks =
					    EXTENT_HOOKS_INITIALIZER;
					extent_t extent;

					extent_init(&extent, arena, ret, size,
					    size, false, NSIZES,
					    extent_state_active, false, true,
					    true);
					if (extent_purge_forced_wrapper(tsdn,
					    arena, &extent_hooks, &extent, 0,
					    size)) {
						memset(ret, 0, size);
					}
				}
				return ret;
			}
			/*
			 * Failure, whether due to OOM or a race with a raw
			 * sbrk() call from outside the allocator.
			 */
			if (dss_prev == (void *)-1) {
				/* OOM. */
				atomic_store_b(&dss_exhausted, true,
				    ATOMIC_RELEASE);
				goto label_oom;
			}
		}
	}
label_oom:
	extent_dss_extending_finish();
	extent_dalloc(tsdn, arena, gap);
	return NULL;
}

static bool
extent_in_dss_helper(void *addr, void *max) {
	return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
	    (uintptr_t)max);
}

bool
extent_in_dss(void *addr) {
	cassert(have_dss);

	return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
	    ATOMIC_ACQUIRE));
}

bool
extent_dss_mergeable(void *addr_a, void *addr_b) {
	void *max;

	cassert(have_dss);

	if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
	    (uintptr_t)dss_base) {
		return true;
	}

	max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
	return (extent_in_dss_helper(addr_a, max) ==
	    extent_in_dss_helper(addr_b, max));
}

void
extent_dss_boot(void) {
	cassert(have_dss);

	dss_base = extent_dss_sbrk(0);
	atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
	atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
	atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
}
/******************************************************************************/
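The gap bookkeeping in extent_alloc_dss() above is plain page- and alignment-ceiling arithmetic. A standalone sketch of the same calculation, with the jemalloc macros re-derived locally and a 4 KiB page assumed for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ		((uintptr_t)4096)	/* assumed page size */
#define CEIL(a, b)	(((a) + ((b) - 1)) & ~((b) - 1))	/* b: power of two */

int
main(void) {
	uintptr_t max_cur = 0x70000123;	/* example current DSS end */
	uintptr_t alignment = 1 << 16;	/* example requested alignment */

	uintptr_t gap_addr_page = CEIL(max_cur, PAGE_SZ);
	uintptr_t ret = CEIL(gap_addr_page, alignment);
	size_t gap_size_page = (size_t)(ret - gap_addr_page);

	/* The bytes between the page-rounded DSS end and the aligned result
	 * become the recyclable "gap" extent, as in extent_alloc_dss(). */
	printf("gap at %#lx, %zu bytes\n", (unsigned long)gap_addr_page,
	    gap_size_page);
	return 0;
}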
deps/jemalloc/src/extent_mmap.c deleted 100644 → 0 View file @ 7ff7536e
#define JEMALLOC_EXTENT_MMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
/******************************************************************************/
/* Data. */
bool	opt_retain =
#ifdef JEMALLOC_RETAIN
    true
#else
    false
#endif
    ;

/******************************************************************************/

void *
extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit) {
	void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment,
	    PAGE), commit);
	if (ret == NULL) {
		return NULL;
	}
	assert(ret != NULL);
	if (*commit) {
		*zero = true;
	}
	return ret;
}

bool
extent_dalloc_mmap(void *addr, size_t size) {
	if (!opt_retain) {
		pages_unmap(addr, size);
	}
	return opt_retain;
}
deps/jemalloc/src/hash.c deleted 100644 → 0 View file @ 7ff7536e
#define JEMALLOC_HASH_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
deps/jemalloc/src/hooks.c deleted 100644 → 0 View file @ 7ff7536e
#include "jemalloc/internal/jemalloc_preamble.h"
/*
* The hooks are a little bit screwy -- they're not genuinely exported in the
* sense that we want them available to end-users, but we do want them visible
* from outside the generated library, so that we can use them in test code.
*/
JEMALLOC_EXPORT void (*hooks_arena_new_hook)() = NULL;

JEMALLOC_EXPORT void (*hooks_libc_hook)() = NULL;
deps/jemalloc/src/jemalloc.c deleted 100644 → 0 View file @ 7ff7536e
#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/log.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/
/* Data. */
/* Runtime configuration options. */
const char	*je_malloc_conf
#ifndef _WIN32
    JEMALLOC_ATTR(weak)
#endif
    ;
bool	opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
bool	opt_abort_conf =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
const char	*opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool	opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool	opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;

bool	opt_utrace = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
unsigned	opt_narenas = 0;

unsigned	ncpus;

/* Protects arenas initialization. */
malloc_mutex_t	arenas_lock;
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
*
* arenas[0..narenas_auto) are used for automatic multiplexing of threads and
* arenas. arenas[narenas_auto..narenas_total) are only used if the application
* takes some action to create them and allocate from them.
*
* Points to an arena_t.
*/
JEMALLOC_ALIGNED(CACHELINE)
atomic_p_t		arenas[MALLOCX_ARENA_LIMIT];
static atomic_u_t	narenas_total; /* Use narenas_total_*(). */
static arena_t		*a0; /* arenas[0]; read-only after initialization. */
unsigned		narenas_auto; /* Read-only after initialization. */

typedef enum {
	malloc_init_uninitialized	= 3,
	malloc_init_a0_initialized	= 2,
	malloc_init_recursible		= 1,
	malloc_init_initialized		= 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t	malloc_init_state = malloc_init_uninitialized;

/* False should be the common case. Set to true to trigger initialization. */
bool			malloc_slow = true;

/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
	flag_opt_junk_alloc	= (1U),
	flag_opt_junk_free	= (1U << 1),
	flag_opt_zero		= (1U << 2),
	flag_opt_utrace		= (1U << 3),
	flag_opt_xmalloc	= (1U << 4)
};
static uint8_t	malloc_slow_flags;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t		malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool			malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t	init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t	init_lock;
static bool init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void) {
	/*
	 * If another constructor in the same binary is using mallctl to e.g.
	 * set up extent hooks, it may end up running before this one, and
	 * malloc_init_hard will crash trying to lock the uninitialized lock. So
	 * we force an initialization of the lock in malloc_init_hard as well.
	 * We don't try to care about atomicity of the accesses to the
	 * init_lock_initialized boolean, since it really only matters early in
	 * the process creation, before any separate thread normally starts
	 * doing anything.
	 */
	if (!init_lock_initialized) {
		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
		    malloc_mutex_rank_exclusive);
	}
	init_lock_initialized = true;
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif
typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;
#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
if (unlikely(opt_utrace)) { \
int utrace_serrno = errno; \
malloc_utrace_t ut; \
ut.p = (a); \
ut.s = (b); \
ut.r = (c); \
utrace(&ut, sizeof(ut)); \
errno = utrace_serrno; \
} \
} while (0)
#else
# define UTRACE(a, b, c)
#endif
/* Whether encountered any invalid config options. */
static bool	had_conf_error = false;
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
static bool	malloc_init_hard_a0(void);
static bool	malloc_init_hard(void);
/******************************************************************************/
/*
* Begin miscellaneous support functions.
*/
bool
malloc_initialized(void) {
	return (malloc_init_state == malloc_init_initialized);
}

JEMALLOC_ALWAYS_INLINE bool
malloc_init_a0(void) {
	if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
		return malloc_init_hard_a0();
	}
	return false;
}

JEMALLOC_ALWAYS_INLINE bool
malloc_init(void) {
	if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
		return true;
	}
	return false;
}

/*
 * The a0*() functions are used instead of i{d,}alloc() in situations that
 * cannot tolerate TLS variable access.
 */

static void *
a0ialloc(size_t size, bool zero, bool is_internal) {
	if (unlikely(malloc_init_a0())) {
		return NULL;
	}

	return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
	    is_internal, arena_get(TSDN_NULL, 0, true), true);
}

static void
a0idalloc(void *ptr, bool is_internal) {
	idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
}

void *
a0malloc(size_t size) {
	return a0ialloc(size, false, true);
}

void
a0dalloc(void *ptr) {
	a0idalloc(ptr, true);
}
/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
* situations that cannot tolerate TLS variable access (TLS allocation and very
* early internal data structure initialization).
*/
void *
bootstrap_malloc(size_t size) {
	if (unlikely(size == 0)) {
		size = 1;
	}

	return a0ialloc(size, false, false);
}

void *
bootstrap_calloc(size_t num, size_t size) {
	size_t num_size;

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		assert(num == 0 || size == 0);
		num_size = 1;
	}

	return a0ialloc(num_size, true, false);
}

void
bootstrap_free(void *ptr) {
	if (unlikely(ptr == NULL)) {
		return;
	}

	a0idalloc(ptr, false);
}

void
arena_set(unsigned ind, arena_t *arena) {
	atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
}

static void
narenas_total_set(unsigned narenas) {
	atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
}

static void
narenas_total_inc(void) {
	atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
}

unsigned
narenas_total_get(void) {
	return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
}

/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;

	assert(ind <= narenas_total_get());
	if (ind >= MALLOCX_ARENA_LIMIT) {
		return NULL;
	}
	if (ind == narenas_total_get()) {
		narenas_total_inc();
	}

	/*
	 * Another thread may have already initialized arenas[ind] if it's an
	 * auto arena.
	 */
	arena = arena_get(tsdn, ind, false);
	if (arena != NULL) {
		assert(ind < narenas_auto);
		return arena;
	}

	/* Actually initialize the arena. */
	arena = arena_new(tsdn, ind, extent_hooks);

	return arena;
}

static void
arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
	if (ind == 0) {
		return;
	}
	if (have_background_thread) {
		bool err;
		malloc_mutex_lock(tsdn, &background_thread_lock);
		err = background_thread_create(tsdn_tsd(tsdn), ind);
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		if (err) {
			malloc_printf("<jemalloc>: error in background thread "
			    "creation for arena %u. Abort.\n", ind);
			abort();
		}
	}
}

arena_t *
arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;

	malloc_mutex_lock(tsdn, &arenas_lock);
	arena = arena_init_locked(tsdn, ind, extent_hooks);
	malloc_mutex_unlock(tsdn, &arenas_lock);

	arena_new_create_background_thread(tsdn, ind);

	return arena;
}

static void
arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
	arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_inc(arena, internal);

	if (internal) {
		tsd_iarena_set(tsd, arena);
	} else {
		tsd_arena_set(tsd, arena);
	}
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
	arena_t *oldarena, *newarena;

	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
	newarena = arena_get(tsd_tsdn(tsd), newind, false);
	arena_nthreads_dec(oldarena, false);
	arena_nthreads_inc(newarena, false);
	tsd_arena_set(tsd, newarena);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
	arena_t *arena;

	arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_dec(arena, internal);

	if (internal) {
		tsd_iarena_set(tsd, NULL);
	} else {
		tsd_arena_set(tsd, NULL);
	}
}

arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
	arena_tdata_t *tdata, *arenas_tdata_old;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
	unsigned narenas_tdata_old, i;
	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
	unsigned narenas_actual = narenas_total_get();

	/*
	 * Dissociate old tdata array (and set up for deallocation upon return)
	 * if it's too small.
	 */
	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
		arenas_tdata_old = arenas_tdata;
		narenas_tdata_old = narenas_tdata;
		arenas_tdata = NULL;
		narenas_tdata = 0;
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	} else {
		arenas_tdata_old = NULL;
		narenas_tdata_old = 0;
	}

	/* Allocate tdata array if it's missing. */
	if (arenas_tdata == NULL) {
		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
		narenas_tdata = (ind < narenas_actual) ? narenas_actual :
		    ind + 1;

		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
			*arenas_tdata_bypassp = true;
			arenas_tdata = (arena_tdata_t *)a0malloc(
			    sizeof(arena_tdata_t) * narenas_tdata);
			*arenas_tdata_bypassp = false;
		}
		if (arenas_tdata == NULL) {
			tdata = NULL;
			goto label_return;
		}
		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	}

	/*
	 * Copy to tdata array. It's possible that the actual number of arenas
	 * has increased since narenas_total_get() was called above, but that
	 * causes no correctness issues unless two threads concurrently execute
	 * the arenas.create mallctl, which we trust mallctl synchronization to
	 * prevent.
	 */

	/* Copy/initialize tickers. */
	for (i = 0; i < narenas_actual; i++) {
		if (i < narenas_tdata_old) {
			ticker_copy(&arenas_tdata[i].decay_ticker,
			    &arenas_tdata_old[i].decay_ticker);
		} else {
			ticker_init(&arenas_tdata[i].decay_ticker,
			    DECAY_NTICKS_PER_UPDATE);
		}
	}
	if (narenas_tdata > narenas_actual) {
		memset(&arenas_tdata[narenas_actual], 0,
		    sizeof(arena_tdata_t) * (narenas_tdata - narenas_actual));
	}

	/* Read the refreshed tdata array. */
	tdata = &arenas_tdata[ind];
label_return:
	if (arenas_tdata_old != NULL) {
		a0dalloc(arenas_tdata_old);
	}
	return tdata;
}
/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal) {
	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);

	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
		unsigned choose = percpu_arena_choose();
		ret = arena_get(tsd_tsdn(tsd), choose, true);
		assert(ret != NULL);
		arena_bind(tsd, arena_ind_get(ret), false);
		arena_bind(tsd, arena_ind_get(ret), true);

		return ret;
	}

	if (narenas_auto > 1) {
		unsigned i, j, choose[2], first_null;
		bool is_new_arena[2];

		/*
		 * Determine binding for both non-internal and internal
		 * allocation.
		 *
		 *   choose[0]: For application allocation.
		 *   choose[1]: For internal metadata allocation.
		 */

		for (j = 0; j < 2; j++) {
			choose[j] = 0;
			is_new_arena[j] = false;
		}

		first_null = narenas_auto;
		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				for (j = 0; j < 2; j++) {
					if (arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), i, false), !!j) <
					    arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), choose[j], false),
					    !!j)) {
						choose[j] = i;
					}
				}
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		for (j = 0; j < 2; j++) {
			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
			    choose[j], false), !!j) == 0 || first_null ==
			    narenas_auto) {
				/*
				 * Use an unloaded arena, or the least loaded
				 * arena if all arenas are already initialized.
				 */
				if (!!j == internal) {
					ret = arena_get(tsd_tsdn(tsd),
					    choose[j], false);
				}
			} else {
				arena_t *arena;

				/* Initialize a new arena. */
				choose[j] = first_null;
				arena = arena_init_locked(tsd_tsdn(tsd),
				    choose[j],
				    (extent_hooks_t *)&extent_hooks_default);
				if (arena == NULL) {
					malloc_mutex_unlock(tsd_tsdn(tsd),
					    &arenas_lock);
					return NULL;
				}
				is_new_arena[j] = true;
				if (!!j == internal) {
					ret = arena;
				}
			}
			arena_bind(tsd, choose[j], !!j);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);

		for (j = 0; j < 2; j++) {
			if (is_new_arena[j]) {
				assert(choose[j] > 0);
				arena_new_create_background_thread(
				    tsd_tsdn(tsd), choose[j]);
			}
		}
	} else {
		ret = arena_get(tsd_tsdn(tsd), 0, false);
		arena_bind(tsd, 0, false);
		arena_bind(tsd, 0, true);
	}

	return ret;
}

void
iarena_cleanup(tsd_t *tsd) {
	arena_t *iarena;

	iarena = tsd_iarena_get(tsd);
	if (iarena != NULL) {
		arena_unbind(tsd, arena_ind_get(iarena), true);
	}
}

void
arena_cleanup(tsd_t *tsd) {
	arena_t *arena;

	arena = tsd_arena_get(tsd);
	if (arena != NULL) {
		arena_unbind(tsd, arena_ind_get(arena), false);
	}
}

void
arenas_tdata_cleanup(tsd_t *tsd) {
	arena_tdata_t *arenas_tdata;

	/* Prevent tsd->arenas_tdata from being (re)created. */
	*tsd_arenas_tdata_bypassp_get(tsd) = true;

	arenas_tdata = tsd_arenas_tdata_get(tsd);
	if (arenas_tdata != NULL) {
		tsd_arenas_tdata_set(tsd, NULL);
		a0dalloc(arenas_tdata);
	}
}

static void
stats_print_atexit(void) {
	if (config_stats) {
		tsdn_t *tsdn;
		unsigned narenas, i;

		tsdn = tsdn_fetch();

		/*
		 * Merge stats from extant threads. This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events. As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arena_get(tsdn, i, false);
			if (arena != NULL) {
				tcache_t *tcache;

				malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tsdn, tcache, arena);
				}
				malloc_mutex_unlock(tsdn,
				    &arena->tcache_ql_mtx);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
}
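stats_print_atexit() is only registered when the stats_print option is enabled (see malloc_init_hard_a0_locked() further down); an application can request the same report directly at any point. A minimal sketch, assuming a je_-prefixed jemalloc build like the one in this tree and the standard installed header:

#include <jemalloc/jemalloc.h>

int
main(void) {
	void *p = je_malloc(64);
	/* Same call the atexit handler above ends with; NULL/NULL/NULL means
	 * default write callback, no callback argument, default options. */
	je_malloc_stats_print(NULL, NULL, NULL);
	je_free(p);
	return 0;
}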
/*
* Ensure that we don't hold any locks upon entry to or exit from allocator
* code (in a "broad" sense that doesn't count a reentrant allocation as an
* entrance or exit).
*/
JEMALLOC_ALWAYS_INLINE void
check_entry_exit_locking(tsdn_t *tsdn) {
	if (!config_debug) {
		return;
	}
	if (tsdn_null(tsdn)) {
		return;
	}
	tsd_t *tsd = tsdn_tsd(tsdn);
	/*
	 * It's possible we hold locks at entry/exit if we're in a nested
	 * allocation.
	 */
	int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
	if (reentrancy_level != 0) {
		return;
	}
	witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
}
/*
* End miscellaneous support functions.
*/
/******************************************************************************/
/*
* Begin initialization functions.
*/
static char *
jemalloc_secure_getenv(const char *name) {
#ifdef JEMALLOC_HAVE_SECURE_GETENV
	return secure_getenv(name);
#else
#  ifdef JEMALLOC_HAVE_ISSETUGID
	if (issetugid() != 0) {
		return NULL;
	}
#  endif
	return getenv(name);
#endif
}

static unsigned
malloc_ncpus(void) {
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
	/*
	 * glibc >= 2.6 has the CPU_COUNT macro.
	 *
	 * glibc's sysconf() uses isspace().  glibc allocates for the first time
	 * *before* setting up the isspace tables.  Therefore we need a
	 * different method to get the number of CPUs.
	 */
	{
		cpu_set_t set;

		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
		result = CPU_COUNT(&set);
	}
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	return ((result == -1) ? 1 : (unsigned)result);
}

static void
init_opt_stats_print_opts(const char *v, size_t vlen) {
	size_t opts_len = strlen(opt_stats_print_opts);
	assert(opts_len <= stats_print_tot_num_options);

	for (size_t i = 0; i < vlen; i++) {
		switch (v[i]) {
#define OPTION(o, v, d, s) case o: break;
			STATS_PRINT_OPTIONS
#undef OPTION
		default: continue;
		}

		if (strchr(opt_stats_print_opts, v[i]) != NULL) {
			/* Ignore repeated. */
			continue;
		}

		opt_stats_print_opts[opts_len++] = v[i];
		opt_stats_print_opts[opts_len] = '\0';
		assert(opts_len <= stats_print_tot_num_options);
	}
	assert(opts_len == strlen(opt_stats_print_opts));
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p) {
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; !accept;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return true;
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return true;
		}
	}

	for (accept = false; !accept;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return false;
}

static void
malloc_abort_invalid_conf(void) {
	assert(opt_abort_conf);
	malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
	    "value (see above).\n");
	abort();
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen) {
	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
	/* If abort_conf is set, error out after processing all options. */
	had_conf_error = true;
}

static void
malloc_slow_flag_init(void) {
	/*
	 * Combine the runtime options into malloc_slow for fast path.  Called
	 * after processing all the options.
	 */
	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
	    | (opt_junk_free ? flag_opt_junk_free : 0)
	    | (opt_zero ? flag_opt_zero : 0)
	    | (opt_utrace ? flag_opt_utrace : 0)
	    | (opt_xmalloc ? flag_opt_xmalloc : 0);

	malloc_slow = (malloc_slow_flags != 0);
}
static void
malloc_conf_init(void) {
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 4; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			opts = config_malloc_conf;
			break;
		case 1:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 2: {
			ssize_t linklen = 0;
#ifndef _WIN32
			int saved_errno = errno;
			const char *linkname =
#  ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
			    "/etc/malloc.conf"
#  endif
			    ;

			/*
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			 */
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
			if (linklen == -1) {
				/* No configuration specified. */
				linklen = 0;
				/* Restore errno. */
				set_errno(saved_errno);
			}
#endif
			buf[linklen] = '\0';
			opts = buf;
			break;
		} case 3: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			not_reached();
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen)) {
#define CONF_MATCH(n) \
(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_MATCH_VALUE(n) \
(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
#define CONF_HANDLE_BOOL(o, n) \
if (CONF_MATCH(n)) { \
if (CONF_MATCH_VALUE("true")) { \
o = true; \
} else if (CONF_MATCH_VALUE("false")) { \
o = false; \
} else { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} \
continue; \
}
#define CONF_MIN_no(um, min) false
#define CONF_MIN_yes(um, min) ((um) < (min))
#define CONF_MAX_no(um, max) false
#define CONF_MAX_yes(um, max) ((um) > (max))
#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
if (CONF_MATCH(n)) { \
uintmax_t um; \
char *end; \
\
set_errno(0); \
um = malloc_strtoumax(v, &end, 0); \
if (get_errno() != 0 || (uintptr_t)end -\
(uintptr_t)v != vlen) { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} else if (clip) { \
if (CONF_MIN_##check_min(um, \
(t)(min))) { \
o = (t)(min); \
} else if ( \
CONF_MAX_##check_max(um, \
(t)(max))) { \
o = (t)(max); \
} else { \
o = (t)um; \
} \
} else { \
if (CONF_MIN_##check_min(um, \
(t)(min)) || \
CONF_MAX_##check_max(um, \
(t)(max))) { \
malloc_conf_error( \
"Out-of-range " \
"conf value", \
k, klen, v, vlen); \
} else { \
o = (t)um; \
} \
} \
continue; \
}
#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
clip) \
CONF_HANDLE_T_U(unsigned, o, n, min, max, \
check_min, check_max, clip)
#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
CONF_HANDLE_T_U(size_t, o, n, min, max, \
check_min, check_max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
if (CONF_MATCH(n)) { \
long l; \
char *end; \
\
set_errno(0); \
l = strtol(v, &end, 0); \
if (get_errno() != 0 || (uintptr_t)end -\
(uintptr_t)v != vlen) { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} else if (l < (ssize_t)(min) || l > \
(ssize_t)(max)) { \
malloc_conf_error( \
"Out-of-range conf value", \
k, klen, v, vlen); \
} else { \
o = l; \
} \
continue; \
}
#define CONF_HANDLE_CHAR_P(o, n, d) \
if (CONF_MATCH(n)) { \
size_t cpylen = (vlen <= \
sizeof(o)-1) ? vlen : \
sizeof(o)-1; \
strncpy(o, v, cpylen); \
o[cpylen] = '\0'; \
continue; \
}
			CONF_HANDLE_BOOL(opt_abort, "abort")
			CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
			if (strncmp("metadata_thp", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < metadata_thp_mode_limit; i++) {
					if (strncmp(metadata_thp_mode_names[i],
					    v, vlen) == 0) {
						opt_metadata_thp = i;
						match = true;
						break;
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_BOOL(opt_retain, "retain")
			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (extent_dss_prec_set(i)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
			    UINT_MAX, yes, no, false)
			CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
			    "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
			    SSIZE_MAX);
			CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
			    "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
			    SSIZE_MAX);
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (CONF_MATCH("stats_print_opts")) {
				init_opt_stats_print_opts(v, vlen);
				continue;
			}
			if (config_fill) {
				if (CONF_MATCH("junk")) {
					if (CONF_MATCH_VALUE("true")) {
						opt_junk = "true";
						opt_junk_alloc = opt_junk_free =
						    true;
					} else if (CONF_MATCH_VALUE("false")) {
						opt_junk = "false";
						opt_junk_alloc = opt_junk_free =
						    false;
					} else if (CONF_MATCH_VALUE("alloc")) {
						opt_junk = "alloc";
						opt_junk_alloc = true;
						opt_junk_free = false;
					} else if (CONF_MATCH_VALUE("free")) {
						opt_junk = "free";
						opt_junk_alloc = false;
						opt_junk_free = true;
					} else {
						malloc_conf_error(
						    "Invalid conf value", k,
						    klen, v, vlen);
					}
					continue;
				}
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			CONF_HANDLE_BOOL(opt_tcache, "tcache")
			CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
			    "lg_extent_max_active_fit", 0,
			    (sizeof(size_t) << 3), yes, yes, false)
			CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
			    -1, (sizeof(size_t) << 3) - 1)
			if (strncmp("percpu_arena", k, klen) == 0) {
				bool match = false;
				for (int i = percpu_arena_mode_names_base; i <
				    percpu_arena_mode_names_limit; i++) {
					if (strncmp(percpu_arena_mode_names[i],
					    v, vlen) == 0) {
						if (!have_percpu_arena) {
							malloc_conf_error(
							    "No getcpu support",
							    k, klen, v, vlen);
						}
						opt_percpu_arena = i;
						match = true;
						break;
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_BOOL(opt_background_thread,
			    "background_thread");
			CONF_HANDLE_SIZE_T(opt_max_background_threads,
			    "max_background_threads", 1,
			    opt_max_background_threads, yes, yes, true);
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active,
				    "prof_active")
				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
				    "prof_thread_active_init")
				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1, no, yes, true)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			if (config_log) {
				if (CONF_MATCH("log")) {
					size_t cpylen = (
					    vlen <= sizeof(log_var_names) ?
					    vlen : sizeof(log_var_names) - 1);
					strncpy(log_var_names, v, cpylen);
					log_var_names[cpylen] = '\0';
					continue;
				}
			}
			if (CONF_MATCH("thp")) {
				bool match = false;
				for (int i = 0; i < thp_mode_names_limit; i++) {
					if (strncmp(thp_mode_names[i], v, vlen)
					    == 0) {
						if (!have_madvise_huge) {
							malloc_conf_error(
							    "No THP support",
							    k, klen, v, vlen);
						}
						opt_thp = i;
						match = true;
						break;
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_MATCH
#undef CONF_MATCH_VALUE
#undef CONF_HANDLE_BOOL
#undef CONF_MIN_no
#undef CONF_MIN_yes
#undef CONF_MAX_no
#undef CONF_MAX_yes
#undef CONF_HANDLE_T_U
#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
		if (opt_abort_conf && had_conf_error) {
			malloc_abort_invalid_conf();
		}
	}
	atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
}
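malloc_conf_init() above merges options from four sources, in order: the build-time config_malloc_conf string, the application-visible je_malloc_conf symbol, the name of the /etc/malloc.conf symlink, and the MALLOC_CONF environment variable, each parsed as comma-separated key:value pairs by malloc_conf_next(). A minimal usage sketch via the weak symbol declared earlier in this file; the option names are taken from the handlers above, and the values are examples only:

/* Compiled into the application; later sources (the /etc/malloc.conf symlink
 * and MALLOC_CONF) can still override individual keys. */
const char *je_malloc_conf = "narenas:4,dirty_decay_ms:1000,stats_print:true";

The same string could instead be exported in the environment (for example MALLOC_CONF="narenas:4,stats_print:true"), which is picked up in the fourth pass of the loop above.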
static bool
malloc_init_hard_needed(void) {
	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
	    malloc_init_recursible)) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		return false;
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
		/* Busy-wait until the initializing thread completes. */
		spin_t spinner = SPIN_INITIALIZER;
		do {
			malloc_mutex_unlock(TSDN_NULL, &init_lock);
			spin_adaptive(&spinner);
			malloc_mutex_lock(TSDN_NULL, &init_lock);
		} while (!malloc_initialized());
		return false;
	}
#endif
	return true;
}

static bool
malloc_init_hard_a0_locked() {
	malloc_initializer = INITIALIZER;

	if (config_prof) {
		prof_boot0();
	}
	malloc_conf_init();
	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort) {
				abort();
			}
		}
	}
	if (pages_boot()) {
		return true;
	}
	if (base_boot(TSDN_NULL)) {
		return true;
	}
	if (extent_boot()) {
		return true;
	}
	if (ctl_boot()) {
		return true;
	}
	if (config_prof) {
		prof_boot1();
	}
	arena_boot();
	if (tcache_boot(TSDN_NULL)) {
		return true;
	}
	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_auto = 1;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * arena_choose_hard().
	 */
	if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
	    == NULL) {
		return true;
	}
	a0 = arena_get(TSDN_NULL, 0, false);
	malloc_init_state = malloc_init_a0_initialized;

	return false;
}

static bool
malloc_init_hard_a0(void) {
	bool ret;

	malloc_mutex_lock(TSDN_NULL, &init_lock);
	ret = malloc_init_hard_a0_locked();
	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	return ret;
}

/* Initialize data structures which may trigger recursive allocation. */
static bool
malloc_init_hard_recursible(void) {
	malloc_init_state = malloc_init_recursible;

	ncpus = malloc_ncpus();

#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
    && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
    !defined(__native_client__))
	/* LinuxThreads' pthread_atfork() allocates. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort) {
			abort();
		}
		return true;
	}
#endif

	if (background_thread_boot0()) {
		return true;
	}

	return false;
}

static unsigned
malloc_narenas_default(void) {
	assert(ncpus > 0);
	/*
	 * For SMP systems, create more than one arena per CPU by
	 * default.
	 */
	if (ncpus > 1) {
		return ncpus << 2;
	} else {
		return 1;
	}
}

static percpu_arena_mode_t
percpu_arena_as_initialized(percpu_arena_mode_t mode) {
	assert(!malloc_initialized());
	assert(mode <= percpu_arena_disabled);

	if (mode != percpu_arena_disabled) {
		mode += percpu_arena_mode_enabled_base;
	}

	return mode;
}

static bool
malloc_init_narenas(void) {
	assert(ncpus > 0);

	if (opt_percpu_arena != percpu_arena_disabled) {
		if (!have_percpu_arena || malloc_getcpu() < 0) {
			opt_percpu_arena = percpu_arena_disabled;
			malloc_printf("<jemalloc>: perCPU arena getcpu() not "
			    "available. Setting narenas to %u.\n", opt_narenas ?
			    opt_narenas : malloc_narenas_default());
			if (opt_abort) {
				abort();
			}
		} else {
			if (ncpus >= MALLOCX_ARENA_LIMIT) {
				malloc_printf("<jemalloc>: narenas w/ percpu"
				    "arena beyond limit (%d)\n", ncpus);
				if (opt_abort) {
					abort();
				}
				return true;
			}
			/* NB: opt_percpu_arena isn't fully initialized yet. */
			if (percpu_arena_as_initialized(opt_percpu_arena) ==
			    per_phycpu_arena && ncpus % 2 != 0) {
				malloc_printf("<jemalloc>: invalid "
				    "configuration -- per physical CPU arena "
				    "with odd number (%u) of CPUs (no hyper "
				    "threading?).\n", ncpus);
				if (opt_abort)
					abort();
			}
			unsigned n = percpu_arena_ind_limit(
			    percpu_arena_as_initialized(opt_percpu_arena));
			if (opt_narenas < n) {
				/*
				 * If narenas is specified with percpu_arena
				 * enabled, actual narenas is set as the greater
				 * of the two. percpu_arena_choose will be free
				 * to use any of the arenas based on CPU
				 * id. This is conservative (at a small cost)
				 * but ensures correctness.
				 *
				 * If for some reason the ncpus determined at
				 * boot is not the actual number (e.g. because
				 * of affinity setting from numactl), reserving
				 * narenas this way provides a workaround for
				 * percpu_arena.
				 */
				opt_narenas = n;
			}
		}
	}
	if (opt_narenas == 0) {
		opt_narenas = malloc_narenas_default();
	}
	assert(opt_narenas > 0);

	narenas_auto = opt_narenas;
	/*
	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
	 */
	if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
		narenas_auto = MALLOCX_ARENA_LIMIT - 1;
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total_set(narenas_auto);

	return false;
}

static void
malloc_init_percpu(void) {
	opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
}

static bool
malloc_init_hard_finish(void) {
	if (malloc_mutex_boot()) {
		return true;
	}

	malloc_init_state = malloc_init_initialized;
	malloc_slow_flag_init();

	return false;
}

static void
malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
	malloc_mutex_assert_owner(tsdn, &init_lock);
	malloc_mutex_unlock(tsdn, &init_lock);
	if (reentrancy_set) {
		assert(!tsdn_null(tsdn));
		tsd_t *tsd = tsdn_tsd(tsdn);
		assert(tsd_reentrancy_level_get(tsd) > 0);
		post_reentrancy(tsd);
	}
}

static bool
malloc_init_hard(void) {
	tsd_t *tsd;

#if defined(_WIN32) && _WIN32_WINNT < 0x0600
	_init_init_lock();
#endif
	malloc_mutex_lock(TSDN_NULL, &init_lock);

#define UNLOCK_RETURN(tsdn, ret, reentrancy)		\
	malloc_init_hard_cleanup(tsdn, reentrancy);	\
	return ret;

	if (!malloc_init_hard_needed()) {
		UNLOCK_RETURN(TSDN_NULL, false, false)
	}

	if (malloc_init_state != malloc_init_a0_initialized &&
	    malloc_init_hard_a0_locked()) {
		UNLOCK_RETURN(TSDN_NULL, true, false)
	}

	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	/* Recursive allocation relies on functional tsd. */
	tsd = malloc_tsd_boot0();
	if (tsd == NULL) {
		return true;
	}
	if (malloc_init_hard_recursible()) {
		return true;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
	/* Set reentrancy level to 1 during init. */
	pre_reentrancy(tsd, NULL);
	/* Initialize narenas before prof_boot2 (for allocation). */
	if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
	}
	if (config_prof && prof_boot2(tsd)) {
		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
	}

	malloc_init_percpu();

	if (malloc_init_hard_finish()) {
		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
	}
	post_reentrancy(tsd);
	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);

	witness_assert_lockless(witness_tsd_tsdn(
	    tsd_witness_tsdp_get_unsafe(tsd)));
	malloc_tsd_boot1();
	/* Update TSD after tsd_boot1. */
	tsd = tsd_fetch();
	if (opt_background_thread) {
		assert(have_background_thread);
		/*
		 * Need to finish init & unlock first before creating background
		 * threads (pthread_create depends on malloc).  ctl_init (which
		 * sets isthreaded) needs to be called without holding any lock.
		 */
		background_thread_ctl_init(tsd_tsdn(tsd));

		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
		bool err = background_thread_create(tsd, 0);
		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
		if (err) {
			return true;
		}
	}
#undef UNLOCK_RETURN
	return false;
}
/*
* End initialization functions.
*/
/******************************************************************************/
/*
* Begin allocation-path internal functions and data structures.
*/
/*
* Settings determined by the documented behavior of the allocation functions.
*/
typedef struct static_opts_s static_opts_t;
struct static_opts_s {
	/* Whether or not allocation size may overflow. */
	bool may_overflow;
	/* Whether or not allocations of size 0 should be treated as size 1. */
	bool bump_empty_alloc;
	/*
	 * Whether to assert that allocations are not of size 0 (after any
	 * bumping).
	 */
	bool assert_nonempty_alloc;
	/*
	 * Whether or not to modify the 'result' argument to malloc in case of
	 * error.
	 */
	bool null_out_result_on_error;
	/* Whether to set errno when we encounter an error condition. */
	bool set_errno_on_error;
	/*
	 * The minimum valid alignment for functions requesting aligned storage.
	 */
	size_t min_alignment;
	/* The error string to use if we oom. */
	const char *oom_string;
	/* The error string to use if the passed-in alignment is invalid. */
	const char *invalid_alignment_string;
	/*
	 * False if we're configured to skip some time-consuming operations.
	 *
	 * This isn't really a malloc "behavior", but it acts as a useful
	 * summary of several other static (or at least, static after program
	 * initialization) options.
	 */
	bool slow;
};

JEMALLOC_ALWAYS_INLINE void
static_opts_init(static_opts_t *static_opts) {
	static_opts->may_overflow = false;
	static_opts->bump_empty_alloc = false;
	static_opts->assert_nonempty_alloc = false;
	static_opts->null_out_result_on_error = false;
	static_opts->set_errno_on_error = false;
	static_opts->min_alignment = 0;
	static_opts->oom_string = "";
	static_opts->invalid_alignment_string = "";
	static_opts->slow = false;
}
/*
* These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we
* should have one constant here per magic value there. Note however that the
* representations need not be related.
*/
#define TCACHE_IND_NONE ((unsigned)-1)
#define TCACHE_IND_AUTOMATIC ((unsigned)-2)
#define ARENA_IND_AUTOMATIC ((unsigned)-1)
typedef struct dynamic_opts_s dynamic_opts_t;
struct dynamic_opts_s {
	void **result;
	size_t num_items;
	size_t item_size;
	size_t alignment;
	bool zero;
	unsigned tcache_ind;
	unsigned arena_ind;
};

JEMALLOC_ALWAYS_INLINE void
dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
	dynamic_opts->result = NULL;
	dynamic_opts->num_items = 0;
	dynamic_opts->item_size = 0;
	dynamic_opts->alignment = 0;
	dynamic_opts->zero = false;
	dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
	dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
}

/* ind is ignored if dopts->alignment > 0. */
JEMALLOC_ALWAYS_INLINE void *
imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
    size_t size, size_t usize, szind_t ind) {
	tcache_t *tcache;
	arena_t *arena;

	/* Fill in the tcache. */
	if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
		if (likely(!sopts->slow)) {
			/* Getting tcache ptr unconditionally. */
			tcache = tsd_tcachep_get(tsd);
			assert(tcache == tcache_get(tsd));
		} else {
			tcache = tcache_get(tsd);
		}
	} else if (dopts->tcache_ind == TCACHE_IND_NONE) {
		tcache = NULL;
	} else {
		tcache = tcaches_get(tsd, dopts->tcache_ind);
	}

	/* Fill in the arena. */
	if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
		/*
		 * In case of automatic arena management, we defer arena
		 * computation until as late as we can, hoping to fill the
		 * allocation out of the tcache.
		 */
		arena = NULL;
	} else {
		arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
	}

	if (unlikely(dopts->alignment != 0)) {
		return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
		    dopts->zero, tcache, arena);
	}

	return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
	    arena, sopts->slow);
}

JEMALLOC_ALWAYS_INLINE void *
imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
    size_t usize, szind_t ind) {
	void *ret;

	/*
	 * For small allocations, sampling bumps the usize.  If so, we allocate
	 * from the ind_large bucket.
	 */
	szind_t ind_large;
	size_t bumped_usize = usize;

	if (usize <= SMALL_MAXCLASS) {
		assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
		    sz_sa2u(LARGE_MINCLASS, dopts->alignment))
		    == LARGE_MINCLASS);
		ind_large = sz_size2index(LARGE_MINCLASS);
		bumped_usize = sz_s2u(LARGE_MINCLASS);
		ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
		    bumped_usize, ind_large);
		if (unlikely(ret == NULL)) {
			return NULL;
		}
		arena_prof_promote(tsd_tsdn(tsd), ret, usize);
	} else {
		ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
	}

	return ret;
}
/*
 * Returns true if the allocation will overflow, and false otherwise.  Sets
 * *size to the product either way.
 */
JEMALLOC_ALWAYS_INLINE bool
compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
    size_t *size) {
	/*
	 * This function is just num_items * item_size, except that we may have
	 * to check for overflow.
	 */

	if (!may_overflow) {
		assert(dopts->num_items == 1);
		*size = dopts->item_size;
		return false;
	}

	/* A size_t with its high-half bits all set to 1. */
	static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);

	*size = dopts->item_size * dopts->num_items;

	if (unlikely(*size == 0)) {
		return (dopts->num_items != 0 && dopts->item_size != 0);
	}

	/*
	 * We got a non-zero size, but we don't know if we overflowed to get
	 * there.  To avoid having to do a divide, we'll be clever and note that
	 * if both A and B can be represented in N/2 bits, then their product
	 * can be represented in N bits (without the possibility of overflow).
	 */
	if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
		return false;
	}
	if (likely(*size / dopts->item_size == dopts->num_items)) {
		return false;
	}
	return true;
}
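
The half-width test above is why the common case avoids a division: if neither operand uses any bits in the upper half of size_t, the product cannot wrap, and only otherwise does the divide-based check run. A minimal standalone sketch of the same check, using the standard SIZE_MAX rather than jemalloc's SIZE_T_MAX (the function name is illustrative, not part of jemalloc):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Returns true on overflow; stores the (possibly wrapped) product either way. */
static bool
mul_overflows(size_t num_items, size_t item_size, size_t *product) {
	const size_t high_bits = SIZE_MAX << (sizeof(size_t) * 8 / 2);

	*product = num_items * item_size;
	if (*product == 0) {
		return (num_items != 0 && item_size != 0);
	}
	/* Both operands fit in half the bits, so the product cannot wrap. */
	if ((high_bits & (num_items | item_size)) == 0) {
		return false;
	}
	/* Slow path: verify the multiplication by dividing back. */
	return (*product / item_size != num_items);
}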
JEMALLOC_ALWAYS_INLINE int
imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
	/* Where the actual allocated memory will live. */
	void *allocation = NULL;
	/* Filled in by compute_size_with_overflow below. */
	size_t size = 0;
	/*
	 * For unaligned allocations, we need only ind.  For aligned
	 * allocations, or in case of stats or profiling we need usize.
	 *
	 * These are actually dead stores, in that their values are reset before
	 * any branch on their value is taken.  Sometimes though, it's
	 * convenient to pass them as arguments before this point.  To avoid
	 * undefined behavior then, we initialize them with dummy stores.
	 */
	szind_t ind = 0;
	size_t usize = 0;

	/* Reentrancy is only checked on slow path. */
	int8_t reentrancy_level;

	/* Compute the amount of memory the user wants. */
	if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
	    &size))) {
		goto label_oom;
	}

	/* Validate the user input. */
	if (sopts->bump_empty_alloc) {
		if (unlikely(size == 0)) {
			size = 1;
		}
	}

	if (sopts->assert_nonempty_alloc) {
		assert(size != 0);
	}

	if (unlikely(dopts->alignment < sopts->min_alignment
	    || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
		goto label_invalid_alignment;
	}

	/* This is the beginning of the "core" algorithm. */

	if (dopts->alignment == 0) {
		ind = sz_size2index(size);
		if (unlikely(ind >= NSIZES)) {
			goto label_oom;
		}
		if (config_stats || (config_prof && opt_prof)) {
			usize = sz_index2size(ind);
			assert(usize > 0 && usize <= LARGE_MAXCLASS);
		}
	} else {
		usize = sz_sa2u(size, dopts->alignment);
		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
			goto label_oom;
		}
	}

	check_entry_exit_locking(tsd_tsdn(tsd));

	/*
	 * If we need to handle reentrancy, we can do it out of a
	 * known-initialized arena (i.e. arena 0).
	 */
	reentrancy_level = tsd_reentrancy_level_get(tsd);
	if (sopts->slow && unlikely(reentrancy_level > 0)) {
		/*
		 * We should never specify particular arenas or tcaches from
		 * within our internal allocations.
		 */
		assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
		    dopts->tcache_ind == TCACHE_IND_NONE);
		assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
		dopts->tcache_ind = TCACHE_IND_NONE;
		/* We know that arena 0 has already been initialized. */
		dopts->arena_ind = 0;
	}

	/* If profiling is on, get our profiling context. */
	if (config_prof && opt_prof) {
		/*
		 * Note that if we're going down this path, usize must have been
		 * initialized in the previous if statement.
		 */
		prof_tctx_t *tctx = prof_alloc_prep(
		    tsd, usize, prof_active_get_unlocked(), true);

		alloc_ctx_t alloc_ctx;
		if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
			alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
			allocation = imalloc_no_sample(
			    sopts, dopts, tsd, usize, usize, ind);
		} else if ((uintptr_t)tctx > (uintptr_t)1U) {
			/*
			 * Note that ind might still be 0 here.  This is fine;
			 * imalloc_sample ignores ind if dopts->alignment > 0.
			 */
			allocation = imalloc_sample(
			    sopts, dopts, tsd, usize, ind);
			alloc_ctx.slab = false;
		} else {
			allocation = NULL;
		}

		if (unlikely(allocation == NULL)) {
			prof_alloc_rollback(tsd, tctx, true);
			goto label_oom;
		}
		prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
	} else {
		/*
		 * If dopts->alignment > 0, then ind is still 0, but usize was
		 * computed in the previous if statement.  Down the positive
		 * alignment path, imalloc_no_sample ignores ind and size
		 * (relying only on usize).
		 */
		allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
		    ind);
		if (unlikely(allocation == NULL)) {
			goto label_oom;
		}
	}

	/*
	 * Allocation has been done at this point.  We still have some
	 * post-allocation work to do though.
	 */
	assert(dopts->alignment == 0
	    || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));

	if (config_stats) {
		assert(usize == isalloc(tsd_tsdn(tsd), allocation));
		*tsd_thread_allocatedp_get(tsd) += usize;
	}

	if (sopts->slow) {
		UTRACE(0, size, allocation);
	}

	/* Success! */
	check_entry_exit_locking(tsd_tsdn(tsd));
	*dopts->result = allocation;
	return 0;

label_oom:
	if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write(sopts->oom_string);
		abort();
	}

	if (sopts->slow) {
		UTRACE(NULL, size, NULL);
	}

	check_entry_exit_locking(tsd_tsdn(tsd));

	if (sopts->set_errno_on_error) {
		set_errno(ENOMEM);
	}

	if (sopts->null_out_result_on_error) {
		*dopts->result = NULL;
	}

	return ENOMEM;

	/*
	 * This label is only jumped to by one goto; we move it out of line
	 * anyways to avoid obscuring the non-error paths, and for symmetry with
	 * the oom case.
	 */
label_invalid_alignment:
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write(sopts->invalid_alignment_string);
		abort();
	}

	if (sopts->set_errno_on_error) {
		set_errno(EINVAL);
	}

	if (sopts->slow) {
		UTRACE(NULL, size, NULL);
	}

	check_entry_exit_locking(tsd_tsdn(tsd));

	if (sopts->null_out_result_on_error) {
		*dopts->result = NULL;
	}

	return EINVAL;
}
/* Returns the errno-style error code of the allocation. */
JEMALLOC_ALWAYS_INLINE int
imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
	if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write(sopts->oom_string);
			abort();
		}
		UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
		set_errno(ENOMEM);
		*dopts->result = NULL;

		return ENOMEM;
	}

	/* We always need the tsd.  Let's grab it right away. */
	tsd_t *tsd = tsd_fetch();
	assert(tsd);
	if (likely(tsd_fast(tsd))) {
		/* Fast and common path. */
		tsd_assert_fast(tsd);
		sopts->slow = false;
		return imalloc_body(sopts, dopts, tsd);
	} else {
		sopts->slow = true;
		return imalloc_body(sopts, dopts, tsd);
	}
}
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size) {
	void *ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.malloc.entry", "size: %zu", size);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.bump_empty_alloc = true;
	sopts.null_out_result_on_error = true;
	sopts.set_errno_on_error = true;
	sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";

	dopts.result = &ret;
	dopts.num_items = 1;
	dopts.item_size = size;

	imalloc(&sopts, &dopts);

	LOG("core.malloc.exit", "result: %p", ret);

	return ret;
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size) {
	int ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
	    "size: %zu", memptr, alignment, size);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.bump_empty_alloc = true;
	sopts.min_alignment = sizeof(void *);
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";

	dopts.result = memptr;
	dopts.num_items = 1;
	dopts.item_size = size;
	dopts.alignment = alignment;

	ret = imalloc(&sopts, &dopts);

	LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
	    *memptr);

	return ret;
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
je_aligned_alloc(size_t alignment, size_t size) {
	void *ret;

	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
	    alignment, size);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.bump_empty_alloc = true;
	sopts.null_out_result_on_error = true;
	sopts.set_errno_on_error = true;
	sopts.min_alignment = 1;
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";

	dopts.result = &ret;
	dopts.num_items = 1;
	dopts.item_size = size;
	dopts.alignment = alignment;

	imalloc(&sopts, &dopts);

	LOG("core.aligned_alloc.exit", "result: %p", ret);

	return ret;
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size) {
	void *ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.may_overflow = true;
	sopts.bump_empty_alloc = true;
	sopts.null_out_result_on_error = true;
	sopts.set_errno_on_error = true;
	sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";

	dopts.result = &ret;
	dopts.num_items = num;
	dopts.item_size = size;
	dopts.zero = true;

	imalloc(&sopts, &dopts);

	LOG("core.calloc.exit", "result: %p", ret);

	return ret;
}
static void *
irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
    prof_tctx_t *tctx) {
	void *p;

	if (tctx == NULL) {
		return NULL;
	}
	if (usize <= SMALL_MAXCLASS) {
		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
		if (p == NULL) {
			return NULL;
		}
		arena_prof_promote(tsd_tsdn(tsd), p, usize);
	} else {
		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
	}

	return p;
}

JEMALLOC_ALWAYS_INLINE void *
irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
    alloc_ctx_t *alloc_ctx) {
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
	} else {
		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
	}
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return NULL;
	}
	prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
	    old_tctx);

	return p;
}
JEMALLOC_ALWAYS_INLINE void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
	if (!slow_path) {
		tsd_assert_fast(tsd);
	}
	check_entry_exit_locking(tsd_tsdn(tsd));
	if (tsd_reentrancy_level_get(tsd) != 0) {
		assert(slow_path);
	}

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	alloc_ctx_t alloc_ctx;
	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
	assert(alloc_ctx.szind != NSIZES);

	size_t usize;
	if (config_prof && opt_prof) {
		usize = sz_index2size(alloc_ctx.szind);
		prof_free(tsd, ptr, usize, &alloc_ctx);
	} else if (config_stats) {
		usize = sz_index2size(alloc_ctx.szind);
	}

	if (config_stats) {
		*tsd_thread_deallocatedp_get(tsd) += usize;
	}

	if (likely(!slow_path)) {
		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, false);
	} else {
		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, true);
	}
}
JEMALLOC_ALWAYS_INLINE void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
	if (!slow_path) {
		tsd_assert_fast(tsd);
	}
	check_entry_exit_locking(tsd_tsdn(tsd));
	if (tsd_reentrancy_level_get(tsd) != 0) {
		assert(slow_path);
	}

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	alloc_ctx_t alloc_ctx, *ctx;
	if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
		/*
		 * When cache_oblivious is disabled and ptr is not page aligned,
		 * the allocation was not sampled -- usize can be used to
		 * determine szind directly.
		 */
		alloc_ctx.szind = sz_size2index(usize);
		alloc_ctx.slab = true;
		ctx = &alloc_ctx;
		if (config_debug) {
			alloc_ctx_t dbg_ctx;
			rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
			rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
			    rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
			    &dbg_ctx.slab);
			assert(dbg_ctx.szind == alloc_ctx.szind);
			assert(dbg_ctx.slab == alloc_ctx.slab);
		}
	} else if (config_prof && opt_prof) {
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
		assert(alloc_ctx.szind == sz_size2index(usize));
		ctx = &alloc_ctx;
	} else {
		ctx = NULL;
	}

	if (config_prof && opt_prof) {
		prof_free(tsd, ptr, usize, ctx);
	}
	if (config_stats) {
		*tsd_thread_deallocatedp_get(tsd) += usize;
	}

	if (likely(!slow_path)) {
		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
	} else {
		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
	}
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size) {
	void *ret;
	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_usize = 0;

	LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);

	if (unlikely(size == 0)) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			UTRACE(ptr, 0, 0);
			tcache_t *tcache;
			tsd_t *tsd = tsd_fetch();
			if (tsd_reentrancy_level_get(tsd) == 0) {
				tcache = tcache_get(tsd);
			} else {
				tcache = NULL;
			}
			ifree(tsd, ptr, tcache, true);

			LOG("core.realloc.exit", "result: %p", NULL);
			return NULL;
		}
		size = 1;
	}

	if (likely(ptr != NULL)) {
		assert(malloc_initialized() || IS_INITIALIZER);
		tsd_t *tsd = tsd_fetch();

		check_entry_exit_locking(tsd_tsdn(tsd));

		alloc_ctx_t alloc_ctx;
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
		assert(alloc_ctx.szind != NSIZES);
		old_usize = sz_index2size(alloc_ctx.szind);
		assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
		if (config_prof && opt_prof) {
			usize = sz_s2u(size);
			ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
			    NULL : irealloc_prof(tsd, ptr, old_usize, usize,
			    &alloc_ctx);
		} else {
			if (config_stats) {
				usize = sz_s2u(size);
			}
			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
		}
		tsdn = tsd_tsdn(tsd);
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		void *ret = je_malloc(size);

		LOG("core.realloc.exit", "result: %p", ret);
		return ret;
	}

	if (unlikely(ret == NULL)) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error in realloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		tsd_t *tsd;

		assert(usize == isalloc(tsdn, ret));
		tsd = tsdn_tsd(tsdn);
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, ret);
	check_entry_exit_locking(tsdn);

	LOG("core.realloc.exit", "result: %p", ret);
	return ret;
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr) {
	LOG("core.free.entry", "ptr: %p", ptr);

	UTRACE(ptr, 0, 0);
	if (likely(ptr != NULL)) {
		/*
		 * We avoid setting up tsd fully (e.g. tcache, arena binding)
		 * based on only free() calls -- other activities trigger the
		 * minimal to full transition.  This is because free() may
		 * happen during thread shutdown after tls deallocation: if a
		 * thread never had any malloc activities until then, a
		 * fully-setup tsd won't be destructed properly.
		 */
		tsd_t *tsd = tsd_fetch_min();
		check_entry_exit_locking(tsd_tsdn(tsd));

		tcache_t *tcache;
		if (likely(tsd_fast(tsd))) {
			tsd_assert_fast(tsd);
			/* Unconditionally get tcache ptr on fast path. */
			tcache = tsd_tcachep_get(tsd);
			ifree(tsd, ptr, tcache, false);
		} else {
			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
				tcache = tcache_get(tsd);
			} else {
				tcache = NULL;
			}
			ifree(tsd, ptr, tcache, true);
		}
		check_entry_exit_locking(tsd_tsdn(tsd));
	}
	LOG("core.free.exit", "");
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_memalign(size_t alignment, size_t size) {
	void *ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
	    size);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.bump_empty_alloc = true;
	sopts.min_alignment = 1;
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
	sopts.null_out_result_on_error = true;

	dopts.result = &ret;
	dopts.num_items = 1;
	dopts.item_size = size;
	dopts.alignment = alignment;

	imalloc(&sopts, &dopts);

	LOG("core.memalign.exit", "result: %p", ret);
	return ret;
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_valloc(size_t size) {
	void *ret;

	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.valloc.entry", "size: %zu\n", size);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.bump_empty_alloc = true;
	sopts.null_out_result_on_error = true;
	sopts.min_alignment = PAGE;
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";

	dopts.result = &ret;
	dopts.num_items = 1;
	dopts.item_size = size;
	dopts.alignment = PAGE;

	imalloc(&sopts, &dopts);

	LOG("core.valloc.exit", "result: %p\n", ret);
	return ret;
}
#endif
#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
# endif

# ifdef CPU_COUNT
/*
 * To enable static linking with glibc, the libc specific malloc interface must
 * be implemented also, so none of glibc's malloc.o functions are added to the
 * link.
 */
# define ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
/* To force macro expansion of je_ prefix before stringification. */
# define PREALIAS(je_fn)	ALIAS(je_fn)
# ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
# endif
# ifdef JEMALLOC_OVERRIDE___LIBC_FREE
void __libc_free(void *ptr) PREALIAS(je_free);
# endif
# ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
void *__libc_malloc(size_t size) PREALIAS(je_malloc);
# endif
# ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
# endif
# ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
void *__libc_realloc(void *ptr, size_t size) PREALIAS(je_realloc);
# endif
# ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
void *__libc_valloc(size_t size) PREALIAS(je_valloc);
# endif
# ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
int __posix_memalign(void **r, size_t a, size_t s) PREALIAS(je_posix_memalign);
# endif
# undef PREALIAS
# undef ALIAS
# endif
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags) {
	void *ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.assert_nonempty_alloc = true;
	sopts.null_out_result_on_error = true;
	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";

	dopts.result = &ret;
	dopts.num_items = 1;
	dopts.item_size = size;
	if (unlikely(flags != 0)) {
		if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
			dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
		}

		dopts.zero = MALLOCX_ZERO_GET(flags);

		if ((flags & MALLOCX_TCACHE_MASK) != 0) {
			if ((flags & MALLOCX_TCACHE_MASK)
			    == MALLOCX_TCACHE_NONE) {
				dopts.tcache_ind = TCACHE_IND_NONE;
			} else {
				dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
			}
		} else {
			dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
		}

		if ((flags & MALLOCX_ARENA_MASK) != 0)
			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
	}

	imalloc(&sopts, &dopts);

	LOG("core.mallocx.exit", "result: %p", ret);
	return ret;
}
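
The flag handling above mirrors the public MALLOCX_* macros from jemalloc_macros.h. A hedged usage sketch (assuming <jemalloc/jemalloc.h>; depending on the configured prefix the public names are either mallocx()/dallocx() or je_mallocx()/je_dallocx()):

#include <jemalloc/jemalloc.h>

void mallocx_example(void) {
	/* 64-byte-aligned, zero-filled allocation that bypasses the thread cache. */
	void *p = mallocx(1024, MALLOCX_ALIGN(64) | MALLOCX_ZERO |
	    MALLOCX_TCACHE_NONE);
	if (p != NULL) {
		dallocx(p, MALLOCX_TCACHE_NONE);
	}
}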
static void *
irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
    prof_tctx_t *tctx) {
	void *p;

	if (tctx == NULL) {
		return NULL;
	}
	if (usize <= SMALL_MAXCLASS) {
		p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
		    alignment, zero, tcache, arena);
		if (p == NULL) {
			return NULL;
		}
		arena_prof_promote(tsdn, p, usize);
	} else {
		p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
		    tcache, arena);
	}

	return p;
}

JEMALLOC_ALWAYS_INLINE void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
    arena_t *arena, alloc_ctx_t *alloc_ctx) {
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
		    *usize, alignment, zero, tcache, arena, tctx);
	} else {
		p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
		    zero, tcache, arena);
	}
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, false);
		return NULL;
	}

	if (p == old_ptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied.  Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation.  Therefore, query the actual value of usize.
		 */
		*usize = isalloc(tsd_tsdn(tsd), p);
	}
	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
	    old_usize, old_tctx);

	return p;
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags) {
	void *p;
	tsd_t *tsd;
	size_t usize;
	size_t old_usize;
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;
	arena_t *arena;
	tcache_t *tcache;

	LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
	    size, flags);

	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized() || IS_INITIALIZER);
	tsd = tsd_fetch();
	check_entry_exit_locking(tsd_tsdn(tsd));

	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(arena == NULL)) {
			goto label_oom;
		}
	} else {
		arena = NULL;
	}

	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
			tcache = NULL;
		} else {
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
		}
	} else {
		tcache = tcache_get(tsd);
	}

	alloc_ctx_t alloc_ctx;
	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
	assert(alloc_ctx.szind != NSIZES);
	old_usize = sz_index2size(alloc_ctx.szind);
	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
	if (config_prof && opt_prof) {
		usize = (alignment == 0) ? sz_s2u(size) : sz_sa2u(size,
		    alignment);
		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
			goto label_oom;
		}
		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
		    zero, tcache, arena, &alloc_ctx);
		if (unlikely(p == NULL)) {
			goto label_oom;
		}
	} else {
		p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
		    zero, tcache, arena);
		if (unlikely(p == NULL)) {
			goto label_oom;
		}
		if (config_stats) {
			usize = isalloc(tsd_tsdn(tsd), p);
		}
	}
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, p);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.rallocx.exit", "result: %p", p);
	return p;
label_oom:
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.rallocx.exit", "result: %p", NULL);
	return NULL;
}
JEMALLOC_ALWAYS_INLINE size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero) {
	size_t usize;

	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
		return old_usize;
	}
	usize = isalloc(tsdn, ptr);

	return usize;
}

static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
	size_t usize;

	if (tctx == NULL) {
		return old_usize;
	}
	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
	    zero);

	return usize;
}

JEMALLOC_ALWAYS_INLINE size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
	size_t usize_max, usize;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
	/*
	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
	 * Therefore, compute its maximum possible value and use that in
	 * prof_alloc_prep() to decide whether to capture a backtrace.
	 * prof_realloc() will use the actual usize to decide whether to sample.
	 */
	if (alignment == 0) {
		usize_max = sz_s2u(size + extra);
		assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
	} else {
		usize_max = sz_sa2u(size + extra, alignment);
		if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
			/*
			 * usize_max is out of range, and chances are that
			 * allocation will fail, but use the maximum possible
			 * value and carry on with prof_alloc_prep(), just in
			 * case allocation succeeds.
			 */
			usize_max = LARGE_MAXCLASS;
		}
	}
	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);

	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
		    size, extra, alignment, zero, tctx);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (usize == old_usize) {
		prof_alloc_rollback(tsd, tctx, false);
		return usize;
	}
	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
	    old_tctx);

	return usize;
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
	tsd_t *tsd;
	size_t usize, old_usize;
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;

	LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
	    "flags: %d", ptr, size, extra, flags);

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized() || IS_INITIALIZER);
	tsd = tsd_fetch();
	check_entry_exit_locking(tsd_tsdn(tsd));

	alloc_ctx_t alloc_ctx;
	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
	assert(alloc_ctx.szind != NSIZES);
	old_usize = sz_index2size(alloc_ctx.szind);
	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
	/*
	 * The API explicitly absolves itself of protecting against (size +
	 * extra) numerical overflow, but we may need to clamp extra to avoid
	 * exceeding LARGE_MAXCLASS.
	 *
	 * Ordinarily, size limit checking is handled deeper down, but here we
	 * have to check as part of (size + extra) clamping, since we need the
	 * clamped value in the above helper functions.
	 */
	if (unlikely(size > LARGE_MAXCLASS)) {
		usize = old_usize;
		goto label_not_resized;
	}
	if (unlikely(LARGE_MAXCLASS - size < extra)) {
		extra = LARGE_MAXCLASS - size;
	}

	if (config_prof && opt_prof) {
		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
		    alignment, zero, &alloc_ctx);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (unlikely(usize == old_usize)) {
		goto label_not_resized;
	}

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
label_not_resized:
	UTRACE(ptr, size, ptr);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.xallocx.exit", "result: %zu", usize);
	return usize;
}
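
Because xallocx() never moves the allocation, callers usually treat it as a best-effort in-place resize and fall back to rallocx() when the returned usable size is still too small. A sketch under the same header/prefix assumption as above (the helper name is illustrative):

#include <jemalloc/jemalloc.h>

/* Grow buf to at least want bytes, moving it only if in-place resize fails. */
static void *
grow_buffer(void *buf, size_t want) {
	if (xallocx(buf, want, 0, 0) >= want) {
		return buf;	/* Resized in place. */
	}
	return rallocx(buf, want, 0);	/* May move; returns NULL on OOM. */
}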
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, UNUSED int flags) {
	size_t usize;
	tsdn_t *tsdn;

	LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);

	assert(malloc_initialized() || IS_INITIALIZER);
	assert(ptr != NULL);

	tsdn = tsdn_fetch();
	check_entry_exit_locking(tsdn);

	if (config_debug || force_ivsalloc) {
		usize = ivsalloc(tsdn, ptr);
		assert(force_ivsalloc || usize != 0);
	} else {
		usize = isalloc(tsdn, ptr);
	}

	check_entry_exit_locking(tsdn);

	LOG("core.sallocx.exit", "result: %zu", usize);
	return usize;
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags) {
	LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	tsd_t *tsd = tsd_fetch();
	bool fast = tsd_fast(tsd);
	check_entry_exit_locking(tsd_tsdn(tsd));

	tcache_t *tcache;
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		/* Not allowed to be reentrant and specify a custom tcache. */
		assert(tsd_reentrancy_level_get(tsd) == 0);
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
			tcache = NULL;
		} else {
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
		}
	} else {
		if (likely(fast)) {
			tcache = tsd_tcachep_get(tsd);
			assert(tcache == tcache_get(tsd));
		} else {
			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
				tcache = tcache_get(tsd);
			} else {
				tcache = NULL;
			}
		}
	}

	UTRACE(ptr, 0, 0);
	if (likely(fast)) {
		tsd_assert_fast(tsd);
		ifree(tsd, ptr, tcache, false);
	} else {
		ifree(tsd, ptr, tcache, true);
	}
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.dallocx.exit", "");
}
JEMALLOC_ALWAYS_INLINE size_t
inallocx(tsdn_t *tsdn, size_t size, int flags) {
	check_entry_exit_locking(tsdn);

	size_t usize;
	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
		usize = sz_s2u(size);
	} else {
		usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
	}
	check_entry_exit_locking(tsdn);
	return usize;
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_sdallocx(void *ptr, size_t size, int flags) {
	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
	    size, flags);

	tsd_t *tsd = tsd_fetch();
	bool fast = tsd_fast(tsd);
	size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
	check_entry_exit_locking(tsd_tsdn(tsd));

	tcache_t *tcache;
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		/* Not allowed to be reentrant and specify a custom tcache. */
		assert(tsd_reentrancy_level_get(tsd) == 0);
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
			tcache = NULL;
		} else {
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
		}
	} else {
		if (likely(fast)) {
			tcache = tsd_tcachep_get(tsd);
			assert(tcache == tcache_get(tsd));
		} else {
			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
				tcache = tcache_get(tsd);
			} else {
				tcache = NULL;
			}
		}
	}

	UTRACE(ptr, 0, 0);
	if (likely(fast)) {
		tsd_assert_fast(tsd);
		isfree(tsd, ptr, usize, tcache, false);
	} else {
		isfree(tsd, ptr, usize, tcache, true);
	}
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.sdallocx.exit", "");
}
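
Note that je_sdallocx() trusts the caller-supplied size: inallocx() recomputes the usable size from it, and the assert above requires that it match the actual allocation, so the size/flags pair must be the one used at allocation time. An illustrative sketch, again assuming the public header and default names:

#include <jemalloc/jemalloc.h>

void sized_free_example(void) {
	size_t sz = 1000;
	void *p = mallocx(sz, 0);
	if (p != NULL) {
		/* Pass the original request size and flags back to the sized free. */
		sdallocx(p, sz, 0);
	}
}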
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags) {
	size_t usize;
	tsdn_t *tsdn;

	assert(size != 0);

	if (unlikely(malloc_init())) {
		LOG("core.nallocx.exit", "result: %zu", ZU(0));
		return 0;
	}

	tsdn = tsdn_fetch();
	check_entry_exit_locking(tsdn);

	usize = inallocx(tsdn, size, flags);
	if (unlikely(usize > LARGE_MAXCLASS)) {
		LOG("core.nallocx.exit", "result: %zu", ZU(0));
		return 0;
	}

	check_entry_exit_locking(tsdn);
	LOG("core.nallocx.exit", "result: %zu", usize);
	return usize;
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;
	tsd_t *tsd;

	LOG("core.mallctl.entry", "name: %s", name);

	if (unlikely(malloc_init())) {
		LOG("core.mallctl.exit", "result: %d", EAGAIN);
		return EAGAIN;
	}

	tsd = tsd_fetch();
	check_entry_exit_locking(tsd_tsdn(tsd));
	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.mallctl.exit", "result: %d", ret);
	return ret;
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
	int ret;

	LOG("core.mallctlnametomib.entry", "name: %s", name);

	if (unlikely(malloc_init())) {
		LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
		return EAGAIN;
	}

	tsd_t *tsd = tsd_fetch();
	check_entry_exit_locking(tsd_tsdn(tsd));
	ret = ctl_nametomib(tsd, name, mibp, miblenp);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.mallctlnametomib.exit", "result: %d", ret);
	return ret;
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
	int ret;
	tsd_t *tsd;

	LOG("core.mallctlbymib.entry", "");

	if (unlikely(malloc_init())) {
		LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
		return EAGAIN;
	}

	tsd = tsd_fetch();
	check_entry_exit_locking(tsd_tsdn(tsd));
	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.mallctlbymib.exit", "result: %d", ret);
	return ret;
}
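
All three entry points funnel into the ctl module; a control can be read or written once by name, or repeatedly through a pre-translated MIB that skips the name lookup. A hedged usage sketch of both styles (error handling trimmed; the control names are standard mallctl controls):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

void mallctl_example(void) {
	/* One-shot read by name. */
	size_t allocated, sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
		printf("stats.allocated: %zu\n", allocated);
	}

	/* Resolve the name once, then read through the MIB on later calls. */
	size_t mib[2], miblen = 2;
	if (mallctlnametomib("stats.active", mib, &miblen) == 0) {
		size_t active;
		sz = sizeof(active);
		mallctlbymib(mib, miblen, &active, &sz, NULL, 0);
	}
}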
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts) {
	tsdn_t *tsdn;

	LOG("core.malloc_stats_print.entry", "");

	tsdn = tsdn_fetch();
	check_entry_exit_locking(tsdn);
	stats_print(write_cb, cbopaque, opts);
	check_entry_exit_locking(tsdn);
	LOG("core.malloc_stats_print.exit", "");
}

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
	size_t ret;
	tsdn_t *tsdn;

	LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);

	assert(malloc_initialized() || IS_INITIALIZER);

	tsdn = tsdn_fetch();
	check_entry_exit_locking(tsdn);

	if (unlikely(ptr == NULL)) {
		ret = 0;
	} else {
		if (config_debug || force_ivsalloc) {
			ret = ivsalloc(tsdn, ptr);
			assert(force_ivsalloc || ret != 0);
		} else {
			ret = isalloc(tsdn, ptr);
		}
	}

	check_entry_exit_locking(tsdn);
	LOG("core.malloc_usable_size.exit", "result: %zu", ret);
	return ret;
}
/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be possible
 * to trigger the deadlock described above, but doing so would involve forking
 * via a library constructor that runs before jemalloc's runs.
 */
#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void) {
	malloc_init();
}
#endif

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, j, narenas;
	arena_t *arena;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized()) {
		return;
	}
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	narenas = narenas_total_get();

	witness_prefork(tsd_witness_tsdp_get(tsd));
	/* Acquire all mutexes in a safe order. */
	ctl_prefork(tsd_tsdn(tsd));
	tcache_prefork(tsd_tsdn(tsd));
	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
	if (have_background_thread) {
		background_thread_prefork0(tsd_tsdn(tsd));
	}
	prof_prefork0(tsd_tsdn(tsd));
	if (have_background_thread) {
		background_thread_prefork1(tsd_tsdn(tsd));
	}
	/* Break arena prefork into stages to preserve lock order. */
	for (i = 0; i < 8; i++) {
		for (j = 0; j < narenas; j++) {
			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
			    NULL) {
				switch (i) {
				case 0:
					arena_prefork0(tsd_tsdn(tsd), arena);
					break;
				case 1:
					arena_prefork1(tsd_tsdn(tsd), arena);
					break;
				case 2:
					arena_prefork2(tsd_tsdn(tsd), arena);
					break;
				case 3:
					arena_prefork3(tsd_tsdn(tsd), arena);
					break;
				case 4:
					arena_prefork4(tsd_tsdn(tsd), arena);
					break;
				case 5:
					arena_prefork5(tsd_tsdn(tsd), arena);
					break;
				case 6:
					arena_prefork6(tsd_tsdn(tsd), arena);
					break;
				case 7:
					arena_prefork7(tsd_tsdn(tsd), arena);
					break;
				default: not_reached();
				}
			}
		}
	}
	prof_prefork1(tsd_tsdn(tsd));
}
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, narenas;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized()) {
		return;
	}
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_parent(tsd_witness_tsdp_get(tsd));
	/* Release all mutexes, now that fork() has completed. */
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
			arena_postfork_parent(tsd_tsdn(tsd), arena);
		}
	}
	prof_postfork_parent(tsd_tsdn(tsd));
	if (have_background_thread) {
		background_thread_postfork_parent(tsd_tsdn(tsd));
	}
	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
	tcache_postfork_parent(tsd_tsdn(tsd));
	ctl_postfork_parent(tsd_tsdn(tsd));
}

void
jemalloc_postfork_child(void) {
	tsd_t *tsd;
	unsigned i, narenas;

	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_child(tsd_witness_tsdp_get(tsd));
	/* Release all mutexes, now that fork() has completed. */
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
			arena_postfork_child(tsd_tsdn(tsd), arena);
		}
	}
	prof_postfork_child(tsd_tsdn(tsd));
	if (have_background_thread) {
		background_thread_postfork_child(tsd_tsdn(tsd));
	}
	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
	tcache_postfork_child(tsd_tsdn(tsd));
	ctl_postfork_child(tsd_tsdn(tsd));
}
/******************************************************************************/
/*
 * Helps the application decide if a pointer is worth re-allocating in order to
 * reduce fragmentation.  Returns 1 if the allocation should be moved, and 0 if
 * the allocation should be kept.  If the application decides to re-allocate it
 * should use MALLOCX_TCACHE_NONE when doing so.
 */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
get_defrag_hint(void *ptr) {
	assert(ptr != NULL);
	return iget_defrag_hint(TSDN_NULL, ptr);
}