Unverified commit c4b4b6c0 authored by Oran Agra, committed by GitHub

Merge pull request #9623 from yoav-steinberg/upgrade_jemalloc_5.2.1

Upgraded to jemalloc 5.2.1 from 5.1.0.
Cherry-picked all relevant fixes (found by diffing our 5.1.0 against upstream 5.1.0 and identifying the relevant commits).
Details of what was done:

[cherry-picked] fd7d51c3 2021-05-03 Resolve nonsense static analysis warnings (Oran Agra)
[cherry-picked] 448c435b 2020-09-29 Fix compilation warnings in Lua and jemalloc dependencies (#7785) (YoongHM)
[skipped - already in upstream] 9216b96b 2020-09-21 Fix compilation warning in jemalloc's malloc_vsnprintf (#7789) (YoongHM)
[cherry-picked] 88d71f47 2020-05-20 fix a rare active defrag edge case bug leading to stagnation (Oran Agra)
[skipped - already in upstream] 2fec7d9c 2019-05-30 Jemalloc: Avoid blocking on background thread lock for stats.
[cherry-picked] 920158ec 2018-07-11 Active defrag fixes for 32bit builds (again) (Oran Agra)
[cherry-picked] e8099cab 2018-06-26 add defrag hint support into jemalloc 5 (Oran Agra)
[re-done] 4e729fcd 2018-05-24 Generate configure for Jemalloc. (antirez)

Additionally, the following new commit was needed:
7727cc2 2021-10-10 Fix defrag to support sharded bins in arena (added in v5.2.1) (Yoav Steinberg)

When reviewing, please look at all commits except the first one, which just replaces the 5.1.0 sources with the 5.2.1 sources.
Also, I think we should merge this without squashing, to preserve the changes we made to jemalloc.
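For context on the sharded-bins fix listed above: in jemalloc 5.2.1 each small size class can have several bin shards, so code that used to address a bin directly by size-class index now also needs a shard index (the per-class shard count is `bin_infos[i].n_shards`). A minimal sketch of the lookup pattern, based only on the field names that appear in the diff below; `example_bin_lookup()` is a hypothetical helper, not a jemalloc function:

```c
/*
 * Illustrative sketch only. Assumes the arena_t/bin_t types and the
 * bins/bin_shards/n_shards fields exactly as used in the diff below.
 */
static bin_t *
example_bin_lookup(arena_t *arena, szind_t binind, unsigned binshard) {
	assert(binshard < bin_infos[binind].n_shards);
	/* 5.1.0: one bin per size class -> &arena->bins[binind]        */
	/* 5.2.1: each size class owns an array of bin shards instead   */
	return &arena->bins[binind].bin_shards[binshard];
}
```

This is why the defrag hint, stats, and reset paths in the diff now loop over `bin_infos[i].n_shards` in addition to `SC_NBINS`.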
parents 276b460e 85737e67
@@ -10,6 +10,7 @@
 #define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
 #define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
 #define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
+#define JEMALLOC_VERSION_GID_IDENT @jemalloc_version_gid@
 #define MALLOCX_LG_ALIGN(la) ((int)(la))
 #if LG_SIZEOF_PTR == 2
@@ -68,6 +69,7 @@
 # define JEMALLOC_EXPORT __declspec(dllimport)
 # endif
 # endif
+# define JEMALLOC_FORMAT_ARG(i)
 # define JEMALLOC_FORMAT_PRINTF(s, i)
 # define JEMALLOC_NOINLINE __declspec(noinline)
 # ifdef __cplusplus
@@ -95,6 +97,11 @@
 # ifndef JEMALLOC_EXPORT
 # define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
 # endif
+# ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG
+# define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3))
+# else
+# define JEMALLOC_FORMAT_ARG(i)
+# endif
 # ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
 # define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
 # elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
@@ -47,7 +47,7 @@
 <ClCompile Include="..\..\..\..\src\extent_dss.c" />
 <ClCompile Include="..\..\..\..\src\extent_mmap.c" />
 <ClCompile Include="..\..\..\..\src\hash.c" />
-<ClCompile Include="..\..\..\..\src\hooks.c" />
+<ClCompile Include="..\..\..\..\src\hook.c" />
 <ClCompile Include="..\..\..\..\src\jemalloc.c" />
 <ClCompile Include="..\..\..\..\src\large.c" />
 <ClCompile Include="..\..\..\..\src\log.c" />
@@ -59,12 +59,14 @@
 <ClCompile Include="..\..\..\..\src\prng.c" />
 <ClCompile Include="..\..\..\..\src\prof.c" />
 <ClCompile Include="..\..\..\..\src\rtree.c" />
+<ClCompile Include="..\..\..\..\src\sc.c" />
 <ClCompile Include="..\..\..\..\src\stats.c" />
 <ClCompile Include="..\..\..\..\src\sz.c" />
 <ClCompile Include="..\..\..\..\src\tcache.c" />
 <ClCompile Include="..\..\..\..\src\ticker.c" />
 <ClCompile Include="..\..\..\..\src\tsd.c" />
 <ClCompile Include="..\..\..\..\src\witness.c" />
+<ClCompile Include="..\..\..\..\src\safety_check.c" />
 </ItemGroup>
 <PropertyGroup Label="Globals">
 <ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
@@ -345,4 +347,4 @@
 <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
 <ImportGroup Label="ExtensionTargets">
 </ImportGroup>
 </Project>
\ No newline at end of file
@@ -37,7 +37,7 @@
 <ClCompile Include="..\..\..\..\src\hash.c">
 <Filter>Source Files</Filter>
 </ClCompile>
-<ClCompile Include="..\..\..\..\src\hooks.c">
+<ClCompile Include="..\..\..\..\src\hook.c">
 <Filter>Source Files</Filter>
 </ClCompile>
 <ClCompile Include="..\..\..\..\src\jemalloc.c">
@@ -70,6 +70,9 @@
 <ClCompile Include="..\..\..\..\src\rtree.c">
 <Filter>Source Files</Filter>
 </ClCompile>
+<ClCompile Include="..\..\..\..\src\sc.c">
+<Filter>Source Files</Filter>
+</ClCompile>
 <ClCompile Include="..\..\..\..\src\stats.c">
 <Filter>Source Files</Filter>
 </ClCompile>
@@ -97,5 +100,8 @@
 <ClCompile Include="..\..\..\..\src\div.c">
 <Filter>Source Files</Filter>
 </ClCompile>
+<ClCompile Include="..\..\..\..\src\safety_check.c">
+<Filter>Source Files</Filter>
+</ClCompile>
 </ItemGroup>
 </Project>
\ No newline at end of file
@@ -47,7 +47,7 @@
 <ClCompile Include="..\..\..\..\src\extent_dss.c" />
 <ClCompile Include="..\..\..\..\src\extent_mmap.c" />
 <ClCompile Include="..\..\..\..\src\hash.c" />
-<ClCompile Include="..\..\..\..\src\hooks.c" />
+<ClCompile Include="..\..\..\..\src\hook.c" />
 <ClCompile Include="..\..\..\..\src\jemalloc.c" />
 <ClCompile Include="..\..\..\..\src\large.c" />
 <ClCompile Include="..\..\..\..\src\log.c" />
@@ -59,12 +59,15 @@
 <ClCompile Include="..\..\..\..\src\prng.c" />
 <ClCompile Include="..\..\..\..\src\prof.c" />
 <ClCompile Include="..\..\..\..\src\rtree.c" />
+<ClCompile Include="..\..\..\..\src\sc.c" />
 <ClCompile Include="..\..\..\..\src\stats.c" />
 <ClCompile Include="..\..\..\..\src\sz.c" />
 <ClCompile Include="..\..\..\..\src\tcache.c" />
+<ClCompile Include="..\..\..\..\src\test_hooks.c" />
 <ClCompile Include="..\..\..\..\src\ticker.c" />
 <ClCompile Include="..\..\..\..\src\tsd.c" />
 <ClCompile Include="..\..\..\..\src\witness.c" />
+<ClCompile Include="..\..\..\..\src\safety_check.c" />
 </ItemGroup>
 <PropertyGroup Label="Globals">
 <ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
@@ -344,4 +347,4 @@
 <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
 <ImportGroup Label="ExtensionTargets">
 </ImportGroup>
 </Project>
\ No newline at end of file
@@ -37,7 +37,7 @@
 <ClCompile Include="..\..\..\..\src\hash.c">
 <Filter>Source Files</Filter>
 </ClCompile>
-<ClCompile Include="..\..\..\..\src\hooks.c">
+<ClCompile Include="..\..\..\..\src\hook.c">
 <Filter>Source Files</Filter>
 </ClCompile>
 <ClCompile Include="..\..\..\..\src\jemalloc.c">
@@ -70,6 +70,9 @@
 <ClCompile Include="..\..\..\..\src\rtree.c">
 <Filter>Source Files</Filter>
 </ClCompile>
+<ClCompile Include="..\..\..\..\src\sc.c">
+<Filter>Source Files</Filter>
+</ClCompile>
 <ClCompile Include="..\..\..\..\src\stats.c">
 <Filter>Source Files</Filter>
 </ClCompile>
@@ -97,5 +100,11 @@
 <ClCompile Include="..\..\..\..\src\div.c">
 <Filter>Source Files</Filter>
 </ClCompile>
+<ClCompile Include="..\..\..\..\src\test_hooks.c">
+<Filter>Source Files</Filter>
+</ClCompile>
+<ClCompile Include="..\..\..\..\src\safety_check.c">
+<Filter>Source Files</Filter>
+</ClCompile>
 </ItemGroup>
 </Project>
\ No newline at end of file
@@ -4,6 +4,7 @@ import sys
 from itertools import combinations
 from os import uname
 from multiprocessing import cpu_count
+from subprocess import call
 # Later, we want to test extended vaddr support. Apparently, the "real" way of
 # checking this is flaky on OS X.
@@ -13,13 +14,25 @@ nparallel = cpu_count() * 2
 uname = uname()[0]
+if "BSD" in uname:
+make_cmd = 'gmake'
+else:
+make_cmd = 'make'
 def powerset(items):
 result = []
 for i in xrange(len(items) + 1):
 result += combinations(items, i)
 return result
-possible_compilers = [('gcc', 'g++'), ('clang', 'clang++')]
+possible_compilers = []
+for cc, cxx in (['gcc', 'g++'], ['clang', 'clang++']):
+try:
+cmd_ret = call([cc, "-v"])
+if cmd_ret == 0:
+possible_compilers.append((cc, cxx))
+except:
+pass
 possible_compiler_opts = [
 '-m32',
 ]
@@ -27,6 +40,7 @@ possible_config_opts = [
 '--enable-debug',
 '--enable-prof',
 '--disable-stats',
+'--enable-opt-safety-checks',
 ]
 if bits_64:
 possible_config_opts.append('--with-lg-vaddr=56')
@@ -39,7 +53,7 @@ possible_malloc_conf_opts = [
 ]
 print 'set -e'
-print 'if [ -f Makefile ] ; then make relclean ; fi'
+print 'if [ -f Makefile ] ; then %(make_cmd)s relclean ; fi' % {'make_cmd': make_cmd}
 print 'autoconf'
 print 'rm -rf run_tests.out'
 print 'mkdir run_tests.out'
@@ -102,11 +116,11 @@ cd run_test_%(ind)d.out
 echo "==> %(config_line)s" >> run_test.log
 %(config_line)s >> run_test.log 2>&1 || abort
-run_cmd make all tests
-run_cmd make check
-run_cmd make distclean
+run_cmd %(make_cmd)s all tests
+run_cmd %(make_cmd)s check
+run_cmd %(make_cmd)s distclean
 EOF
-chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line}
+chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line, 'make_cmd': make_cmd}
 ind += 1
 print 'for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs -P %(nparallel)d -n 1 sh' % {'last_ind': ind-1, 'nparallel': nparallel}
@@ -4,6 +4,7 @@ from itertools import combinations
 travis_template = """\
 language: generic
+dist: precise
 matrix:
 include:
@@ -11,6 +12,7 @@ matrix:
 before_script:
 - autoconf
+- scripts/gen_travis.py > travis_script && diff .travis.yml travis_script
 - ./configure ${COMPILER_FLAGS:+ \
 CC="$CC $COMPILER_FLAGS" \
 CXX="$CXX $COMPILER_FLAGS" } \
@@ -43,6 +45,8 @@ configure_flag_unusuals = [
 '--enable-debug',
 '--enable-prof',
 '--disable-stats',
+'--disable-libdl',
+'--enable-opt-safety-checks',
 ]
 malloc_conf_unusuals = [
@@ -61,47 +65,85 @@ unusual_combinations_to_test = []
 for i in xrange(MAX_UNUSUAL_OPTIONS + 1):
 unusual_combinations_to_test += combinations(all_unusuals, i)
-include_rows = ""
-for unusual_combination in unusual_combinations_to_test:
-os = os_default
-if os_unusual in unusual_combination:
-os = os_unusual
-compilers = compilers_default
-if compilers_unusual in unusual_combination:
-compilers = compilers_unusual
-compiler_flags = [
-x for x in unusual_combination if x in compiler_flag_unusuals]
-configure_flags = [
-x for x in unusual_combination if x in configure_flag_unusuals]
-malloc_conf = [
-x for x in unusual_combination if x in malloc_conf_unusuals]
+gcc_multilib_set = False
+# Formats a job from a combination of flags
+def format_job(combination):
+global gcc_multilib_set
+os = os_unusual if os_unusual in combination else os_default
+compilers = compilers_unusual if compilers_unusual in combination else compilers_default
+compiler_flags = [x for x in combination if x in compiler_flag_unusuals]
+configure_flags = [x for x in combination if x in configure_flag_unusuals]
+malloc_conf = [x for x in combination if x in malloc_conf_unusuals]
 # Filter out unsupported configurations on OS X.
 if os == 'osx' and ('dss:primary' in malloc_conf or \
 'percpu_arena:percpu' in malloc_conf or 'background_thread:true' \
 in malloc_conf):
-continue
+return ""
 if len(malloc_conf) > 0:
 configure_flags.append('--with-malloc-conf=' + ",".join(malloc_conf))
 # Filter out an unsupported configuration - heap profiling on OS X.
 if os == 'osx' and '--enable-prof' in configure_flags:
-continue
+return ""
 # We get some spurious errors when -Warray-bounds is enabled.
 env_string = ('{} COMPILER_FLAGS="{}" CONFIGURE_FLAGS="{}" '
 'EXTRA_CFLAGS="-Werror -Wno-array-bounds"').format(
 compilers, " ".join(compiler_flags), " ".join(configure_flags))
-include_rows += ' - os: %s\n' % os
-include_rows += ' env: %s\n' % env_string
-if '-m32' in unusual_combination and os == 'linux':
-include_rows += ' addons:\n'
-include_rows += ' apt:\n'
-include_rows += ' packages:\n'
-include_rows += ' - gcc-multilib\n'
+job = ""
+job += ' - os: %s\n' % os
+job += ' env: %s\n' % env_string
+if '-m32' in combination and os == 'linux':
+job += ' addons:'
+if gcc_multilib_set:
+job += ' *gcc_multilib\n'
+else:
+job += ' &gcc_multilib\n'
+job += ' apt:\n'
+job += ' packages:\n'
+job += ' - gcc-multilib\n'
+gcc_multilib_set = True
+return job
+include_rows = ""
+for combination in unusual_combinations_to_test:
+include_rows += format_job(combination)
+# Development build
+include_rows += '''\
+# Development build
+- os: linux
+env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-cache-oblivious --enable-stats --enable-log --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+'''
+# Enable-expermental-smallocx
+include_rows += '''\
+# --enable-expermental-smallocx:
+- os: linux
+env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-experimental-smallocx --enable-stats --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+'''
+# Valgrind build bots
+include_rows += '''
+# Valgrind
+- os: linux
+env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind"
+addons:
+apt:
+packages:
+- valgrind
+'''
+# To enable valgrind on macosx add:
+#
+# - os: osx
+# env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind"
+# install: brew install valgrind
+#
+# It currently fails due to: https://github.com/jemalloc/jemalloc/issues/1274
 print travis_template % include_rows
@@ -8,9 +8,11 @@
 #include "jemalloc/internal/extent_mmap.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/rtree.h"
-#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/safety_check.h"
 #include "jemalloc/internal/util.h"
+JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
 /******************************************************************************/
 /* Data. */
@@ -40,7 +42,11 @@ const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
 #undef STEP
 };
-static div_info_t arena_binind_div_info[NBINS];
+static div_info_t arena_binind_div_info[SC_NBINS];
+size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
+size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
+static unsigned huge_arena_ind;
 /******************************************************************************/
 /*
@@ -61,7 +67,7 @@ static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
 /******************************************************************************/
 void
-arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
 size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
 *nthreads += arena_nthreads_get(arena, false);
@@ -77,7 +83,8 @@ void
 arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
 size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
-bin_stats_t *bstats, arena_stats_large_t *lstats) {
+bin_stats_t *bstats, arena_stats_large_t *lstats,
+arena_stats_extents_t *estats) {
 cassert(config_stats);
 arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
@@ -94,6 +101,10 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 arena_stats_accum_zu(&astats->retained,
 extents_npages_get(&arena->extents_retained) << LG_PAGE);
+atomic_store_zu(&astats->extent_avail,
+atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
+ATOMIC_RELAXED);
 arena_stats_accum_u64(&astats->decay_dirty.npurge,
 arena_stats_read_u64(tsdn, &arena->stats,
 &arena->stats.decay_dirty.npurge));
@@ -121,8 +132,10 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
 extents_npages_get(&arena->extents_dirty) +
 extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
+arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
+&arena->stats.abandoned_vm, ATOMIC_RELAXED));
-for (szind_t i = 0; i < NSIZES - NBINS; i++) {
+for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
 uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
 &arena->stats.lstats[i].nmalloc);
 arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
@@ -140,12 +153,43 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 arena_stats_accum_u64(&astats->nrequests_large,
 nmalloc + nrequests);
+/* nfill == nmalloc for large currently. */
+arena_stats_accum_u64(&lstats[i].nfills, nmalloc);
+arena_stats_accum_u64(&astats->nfills_large, nmalloc);
+uint64_t nflush = arena_stats_read_u64(tsdn, &arena->stats,
+&arena->stats.lstats[i].nflushes);
+arena_stats_accum_u64(&lstats[i].nflushes, nflush);
+arena_stats_accum_u64(&astats->nflushes_large, nflush);
 assert(nmalloc >= ndalloc);
 assert(nmalloc - ndalloc <= SIZE_T_MAX);
 size_t curlextents = (size_t)(nmalloc - ndalloc);
 lstats[i].curlextents += curlextents;
 arena_stats_accum_zu(&astats->allocated_large,
-curlextents * sz_index2size(NBINS + i));
+curlextents * sz_index2size(SC_NBINS + i));
+}
+for (pszind_t i = 0; i < SC_NPSIZES; i++) {
+size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
+retained_bytes;
+dirty = extents_nextents_get(&arena->extents_dirty, i);
+muzzy = extents_nextents_get(&arena->extents_muzzy, i);
+retained = extents_nextents_get(&arena->extents_retained, i);
+dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i);
+muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i);
+retained_bytes =
+extents_nbytes_get(&arena->extents_retained, i);
+atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
+atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
+atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED);
+atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes,
+ATOMIC_RELAXED);
+atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes,
+ATOMIC_RELAXED);
+atomic_store_zu(&estats[i].retained_bytes, retained_bytes,
+ATOMIC_RELAXED);
 }
 arena_stats_unlock(tsdn, &arena->stats);
@@ -156,7 +200,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 cache_bin_array_descriptor_t *descriptor;
 ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
 szind_t i = 0;
-for (; i < NBINS; i++) {
+for (; i < SC_NBINS; i++) {
 cache_bin_t *tbin = &descriptor->bins_small[i];
 arena_stats_accum_zu(&astats->tcache_bytes,
 tbin->ncached * sz_index2size(i));
@@ -200,8 +244,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 nstime_update(&astats->uptime);
 nstime_subtract(&astats->uptime, &arena->create_time);
-for (szind_t i = 0; i < NBINS; i++) {
-bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
+for (szind_t i = 0; i < SC_NBINS; i++) {
+for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+bin_stats_merge(tsdn, &bstats[i],
+&arena->bins[i].bin_shards[j]);
+}
 }
 }
@@ -236,6 +283,54 @@ arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
 return ret;
 }
+static void
+arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
+unsigned cnt, void** ptrs) {
+arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+assert(extent_nfree_get(slab) >= cnt);
+assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
+#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
+for (unsigned i = 0; i < cnt; i++) {
+size_t regind = bitmap_sfu(slab_data->bitmap,
+&bin_info->bitmap_info);
+*(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) +
+(uintptr_t)(bin_info->reg_size * regind));
+}
+#else
+unsigned group = 0;
+bitmap_t g = slab_data->bitmap[group];
+unsigned i = 0;
+while (i < cnt) {
+while (g == 0) {
+g = slab_data->bitmap[++group];
+}
+size_t shift = group << LG_BITMAP_GROUP_NBITS;
+size_t pop = popcount_lu(g);
+if (pop > (cnt - i)) {
+pop = cnt - i;
+}
+/*
+* Load from memory locations only once, outside the
+* hot loop below.
+*/
+uintptr_t base = (uintptr_t)extent_addr_get(slab);
+uintptr_t regsize = (uintptr_t)bin_info->reg_size;
+while (pop--) {
+size_t bit = cfs_lu(&g);
+size_t regind = shift + bit;
+*(ptrs + i) = (void *)(base + regsize * regind);
+i++;
+}
+slab_data->bitmap[group] = g;
+}
+#endif
+extent_nfree_sub(slab, cnt);
+}
 #ifndef JEMALLOC_JET
 static
 #endif
@@ -291,11 +386,11 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
 cassert(config_stats);
-if (usize < LARGE_MINCLASS) {
-usize = LARGE_MINCLASS;
+if (usize < SC_LARGE_MINCLASS) {
+usize = SC_LARGE_MINCLASS;
 }
 index = sz_size2index(usize);
-hindex = (index >= NBINS) ? index - NBINS : 0;
+hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
 arena_stats_add_u64(tsdn, &arena->stats,
 &arena->stats.lstats[hindex].nmalloc, 1);
@@ -307,11 +402,11 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
 cassert(config_stats);
-if (usize < LARGE_MINCLASS) {
-usize = LARGE_MINCLASS;
+if (usize < SC_LARGE_MINCLASS) {
+usize = SC_LARGE_MINCLASS;
 }
 index = sz_size2index(usize);
-hindex = (index >= NBINS) ? index - NBINS : 0;
+hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
 arena_stats_add_u64(tsdn, &arena->stats,
 &arena->stats.lstats[hindex].ndalloc, 1);
@@ -324,6 +419,11 @@ arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
 arena_large_malloc_stats_update(tsdn, arena, usize);
 }
+static bool
+arena_may_have_muzzy(arena_t *arena) {
+return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0));
+}
 extent_t *
 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
 size_t alignment, bool *zero) {
@@ -338,7 +438,7 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
 extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
 &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
 szind, zero, &commit);
-if (extent == NULL) {
+if (extent == NULL && arena_may_have_muzzy(arena)) {
 extent = extents_alloc(tsdn, arena, &extent_hooks,
 &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
 false, szind, zero, &commit);
@@ -743,7 +843,7 @@ static size_t
 arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
 extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
 bool all, extent_list_t *decay_extents, bool is_background_thread) {
-UNUSED size_t nmadvise, nunmapped;
+size_t nmadvise, nunmapped;
 size_t npurged;
 if (config_stats) {
@@ -834,7 +934,7 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
 size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
 npages_limit, npages_decay_max, &decay_extents);
 if (npurge != 0) {
-UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
+size_t npurged = arena_decay_stashed(tsdn, arena,
 &extent_hooks, decay, extents, all, &decay_extents,
 is_background_thread);
 assert(npurged == npurge);
@@ -863,7 +963,7 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
 bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
 is_background_thread);
-UNUSED size_t npages_new;
+size_t npages_new;
 if (epoch_advanced) {
 /* Backlog is updated on epoch advance. */
 npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
@@ -913,11 +1013,17 @@ static void
 arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
 assert(extent_nfree_get(slab) > 0);
 extent_heap_insert(&bin->slabs_nonfull, slab);
+if (config_stats) {
+bin->stats.nonfull_slabs++;
+}
 }
 static void
 arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
 extent_heap_remove(&bin->slabs_nonfull, slab);
+if (config_stats) {
+bin->stats.nonfull_slabs--;
+}
 }
 static extent_t *
@@ -928,6 +1034,7 @@ arena_bin_slabs_nonfull_tryget(bin_t *bin) {
 }
 if (config_stats) {
 bin->stats.reslabs++;
+bin->stats.nonfull_slabs--;
 }
 return slab;
 }
@@ -954,6 +1061,37 @@ arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
 extent_list_remove(&bin->slabs_full, slab);
 }
+static void
+arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
+extent_t *slab;
+malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+if (bin->slabcur != NULL) {
+slab = bin->slabcur;
+bin->slabcur = NULL;
+malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+}
+while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
+malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+}
+for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
+slab = extent_list_first(&bin->slabs_full)) {
+arena_bin_slabs_full_remove(arena, bin, slab);
+malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+}
+if (config_stats) {
+bin->stats.curregs = 0;
+bin->stats.curslabs = 0;
+}
+malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+}
 void
 arena_reset(tsd_t *tsd, arena_t *arena) {
 /*
@@ -983,7 +1121,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
-assert(alloc_ctx.szind != NSIZES);
+assert(alloc_ctx.szind != SC_NSIZES);
 if (config_stats || (config_prof && opt_prof)) {
 usize = sz_index2size(alloc_ctx.szind);
@@ -999,35 +1137,11 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
 /* Bins. */
-for (unsigned i = 0; i < NBINS; i++) {
-extent_t *slab;
-bin_t *bin = &arena->bins[i];
-malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-if (bin->slabcur != NULL) {
-slab = bin->slabcur;
-bin->slabcur = NULL;
-malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
-malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+for (unsigned i = 0; i < SC_NBINS; i++) {
+for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+arena_bin_reset(tsd, arena,
+&arena->bins[i].bin_shards[j]);
 }
-while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
-NULL) {
-malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
-malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-}
-for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
-slab = extent_list_first(&bin->slabs_full)) {
-arena_bin_slabs_full_remove(arena, bin, slab);
-malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
-malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-}
-if (config_stats) {
-bin->stats.curregs = 0;
-bin->stats.curslabs = 0;
-}
-malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
 }
 atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
@@ -1112,7 +1226,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
 }
 static extent_t *
-arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
 const bin_info_t *bin_info) {
 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 WITNESS_RANK_CORE, 0);
@@ -1124,7 +1238,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
 extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
 &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
 binind, &zero, &commit);
-if (slab == NULL) {
+if (slab == NULL && arena_may_have_muzzy(arena)) {
 slab = extents_alloc(tsdn, arena, &extent_hooks,
 &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
 true, binind, &zero, &commit);
@@ -1140,7 +1254,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
 /* Initialize slab internals. */
 arena_slab_data_t *slab_data = extent_slab_data_get(slab);
-extent_nfree_set(slab, bin_info->nregs);
+extent_nfree_binshard_set(slab, bin_info->nregs, binshard);
 bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
 arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
@@ -1150,7 +1264,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
 static extent_t *
 arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-szind_t binind) {
+szind_t binind, unsigned binshard) {
 extent_t *slab;
 const bin_info_t *bin_info;
@@ -1166,7 +1280,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
 /* Allocate a new slab. */
 malloc_mutex_unlock(tsdn, &bin->lock);
 /******************************/
-slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
+slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info);
 /********************************/
 malloc_mutex_lock(tsdn, &bin->lock);
 if (slab != NULL) {
@@ -1193,7 +1307,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
 /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
 static void *
 arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-szind_t binind) {
+szind_t binind, unsigned binshard) {
 const bin_info_t *bin_info;
 extent_t *slab;
@@ -1202,7 +1316,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
 arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
 bin->slabcur = NULL;
 }
-slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
+slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard);
 if (bin->slabcur != NULL) {
 /*
 * Another thread updated slabcur while this one ran without the
@@ -1246,46 +1360,75 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
 return arena_slab_reg_alloc(slab, bin_info);
 }
+/* Choose a bin shard and return the locked bin. */
+bin_t *
+arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+unsigned *binshard) {
+bin_t *bin;
+if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
+*binshard = 0;
+} else {
+*binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
+}
+assert(*binshard < bin_infos[binind].n_shards);
+bin = &arena->bins[binind].bin_shards[*binshard];
+malloc_mutex_lock(tsdn, &bin->lock);
+return bin;
+}
 void
 arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
-unsigned i, nfill;
-bin_t *bin;
+unsigned i, nfill, cnt;
 assert(tbin->ncached == 0);
 if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
 prof_idump(tsdn);
 }
-bin = &arena->bins[binind];
-malloc_mutex_lock(tsdn, &bin->lock);
+unsigned binshard;
+bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
-tcache->lg_fill_div[binind]); i < nfill; i++) {
+tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
 extent_t *slab;
-void *ptr;
 if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
 0) {
-ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]);
+unsigned tofill = nfill - i;
+cnt = tofill < extent_nfree_get(slab) ?
+tofill : extent_nfree_get(slab);
+arena_slab_reg_alloc_batch(
+slab, &bin_infos[binind], cnt,
+tbin->avail - nfill + i);
 } else {
-ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
-}
-if (ptr == NULL) {
+cnt = 1;
+void *ptr = arena_bin_malloc_hard(tsdn, arena, bin,
+binind, binshard);
 /*
 * OOM. tbin->avail isn't yet filled down to its first
 * element, so the successful allocations (if any) must
 * be moved just before tbin->avail before bailing out.
 */
-if (i > 0) {
-memmove(tbin->avail - i, tbin->avail - nfill,
-i * sizeof(void *));
+if (ptr == NULL) {
+if (i > 0) {
+memmove(tbin->avail - i,
+tbin->avail - nfill,
+i * sizeof(void *));
+}
+break;
 }
-break;
+/* Insert such that low regions get used first. */
+*(tbin->avail - nfill + i) = ptr;
 }
 if (config_fill && unlikely(opt_junk_alloc)) {
-arena_alloc_junk_small(ptr, &bin_infos[binind], true);
+for (unsigned j = 0; j < cnt; j++) {
+void* ptr = *(tbin->avail - nfill + i + j);
+arena_alloc_junk_small(ptr, &bin_infos[binind],
+true);
+}
 }
-/* Insert such that low regions get used first. */
-*(tbin->avail - nfill + i) = ptr;
 }
 if (config_stats) {
 bin->stats.nmalloc += i;
@@ -1320,15 +1463,15 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
 size_t usize;
 extent_t *slab;
-assert(binind < NBINS);
-bin = &arena->bins[binind];
+assert(binind < SC_NBINS);
 usize = sz_index2size(binind);
-malloc_mutex_lock(tsdn, &bin->lock);
+unsigned binshard;
+bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
 if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
 ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
 } else {
-ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
+ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
 }
 if (ret == NULL) {
@@ -1373,13 +1516,13 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
 assert(!tsdn_null(tsdn) || arena != NULL);
 if (likely(!tsdn_null(tsdn))) {
-arena = arena_choose(tsdn_tsd(tsdn), arena);
+arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size);
 }
 if (unlikely(arena == NULL)) {
 return NULL;
 }
-if (likely(size <= SMALL_MAXCLASS)) {
+if (likely(size <= SC_SMALL_MAXCLASS)) {
 return arena_malloc_small(tsdn, arena, ind, zero);
 }
 return large_malloc(tsdn, arena, sz_index2size(ind), zero);
@@ -1390,8 +1533,9 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 bool zero, tcache_t *tcache) {
 void *ret;
-if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
-&& (usize & PAGE_MASK) == 0))) {
+if (usize <= SC_SMALL_MAXCLASS
+&& (alignment < PAGE
+|| (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
 /* Small; alignment doesn't require special slab placement. */
 ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
 zero, tcache, true);
@@ -1406,11 +1550,15 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 }
 void
-arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
+arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
 cassert(config_prof);
 assert(ptr != NULL);
-assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
-assert(usize <= SMALL_MAXCLASS);
+assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
+assert(usize <= SC_SMALL_MAXCLASS);
+if (config_opt_safety_checks) {
+safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
+}
 rtree_ctx_t rtree_ctx_fallback;
 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -1434,15 +1582,15 @@ arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
 cassert(config_prof);
 assert(ptr != NULL);
-extent_szind_set(extent, NBINS);
+extent_szind_set(extent, SC_NBINS);
 rtree_ctx_t rtree_ctx_fallback;
 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
 rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
-NBINS, false);
+SC_NBINS, false);
-assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
+assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
-return LARGE_MINCLASS;
+return SC_LARGE_MINCLASS;
 }
 void
@@ -1452,10 +1600,19 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 assert(opt_prof);
 extent_t *extent = iealloc(tsdn, ptr);
-size_t usize = arena_prof_demote(tsdn, extent, ptr);
-if (usize <= tcache_maxclass) {
+size_t usize = extent_usize_get(extent);
+size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr);
+if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
+/*
+* Currently, we only do redzoning for small sampled
+* allocations.
+*/
+assert(bumped_usize == SC_LARGE_MINCLASS);
+safety_check_verify_redzone(ptr, usize, bumped_usize);
+}
+if (bumped_usize <= tcache_maxclass && tcache != NULL) {
 tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
-sz_size2index(usize), slow_path);
+sz_size2index(bumped_usize), slow_path);
 } else {
 large_dalloc(tsdn, extent);
 }
@@ -1499,7 +1656,7 @@ arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
 }
 static void
-arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
 bin_t *bin) {
 assert(extent_nfree_get(slab) > 0);
@@ -1526,11 +1683,9 @@ arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab,
 }
 static void
-arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-void *ptr, bool junked) {
+arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+szind_t binind, extent_t *slab, void *ptr, bool junked) {
 arena_slab_data_t *slab_data = extent_slab_data_get(slab);
-szind_t binind = extent_szind_get(slab);
-bin_t *bin = &arena->bins[binind];
 const bin_info_t *bin_info = &bin_infos[binind];
 if (!junked && config_fill && unlikely(opt_junk_free)) {
@@ -1554,18 +1709,21 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
 }
 void
-arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
-void *ptr) {
-arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
+arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+szind_t binind, extent_t *extent, void *ptr) {
+arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
+true);
 }
 static void
 arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
 szind_t binind = extent_szind_get(extent);
-bin_t *bin = &arena->bins[binind];
+unsigned binshard = extent_binshard_get(extent);
+bin_t *bin = &arena->bins[binind].bin_shards[binshard];
 malloc_mutex_lock(tsdn, &bin->lock);
-arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
+arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
+false);
 malloc_mutex_unlock(tsdn, &bin->lock);
 }
@@ -1580,38 +1738,48 @@ arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
 bool
 arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
-size_t extra, bool zero) {
+size_t extra, bool zero, size_t *newsize) {
+bool ret;
 /* Calls with non-zero extra had to clamp extra. */
-assert(extra == 0 || size + extra <= LARGE_MAXCLASS);
-if (unlikely(size > LARGE_MAXCLASS)) {
-return true;
+assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
+extent_t *extent = iealloc(tsdn, ptr);
+if (unlikely(size > SC_LARGE_MAXCLASS)) {
+ret = true;
+goto done;
 }
-extent_t *extent = iealloc(tsdn, ptr);
 size_t usize_min = sz_s2u(size);
 size_t usize_max = sz_s2u(size + extra);
-if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
+if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min
+<= SC_SMALL_MAXCLASS)) {
 /*
 * Avoid moving the allocation if the size class can be left the
 * same.
 */
 assert(bin_infos[sz_size2index(oldsize)].reg_size ==
 oldsize);
-if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
-sz_size2index(oldsize)) && (size > oldsize || usize_max <
-oldsize)) {
-return true;
+if ((usize_max > SC_SMALL_MAXCLASS
+|| sz_size2index(usize_max) != sz_size2index(oldsize))
+&& (size > oldsize || usize_max < oldsize)) {
+ret = true;
+goto done;
 }
 arena_decay_tick(tsdn, extent_arena_get(extent));
-return false;
-} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
-return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
+ret = false;
+} else if (oldsize >= SC_LARGE_MINCLASS
+&& usize_max >= SC_LARGE_MINCLASS) {
+ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
 zero);
+} else {
+ret = true;
 }
-return true;
+done:
+assert(extent == iealloc(tsdn, ptr));
+*newsize = extent_usize_get(extent);
+return ret;
 }
 static void *
@@ -1622,7 +1790,7 @@ arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
 zero, tcache, true);
 }
 usize = sz_sa2u(usize, alignment);
-if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
+if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
 return NULL;
 }
 return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
@@ -1630,22 +1798,30 @@ arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
 void *
 arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
-size_t size, size_t alignment, bool zero, tcache_t *tcache) {
+size_t size, size_t alignment, bool zero, tcache_t *tcache,
+hook_ralloc_args_t *hook_args) {
 size_t usize = sz_s2u(size);
-if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
+if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
 return NULL;
 }
-if (likely(usize <= SMALL_MAXCLASS)) {
+if (likely(usize <= SC_SMALL_MAXCLASS)) {
 /* Try to avoid moving the allocation. */
-if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
+UNUSED size_t newsize;
+if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero,
+&newsize)) {
+hook_invoke_expand(hook_args->is_realloc
+? hook_expand_realloc : hook_expand_rallocx,
+ptr, oldsize, usize, (uintptr_t)ptr,
+hook_args->args);
 return ptr;
 }
 }
-if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
-return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
-alignment, zero, tcache);
+if (oldsize >= SC_LARGE_MINCLASS
+&& usize >= SC_LARGE_MINCLASS) {
+return large_ralloc(tsdn, arena, ptr, usize,
+alignment, zero, tcache, hook_args);
 }
 /*
@@ -1658,11 +1834,16 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
 return NULL;
 }
+hook_invoke_alloc(hook_args->is_realloc
+? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
+hook_args->args);
+hook_invoke_dalloc(hook_args->is_realloc
+? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
 /*
 * Junk/zero-filling were already done by
 * ipalloc()/arena_malloc().
 */
 size_t copysize = (usize < oldsize) ? usize : oldsize;
 memcpy(ret, ptr, copysize);
 isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
...@@ -1720,8 +1901,7 @@ arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit, ...@@ -1720,8 +1901,7 @@ arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
if (new_limit != NULL) { if (new_limit != NULL) {
size_t limit = *new_limit; size_t limit = *new_limit;
/* Grow no more than the new limit. */ /* Grow no more than the new limit. */
if ((new_ind = sz_psz2ind(limit + 1) - 1) > if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
EXTENT_GROW_MAX_PIND) {
return true; return true;
} }
} }
...@@ -1773,7 +1953,12 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { ...@@ -1773,7 +1953,12 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
} }
} }
arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE); unsigned nbins_total = 0;
for (i = 0; i < SC_NBINS; i++) {
nbins_total += bin_infos[i].n_shards;
}
size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
if (arena == NULL) { if (arena == NULL) {
goto label_error; goto label_error;
} }
...@@ -1865,7 +2050,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { ...@@ -1865,7 +2050,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
} }
arena->extent_grow_next = sz_psz2ind(HUGEPAGE); arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
arena->retain_grow_limit = EXTENT_GROW_MAX_PIND; arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS);
if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow", if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
goto label_error; goto label_error;
...@@ -1878,12 +2063,20 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { ...@@ -1878,12 +2063,20 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
} }
/* Initialize bins. */ /* Initialize bins. */
for (i = 0; i < NBINS; i++) { uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t);
bool err = bin_init(&arena->bins[i]); atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
if (err) { for (i = 0; i < SC_NBINS; i++) {
goto label_error; unsigned nshards = bin_infos[i].n_shards;
arena->bins[i].bin_shards = (bin_t *)bin_addr;
bin_addr += nshards * sizeof(bin_t);
for (unsigned j = 0; j < nshards; j++) {
bool err = bin_init(&arena->bins[i].bin_shards[j]);
if (err) {
goto label_error;
}
} }
} }
assert(bin_addr == (uintptr_t)arena + arena_size);
arena->base = base; arena->base = base;
/* Set arena before creating background threads. */ /* Set arena before creating background threads. */
...@@ -1900,8 +2093,8 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { ...@@ -1900,8 +2093,8 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
*/ */
assert(!tsdn_null(tsdn)); assert(!tsdn_null(tsdn));
pre_reentrancy(tsdn_tsd(tsdn), arena); pre_reentrancy(tsdn_tsd(tsdn), arena);
if (hooks_arena_new_hook) { if (test_hooks_arena_new_hook) {
hooks_arena_new_hook(); test_hooks_arena_new_hook();
} }
post_reentrancy(tsdn_tsd(tsdn)); post_reentrancy(tsdn_tsd(tsdn));
} }
...@@ -1914,20 +2107,75 @@ label_error: ...@@ -1914,20 +2107,75 @@ label_error:
return NULL; return NULL;
} }
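Note: arena_new now sizes one base allocation to hold the arena header plus every bin shard, then walks bin_addr through the trailing region handing each size class its slice. A self-contained sketch of the same carving, with a tiny stand-in for SC_NBINS and the shard counts (these constants and types are assumptions for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define NBINS 4			/* stand-in for SC_NBINS */

typedef struct bin_s { int lock; } bin_t;
typedef struct bins_s { bin_t *bin_shards; } bins_t;
typedef struct arena_s { bins_t bins[NBINS]; } arena_t;

int
main(void) {
	unsigned n_shards[NBINS] = {1, 2, 4, 1};	/* per-class shard counts */

	unsigned nbins_total = 0;
	for (unsigned i = 0; i < NBINS; i++) {
		nbins_total += n_shards[i];
	}

	/* One allocation: the arena header followed by all bin shards. */
	size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
	arena_t *arena = calloc(1, arena_size);
	if (arena == NULL) {
		return 1;
	}

	/* Carve the trailing region exactly as the loop in arena_new does. */
	uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t);
	for (unsigned i = 0; i < NBINS; i++) {
		arena->bins[i].bin_shards = (bin_t *)bin_addr;
		bin_addr += n_shards[i] * sizeof(bin_t);
	}
	assert(bin_addr == (uintptr_t)arena + arena_size);

	free(arena);
	return 0;
}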
arena_t *
arena_choose_huge(tsd_t *tsd) {
/* huge_arena_ind can be 0 during init (will use a0). */
if (huge_arena_ind == 0) {
assert(!malloc_initialized());
}
arena_t *huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, false);
if (huge_arena == NULL) {
/* Create the huge arena on demand. */
assert(huge_arena_ind != 0);
huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, true);
if (huge_arena == NULL) {
return NULL;
}
/*
* Purge eagerly for huge allocations, because: 1) the number of
* huge allocations is usually small, which means ticker-based
* decay is not reliable; and 2) less immediate reuse is
* expected for huge allocations.
*/
if (arena_dirty_decay_ms_default_get() > 0) {
arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
}
if (arena_muzzy_decay_ms_default_get() > 0) {
arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
}
}
return huge_arena;
}
bool
arena_init_huge(void) {
bool huge_enabled;
/* The threshold should be a large size class. */
if (opt_oversize_threshold > SC_LARGE_MAXCLASS ||
opt_oversize_threshold < SC_LARGE_MINCLASS) {
opt_oversize_threshold = 0;
oversize_threshold = SC_LARGE_MAXCLASS + PAGE;
huge_enabled = false;
} else {
/* Reserve the index for the huge arena. */
huge_arena_ind = narenas_total_get();
oversize_threshold = opt_oversize_threshold;
huge_enabled = true;
}
return huge_enabled;
}
bool
arena_is_huge(unsigned arena_ind) {
if (huge_arena_ind == 0) {
return false;
}
return (arena_ind == huge_arena_ind);
}
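Note: arena_init_huge, arena_choose_huge and arena_is_huge above introduce the dedicated "huge" arena: allocations at or above opt_oversize_threshold are routed to it, the option is honored only inside the large size-class range, and an out-of-range value disables the feature by pushing the threshold past any representable request. A sketch of the validation and routing decision, with placeholder constants rather than jemalloc's SC_* values:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Placeholder bounds standing in for SC_LARGE_MINCLASS/SC_LARGE_MAXCLASS. */
#define LARGE_MINCLASS	((size_t)16384)
#define LARGE_MAXCLASS	((size_t)1 << 30)
#define PAGE		((size_t)4096)

static size_t oversize_threshold;

/* Mirrors arena_init_huge: an out-of-range option disables the feature by
 * setting the threshold past every representable size class. */
bool
init_huge_sketch(size_t opt_threshold) {
	if (opt_threshold < LARGE_MINCLASS || opt_threshold > LARGE_MAXCLASS) {
		oversize_threshold = LARGE_MAXCLASS + PAGE;
		return false;
	}
	oversize_threshold = opt_threshold;
	return true;
}

/* The routing decision an allocation path would make. */
bool
routes_to_huge_arena(size_t usize) {
	return usize >= oversize_threshold;
}

int
main(void) {
	printf("huge arena enabled: %d\n", (int)init_huge_sketch(8u << 20));
	printf("64 MiB goes to huge arena: %d\n",
	    (int)routes_to_huge_arena((size_t)64 << 20));
	return 0;
}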
void void
arena_boot(void) { arena_boot(sc_data_t *sc_data) {
arena_dirty_decay_ms_default_set(opt_dirty_decay_ms); arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms); arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
#define REGIND_bin_yes(index, reg_size) \ for (unsigned i = 0; i < SC_NBINS; i++) {
div_init(&arena_binind_div_info[(index)], (reg_size)); sc_t *sc = &sc_data->sc[i];
#define REGIND_bin_no(index, reg_size) div_init(&arena_binind_div_info[i],
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \ (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
lg_delta_lookup) \ }
REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta))
SIZE_CLASSES
#undef REGIND_bin_yes
#undef REGIND_bin_no
#undef SC
} }
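Note: with the SIZE_CLASSES macro table gone, arena_boot above derives each small class's region size at runtime from the sc table as (1 << lg_base) + (ndelta << lg_delta) and seeds the per-bin division info with it. A tiny worked example of that formula (field names follow the loop above; the values are illustrative, not a real jemalloc size class):

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-in for the sc_t fields used by arena_boot above. */
typedef struct sc_s {
	unsigned lg_base;
	unsigned lg_delta;
	unsigned ndelta;
} sc_t;

size_t
sc_reg_size(const sc_t *sc) {
	return ((size_t)1 << sc->lg_base) + ((size_t)sc->ndelta << sc->lg_delta);
}

int
main(void) {
	/* e.g. lg_base=4, lg_delta=2, ndelta=3 -> 16 + 3*4 = 28 bytes. */
	sc_t sc = {4, 2, 3};
	printf("reg_size = %zu\n", sc_reg_size(&sc));
	return 0;
}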
void void
...@@ -1972,8 +2220,10 @@ arena_prefork6(tsdn_t *tsdn, arena_t *arena) { ...@@ -1972,8 +2220,10 @@ arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
void void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) { arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
for (unsigned i = 0; i < NBINS; i++) { for (unsigned i = 0; i < SC_NBINS; i++) {
bin_prefork(tsdn, &arena->bins[i]); for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
bin_prefork(tsdn, &arena->bins[i].bin_shards[j]);
}
} }
} }
...@@ -1981,8 +2231,11 @@ void ...@@ -1981,8 +2231,11 @@ void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
unsigned i; unsigned i;
for (i = 0; i < NBINS; i++) { for (i = 0; i < SC_NBINS; i++) {
bin_postfork_parent(tsdn, &arena->bins[i]); for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
bin_postfork_parent(tsdn,
&arena->bins[i].bin_shards[j]);
}
} }
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx); malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
base_postfork_parent(tsdn, arena->base); base_postfork_parent(tsdn, arena->base);
...@@ -2025,8 +2278,10 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { ...@@ -2025,8 +2278,10 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
} }
} }
for (i = 0; i < NBINS; i++) { for (i = 0; i < SC_NBINS; i++) {
bin_postfork_child(tsdn, &arena->bins[i]); for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]);
}
} }
malloc_mutex_postfork_child(tsdn, &arena->large_mtx); malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
base_postfork_child(tsdn, arena->base); base_postfork_child(tsdn, arena->base);
......
...@@ -4,6 +4,8 @@ ...@@ -4,6 +4,8 @@
#include "jemalloc/internal/assert.h" #include "jemalloc/internal/assert.h"
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
/******************************************************************************/ /******************************************************************************/
/* Data. */ /* Data. */
...@@ -11,7 +13,7 @@ ...@@ -11,7 +13,7 @@
#define BACKGROUND_THREAD_DEFAULT false #define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */ /* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT; bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT; size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT + 1;
/* Used for thread creation, termination and stats. */ /* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock; malloc_mutex_t background_thread_lock;
...@@ -22,13 +24,9 @@ size_t max_background_threads; ...@@ -22,13 +24,9 @@ size_t max_background_threads;
/* Thread info per-index. */ /* Thread info per-index. */
background_thread_info_t *background_thread_info; background_thread_info_t *background_thread_info;
/* False if no necessary runtime support. */
bool can_enable_background_thread;
/******************************************************************************/ /******************************************************************************/
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
#include <dlfcn.h>
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict); void *(*)(void *), void *__restrict);
...@@ -81,7 +79,7 @@ background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) { ...@@ -81,7 +79,7 @@ background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
} }
static inline bool static inline bool
set_current_thread_affinity(UNUSED int cpu) { set_current_thread_affinity(int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) #if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
cpu_set_t cpuset; cpu_set_t cpuset;
CPU_ZERO(&cpuset); CPU_ZERO(&cpuset);
...@@ -510,6 +508,8 @@ background_thread_entry(void *ind_arg) { ...@@ -510,6 +508,8 @@ background_thread_entry(void *ind_arg) {
assert(thread_ind < max_background_threads); assert(thread_ind < max_background_threads);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP #ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
pthread_setname_np(pthread_self(), "jemalloc_bg_thd"); pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#elif defined(__FreeBSD__)
pthread_set_name_np(pthread_self(), "jemalloc_bg_thd");
#endif #endif
if (opt_percpu_arena != percpu_arena_disabled) { if (opt_percpu_arena != percpu_arena_disabled) {
set_current_thread_affinity((int)thread_ind); set_current_thread_affinity((int)thread_ind);
...@@ -534,9 +534,8 @@ background_thread_init(tsd_t *tsd, background_thread_info_t *info) { ...@@ -534,9 +534,8 @@ background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
n_background_threads++; n_background_threads++;
} }
/* Create a new background thread if needed. */ static bool
bool background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) {
background_thread_create(tsd_t *tsd, unsigned arena_ind) {
assert(have_background_thread); assert(have_background_thread);
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
...@@ -589,6 +588,19 @@ background_thread_create(tsd_t *tsd, unsigned arena_ind) { ...@@ -589,6 +588,19 @@ background_thread_create(tsd_t *tsd, unsigned arena_ind) {
return false; return false;
} }
/* Create a new background thread if needed. */
bool
background_thread_create(tsd_t *tsd, unsigned arena_ind) {
assert(have_background_thread);
bool ret;
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
ret = background_thread_create_locked(tsd, arena_ind);
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
return ret;
}
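Note: the hunks above split thread creation into background_thread_create_locked plus a thin public wrapper that takes background_thread_lock itself; callers already holding the lock (such as background_threads_enable) now call the _locked variant directly. A generic sketch of that locked/unlocked split (the names and mutex are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t bg_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned n_threads;

/* Internal variant: caller must already hold bg_lock. */
static bool
create_locked(unsigned arena_ind) {
	(void)arena_ind;
	n_threads++;		/* stand-in for the real thread creation */
	return false;
}

/* Public variant: acquires the lock around the locked implementation. */
bool
create(unsigned arena_ind) {
	pthread_mutex_lock(&bg_lock);
	bool ret = create_locked(arena_ind);
	pthread_mutex_unlock(&bg_lock);
	return ret;
}

int
main(void) {
	create(0);
	printf("threads created: %u\n", n_threads);
	return 0;
}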
bool bool
background_threads_enable(tsd_t *tsd) { background_threads_enable(tsd_t *tsd) {
assert(n_background_threads == 0); assert(n_background_threads == 0);
...@@ -622,7 +634,7 @@ background_threads_enable(tsd_t *tsd) { ...@@ -622,7 +634,7 @@ background_threads_enable(tsd_t *tsd) {
} }
} }
return background_thread_create(tsd, 0); return background_thread_create_locked(tsd, 0);
} }
bool bool
...@@ -813,21 +825,34 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) { ...@@ -813,21 +825,34 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
#undef BILLION #undef BILLION
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS #undef BACKGROUND_THREAD_MIN_INTERVAL_NS
#ifdef JEMALLOC_HAVE_DLSYM
#include <dlfcn.h>
#endif
static bool static bool
pthread_create_fptr_init(void) { pthread_create_fptr_init(void) {
if (pthread_create_fptr != NULL) { if (pthread_create_fptr != NULL) {
return false; return false;
} }
/*
* Try the next symbol first, because 1) when lazy_lock is used we have a
* wrapper for pthread_create; and 2) the application may define its own
* wrapper as well (and can call malloc within the wrapper).
*/
#ifdef JEMALLOC_HAVE_DLSYM
pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
#else
pthread_create_fptr = NULL;
#endif
if (pthread_create_fptr == NULL) { if (pthread_create_fptr == NULL) {
can_enable_background_thread = false; if (config_lazy_lock) {
if (config_lazy_lock || opt_background_thread) {
malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, " malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
"\"pthread_create\")\n"); "\"pthread_create\")\n");
abort(); abort();
} else {
/* Fall back to the default symbol. */
pthread_create_fptr = pthread_create;
} }
} else {
can_enable_background_thread = true;
} }
return false; return false;
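Note: pthread_create_fptr_init above now guards the dlsym lookup behind JEMALLOC_HAVE_DLSYM and, when the lookup fails and lazy_lock is not required, falls back to the default pthread_create symbol instead of recording the removed can_enable_background_thread flag. A hedged sketch of that resolve-next-or-fall-back pattern (not jemalloc's exact code; on glibc RTLD_NEXT needs _GNU_SOURCE, and older systems need -ldl at link time):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <pthread.h>
#include <stdio.h>

typedef int (*pthread_create_fn)(pthread_t *, const pthread_attr_t *,
    void *(*)(void *), void *);

/* Prefer the symbol "after" any interposing wrapper; if dlsym fails (or is
 * unavailable), fall back to the direct pthread_create symbol. */
pthread_create_fn
resolve_pthread_create(void) {
	pthread_create_fn fn =
	    (pthread_create_fn)dlsym(RTLD_NEXT, "pthread_create");
	if (fn == NULL) {
		fn = pthread_create;
	}
	return fn;
}

int
main(void) {
	printf("resolved pthread_create: %s\n",
	    resolve_pthread_create() != NULL ? "yes" : "no");
	return 0;
}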
...@@ -872,9 +897,8 @@ background_thread_boot1(tsdn_t *tsdn) { ...@@ -872,9 +897,8 @@ background_thread_boot1(tsdn_t *tsdn) {
assert(have_background_thread); assert(have_background_thread);
assert(narenas_total_get() > 0); assert(narenas_total_get() > 0);
if (opt_max_background_threads == MAX_BACKGROUND_THREAD_LIMIT && if (opt_max_background_threads > MAX_BACKGROUND_THREAD_LIMIT) {
ncpus < MAX_BACKGROUND_THREAD_LIMIT) { opt_max_background_threads = DEFAULT_NUM_BACKGROUND_THREAD;
opt_max_background_threads = ncpus;
} }
max_background_threads = opt_max_background_threads; max_background_threads = opt_max_background_threads;
......
...@@ -262,8 +262,8 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks, ...@@ -262,8 +262,8 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
*/ */
size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
+ usize)); + usize));
pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 : pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ?
*pind_last; *pind_last + 1 : *pind_last;
size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next)); size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
size_t block_size = (min_block_size > next_block_size) ? min_block_size size_t block_size = (min_block_size > next_block_size) ? min_block_size
: next_block_size; : next_block_size;
...@@ -372,7 +372,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { ...@@ -372,7 +372,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base->extent_sn_next = extent_sn_next; base->extent_sn_next = extent_sn_next;
base->blocks = block; base->blocks = block;
base->auto_thp_switched = false; base->auto_thp_switched = false;
for (szind_t i = 0; i < NSIZES; i++) { for (szind_t i = 0; i < SC_NSIZES; i++) {
extent_heap_new(&base->avail[i]); extent_heap_new(&base->avail[i]);
} }
if (config_stats) { if (config_stats) {
...@@ -426,7 +426,7 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment, ...@@ -426,7 +426,7 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
extent_t *extent = NULL; extent_t *extent = NULL;
malloc_mutex_lock(tsdn, &base->mtx); malloc_mutex_lock(tsdn, &base->mtx);
for (szind_t i = sz_size2index(asize); i < NSIZES; i++) { for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
extent = extent_heap_remove_first(&base->avail[i]); extent = extent_heap_remove_first(&base->avail[i]);
if (extent != NULL) { if (extent != NULL) {
/* Use existing space. */ /* Use existing space. */
......
#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/bin.h" #include "jemalloc/internal/bin.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/witness.h" #include "jemalloc/internal/witness.h"
const bin_info_t bin_infos[NBINS] = { bin_info_t bin_infos[SC_NBINS];
#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
{reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)}, static void
#define BIN_INFO_bin_no(reg_size, slab_size, nregs) bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \ bin_info_t bin_infos[SC_NBINS]) {
lg_delta_lookup) \ for (unsigned i = 0; i < SC_NBINS; i++) {
BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \ bin_info_t *bin_info = &bin_infos[i];
(pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \ sc_t *sc = &sc_data->sc[i];
(ndelta<<lg_delta))) bin_info->reg_size = ((size_t)1U << sc->lg_base)
SIZE_CLASSES + ((size_t)sc->ndelta << sc->lg_delta);
#undef BIN_INFO_bin_yes bin_info->slab_size = (sc->pgs << LG_PAGE);
#undef BIN_INFO_bin_no bin_info->nregs =
#undef SC (uint32_t)(bin_info->slab_size / bin_info->reg_size);
}; bin_info->n_shards = bin_shard_sizes[i];
bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
bin_info->nregs);
bin_info->bitmap_info = bitmap_info;
}
}
bool
bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size,
size_t end_size, size_t nshards) {
if (nshards > BIN_SHARDS_MAX || nshards == 0) {
return true;
}
if (start_size > SC_SMALL_MAXCLASS) {
return false;
}
if (end_size > SC_SMALL_MAXCLASS) {
end_size = SC_SMALL_MAXCLASS;
}
/* Compute the index since this may happen before sz init. */
szind_t ind1 = sz_size2index_compute(start_size);
szind_t ind2 = sz_size2index_compute(end_size);
for (unsigned i = ind1; i <= ind2; i++) {
bin_shard_sizes[i] = (unsigned)nshards;
}
return false;
}
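Note: bin_update_shard_size above stamps a shard count over every small size class whose size lies in [start_size, end_size], clamping the range to the small classes and rejecting counts of 0 or above BIN_SHARDS_MAX. A sketch with a toy size-to-index mapping (the real code uses sz_size2index_compute; the bounds and defaults here are placeholders):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NBINS		8
#define SMALL_MAXCLASS	((size_t)2048)	/* placeholder bound */
#define SHARDS_MAX	128

static unsigned shard_sizes[NBINS];

/* Toy size->index mapping (classes 16, 32, 64, ... doubling); the real code
 * uses sz_size2index_compute. */
static unsigned
toy_size2index(size_t size) {
	unsigned ind = 0;
	size_t cls = 16;
	while (cls < size && ind + 1 < NBINS) {
		cls <<= 1;
		ind++;
	}
	return ind;
}

/* Same shape as bin_update_shard_size above: validate, clamp to the small
 * classes, then stamp the shard count over the index range. */
bool
update_shard_size(size_t start_size, size_t end_size, size_t nshards) {
	if (nshards == 0 || nshards > SHARDS_MAX) {
		return true;
	}
	if (start_size > SMALL_MAXCLASS) {
		return false;
	}
	if (end_size > SMALL_MAXCLASS) {
		end_size = SMALL_MAXCLASS;
	}
	for (unsigned i = toy_size2index(start_size);
	    i <= toy_size2index(end_size); i++) {
		shard_sizes[i] = (unsigned)nshards;
	}
	return false;
}

int
main(void) {
	for (unsigned i = 0; i < NBINS; i++) {
		shard_sizes[i] = 1;	/* default: one shard per class */
	}
	update_shard_size(1, 128, 4);	/* shard the classes up to 128 bytes */
	for (unsigned i = 0; i < NBINS; i++) {
		printf("bin %u: %u shards\n", i, shard_sizes[i]);
	}
	return 0;
}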
void
bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) {
/* Load the default number of shards. */
for (unsigned i = 0; i < SC_NBINS; i++) {
bin_shard_sizes[i] = N_BIN_SHARDS_DEFAULT;
}
}
void
bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
assert(sc_data->initialized);
bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
}
bool bool
bin_init(bin_t *bin) { bin_init(bin_t *bin) {
......
...@@ -275,7 +275,8 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) { ...@@ -275,7 +275,8 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) {
lg_curcells++; lg_curcells++;
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { if (unlikely(usize == 0
|| usize > SC_LARGE_MAXCLASS)) {
ret = true; ret = true;
goto label_return; goto label_return;
} }
...@@ -320,7 +321,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) { ...@@ -320,7 +321,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
lg_prevbuckets = ckh->lg_curbuckets; lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
return; return;
} }
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
...@@ -396,7 +397,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ...@@ -396,7 +397,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh->keycomp = keycomp; ckh->keycomp = keycomp;
usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
ret = true; ret = true;
goto label_return; goto label_return;
} }
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
#include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h" #include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/sc.h"
#include "jemalloc/internal/util.h" #include "jemalloc/internal/util.h"
/******************************************************************************/ /******************************************************************************/
...@@ -72,6 +72,7 @@ CTL_PROTO(config_debug) ...@@ -72,6 +72,7 @@ CTL_PROTO(config_debug)
CTL_PROTO(config_fill) CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock) CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_malloc_conf) CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_opt_safety_checks)
CTL_PROTO(config_prof) CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc) CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind) CTL_PROTO(config_prof_libunwind)
...@@ -80,11 +81,13 @@ CTL_PROTO(config_utrace) ...@@ -80,11 +81,13 @@ CTL_PROTO(config_utrace)
CTL_PROTO(config_xmalloc) CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort) CTL_PROTO(opt_abort)
CTL_PROTO(opt_abort_conf) CTL_PROTO(opt_abort_conf)
CTL_PROTO(opt_confirm_conf)
CTL_PROTO(opt_metadata_thp) CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain) CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss) CTL_PROTO(opt_dss)
CTL_PROTO(opt_narenas) CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena) CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_oversize_threshold)
CTL_PROTO(opt_background_thread) CTL_PROTO(opt_background_thread)
CTL_PROTO(opt_max_background_threads) CTL_PROTO(opt_max_background_threads)
CTL_PROTO(opt_dirty_decay_ms) CTL_PROTO(opt_dirty_decay_ms)
...@@ -126,6 +129,7 @@ INDEX_PROTO(arena_i) ...@@ -126,6 +129,7 @@ INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size) CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs) CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_slab_size) CTL_PROTO(arenas_bin_i_slab_size)
CTL_PROTO(arenas_bin_i_nshards)
INDEX_PROTO(arenas_bin_i) INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lextent_i_size) CTL_PROTO(arenas_lextent_i_size)
INDEX_PROTO(arenas_lextent_i) INDEX_PROTO(arenas_lextent_i)
...@@ -147,14 +151,20 @@ CTL_PROTO(prof_gdump) ...@@ -147,14 +151,20 @@ CTL_PROTO(prof_gdump)
CTL_PROTO(prof_reset) CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval) CTL_PROTO(prof_interval)
CTL_PROTO(lg_prof_sample) CTL_PROTO(lg_prof_sample)
CTL_PROTO(prof_log_start)
CTL_PROTO(prof_log_stop)
CTL_PROTO(stats_arenas_i_small_allocated) CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc) CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc) CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests) CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_small_nfills)
CTL_PROTO(stats_arenas_i_small_nflushes)
CTL_PROTO(stats_arenas_i_large_allocated) CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc) CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc) CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests) CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_large_nfills)
CTL_PROTO(stats_arenas_i_large_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc) CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc) CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests) CTL_PROTO(stats_arenas_i_bins_j_nrequests)
...@@ -164,12 +174,20 @@ CTL_PROTO(stats_arenas_i_bins_j_nflushes) ...@@ -164,12 +174,20 @@ CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nslabs) CTL_PROTO(stats_arenas_i_bins_j_nslabs)
CTL_PROTO(stats_arenas_i_bins_j_nreslabs) CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
CTL_PROTO(stats_arenas_i_bins_j_curslabs) CTL_PROTO(stats_arenas_i_bins_j_curslabs)
CTL_PROTO(stats_arenas_i_bins_j_nonfull_slabs)
INDEX_PROTO(stats_arenas_i_bins_j) INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lextents_j_nmalloc) CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
CTL_PROTO(stats_arenas_i_lextents_j_ndalloc) CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
CTL_PROTO(stats_arenas_i_lextents_j_nrequests) CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
CTL_PROTO(stats_arenas_i_lextents_j_curlextents) CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
INDEX_PROTO(stats_arenas_i_lextents_j) INDEX_PROTO(stats_arenas_i_lextents_j)
CTL_PROTO(stats_arenas_i_extents_j_ndirty)
CTL_PROTO(stats_arenas_i_extents_j_nmuzzy)
CTL_PROTO(stats_arenas_i_extents_j_nretained)
CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
INDEX_PROTO(stats_arenas_i_extents_j)
CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_uptime) CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss) CTL_PROTO(stats_arenas_i_dss)
...@@ -180,6 +198,7 @@ CTL_PROTO(stats_arenas_i_pdirty) ...@@ -180,6 +198,7 @@ CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_pmuzzy) CTL_PROTO(stats_arenas_i_pmuzzy)
CTL_PROTO(stats_arenas_i_mapped) CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_retained) CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_extent_avail)
CTL_PROTO(stats_arenas_i_dirty_npurge) CTL_PROTO(stats_arenas_i_dirty_npurge)
CTL_PROTO(stats_arenas_i_dirty_nmadvise) CTL_PROTO(stats_arenas_i_dirty_nmadvise)
CTL_PROTO(stats_arenas_i_dirty_purged) CTL_PROTO(stats_arenas_i_dirty_purged)
...@@ -191,6 +210,7 @@ CTL_PROTO(stats_arenas_i_internal) ...@@ -191,6 +210,7 @@ CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_thp) CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes) CTL_PROTO(stats_arenas_i_tcache_bytes)
CTL_PROTO(stats_arenas_i_resident) CTL_PROTO(stats_arenas_i_resident)
CTL_PROTO(stats_arenas_i_abandoned_vm)
INDEX_PROTO(stats_arenas_i) INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_allocated) CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active) CTL_PROTO(stats_active)
...@@ -202,6 +222,12 @@ CTL_PROTO(stats_metadata_thp) ...@@ -202,6 +222,12 @@ CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident) CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped) CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained) CTL_PROTO(stats_retained)
CTL_PROTO(experimental_hooks_install)
CTL_PROTO(experimental_hooks_remove)
CTL_PROTO(experimental_utilization_query)
CTL_PROTO(experimental_utilization_batch_query)
CTL_PROTO(experimental_arenas_i_pactivep)
INDEX_PROTO(experimental_arenas_i)
#define MUTEX_STATS_CTL_PROTO_GEN(n) \ #define MUTEX_STATS_CTL_PROTO_GEN(n) \
CTL_PROTO(stats_##n##_num_ops) \ CTL_PROTO(stats_##n##_num_ops) \
...@@ -270,6 +296,7 @@ static const ctl_named_node_t config_node[] = { ...@@ -270,6 +296,7 @@ static const ctl_named_node_t config_node[] = {
{NAME("fill"), CTL(config_fill)}, {NAME("fill"), CTL(config_fill)},
{NAME("lazy_lock"), CTL(config_lazy_lock)}, {NAME("lazy_lock"), CTL(config_lazy_lock)},
{NAME("malloc_conf"), CTL(config_malloc_conf)}, {NAME("malloc_conf"), CTL(config_malloc_conf)},
{NAME("opt_safety_checks"), CTL(config_opt_safety_checks)},
{NAME("prof"), CTL(config_prof)}, {NAME("prof"), CTL(config_prof)},
{NAME("prof_libgcc"), CTL(config_prof_libgcc)}, {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
{NAME("prof_libunwind"), CTL(config_prof_libunwind)}, {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
...@@ -281,11 +308,13 @@ static const ctl_named_node_t config_node[] = { ...@@ -281,11 +308,13 @@ static const ctl_named_node_t config_node[] = {
static const ctl_named_node_t opt_node[] = { static const ctl_named_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)}, {NAME("abort"), CTL(opt_abort)},
{NAME("abort_conf"), CTL(opt_abort_conf)}, {NAME("abort_conf"), CTL(opt_abort_conf)},
{NAME("confirm_conf"), CTL(opt_confirm_conf)},
{NAME("metadata_thp"), CTL(opt_metadata_thp)}, {NAME("metadata_thp"), CTL(opt_metadata_thp)},
{NAME("retain"), CTL(opt_retain)}, {NAME("retain"), CTL(opt_retain)},
{NAME("dss"), CTL(opt_dss)}, {NAME("dss"), CTL(opt_dss)},
{NAME("narenas"), CTL(opt_narenas)}, {NAME("narenas"), CTL(opt_narenas)},
{NAME("percpu_arena"), CTL(opt_percpu_arena)}, {NAME("percpu_arena"), CTL(opt_percpu_arena)},
{NAME("oversize_threshold"), CTL(opt_oversize_threshold)},
{NAME("background_thread"), CTL(opt_background_thread)}, {NAME("background_thread"), CTL(opt_background_thread)},
{NAME("max_background_threads"), CTL(opt_max_background_threads)}, {NAME("max_background_threads"), CTL(opt_max_background_threads)},
{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)}, {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
...@@ -341,7 +370,8 @@ static const ctl_indexed_node_t arena_node[] = { ...@@ -341,7 +370,8 @@ static const ctl_indexed_node_t arena_node[] = {
static const ctl_named_node_t arenas_bin_i_node[] = { static const ctl_named_node_t arenas_bin_i_node[] = {
{NAME("size"), CTL(arenas_bin_i_size)}, {NAME("size"), CTL(arenas_bin_i_size)},
{NAME("nregs"), CTL(arenas_bin_i_nregs)}, {NAME("nregs"), CTL(arenas_bin_i_nregs)},
{NAME("slab_size"), CTL(arenas_bin_i_slab_size)} {NAME("slab_size"), CTL(arenas_bin_i_slab_size)},
{NAME("nshards"), CTL(arenas_bin_i_nshards)}
}; };
static const ctl_named_node_t super_arenas_bin_i_node[] = { static const ctl_named_node_t super_arenas_bin_i_node[] = {
{NAME(""), CHILD(named, arenas_bin_i)} {NAME(""), CHILD(named, arenas_bin_i)}
...@@ -385,21 +415,26 @@ static const ctl_named_node_t prof_node[] = { ...@@ -385,21 +415,26 @@ static const ctl_named_node_t prof_node[] = {
{NAME("gdump"), CTL(prof_gdump)}, {NAME("gdump"), CTL(prof_gdump)},
{NAME("reset"), CTL(prof_reset)}, {NAME("reset"), CTL(prof_reset)},
{NAME("interval"), CTL(prof_interval)}, {NAME("interval"), CTL(prof_interval)},
{NAME("lg_sample"), CTL(lg_prof_sample)} {NAME("lg_sample"), CTL(lg_prof_sample)},
{NAME("log_start"), CTL(prof_log_start)},
{NAME("log_stop"), CTL(prof_log_stop)}
}; };
static const ctl_named_node_t stats_arenas_i_small_node[] = { static const ctl_named_node_t stats_arenas_i_small_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)},
{NAME("nfills"), CTL(stats_arenas_i_small_nfills)},
{NAME("nflushes"), CTL(stats_arenas_i_small_nflushes)}
}; };
static const ctl_named_node_t stats_arenas_i_large_node[] = { static const ctl_named_node_t stats_arenas_i_large_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)},
{NAME("nfills"), CTL(stats_arenas_i_large_nfills)},
{NAME("nflushes"), CTL(stats_arenas_i_large_nflushes)}
}; };
#define MUTEX_PROF_DATA_NODE(prefix) \ #define MUTEX_PROF_DATA_NODE(prefix) \
...@@ -433,6 +468,7 @@ static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { ...@@ -433,6 +468,7 @@ static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
{NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)}, {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)},
{NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)}, {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)},
{NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)}, {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)},
{NAME("nonfull_slabs"), CTL(stats_arenas_i_bins_j_nonfull_slabs)},
{NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)} {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)}
}; };
...@@ -458,6 +494,23 @@ static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = { ...@@ -458,6 +494,23 @@ static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
{INDEX(stats_arenas_i_lextents_j)} {INDEX(stats_arenas_i_lextents_j)}
}; };
static const ctl_named_node_t stats_arenas_i_extents_j_node[] = {
{NAME("ndirty"), CTL(stats_arenas_i_extents_j_ndirty)},
{NAME("nmuzzy"), CTL(stats_arenas_i_extents_j_nmuzzy)},
{NAME("nretained"), CTL(stats_arenas_i_extents_j_nretained)},
{NAME("dirty_bytes"), CTL(stats_arenas_i_extents_j_dirty_bytes)},
{NAME("muzzy_bytes"), CTL(stats_arenas_i_extents_j_muzzy_bytes)},
{NAME("retained_bytes"), CTL(stats_arenas_i_extents_j_retained_bytes)}
};
static const ctl_named_node_t super_stats_arenas_i_extents_j_node[] = {
{NAME(""), CHILD(named, stats_arenas_i_extents_j)}
};
static const ctl_indexed_node_t stats_arenas_i_extents_node[] = {
{INDEX(stats_arenas_i_extents_j)}
};
#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx) #define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES MUTEX_PROF_ARENA_MUTEXES
#undef OP #undef OP
...@@ -479,6 +532,7 @@ static const ctl_named_node_t stats_arenas_i_node[] = { ...@@ -479,6 +532,7 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)}, {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)}, {NAME("mapped"), CTL(stats_arenas_i_mapped)},
{NAME("retained"), CTL(stats_arenas_i_retained)}, {NAME("retained"), CTL(stats_arenas_i_retained)},
{NAME("extent_avail"), CTL(stats_arenas_i_extent_avail)},
{NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)}, {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)},
{NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)}, {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
{NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)}, {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)},
...@@ -490,10 +544,12 @@ static const ctl_named_node_t stats_arenas_i_node[] = { ...@@ -490,10 +544,12 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)}, {NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
{NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)}, {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
{NAME("resident"), CTL(stats_arenas_i_resident)}, {NAME("resident"), CTL(stats_arenas_i_resident)},
{NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)},
{NAME("small"), CHILD(named, stats_arenas_i_small)}, {NAME("small"), CHILD(named, stats_arenas_i_small)},
{NAME("large"), CHILD(named, stats_arenas_i_large)}, {NAME("large"), CHILD(named, stats_arenas_i_large)},
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
{NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)}, {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
{NAME("extents"), CHILD(indexed, stats_arenas_i_extents)},
{NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)} {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}
}; };
static const ctl_named_node_t super_stats_arenas_i_node[] = { static const ctl_named_node_t super_stats_arenas_i_node[] = {
...@@ -536,6 +592,33 @@ static const ctl_named_node_t stats_node[] = { ...@@ -536,6 +592,33 @@ static const ctl_named_node_t stats_node[] = {
{NAME("arenas"), CHILD(indexed, stats_arenas)} {NAME("arenas"), CHILD(indexed, stats_arenas)}
}; };
static const ctl_named_node_t experimental_hooks_node[] = {
{NAME("install"), CTL(experimental_hooks_install)},
{NAME("remove"), CTL(experimental_hooks_remove)}
};
static const ctl_named_node_t experimental_utilization_node[] = {
{NAME("query"), CTL(experimental_utilization_query)},
{NAME("batch_query"), CTL(experimental_utilization_batch_query)}
};
static const ctl_named_node_t experimental_arenas_i_node[] = {
{NAME("pactivep"), CTL(experimental_arenas_i_pactivep)}
};
static const ctl_named_node_t super_experimental_arenas_i_node[] = {
{NAME(""), CHILD(named, experimental_arenas_i)}
};
static const ctl_indexed_node_t experimental_arenas_node[] = {
{INDEX(experimental_arenas_i)}
};
static const ctl_named_node_t experimental_node[] = {
{NAME("hooks"), CHILD(named, experimental_hooks)},
{NAME("utilization"), CHILD(named, experimental_utilization)},
{NAME("arenas"), CHILD(indexed, experimental_arenas)}
};
static const ctl_named_node_t root_node[] = { static const ctl_named_node_t root_node[] = {
{NAME("version"), CTL(version)}, {NAME("version"), CTL(version)},
{NAME("epoch"), CTL(epoch)}, {NAME("epoch"), CTL(epoch)},
...@@ -548,7 +631,8 @@ static const ctl_named_node_t root_node[] = { ...@@ -548,7 +631,8 @@ static const ctl_named_node_t root_node[] = {
{NAME("arena"), CHILD(indexed, arena)}, {NAME("arena"), CHILD(indexed, arena)},
{NAME("arenas"), CHILD(named, arenas)}, {NAME("arenas"), CHILD(named, arenas)},
{NAME("prof"), CHILD(named, prof)}, {NAME("prof"), CHILD(named, prof)},
{NAME("stats"), CHILD(named, stats)} {NAME("stats"), CHILD(named, stats)},
{NAME("experimental"), CHILD(named, experimental)}
}; };
static const ctl_named_node_t super_root_node[] = { static const ctl_named_node_t super_root_node[] = {
{NAME(""), CHILD(named, root)} {NAME(""), CHILD(named, root)}
...@@ -696,10 +780,14 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) { ...@@ -696,10 +780,14 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) {
ctl_arena->astats->nmalloc_small = 0; ctl_arena->astats->nmalloc_small = 0;
ctl_arena->astats->ndalloc_small = 0; ctl_arena->astats->ndalloc_small = 0;
ctl_arena->astats->nrequests_small = 0; ctl_arena->astats->nrequests_small = 0;
memset(ctl_arena->astats->bstats, 0, NBINS * ctl_arena->astats->nfills_small = 0;
ctl_arena->astats->nflushes_small = 0;
memset(ctl_arena->astats->bstats, 0, SC_NBINS *
sizeof(bin_stats_t)); sizeof(bin_stats_t));
memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) * memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
sizeof(arena_stats_large_t)); sizeof(arena_stats_large_t));
memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
sizeof(arena_stats_extents_t));
} }
} }
...@@ -713,9 +801,9 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { ...@@ -713,9 +801,9 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
&ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
&ctl_arena->pdirty, &ctl_arena->pmuzzy, &ctl_arena->pdirty, &ctl_arena->pmuzzy,
&ctl_arena->astats->astats, ctl_arena->astats->bstats, &ctl_arena->astats->astats, ctl_arena->astats->bstats,
ctl_arena->astats->lstats); ctl_arena->astats->lstats, ctl_arena->astats->estats);
for (i = 0; i < NBINS; i++) { for (i = 0; i < SC_NBINS; i++) {
ctl_arena->astats->allocated_small += ctl_arena->astats->allocated_small +=
ctl_arena->astats->bstats[i].curregs * ctl_arena->astats->bstats[i].curregs *
sz_index2size(i); sz_index2size(i);
...@@ -725,6 +813,10 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { ...@@ -725,6 +813,10 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
ctl_arena->astats->bstats[i].ndalloc; ctl_arena->astats->bstats[i].ndalloc;
ctl_arena->astats->nrequests_small += ctl_arena->astats->nrequests_small +=
ctl_arena->astats->bstats[i].nrequests; ctl_arena->astats->bstats[i].nrequests;
ctl_arena->astats->nfills_small +=
ctl_arena->astats->bstats[i].nfills;
ctl_arena->astats->nflushes_small +=
ctl_arena->astats->bstats[i].nflushes;
} }
} else { } else {
arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads, arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
...@@ -760,6 +852,8 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, ...@@ -760,6 +852,8 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
&astats->astats.mapped); &astats->astats.mapped);
accum_atomic_zu(&sdstats->astats.retained, accum_atomic_zu(&sdstats->astats.retained,
&astats->astats.retained); &astats->astats.retained);
accum_atomic_zu(&sdstats->astats.extent_avail,
&astats->astats.extent_avail);
} }
ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge, ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
...@@ -805,6 +899,8 @@ MUTEX_PROF_ARENA_MUTEXES ...@@ -805,6 +899,8 @@ MUTEX_PROF_ARENA_MUTEXES
sdstats->nmalloc_small += astats->nmalloc_small; sdstats->nmalloc_small += astats->nmalloc_small;
sdstats->ndalloc_small += astats->ndalloc_small; sdstats->ndalloc_small += astats->ndalloc_small;
sdstats->nrequests_small += astats->nrequests_small; sdstats->nrequests_small += astats->nrequests_small;
sdstats->nfills_small += astats->nfills_small;
sdstats->nflushes_small += astats->nflushes_small;
if (!destroyed) { if (!destroyed) {
accum_atomic_zu(&sdstats->astats.allocated_large, accum_atomic_zu(&sdstats->astats.allocated_large,
...@@ -819,6 +915,8 @@ MUTEX_PROF_ARENA_MUTEXES ...@@ -819,6 +915,8 @@ MUTEX_PROF_ARENA_MUTEXES
&astats->astats.ndalloc_large); &astats->astats.ndalloc_large);
ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large, ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
&astats->astats.nrequests_large); &astats->astats.nrequests_large);
accum_atomic_zu(&sdstats->astats.abandoned_vm,
&astats->astats.abandoned_vm);
accum_atomic_zu(&sdstats->astats.tcache_bytes, accum_atomic_zu(&sdstats->astats.tcache_bytes,
&astats->astats.tcache_bytes); &astats->astats.tcache_bytes);
...@@ -827,7 +925,8 @@ MUTEX_PROF_ARENA_MUTEXES ...@@ -827,7 +925,8 @@ MUTEX_PROF_ARENA_MUTEXES
sdstats->astats.uptime = astats->astats.uptime; sdstats->astats.uptime = astats->astats.uptime;
} }
for (i = 0; i < NBINS; i++) { /* Merge bin stats. */
for (i = 0; i < SC_NBINS; i++) {
sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sdstats->bstats[i].nrequests += sdstats->bstats[i].nrequests +=
...@@ -846,14 +945,18 @@ MUTEX_PROF_ARENA_MUTEXES ...@@ -846,14 +945,18 @@ MUTEX_PROF_ARENA_MUTEXES
if (!destroyed) { if (!destroyed) {
sdstats->bstats[i].curslabs += sdstats->bstats[i].curslabs +=
astats->bstats[i].curslabs; astats->bstats[i].curslabs;
sdstats->bstats[i].nonfull_slabs +=
astats->bstats[i].nonfull_slabs;
} else { } else {
assert(astats->bstats[i].curslabs == 0); assert(astats->bstats[i].curslabs == 0);
assert(astats->bstats[i].nonfull_slabs == 0);
} }
malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data, malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
&astats->bstats[i].mutex_data); &astats->bstats[i].mutex_data);
} }
for (i = 0; i < NSIZES - NBINS; i++) { /* Merge stats for large allocations. */
for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc, ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
&astats->lstats[i].nmalloc); &astats->lstats[i].nmalloc);
ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc, ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
...@@ -867,6 +970,22 @@ MUTEX_PROF_ARENA_MUTEXES ...@@ -867,6 +970,22 @@ MUTEX_PROF_ARENA_MUTEXES
assert(astats->lstats[i].curlextents == 0); assert(astats->lstats[i].curlextents == 0);
} }
} }
/* Merge extents stats. */
for (i = 0; i < SC_NPSIZES; i++) {
accum_atomic_zu(&sdstats->estats[i].ndirty,
&astats->estats[i].ndirty);
accum_atomic_zu(&sdstats->estats[i].nmuzzy,
&astats->estats[i].nmuzzy);
accum_atomic_zu(&sdstats->estats[i].nretained,
&astats->estats[i].nretained);
accum_atomic_zu(&sdstats->estats[i].dirty_bytes,
&astats->estats[i].dirty_bytes);
accum_atomic_zu(&sdstats->estats[i].muzzy_bytes,
&astats->estats[i].muzzy_bytes);
accum_atomic_zu(&sdstats->estats[i].retained_bytes,
&astats->estats[i].retained_bytes);
}
} }
} }
...@@ -1378,8 +1497,8 @@ label_return: \ ...@@ -1378,8 +1497,8 @@ label_return: \
#define CTL_RO_CGEN(c, n, v, t) \ #define CTL_RO_CGEN(c, n, v, t) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
size_t *oldlenp, void *newp, size_t newlen) { \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
...@@ -1421,8 +1540,8 @@ label_return: \ ...@@ -1421,8 +1540,8 @@ label_return: \
*/ */
#define CTL_RO_NL_CGEN(c, n, v, t) \ #define CTL_RO_NL_CGEN(c, n, v, t) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
size_t *oldlenp, void *newp, size_t newlen) { \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
...@@ -1440,8 +1559,8 @@ label_return: \ ...@@ -1440,8 +1559,8 @@ label_return: \
#define CTL_RO_NL_GEN(n, v, t) \ #define CTL_RO_NL_GEN(n, v, t) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
size_t *oldlenp, void *newp, size_t newlen) { \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
...@@ -1475,8 +1594,8 @@ label_return: \ ...@@ -1475,8 +1594,8 @@ label_return: \
#define CTL_RO_CONFIG_GEN(n, t) \ #define CTL_RO_CONFIG_GEN(n, t) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
size_t *oldlenp, void *newp, size_t newlen) { \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
...@@ -1494,8 +1613,8 @@ label_return: \ ...@@ -1494,8 +1613,8 @@ label_return: \
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int static int
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
UNUSED uint64_t newval; UNUSED uint64_t newval;
...@@ -1513,8 +1632,9 @@ label_return: ...@@ -1513,8 +1632,9 @@ label_return:
} }
static int static int
background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, background_thread_ctl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen) {
int ret; int ret;
bool oldval; bool oldval;
...@@ -1544,13 +1664,6 @@ background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, ...@@ -1544,13 +1664,6 @@ background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
background_thread_enabled_set(tsd_tsdn(tsd), newval); background_thread_enabled_set(tsd_tsdn(tsd), newval);
if (newval) { if (newval) {
if (!can_enable_background_thread) {
malloc_printf("<jemalloc>: Error in dlsym("
"RTLD_NEXT, \"pthread_create\"). Cannot "
"enable background_thread\n");
ret = EFAULT;
goto label_return;
}
if (background_threads_enable(tsd)) { if (background_threads_enable(tsd)) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
...@@ -1571,8 +1684,9 @@ label_return: ...@@ -1571,8 +1684,9 @@ label_return:
} }
static int static int
max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, max_background_threads_ctl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
size_t oldval; size_t oldval;
...@@ -1605,13 +1719,6 @@ max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, ...@@ -1605,13 +1719,6 @@ max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
} }
if (background_thread_enabled()) { if (background_thread_enabled()) {
if (!can_enable_background_thread) {
malloc_printf("<jemalloc>: Error in dlsym("
"RTLD_NEXT, \"pthread_create\"). Cannot "
"enable background_thread\n");
ret = EFAULT;
goto label_return;
}
background_thread_enabled_set(tsd_tsdn(tsd), false); background_thread_enabled_set(tsd_tsdn(tsd), false);
if (background_threads_disable(tsd)) { if (background_threads_disable(tsd)) {
ret = EFAULT; ret = EFAULT;
...@@ -1642,6 +1749,7 @@ CTL_RO_CONFIG_GEN(config_debug, bool) ...@@ -1642,6 +1749,7 @@ CTL_RO_CONFIG_GEN(config_debug, bool)
CTL_RO_CONFIG_GEN(config_fill, bool) CTL_RO_CONFIG_GEN(config_fill, bool)
CTL_RO_CONFIG_GEN(config_lazy_lock, bool) CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
CTL_RO_CONFIG_GEN(config_malloc_conf, const char *) CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
CTL_RO_CONFIG_GEN(config_opt_safety_checks, bool)
CTL_RO_CONFIG_GEN(config_prof, bool) CTL_RO_CONFIG_GEN(config_prof, bool)
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool) CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool) CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
...@@ -1653,6 +1761,7 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool) ...@@ -1653,6 +1761,7 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool)
CTL_RO_NL_GEN(opt_abort, opt_abort, bool) CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool) CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp], CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
const char *) const char *)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool) CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
...@@ -1660,6 +1769,7 @@ CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) ...@@ -1660,6 +1769,7 @@ CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena], CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
const char *) const char *)
CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t)
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool) CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t) CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t) CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
...@@ -1690,8 +1800,8 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) ...@@ -1690,8 +1800,8 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
/******************************************************************************/ /******************************************************************************/
static int static int
thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
arena_t *oldarena; arena_t *oldarena;
unsigned newind, oldind; unsigned newind, oldind;
...@@ -1755,8 +1865,9 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, ...@@ -1755,8 +1865,9 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
tsd_thread_deallocatedp_get, uint64_t *) tsd_thread_deallocatedp_get, uint64_t *)
static int static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
bool oldval; bool oldval;
...@@ -1776,8 +1887,9 @@ label_return: ...@@ -1776,8 +1887,9 @@ label_return:
} }
static int static int
thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
if (!tcache_available(tsd)) { if (!tcache_available(tsd)) {
...@@ -1796,8 +1908,9 @@ label_return: ...@@ -1796,8 +1908,9 @@ label_return:
} }
static int static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, thread_prof_name_ctl(tsd_t *tsd, const size_t *mib,
size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
if (!config_prof) { if (!config_prof) {
...@@ -1827,8 +1940,9 @@ label_return: ...@@ -1827,8 +1940,9 @@ label_return:
} }
static int static int
thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, thread_prof_active_ctl(tsd_t *tsd, const size_t *mib,
size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
bool oldval; bool oldval;
...@@ -1857,8 +1971,8 @@ label_return: ...@@ -1857,8 +1971,8 @@ label_return:
/******************************************************************************/ /******************************************************************************/
static int static int
tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
unsigned tcache_ind; unsigned tcache_ind;
...@@ -1875,8 +1989,8 @@ label_return: ...@@ -1875,8 +1989,8 @@ label_return:
} }
static int static int
tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
unsigned tcache_ind; unsigned tcache_ind;
...@@ -1895,8 +2009,8 @@ label_return: ...@@ -1895,8 +2009,8 @@ label_return:
} }
static int static int
tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
unsigned tcache_ind; unsigned tcache_ind;
...@@ -2044,9 +2158,8 @@ arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) { ...@@ -2044,9 +2158,8 @@ arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
if (have_background_thread) { if (have_background_thread) {
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
if (background_thread_enabled()) { if (background_thread_enabled()) {
unsigned ind = arena_ind % ncpus;
background_thread_info_t *info = background_thread_info_t *info =
&background_thread_info[ind]; background_thread_info_get(arena_ind);
assert(info->state == background_thread_started); assert(info->state == background_thread_started);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
info->state = background_thread_paused; info->state = background_thread_paused;
...@@ -2059,9 +2172,8 @@ static void ...@@ -2059,9 +2172,8 @@ static void
arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) { arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
if (have_background_thread) { if (have_background_thread) {
if (background_thread_enabled()) { if (background_thread_enabled()) {
unsigned ind = arena_ind % ncpus;
background_thread_info_t *info = background_thread_info_t *info =
&background_thread_info[ind]; background_thread_info_get(arena_ind);
assert(info->state == background_thread_paused); assert(info->state == background_thread_paused);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
info->state = background_thread_started; info->state = background_thread_started;
...@@ -2217,6 +2329,17 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, ...@@ -2217,6 +2329,17 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
if (arena_is_huge(arena_ind) && *(ssize_t *)newp > 0) {
/*
* By default the huge arena purges eagerly. If it is
* later set to a non-zero decay time, a background
* thread might be needed.
*/
if (background_thread_create(tsd, arena_ind)) {
ret = EFAULT;
goto label_return;
}
}
if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena, if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
*(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd), *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
arena, *(ssize_t *)newp)) { arena, *(ssize_t *)newp)) {
...@@ -2300,8 +2423,9 @@ label_return: ...@@ -2300,8 +2423,9 @@ label_return:
} }
static int static int
arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
unsigned arena_ind; unsigned arena_ind;
arena_t *arena; arena_t *arena;
...@@ -2336,7 +2460,8 @@ label_return: ...@@ -2336,7 +2460,8 @@ label_return:
} }
static const ctl_named_node_t * static const ctl_named_node_t *
arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t i) {
const ctl_named_node_t *ret; const ctl_named_node_t *ret;
malloc_mutex_lock(tsdn, &ctl_mtx); malloc_mutex_lock(tsdn, &ctl_mtx);
...@@ -2361,8 +2486,8 @@ label_return: ...@@ -2361,8 +2486,8 @@ label_return:
/******************************************************************************/ /******************************************************************************/
static int static int
arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
unsigned narenas; unsigned narenas;
...@@ -2382,8 +2507,9 @@ label_return: ...@@ -2382,8 +2507,9 @@ label_return:
} }
static int static int
arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen, bool dirty) {
int ret; int ret;
if (oldp != NULL && oldlenp != NULL) { if (oldp != NULL && oldlenp != NULL) {
...@@ -2425,34 +2551,36 @@ arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, ...@@ -2425,34 +2551,36 @@ arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t) CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t) CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned)
CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned) CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nshards, bin_infos[mib[2]].n_shards, uint32_t)
static const ctl_named_node_t * static const ctl_named_node_t *
arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib,
if (i > NBINS) { size_t miblen, size_t i) {
if (i > SC_NBINS) {
return NULL; return NULL;
} }
return super_arenas_bin_i_node; return super_arenas_bin_i_node;
} }
CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned) CTL_RO_NL_GEN(arenas_nlextents, SC_NSIZES - SC_NBINS, unsigned)
CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]), CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(SC_NBINS+(szind_t)mib[2]),
size_t) size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib,
size_t i) { size_t miblen, size_t i) {
if (i > NSIZES - NBINS) { if (i > SC_NSIZES - SC_NBINS) {
return NULL; return NULL;
} }
return super_arenas_lextent_i_node; return super_arenas_lextent_i_node;
} }
static int static int
arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
extent_hooks_t *extent_hooks; extent_hooks_t *extent_hooks;
unsigned arena_ind; unsigned arena_ind;
...@@ -2474,8 +2602,9 @@ label_return: ...@@ -2474,8 +2602,9 @@ label_return:
} }
static int static int
arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
unsigned arena_ind; unsigned arena_ind;
void *ptr; void *ptr;
...@@ -2506,8 +2635,9 @@ label_return: ...@@ -2506,8 +2635,9 @@ label_return:
/******************************************************************************/ /******************************************************************************/
static int static int
prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
bool oldval; bool oldval;
...@@ -2533,8 +2663,8 @@ label_return: ...@@ -2533,8 +2663,8 @@ label_return:
} }
static int static int
prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
bool oldval; bool oldval;
...@@ -2559,8 +2689,8 @@ label_return: ...@@ -2559,8 +2689,8 @@ label_return:
} }
static int static int
prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
const char *filename = NULL; const char *filename = NULL;
...@@ -2582,8 +2712,8 @@ label_return: ...@@ -2582,8 +2712,8 @@ label_return:
} }
static int static int
prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
bool oldval; bool oldval;
...@@ -2608,8 +2738,8 @@ label_return: ...@@ -2608,8 +2738,8 @@ label_return:
} }
static int static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
size_t lg_sample = lg_prof_sample; size_t lg_sample = lg_prof_sample;
...@@ -2633,6 +2763,44 @@ label_return: ...@@ -2633,6 +2763,44 @@ label_return:
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t) CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
static int
prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const char *filename = NULL;
if (!config_prof) {
return ENOENT;
}
WRITEONLY();
WRITE(filename, const char *);
if (prof_log_start(tsd_tsdn(tsd), filename)) {
ret = EFAULT;
goto label_return;
}
ret = 0;
label_return:
return ret;
}
static int
prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
if (!config_prof) {
return ENOENT;
}
if (prof_log_stop(tsd_tsdn(tsd))) {
return EFAULT;
}
return 0;
}
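
For context, a minimal caller-side sketch (not part of the diff) of driving these new profiling-log handlers from an application. It assumes they are registered under the mallctl names "prof.log_start" and "prof.log_stop", that profiling is compiled in, and that the public symbol is mallctl (je_mallctl when built with this tree's je_ prefix):

#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Log allocation samples to `filename` while `fn` runs (sketch). */
static int
with_prof_log(const char *filename, void (*fn)(void)) {
	/* newp carries a (const char *), matching WRITE(filename, const char *). */
	if (mallctl("prof.log_start", NULL, NULL, &filename,
	    sizeof(filename)) != 0) {
		return -1;
	}
	fn();
	return mallctl("prof.log_stop", NULL, NULL, NULL, 0);
}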
/******************************************************************************/ /******************************************************************************/
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t) CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
...@@ -2667,6 +2835,10 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, ...@@ -2667,6 +2835,10 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
CTL_RO_CGEN(config_stats, stats_arenas_i_retained, CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED), atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
size_t) size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail,
ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge, CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
ctl_arena_stats_read_u64( ctl_arena_stats_read_u64(
...@@ -2703,6 +2875,9 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes, ...@@ -2703,6 +2875,9 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
CTL_RO_CGEN(config_stats, stats_arenas_i_resident, CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED), atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
size_t) size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.abandoned_vm,
ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
arenas_i(mib[2])->astats->allocated_small, size_t) arenas_i(mib[2])->astats->allocated_small, size_t)
...@@ -2712,6 +2887,10 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, ...@@ -2712,6 +2887,10 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
arenas_i(mib[2])->astats->ndalloc_small, uint64_t) arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
arenas_i(mib[2])->astats->nrequests_small, uint64_t) arenas_i(mib[2])->astats->nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills,
arenas_i(mib[2])->astats->nfills_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes,
arenas_i(mib[2])->astats->nflushes_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large, atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
ATOMIC_RELAXED), size_t) ATOMIC_RELAXED), size_t)
...@@ -2721,12 +2900,19 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, ...@@ -2721,12 +2900,19 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
ctl_arena_stats_read_u64( ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t) &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.nrequests_large), uint64_t)
/* /*
* Note: "nmalloc" here instead of "nrequests" in the read. This is intentional. * Note: "nmalloc_large" here instead of "nfills" in the read. This is
* intentional (large has no batch fill).
*/ */
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills,
ctl_arena_stats_read_u64( ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) /* Intentional. */ &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes,
ctl_arena_stats_read_u64(
&arenas_i(mib[2])->astats->astats.nflushes_large), uint64_t)
/* Lock profiling related APIs below. */ /* Lock profiling related APIs below. */
#define RO_MUTEX_CTL_GEN(n, l) \ #define RO_MUTEX_CTL_GEN(n, l) \
...@@ -2765,8 +2951,9 @@ RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex, ...@@ -2765,8 +2951,9 @@ RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
/* Resets all mutex stats, including global, arena and bin mutexes. */ /* Resets all mutex stats, including global, arena and bin mutexes. */
static int static int
stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen) {
if (!config_stats) { if (!config_stats) {
return ENOENT; return ENOENT;
} }
...@@ -2806,9 +2993,11 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, ...@@ -2806,9 +2993,11 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
MUTEX_PROF_RESET(arena->tcache_ql_mtx); MUTEX_PROF_RESET(arena->tcache_ql_mtx);
MUTEX_PROF_RESET(arena->base->mtx); MUTEX_PROF_RESET(arena->base->mtx);
for (szind_t i = 0; i < NBINS; i++) { for (szind_t i = 0; i < SC_NBINS; i++) {
bin_t *bin = &arena->bins[i]; for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
MUTEX_PROF_RESET(bin->lock); bin_t *bin = &arena->bins[i].bin_shards[j];
MUTEX_PROF_RESET(bin->lock);
}
} }
} }
#undef MUTEX_PROF_RESET #undef MUTEX_PROF_RESET
...@@ -2833,11 +3022,13 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs, ...@@ -2833,11 +3022,13 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t) arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs, CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t) arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs,
arenas_i(mib[2])->astats->bstats[mib[4]].nonfull_slabs, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
size_t j) { size_t miblen, size_t j) {
if (j > NBINS) { if (j > SC_NBINS) {
return NULL; return NULL;
} }
return super_stats_arenas_i_bins_j_node; return super_stats_arenas_i_bins_j_node;
...@@ -2856,22 +3047,65 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents, ...@@ -2856,22 +3047,65 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t) arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
size_t j) { size_t miblen, size_t j) {
if (j > NSIZES - NBINS) { if (j > SC_NSIZES - SC_NBINS) {
return NULL; return NULL;
} }
return super_stats_arenas_i_lextents_j_node; return super_stats_arenas_i_lextents_j_node;
} }
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
atomic_load_zu(
&arenas_i(mib[2])->astats->estats[mib[4]].ndirty,
ATOMIC_RELAXED), size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
atomic_load_zu(
&arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy,
ATOMIC_RELAXED), size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
atomic_load_zu(
&arenas_i(mib[2])->astats->estats[mib[4]].nretained,
ATOMIC_RELAXED), size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
atomic_load_zu(
&arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes,
ATOMIC_RELAXED), size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
atomic_load_zu(
&arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes,
ATOMIC_RELAXED), size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
atomic_load_zu(
&arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes,
ATOMIC_RELAXED), size_t);
static const ctl_named_node_t *
stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
size_t miblen, size_t j) {
if (j >= SC_NPSIZES) {
return NULL;
}
return super_stats_arenas_i_extents_j_node;
}
static bool
ctl_arenas_i_verify(size_t i) {
size_t a = arenas_i2a_impl(i, true, true);
if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
return true;
}
return false;
}
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
size_t miblen, size_t i) {
const ctl_named_node_t *ret; const ctl_named_node_t *ret;
size_t a;
malloc_mutex_lock(tsdn, &ctl_mtx); malloc_mutex_lock(tsdn, &ctl_mtx);
a = arenas_i2a_impl(i, true, true); if (ctl_arenas_i_verify(i)) {
if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
ret = NULL; ret = NULL;
goto label_return; goto label_return;
} }
...@@ -2881,3 +3115,321 @@ label_return: ...@@ -2881,3 +3115,321 @@ label_return:
malloc_mutex_unlock(tsdn, &ctl_mtx); malloc_mutex_unlock(tsdn, &ctl_mtx);
return ret; return ret;
} }
static int
experimental_hooks_install_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (oldp == NULL || oldlenp == NULL || newp == NULL) {
ret = EINVAL;
goto label_return;
}
/*
* Note: this is a *private* struct. This is an experimental interface;
* forcing the user to know the jemalloc internals well enough to
* extract the ABI hopefully ensures nobody gets too comfortable with
* this API, which can change at a moment's notice.
*/
hooks_t hooks;
WRITE(hooks, hooks_t);
void *handle = hook_install(tsd_tsdn(tsd), &hooks);
if (handle == NULL) {
ret = EAGAIN;
goto label_return;
}
READ(handle, void *);
ret = 0;
label_return:
return ret;
}
static int
experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
WRITEONLY();
void *handle = NULL;
WRITE(handle, void *);
if (handle == NULL) {
ret = EINVAL;
goto label_return;
}
hook_remove(tsd_tsdn(tsd), handle);
ret = 0;
label_return:
return ret;
}
/*
* Output six memory utilization entries for an input pointer, the first one of
* type (void *) and the remaining five of type size_t, describing the following
* (in the same order):
*
* (a) memory address of the extent a potential reallocation would go into,
* == the five fields below describe the extent the pointer resides in ==
* (b) number of free regions in the extent,
* (c) number of regions in the extent,
* (d) size of the extent in terms of bytes,
* (e) total number of free regions in the bin the extent belongs to, and
* (f) total number of regions in the bin the extent belongs to.
*
* Note that "(e)" and "(f)" are only available when stats are enabled;
* otherwise their values are undefined.
*
* This API is mainly intended for small class allocations, where extents are
* used as slabs.
*
* In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)"
* will be zero (if stats are enabled; otherwise undefined). The other three
* fields will be properly set though the values are trivial: "(b)" will be 0,
* "(c)" will be 1, and "(d)" will be the usable size.
*
* The input pointer and size are respectively passed in by newp and newlen,
* and the output fields and size are respectively oldp and *oldlenp.
*
* It can be beneficial to define the following macros to make it easier to
* access the output:
*
* #define SLABCUR_READ(out) (*(void **)out)
* #define COUNTS(out) ((size_t *)((void **)out + 1))
* #define NFREE_READ(out) COUNTS(out)[0]
* #define NREGS_READ(out) COUNTS(out)[1]
* #define SIZE_READ(out) COUNTS(out)[2]
* #define BIN_NFREE_READ(out) COUNTS(out)[3]
* #define BIN_NREGS_READ(out) COUNTS(out)[4]
*
* and then write e.g. NFREE_READ(oldp) to fetch the output. See the unit test
* test_query in test/unit/extent_util.c for an example.
*
* For a typical defragmentation workflow making use of this API for
* understanding the fragmentation level, please refer to the comment for
* experimental_utilization_batch_query_ctl.
*
* It's up to the application how to determine the significance of
* fragmentation based on the outputs returned. Possible choices are:
*
* (a) if extent utilization ratio is below certain threshold,
* (b) if extent memory consumption is above certain threshold,
* (c) if extent utilization ratio is significantly below bin utilization ratio,
* (d) if input pointer deviates a lot from potential reallocation address, or
* (e) some selection/combination of the above.
*
* The caller needs to make sure that the input/output arguments are valid,
* in particular, that the size of the output is correct, i.e.:
*
* *oldlenp = sizeof(void *) + sizeof(size_t) * 5
*
* Otherwise, the function immediately returns EINVAL without touching anything.
*
* In the rare case where there's no associated extent found for the input
* pointer, the function zeros out all output fields and returns. Please refer
* to the comment for experimental_utilization_batch_query_ctl to understand the
* motivation from C++.
*/
static int
experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
assert(sizeof(extent_util_stats_verbose_t)
== sizeof(void *) + sizeof(size_t) * 5);
if (oldp == NULL || oldlenp == NULL
|| *oldlenp != sizeof(extent_util_stats_verbose_t)
|| newp == NULL) {
ret = EINVAL;
goto label_return;
}
void *ptr = NULL;
WRITE(ptr, void *);
extent_util_stats_verbose_t *util_stats
= (extent_util_stats_verbose_t *)oldp;
extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
&util_stats->nfree, &util_stats->nregs, &util_stats->size,
&util_stats->bin_nfree, &util_stats->bin_nregs,
&util_stats->slabcur_addr);
ret = 0;
label_return:
return ret;
}
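
A minimal caller-side sketch (not part of the diff) of the single-pointer query described in the comment above, using the macros it suggests; the mallctl name "experimental.utilization.query" is assumed from the handler's place in the experimental namespace:

#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

#define SLABCUR_READ(out)	(*(void **)(out))
#define COUNTS(out)		((size_t *)((void **)(out) + 1))
#define NFREE_READ(out)		COUNTS(out)[0]
#define NREGS_READ(out)		COUNTS(out)[1]
#define SIZE_READ(out)		COUNTS(out)[2]

/* Print extent-level utilization for one live allocation (sketch). */
static void
report_utilization(void *ptr) {
	size_t out_len = sizeof(void *) + sizeof(size_t) * 5;
	void *out = malloc(out_len);	/* malloc gives suitably aligned storage */
	if (out == NULL) {
		return;
	}
	/* newp carries the pointer being inspected; oldp receives the six fields. */
	if (mallctl("experimental.utilization.query", out, &out_len,
	    &ptr, sizeof(ptr)) == 0) {
		printf("nfree=%zu nregs=%zu size=%zu slabcur=%p\n",
		    NFREE_READ(out), NREGS_READ(out), SIZE_READ(out),
		    SLABCUR_READ(out));
	}
	free(out);
}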
/*
* Given an input array of pointers, output three memory utilization entries of
* type size_t for each input pointer about the extent it resides in:
*
* (a) number of free regions in the extent,
* (b) number of regions in the extent, and
* (c) size of the extent in terms of bytes.
*
* This API is mainly intended for small class allocations, where extents are
* used as slabs. In case of large class allocations, the outputs are trivial:
* "(a)" will be 0, "(b)" will be 1, and "(c)" will be the usable size.
*
* Note that multiple input pointers may reside on the same extent, so the output
* fields may contain duplicates.
*
* The format of the input/output looks like:
*
* input[0]: 1st_pointer_to_query | output[0]: 1st_extent_n_free_regions
* | output[1]: 1st_extent_n_regions
* | output[2]: 1st_extent_size
* input[1]: 2nd_pointer_to_query | output[3]: 2nd_extent_n_free_regions
* | output[4]: 2nd_extent_n_regions
* | output[5]: 2nd_extent_size
* ... | ...
*
* The input array and size are respectively passed in by newp and newlen, and
* the output array and size are respectively oldp and *oldlenp.
*
* It can be beneficial to define the following macros to make it easier to
* access the output:
*
* #define NFREE_READ(out, i) out[(i) * 3]
* #define NREGS_READ(out, i) out[(i) * 3 + 1]
* #define SIZE_READ(out, i) out[(i) * 3 + 2]
*
* and then write e.g. NFREE_READ(oldp, i) to fetch the output. See the unit
* test test_batch in test/unit/extent_util.c for a concrete example.
*
* A typical workflow would be composed of the following steps:
*
* (1) flush tcache: mallctl("thread.tcache.flush", ...)
* (2) initialize input array of pointers to query fragmentation
* (3) allocate output array to hold utilization statistics
* (4) query utilization: mallctl("experimental.utilization.batch_query", ...)
* (5) (optional) decide if it's worthwhile to defragment; otherwise stop here
* (6) disable tcache: mallctl("thread.tcache.enabled", ...)
* (7) defragment allocations with significant fragmentation, e.g.:
* for each allocation {
* if it's fragmented {
* malloc(...);
* memcpy(...);
* free(...);
* }
* }
* (8) enable tcache: mallctl("thread.tcache.enabled", ...)
*
* The application can determine the significance of fragmentation itself,
* relying on the statistics returned, both at the overall level i.e. step "(5)"
* and at individual allocation level i.e. within step "(7)". Possible choices
* are:
*
* (a) whether memory utilization ratio is below certain threshold,
* (b) whether memory consumption is above certain threshold, or
* (c) some combination of the two.
*
* The caller needs to make sure that the input/output arrays are valid and
* their sizes are correct and mutually consistent, meaning:
*
* (a) newlen = n_pointers * sizeof(const void *)
* (b) *oldlenp = n_pointers * sizeof(size_t) * 3
* (c) n_pointers > 0
*
* Otherwise, the function immediately returns EINVAL without touching anything.
*
* In the rare case where there's no associated extent found for some pointers,
* rather than immediately terminating the computation and raising an error,
* the function simply zeros out the corresponding output fields and continues
* the computation until all input pointers are handled. The motivations of
* such a design are as follows:
*
* (a) The function always either processes nothing or processes everything, and
* never leaves the output half touched and half untouched.
*
* (b) It accommodates usage patterns especially common in C++. A vast variety of
* C++ objects are instantiated with multiple dynamic memory allocations. For
* example, std::string and std::vector typically use at least two allocations,
* one for the metadata and one for the actual content. Other types may use
* even more allocations. When inquiring about utilization statistics, the
* caller often wants to examine all such allocations, especially the internal
* one(s), rather than just the topmost one. The issue comes when some
* implementations do certain optimizations to reduce/aggregate some internal
* allocations, e.g. putting short strings directly into the metadata, and such
* decisions are not known to the caller. Therefore, we permit pointers to
* memory that may not have been returned directly by previous malloc calls, and
* we provide the caller with a convenient way to identify such cases.
*/
static int
experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
assert(sizeof(extent_util_stats_t) == sizeof(size_t) * 3);
const size_t len = newlen / sizeof(const void *);
if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0
|| newlen != len * sizeof(const void *)
|| *oldlenp != len * sizeof(extent_util_stats_t)) {
ret = EINVAL;
goto label_return;
}
void **ptrs = (void **)newp;
extent_util_stats_t *util_stats = (extent_util_stats_t *)oldp;
size_t i;
for (i = 0; i < len; ++i) {
extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
&util_stats[i].nfree, &util_stats[i].nregs,
&util_stats[i].size);
}
ret = 0;
label_return:
return ret;
}
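
And a caller-side sketch (not part of the diff) of the batch workflow from the comment above, counting allocations whose extent is less than half utilized; the mallctl name is taken from that comment, and the 50% threshold is only an illustration:

#include <stdlib.h>
#include <jemalloc/jemalloc.h>

#define NFREE_READ(out, i)	(out)[(i) * 3]
#define NREGS_READ(out, i)	(out)[(i) * 3 + 1]
#define SIZE_READ(out, i)	(out)[(i) * 3 + 2]

/* Return how many of the n pointers sit in extents that are <50% utilized. */
static size_t
count_fragmented(const void **ptrs, size_t n) {
	size_t out_len = n * sizeof(size_t) * 3;
	size_t *out = malloc(out_len);
	size_t frag = 0;
	if (out == NULL || n == 0) {
		free(out);
		return 0;
	}
	if (mallctl("experimental.utilization.batch_query", out, &out_len,
	    (void *)ptrs, n * sizeof(const void *)) == 0) {
		for (size_t i = 0; i < n; i++) {
			size_t nregs = NREGS_READ(out, i);
			if (nregs != 0 && NFREE_READ(out, i) * 2 > nregs) {
				frag++;
			}
		}
	}
	free(out);
	return frag;
}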
static const ctl_named_node_t *
experimental_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
size_t miblen, size_t i) {
const ctl_named_node_t *ret;
malloc_mutex_lock(tsdn, &ctl_mtx);
if (ctl_arenas_i_verify(i)) {
ret = NULL;
goto label_return;
}
ret = super_experimental_arenas_i_node;
label_return:
malloc_mutex_unlock(tsdn, &ctl_mtx);
return ret;
}
static int
experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
if (!config_stats) {
return ENOENT;
}
if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(size_t *)) {
return EINVAL;
}
unsigned arena_ind;
arena_t *arena;
int ret;
size_t *pactivep;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
MIB_UNSIGNED(arena_ind, 2);
if (arena_ind < narenas_total_get() && (arena =
arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \
defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER)
/* Expose the underlying counter for fast read. */
pactivep = (size_t *)&(arena->nactive.repr);
READ(pactivep, size_t *);
ret = 0;
#else
ret = EFAULT;
#endif
} else {
ret = EFAULT;
}
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
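
A hedged sketch (not part of the diff) of consuming the counter this handler exposes; the ctl path "experimental.arenas.<i>.pactivep" is inferred from MIB_UNSIGNED(arena_ind, 2) above and should be checked against the ctl name table:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Fetch a pointer to arena `ind`'s active-page counter for cheap polling. */
static size_t *
arena_pactive_ptr(unsigned ind) {
	size_t *pactivep = NULL;
	size_t len = sizeof(pactivep);
	char name[64];
	snprintf(name, sizeof(name), "experimental.arenas.%u.pactivep", ind);
	if (mallctl(name, &pactivep, &len, NULL, 0) != 0) {
		return NULL;	/* e.g. stats disabled or no supported atomics */
	}
	return pactivep;
}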
...@@ -20,7 +20,7 @@ mutex_pool_t extent_mutex_pool; ...@@ -20,7 +20,7 @@ mutex_pool_t extent_mutex_pool;
size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT; size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
static const bitmap_info_t extents_bitmap_info = static const bitmap_info_t extents_bitmap_info =
BITMAP_INFO_INITIALIZER(NPSIZES+1); BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit, size_t size, size_t alignment, bool *zero, bool *commit,
...@@ -50,20 +50,16 @@ static bool extent_purge_forced_default(extent_hooks_t *extent_hooks, ...@@ -50,20 +50,16 @@ static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
size_t length, bool growing_retained); size_t length, bool growing_retained);
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
size_t size, size_t size_a, size_t size_b, bool committed, size_t size, size_t size_a, size_t size_b, bool committed,
unsigned arena_ind); unsigned arena_ind);
#endif
static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena, static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
bool growing_retained); bool growing_retained);
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
size_t size_a, void *addr_b, size_t size_b, bool committed, size_t size_a, void *addr_b, size_t size_b, bool committed,
unsigned arena_ind); unsigned arena_ind);
#endif
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
bool growing_retained); bool growing_retained);
...@@ -88,11 +84,9 @@ const extent_hooks_t extent_hooks_default = { ...@@ -88,11 +84,9 @@ const extent_hooks_t extent_hooks_default = {
, ,
NULL NULL
#endif #endif
#ifdef JEMALLOC_MAPS_COALESCE
, ,
extent_split_default, extent_split_default,
extent_merge_default extent_merge_default
#endif
}; };
/* Used exclusively for gdump triggering. */ /* Used exclusively for gdump triggering. */
...@@ -119,9 +113,13 @@ static void extent_record(tsdn_t *tsdn, arena_t *arena, ...@@ -119,9 +113,13 @@ static void extent_record(tsdn_t *tsdn, arena_t *arena,
/******************************************************************************/ /******************************************************************************/
ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link, #define ATTR_NONE /* does nothing */
ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
extent_esnead_comp) extent_esnead_comp)
#undef ATTR_NONE
typedef enum { typedef enum {
lock_result_success, lock_result_success,
lock_result_failure, lock_result_failure,
...@@ -130,13 +128,16 @@ typedef enum { ...@@ -130,13 +128,16 @@ typedef enum {
static lock_result_t static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm, extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
extent_t **result) { extent_t **result, bool inactive_only) {
extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree, extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
elm, true); elm, true);
if (extent1 == NULL) { /* Slab implies active extents and should be skipped. */
if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
&extents_rtree, elm, true))) {
return lock_result_no_extent; return lock_result_no_extent;
} }
/* /*
* It's possible that the extent changed out from under us, and with it * It's possible that the extent changed out from under us, and with it
* the leaf->extent mapping. We have to recheck while holding the lock. * the leaf->extent mapping. We have to recheck while holding the lock.
...@@ -159,7 +160,8 @@ extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm, ...@@ -159,7 +160,8 @@ extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
* address, and NULL otherwise. * address, and NULL otherwise.
*/ */
static extent_t * static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) { extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
bool inactive_only) {
extent_t *ret = NULL; extent_t *ret = NULL;
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)addr, false, false); rtree_ctx, (uintptr_t)addr, false, false);
...@@ -168,7 +170,8 @@ extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) { ...@@ -168,7 +170,8 @@ extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
} }
lock_result_t lock_result; lock_result_t lock_result;
do { do {
lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret); lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
inactive_only);
} while (lock_result == lock_result_failure); } while (lock_result == lock_result_failure);
return ret; return ret;
} }
...@@ -182,6 +185,7 @@ extent_alloc(tsdn_t *tsdn, arena_t *arena) { ...@@ -182,6 +185,7 @@ extent_alloc(tsdn_t *tsdn, arena_t *arena) {
return base_alloc_extent(tsdn, arena->base); return base_alloc_extent(tsdn, arena->base);
} }
extent_avail_remove(&arena->extent_avail, extent); extent_avail_remove(&arena->extent_avail, extent);
atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
return extent; return extent;
} }
...@@ -190,6 +194,7 @@ void ...@@ -190,6 +194,7 @@ void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
extent_avail_insert(&arena->extent_avail, extent); extent_avail_insert(&arena->extent_avail, extent);
atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
} }
...@@ -255,7 +260,7 @@ extent_size_quantize_ceil(size_t size) { ...@@ -255,7 +260,7 @@ extent_size_quantize_ceil(size_t size) {
size_t ret; size_t ret;
assert(size > 0); assert(size > 0);
assert(size - sz_large_pad <= LARGE_MAXCLASS); assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
assert((size & PAGE_MASK) == 0); assert((size & PAGE_MASK) == 0);
ret = extent_size_quantize_floor(size); ret = extent_size_quantize_floor(size);
...@@ -284,7 +289,7 @@ extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, ...@@ -284,7 +289,7 @@ extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
malloc_mutex_rank_exclusive)) { malloc_mutex_rank_exclusive)) {
return true; return true;
} }
for (unsigned i = 0; i < NPSIZES+1; i++) { for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
extent_heap_new(&extents->heaps[i]); extent_heap_new(&extents->heaps[i]);
} }
bitmap_init(extents->bitmap, &extents_bitmap_info, true); bitmap_init(extents->bitmap, &extents_bitmap_info, true);
...@@ -305,6 +310,32 @@ extents_npages_get(extents_t *extents) { ...@@ -305,6 +310,32 @@ extents_npages_get(extents_t *extents) {
return atomic_load_zu(&extents->npages, ATOMIC_RELAXED); return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
} }
size_t
extents_nextents_get(extents_t *extents, pszind_t pind) {
return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
}
size_t
extents_nbytes_get(extents_t *extents, pszind_t pind) {
return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
}
static void
extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) {
size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED);
cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED);
}
static void
extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) {
size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED);
cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED);
}
static void static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
malloc_mutex_assert_owner(tsdn, &extents->mtx); malloc_mutex_assert_owner(tsdn, &extents->mtx);
...@@ -318,6 +349,11 @@ extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { ...@@ -318,6 +349,11 @@ extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
(size_t)pind); (size_t)pind);
} }
extent_heap_insert(&extents->heaps[pind], extent); extent_heap_insert(&extents->heaps[pind], extent);
if (config_stats) {
extents_stats_add(extents, pind, size);
}
extent_list_append(&extents->lru, extent); extent_list_append(&extents->lru, extent);
size_t npages = size >> LG_PAGE; size_t npages = size >> LG_PAGE;
/* /*
...@@ -340,6 +376,11 @@ extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { ...@@ -340,6 +376,11 @@ extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
size_t psz = extent_size_quantize_floor(size); size_t psz = extent_size_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz); pszind_t pind = sz_psz2ind(psz);
extent_heap_remove(&extents->heaps[pind], extent); extent_heap_remove(&extents->heaps[pind], extent);
if (config_stats) {
extents_stats_sub(extents, pind, size);
}
if (extent_heap_empty(&extents->heaps[pind])) { if (extent_heap_empty(&extents->heaps[pind])) {
bitmap_set(extents->bitmap, &extents_bitmap_info, bitmap_set(extents->bitmap, &extents_bitmap_info,
(size_t)pind); (size_t)pind);
...@@ -371,7 +412,7 @@ extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size, ...@@ -371,7 +412,7 @@ extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
&extents_bitmap_info, (size_t)pind); i < pind_max; i = &extents_bitmap_info, (size_t)pind); i < pind_max; i =
(pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)i+1)) { (size_t)i+1)) {
assert(i < NPSIZES); assert(i < SC_NPSIZES);
assert(!extent_heap_empty(&extents->heaps[i])); assert(!extent_heap_empty(&extents->heaps[i]));
extent_t *extent = extent_heap_first(&extents->heaps[i]); extent_t *extent = extent_heap_first(&extents->heaps[i]);
uintptr_t base = (uintptr_t)extent_base_get(extent); uintptr_t base = (uintptr_t)extent_base_get(extent);
...@@ -394,30 +435,6 @@ extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size, ...@@ -394,30 +435,6 @@ extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
return NULL; return NULL;
} }
/* Do any-best-fit extent selection, i.e. select any extent that best fits. */
static extent_t *
extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
size_t size) {
pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
if (i < NPSIZES+1) {
/*
* In order to reduce fragmentation, avoid reusing and splitting
* large extents for much smaller sizes.
*/
if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
return NULL;
}
assert(!extent_heap_empty(&extents->heaps[i]));
extent_t *extent = extent_heap_first(&extents->heaps[i]);
assert(extent_size_get(extent) >= size);
return extent;
}
return NULL;
}
/* /*
* Do first-fit extent selection, i.e. select the oldest/lowest extent that is * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
* large enough. * large enough.
...@@ -428,30 +445,49 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, ...@@ -428,30 +445,49 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
extent_t *ret = NULL; extent_t *ret = NULL;
pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
if (!maps_coalesce && !opt_retain) {
/*
* No split / merge allowed (Windows w/o retain). Try exact fit
* only.
*/
return extent_heap_empty(&extents->heaps[pind]) ? NULL :
extent_heap_first(&extents->heaps[pind]);
}
for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
&extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i = &extents_bitmap_info, (size_t)pind);
(pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, i < SC_NPSIZES + 1;
i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)i+1)) { (size_t)i+1)) {
assert(!extent_heap_empty(&extents->heaps[i])); assert(!extent_heap_empty(&extents->heaps[i]));
extent_t *extent = extent_heap_first(&extents->heaps[i]); extent_t *extent = extent_heap_first(&extents->heaps[i]);
assert(extent_size_get(extent) >= size); assert(extent_size_get(extent) >= size);
/*
* In order to reduce fragmentation, avoid reusing and splitting
* large extents for much smaller sizes.
*
* Only do this check for dirty extents (delay_coalesce).
*/
if (extents->delay_coalesce &&
(sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
break;
}
if (ret == NULL || extent_snad_comp(extent, ret) < 0) { if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
ret = extent; ret = extent;
} }
if (i == NPSIZES) { if (i == SC_NPSIZES) {
break; break;
} }
assert(i < NPSIZES); assert(i < SC_NPSIZES);
} }
return ret; return ret;
} }
/* /*
* Do {best,first}-fit extent selection, where the selection policy choice is * Do first-fit extent selection, where the selection policy choice is
* based on extents->delay_coalesce. Best-fit selection requires less * based on extents->delay_coalesce.
* searching, but its layout policy is less stable and may cause higher virtual
* memory fragmentation as a side effect.
*/ */
static extent_t * static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
...@@ -464,8 +500,7 @@ extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, ...@@ -464,8 +500,7 @@ extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
return NULL; return NULL;
} }
extent_t *extent = extents->delay_coalesce ? extent_t *extent =
extents_best_fit_locked(tsdn, arena, extents, max_size) :
extents_first_fit_locked(tsdn, arena, extents, max_size); extents_first_fit_locked(tsdn, arena, extents, max_size);
if (alignment > PAGE && extent == NULL) { if (alignment > PAGE && extent == NULL) {
...@@ -592,16 +627,24 @@ label_return: ...@@ -592,16 +627,24 @@ label_return:
return extent; return extent;
} }
/*
* This can only happen when we fail to allocate a new extent struct (which
* indicates OOM), e.g. when trying to split an existing extent.
*/
static void static void
extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *extent, bool growing_retained) { extents_t *extents, extent_t *extent, bool growing_retained) {
size_t sz = extent_size_get(extent);
if (config_stats) {
arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
}
/* /*
* Leak extent after making sure its pages have already been purged, so * Leak extent after making sure its pages have already been purged, so
* that this is only a virtual memory leak. * that this is only a virtual memory leak.
*/ */
if (extents_state_get(extents) == extent_state_dirty) { if (extents_state_get(extents) == extent_state_dirty) {
if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
extent, 0, extent_size_get(extent), growing_retained)) { extent, 0, sz, growing_retained)) {
extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
extent, 0, extent_size_get(extent), extent, 0, extent_size_get(extent),
growing_retained); growing_retained);
...@@ -748,6 +791,7 @@ extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) { ...@@ -748,6 +791,7 @@ extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true, if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
&elm_a, &elm_b)) { &elm_a, &elm_b)) {
extent_unlock(tsdn, extent);
return true; return true;
} }
...@@ -817,7 +861,7 @@ extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) { ...@@ -817,7 +861,7 @@ extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
extent_lock(tsdn, extent); extent_lock(tsdn, extent);
extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false); extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
if (extent_slab_get(extent)) { if (extent_slab_get(extent)) {
extent_interior_deregister(tsdn, rtree_ctx, extent); extent_interior_deregister(tsdn, rtree_ctx, extent);
extent_slab_set(extent, false); extent_slab_set(extent, false);
...@@ -874,7 +918,8 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, ...@@ -874,7 +918,8 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
extent_hooks_assure_initialized(arena, r_extent_hooks); extent_hooks_assure_initialized(arena, r_extent_hooks);
extent_t *extent; extent_t *extent;
if (new_addr != NULL) { if (new_addr != NULL) {
extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr); extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
false);
if (extent != NULL) { if (extent != NULL) {
/* /*
* We might null-out extent to report an error, but we * We might null-out extent to report an error, but we
...@@ -958,7 +1003,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ...@@ -958,7 +1003,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
if (leadsize != 0) { if (leadsize != 0) {
*lead = *extent; *lead = *extent;
*extent = extent_split_impl(tsdn, arena, r_extent_hooks, *extent = extent_split_impl(tsdn, arena, r_extent_hooks,
*lead, leadsize, NSIZES, false, esize + trailsize, szind, *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
slab, growing_retained); slab, growing_retained);
if (*extent == NULL) { if (*extent == NULL) {
*to_leak = *lead; *to_leak = *lead;
...@@ -970,7 +1015,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ...@@ -970,7 +1015,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
/* Split the trail. */ /* Split the trail. */
if (trailsize != 0) { if (trailsize != 0) {
*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent, *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
esize, szind, slab, trailsize, NSIZES, false, esize, szind, slab, trailsize, SC_NSIZES, false,
growing_retained); growing_retained);
if (*trail == NULL) { if (*trail == NULL) {
*to_leak = *extent; *to_leak = *extent;
...@@ -987,7 +1032,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ...@@ -987,7 +1032,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
* splitting occurred. * splitting occurred.
*/ */
extent_szind_set(*extent, szind); extent_szind_set(*extent, szind);
if (szind != NSIZES) { if (szind != SC_NSIZES) {
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_addr_get(*extent), szind, slab); (uintptr_t)extent_addr_get(*extent), szind, slab);
if (slab && extent_size_get(*extent) > PAGE) { if (slab && extent_size_get(*extent) > PAGE) {
...@@ -1023,6 +1068,17 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ...@@ -1023,6 +1068,17 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
&to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind, &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
growing_retained); growing_retained);
if (!maps_coalesce && result != extent_split_interior_ok
&& !opt_retain) {
/*
* Split isn't supported (implies Windows w/o retain). Avoid
* leaking the extents.
*/
assert(to_leak != NULL && lead == NULL && trail == NULL);
extent_deactivate(tsdn, arena, extents, to_leak);
return NULL;
}
if (result == extent_split_interior_ok) { if (result == extent_split_interior_ok) {
if (lead != NULL) { if (lead != NULL) {
extent_deactivate(tsdn, arena, extents, lead); extent_deactivate(tsdn, arena, extents, lead);
...@@ -1043,16 +1099,27 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ...@@ -1043,16 +1099,27 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
if (to_leak != NULL) { if (to_leak != NULL) {
void *leak = extent_base_get(to_leak); void *leak = extent_base_get(to_leak);
extent_deregister_no_gdump_sub(tsdn, to_leak); extent_deregister_no_gdump_sub(tsdn, to_leak);
extents_leak(tsdn, arena, r_extent_hooks, extents, extents_abandon_vm(tsdn, arena, r_extent_hooks, extents,
to_leak, growing_retained); to_leak, growing_retained);
assert(extent_lock_from_addr(tsdn, rtree_ctx, leak) assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
== NULL); false) == NULL);
} }
return NULL; return NULL;
} }
unreachable(); unreachable();
} }
static bool
extent_need_manual_zero(arena_t *arena) {
/*
* Need to manually zero the extent on repopulating if either: 1) non-default
* extent hooks are installed (in which case the purge semantics may
* change); or 2) transparent huge pages are enabled.
*/
return (!arena_has_default_hooks(arena) ||
(opt_thp == thp_mode_always));
}
/* /*
* Tries to satisfy the given allocation request by reusing one of the extents * Tries to satisfy the given allocation request by reusing one of the extents
* in the given extents_t. * in the given extents_t.
...@@ -1092,7 +1159,9 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, ...@@ -1092,7 +1159,9 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extent, growing_retained); extent, growing_retained);
return NULL; return NULL;
} }
extent_zeroed_set(extent, true); if (!extent_need_manual_zero(arena)) {
extent_zeroed_set(extent, true);
}
} }
if (extent_committed_get(extent)) { if (extent_committed_get(extent)) {
...@@ -1113,14 +1182,16 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, ...@@ -1113,14 +1182,16 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
if (*zero) { if (*zero) {
void *addr = extent_base_get(extent); void *addr = extent_base_get(extent);
size_t size = extent_size_get(extent);
if (!extent_zeroed_get(extent)) { if (!extent_zeroed_get(extent)) {
if (pages_purge_forced(addr, size)) { size_t size = extent_size_get(extent);
if (extent_need_manual_zero(arena) ||
pages_purge_forced(addr, size)) {
memset(addr, 0, size); memset(addr, 0, size);
} }
} else if (config_debug) { } else if (config_debug) {
size_t *p = (size_t *)(uintptr_t)addr; size_t *p = (size_t *)(uintptr_t)addr;
for (size_t i = 0; i < size / sizeof(size_t); i++) { /* Check the first page only. */
for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
assert(p[i] == 0); assert(p[i] == 0);
} }
} }
...@@ -1191,7 +1262,7 @@ extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, ...@@ -1191,7 +1262,7 @@ extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
assert(arena != NULL); assert(arena != NULL);
return extent_alloc_default_impl(tsdn, arena, new_addr, size, return extent_alloc_default_impl(tsdn, arena, new_addr, size,
alignment, zero, commit); ALIGNMENT_CEILING(alignment, PAGE), zero, commit);
} }
static void static void
...@@ -1244,11 +1315,11 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ...@@ -1244,11 +1315,11 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
while (alloc_size < alloc_size_min) { while (alloc_size < alloc_size_min) {
egn_skip++; egn_skip++;
if (arena->extent_grow_next + egn_skip == NPSIZES) { if (arena->extent_grow_next + egn_skip >=
sz_psz2ind(SC_LARGE_MAXCLASS)) {
/* Outside legal range. */ /* Outside legal range. */
goto label_err; goto label_err;
} }
assert(arena->extent_grow_next + egn_skip < NPSIZES);
alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
} }
...@@ -1271,17 +1342,16 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ...@@ -1271,17 +1342,16 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
extent_hook_post_reentrancy(tsdn); extent_hook_post_reentrancy(tsdn);
} }
extent_init(extent, arena, ptr, alloc_size, false, NSIZES, extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
arena_extent_sn_next(arena), extent_state_active, zeroed, arena_extent_sn_next(arena), extent_state_active, zeroed,
committed, true); committed, true, EXTENT_IS_HEAD);
if (ptr == NULL) { if (ptr == NULL) {
extent_dalloc(tsdn, arena, extent); extent_dalloc(tsdn, arena, extent);
goto label_err; goto label_err;
} }
if (extent_register_no_gdump_add(tsdn, extent)) { if (extent_register_no_gdump_add(tsdn, extent)) {
extents_leak(tsdn, arena, r_extent_hooks, extent_dalloc(tsdn, arena, extent);
&arena->extents_retained, extent, true);
goto label_err; goto label_err;
} }
...@@ -1328,7 +1398,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ...@@ -1328,7 +1398,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
} }
if (to_leak != NULL) { if (to_leak != NULL) {
extent_deregister_no_gdump_sub(tsdn, to_leak); extent_deregister_no_gdump_sub(tsdn, to_leak);
extents_leak(tsdn, arena, r_extent_hooks, extents_abandon_vm(tsdn, arena, r_extent_hooks,
&arena->extents_retained, to_leak, true); &arena->extents_retained, to_leak, true);
} }
goto label_err; goto label_err;
...@@ -1341,7 +1411,9 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ...@@ -1341,7 +1411,9 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
&arena->extents_retained, extent, true); &arena->extents_retained, extent, true);
goto label_err; goto label_err;
} }
extent_zeroed_set(extent, true); if (!extent_need_manual_zero(arena)) {
extent_zeroed_set(extent, true);
}
} }
/* /*
...@@ -1375,7 +1447,8 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ...@@ -1375,7 +1447,8 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
if (*zero && !extent_zeroed_get(extent)) { if (*zero && !extent_zeroed_get(extent)) {
void *addr = extent_base_get(extent); void *addr = extent_base_get(extent);
size_t size = extent_size_get(extent); size_t size = extent_size_get(extent);
if (pages_purge_forced(addr, size)) { if (extent_need_manual_zero(arena) ||
pages_purge_forced(addr, size)) {
memset(addr, 0, size); memset(addr, 0, size);
} }
} }
...@@ -1425,14 +1498,15 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, ...@@ -1425,14 +1498,15 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
return NULL; return NULL;
} }
void *addr; void *addr;
size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
if (*r_extent_hooks == &extent_hooks_default) { if (*r_extent_hooks == &extent_hooks_default) {
/* Call directly to propagate tsdn. */ /* Call directly to propagate tsdn. */
addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
alignment, zero, commit); palignment, zero, commit);
} else { } else {
extent_hook_pre_reentrancy(tsdn, arena); extent_hook_pre_reentrancy(tsdn, arena);
addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
esize, alignment, zero, commit, arena_ind_get(arena)); esize, palignment, zero, commit, arena_ind_get(arena));
extent_hook_post_reentrancy(tsdn); extent_hook_post_reentrancy(tsdn);
} }
if (addr == NULL) { if (addr == NULL) {
...@@ -1441,13 +1515,12 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, ...@@ -1441,13 +1515,12 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
} }
extent_init(extent, arena, addr, esize, slab, szind, extent_init(extent, arena, addr, esize, slab, szind,
arena_extent_sn_next(arena), extent_state_active, *zero, *commit, arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
true); true, EXTENT_NOT_HEAD);
if (pad != 0) { if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment); extent_addr_randomize(tsdn, extent, alignment);
} }
if (extent_register(tsdn, extent)) { if (extent_register(tsdn, extent)) {
extents_leak(tsdn, arena, r_extent_hooks, extent_dalloc(tsdn, arena, extent);
&arena->extents_retained, extent, false);
return NULL; return NULL;
} }
...@@ -1524,9 +1597,15 @@ extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, ...@@ -1524,9 +1597,15 @@ extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
} }
static extent_t * static extent_t *
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
extent_t *extent, bool *coalesced, bool growing_retained) { extent_t *extent, bool *coalesced, bool growing_retained,
bool inactive_only) {
/*
* We avoid checking / locking inactive neighbors for large size
* classes, since they are eagerly coalesced on deallocation which can
* cause lock contention.
*/
/* /*
* Continue attempting to coalesce until failure, to protect against * Continue attempting to coalesce until failure, to protect against
* races with other threads that are thwarted by this one. * races with other threads that are thwarted by this one.
...@@ -1537,7 +1616,7 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, ...@@ -1537,7 +1616,7 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
/* Try to coalesce forward. */ /* Try to coalesce forward. */
extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx, extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
extent_past_get(extent)); extent_past_get(extent), inactive_only);
if (next != NULL) { if (next != NULL) {
/* /*
* extents->mtx only protects against races for * extents->mtx only protects against races for
...@@ -1563,7 +1642,7 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, ...@@ -1563,7 +1642,7 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
/* Try to coalesce backward. */ /* Try to coalesce backward. */
extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx, extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
extent_before_get(extent)); extent_before_get(extent), inactive_only);
if (prev != NULL) { if (prev != NULL) {
bool can_coalesce = extent_can_coalesce(arena, extents, bool can_coalesce = extent_can_coalesce(arena, extents,
extent, prev); extent, prev);
...@@ -1589,6 +1668,22 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, ...@@ -1589,6 +1668,22 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
return extent; return extent;
} }
static extent_t *
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
extent_t *extent, bool *coalesced, bool growing_retained) {
return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
extents, extent, coalesced, growing_retained, false);
}
static extent_t *
extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
extent_t *extent, bool *coalesced, bool growing_retained) {
return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
extents, extent, coalesced, growing_retained, true);
}
/* /*
* Does the metadata management portions of putting an unused extent into the * Does the metadata management portions of putting an unused extent into the
* given extents_t (coalesces, deregisters slab interiors, the heap operations). * given extents_t (coalesces, deregisters slab interiors, the heap operations).
...@@ -1606,7 +1701,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, ...@@ -1606,7 +1701,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
malloc_mutex_lock(tsdn, &extents->mtx); malloc_mutex_lock(tsdn, &extents->mtx);
extent_hooks_assure_initialized(arena, r_extent_hooks); extent_hooks_assure_initialized(arena, r_extent_hooks);
extent_szind_set(extent, NSIZES); extent_szind_set(extent, SC_NSIZES);
if (extent_slab_get(extent)) { if (extent_slab_get(extent)) {
extent_interior_deregister(tsdn, rtree_ctx, extent); extent_interior_deregister(tsdn, rtree_ctx, extent);
extent_slab_set(extent, false); extent_slab_set(extent, false);
...@@ -1618,18 +1713,22 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, ...@@ -1618,18 +1713,22 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
if (!extents->delay_coalesce) { if (!extents->delay_coalesce) {
extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
rtree_ctx, extents, extent, NULL, growing_retained); rtree_ctx, extents, extent, NULL, growing_retained);
} else if (extent_size_get(extent) >= LARGE_MINCLASS) { } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
assert(extents == &arena->extents_dirty);
/* Always coalesce large extents eagerly. */ /* Always coalesce large extents eagerly. */
bool coalesced; bool coalesced;
size_t prev_size;
do { do {
prev_size = extent_size_get(extent);
assert(extent_state_get(extent) == extent_state_active); assert(extent_state_get(extent) == extent_state_active);
extent = extent_try_coalesce(tsdn, arena, extent = extent_try_coalesce_large(tsdn, arena,
r_extent_hooks, rtree_ctx, extents, extent, r_extent_hooks, rtree_ctx, extents, extent,
&coalesced, growing_retained); &coalesced, growing_retained);
} while (coalesced && } while (coalesced);
extent_size_get(extent) >= prev_size + LARGE_MINCLASS); if (extent_size_get(extent) >= oversize_threshold) {
/* Shortcut to purge the oversize extent eagerly. */
malloc_mutex_unlock(tsdn, &extents->mtx);
arena_decay_extent(tsdn, arena, r_extent_hooks, extent);
return;
}
} }
extent_deactivate_locked(tsdn, arena, extents, extent); extent_deactivate_locked(tsdn, arena, extents, extent);
...@@ -1644,13 +1743,18 @@ extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { ...@@ -1644,13 +1743,18 @@ extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
WITNESS_RANK_CORE, 0); WITNESS_RANK_CORE, 0);
if (extent_register(tsdn, extent)) { if (extent_register(tsdn, extent)) {
extents_leak(tsdn, arena, &extent_hooks, extent_dalloc(tsdn, arena, extent);
&arena->extents_retained, extent, false);
return; return;
} }
extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent); extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
} }
static bool
extent_may_dalloc(void) {
/* With retain enabled, the default dalloc always fails. */
return !opt_retain;
}
static bool static bool
extent_dalloc_default_impl(void *addr, size_t size) { extent_dalloc_default_impl(void *addr, size_t size) {
if (!have_dss || !extent_in_dss(addr)) { if (!have_dss || !extent_in_dss(addr)) {
...@@ -1706,16 +1810,20 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, ...@@ -1706,16 +1810,20 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0); WITNESS_RANK_CORE, 0);
/* /* Avoid calling the default extent_dalloc unless we have to. */
* Deregister first to avoid a race with other allocating threads, and if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
* reregister if deallocation fails. /*
*/ * Deregister first to avoid a race with other allocating
extent_deregister(tsdn, extent); * threads, and reregister if deallocation fails.
if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) { */
return; extent_deregister(tsdn, extent);
if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks,
extent)) {
return;
}
extent_reregister(tsdn, extent);
} }
extent_reregister(tsdn, extent);
if (*r_extent_hooks != &extent_hooks_default) { if (*r_extent_hooks != &extent_hooks_default) {
extent_hook_pre_reentrancy(tsdn, arena); extent_hook_pre_reentrancy(tsdn, arena);
} }
...@@ -1955,13 +2063,20 @@ extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, ...@@ -1955,13 +2063,20 @@ extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
offset, length, false); offset, length, false);
} }
#ifdef JEMALLOC_MAPS_COALESCE
static bool static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
return !maps_coalesce; if (!maps_coalesce) {
/*
* Without retain, only whole regions can be purged (required by
* MEM_RELEASE on Windows) -- therefore disallow splitting. See
* comments in extent_head_no_merge().
*/
return !opt_retain;
}
return false;
} }
#endif
/* /*
* Accepts the extent to split, and the characteristics of each side of the * Accepts the extent to split, and the characteristics of each side of the
...@@ -1993,7 +2108,8 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena, ...@@ -1993,7 +2108,8 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena,
extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) + extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
size_a), size_b, slab_b, szind_b, extent_sn_get(extent), size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
extent_state_get(extent), extent_zeroed_get(extent), extent_state_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent), extent_dumpable_get(extent)); extent_committed_get(extent), extent_dumpable_get(extent),
EXTENT_NOT_HEAD);
rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
...@@ -2004,7 +2120,8 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena, ...@@ -2004,7 +2120,8 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena,
extent_init(&lead, arena, extent_addr_get(extent), size_a, extent_init(&lead, arena, extent_addr_get(extent), size_a,
slab_a, szind_a, extent_sn_get(extent), slab_a, szind_a, extent_sn_get(extent),
extent_state_get(extent), extent_zeroed_get(extent), extent_state_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent), extent_dumpable_get(extent)); extent_committed_get(extent), extent_dumpable_get(extent),
EXTENT_NOT_HEAD);
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false, extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
true, &lead_elm_a, &lead_elm_b); true, &lead_elm_a, &lead_elm_b);
...@@ -2062,7 +2179,7 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, ...@@ -2062,7 +2179,7 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
static bool static bool
extent_merge_default_impl(void *addr_a, void *addr_b) { extent_merge_default_impl(void *addr_a, void *addr_b) {
if (!maps_coalesce) { if (!maps_coalesce && !opt_retain) {
return true; return true;
} }
if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
...@@ -2072,13 +2189,51 @@ extent_merge_default_impl(void *addr_a, void *addr_b) { ...@@ -2072,13 +2189,51 @@ extent_merge_default_impl(void *addr_a, void *addr_b) {
return false; return false;
} }
#ifdef JEMALLOC_MAPS_COALESCE /*
* Returns true if the given extents can't be merged because of their head bit
* settings. Assumes the second extent has the higher address.
*/
static bool
extent_head_no_merge(extent_t *a, extent_t *b) {
assert(extent_base_get(a) < extent_base_get(b));
/*
* When coalescing is not always allowed (Windows), only merge extents
* from the same VirtualAlloc region under opt.retain (in which case
* MEM_DECOMMIT is utilized for purging).
*/
if (maps_coalesce) {
return false;
}
if (!opt_retain) {
return true;
}
/* If b is a head extent, disallow the cross-region merge. */
if (extent_is_head_get(b)) {
/*
* Additionally, sn should not overflow with retain; sanity
* check that different regions have unique sn.
*/
assert(extent_sn_comp(a, b) != 0);
return true;
}
assert(extent_sn_comp(a, b) == 0);
return false;
}
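/*
 * Minimal usage sketch (hypothetical helper name): on Windows-style mappings
 * (maps_coalesce == false) with opt_retain enabled, two neighboring extents
 * may only be merged when the higher-addressed one is not a region head.
 */
static bool
example_can_merge_neighbors(extent_t *lo, extent_t *hi) {
	return !extent_head_no_merge(lo, hi);
}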
static bool static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
if (!maps_coalesce) {
tsdn_t *tsdn = tsdn_fetch();
extent_t *a = iealloc(tsdn, addr_a);
extent_t *b = iealloc(tsdn, addr_b);
if (extent_head_no_merge(a, b)) {
return true;
}
}
return extent_merge_default_impl(addr_a, addr_b); return extent_merge_default_impl(addr_a, addr_b);
} }
#endif
static bool static bool
extent_merge_impl(tsdn_t *tsdn, arena_t *arena, extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
...@@ -2086,10 +2241,11 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena, ...@@ -2086,10 +2241,11 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
bool growing_retained) { bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0); WITNESS_RANK_CORE, growing_retained ? 1 : 0);
assert(extent_base_get(a) < extent_base_get(b));
extent_hooks_assure_initialized(arena, r_extent_hooks); extent_hooks_assure_initialized(arena, r_extent_hooks);
if ((*r_extent_hooks)->merge == NULL) { if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) {
return true; return true;
} }
...@@ -2128,22 +2284,23 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena, ...@@ -2128,22 +2284,23 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
if (a_elm_b != NULL) { if (a_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL, rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
NSIZES, false); SC_NSIZES, false);
} }
if (b_elm_b != NULL) { if (b_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL, rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
NSIZES, false); SC_NSIZES, false);
} else { } else {
b_elm_b = b_elm_a; b_elm_b = b_elm_a;
} }
extent_size_set(a, extent_size_get(a) + extent_size_get(b)); extent_size_set(a, extent_size_get(a) + extent_size_get(b));
extent_szind_set(a, NSIZES); extent_szind_set(a, SC_NSIZES);
extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ? extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
extent_sn_get(a) : extent_sn_get(b)); extent_sn_get(a) : extent_sn_get(b));
extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b)); extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false); extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
false);
extent_unlock2(tsdn, a, b); extent_unlock2(tsdn, a, b);
...@@ -2175,3 +2332,72 @@ extent_boot(void) { ...@@ -2175,3 +2332,72 @@ extent_boot(void) {
return false; return false;
} }
void
extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
size_t *nfree, size_t *nregs, size_t *size) {
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
const extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(extent == NULL)) {
*nfree = *nregs = *size = 0;
return;
}
*size = extent_size_get(extent);
if (!extent_slab_get(extent)) {
*nfree = 0;
*nregs = 1;
} else {
*nfree = extent_nfree_get(extent);
*nregs = bin_infos[extent_szind_get(extent)].nregs;
assert(*nfree <= *nregs);
assert(*nfree * extent_usize_get(extent) <= *size);
}
}
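/*
 * Illustrative sketch (not part of this change): deriving a utilization
 * percentage for the slab or extent backing ptr from the counters above;
 * example_slab_utilization is an invented name.
 */
static unsigned
example_slab_utilization(tsdn_t *tsdn, const void *ptr) {
	size_t nfree, nregs, size;
	extent_util_stats_get(tsdn, ptr, &nfree, &nregs, &size);
	if (nregs == 0) {
		return 0;	/* Unknown pointer. */
	}
	return (unsigned)(((nregs - nfree) * 100) / nregs);
}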
void
extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
size_t *nfree, size_t *nregs, size_t *size,
size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) {
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
&& bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
const extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(extent == NULL)) {
*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
*slabcur_addr = NULL;
return;
}
*size = extent_size_get(extent);
if (!extent_slab_get(extent)) {
*nfree = *bin_nfree = *bin_nregs = 0;
*nregs = 1;
*slabcur_addr = NULL;
return;
}
*nfree = extent_nfree_get(extent);
const szind_t szind = extent_szind_get(extent);
*nregs = bin_infos[szind].nregs;
assert(*nfree <= *nregs);
assert(*nfree * extent_usize_get(extent) <= *size);
const arena_t *arena = extent_arena_get(extent);
assert(arena != NULL);
const unsigned binshard = extent_binshard_get(extent);
bin_t *bin = &arena->bins[szind].bin_shards[binshard];
malloc_mutex_lock(tsdn, &bin->lock);
if (config_stats) {
*bin_nregs = *nregs * bin->stats.curslabs;
assert(*bin_nregs >= bin->stats.curregs);
*bin_nfree = *bin_nregs - bin->stats.curregs;
} else {
*bin_nfree = *bin_nregs = 0;
}
*slabcur_addr = extent_addr_get(bin->slabcur);
assert(*slabcur_addr != NULL);
malloc_mutex_unlock(tsdn, &bin->lock);
}
...@@ -113,7 +113,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, ...@@ -113,7 +113,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
cassert(have_dss); cassert(have_dss);
assert(size > 0); assert(size > 0);
assert(alignment > 0); assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));
/* /*
* sbrk() uses a signed increment argument, so take care not to * sbrk() uses a signed increment argument, so take care not to
...@@ -154,9 +154,10 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, ...@@ -154,9 +154,10 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
(uintptr_t)gap_addr_page; (uintptr_t)gap_addr_page;
if (gap_size_page != 0) { if (gap_size_page != 0) {
extent_init(gap, arena, gap_addr_page, extent_init(gap, arena, gap_addr_page,
gap_size_page, false, NSIZES, gap_size_page, false, SC_NSIZES,
arena_extent_sn_next(arena), arena_extent_sn_next(arena),
extent_state_active, false, true, true); extent_state_active, false, true, true,
EXTENT_NOT_HEAD);
} }
/* /*
* Compute the address just past the end of the desired * Compute the address just past the end of the desired
...@@ -198,9 +199,9 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, ...@@ -198,9 +199,9 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
extent_t extent; extent_t extent;
extent_init(&extent, arena, ret, size, extent_init(&extent, arena, ret, size,
size, false, NSIZES, size, false, SC_NSIZES,
extent_state_active, false, true, extent_state_active, false, true,
true); true, EXTENT_NOT_HEAD);
if (extent_purge_forced_wrapper(tsdn, if (extent_purge_forced_wrapper(tsdn,
arena, &extent_hooks, &extent, 0, arena, &extent_hooks, &extent, 0,
size)) { size)) {
......
...@@ -21,8 +21,8 @@ bool opt_retain = ...@@ -21,8 +21,8 @@ bool opt_retain =
void * void *
extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit) { bool *commit) {
void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment, assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));
PAGE), commit); void *ret = pages_map(new_addr, size, alignment, commit);
if (ret == NULL) { if (ret == NULL) {
return NULL; return NULL;
} }
......
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/seq.h"
typedef struct hooks_internal_s hooks_internal_t;
struct hooks_internal_s {
hooks_t hooks;
bool in_use;
};
seq_define(hooks_internal_t, hooks)
static atomic_u_t nhooks = ATOMIC_INIT(0);
static seq_hooks_t hooks[HOOK_MAX];
static malloc_mutex_t hooks_mu;
bool
hook_boot() {
return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK,
malloc_mutex_rank_exclusive);
}
static void *
hook_install_locked(hooks_t *to_install) {
hooks_internal_t hooks_internal;
for (int i = 0; i < HOOK_MAX; i++) {
bool success = seq_try_load_hooks(&hooks_internal, &hooks[i]);
/* We hold mu; no concurrent access. */
assert(success);
if (!hooks_internal.in_use) {
hooks_internal.hooks = *to_install;
hooks_internal.in_use = true;
seq_store_hooks(&hooks[i], &hooks_internal);
atomic_store_u(&nhooks,
atomic_load_u(&nhooks, ATOMIC_RELAXED) + 1,
ATOMIC_RELAXED);
return &hooks[i];
}
}
return NULL;
}
void *
hook_install(tsdn_t *tsdn, hooks_t *to_install) {
malloc_mutex_lock(tsdn, &hooks_mu);
void *ret = hook_install_locked(to_install);
if (ret != NULL) {
tsd_global_slow_inc(tsdn);
}
malloc_mutex_unlock(tsdn, &hooks_mu);
return ret;
}
static void
hook_remove_locked(seq_hooks_t *to_remove) {
hooks_internal_t hooks_internal;
bool success = seq_try_load_hooks(&hooks_internal, to_remove);
/* We hold mu; no concurrent access. */
assert(success);
/* Should only remove hooks that were added. */
assert(hooks_internal.in_use);
hooks_internal.in_use = false;
seq_store_hooks(to_remove, &hooks_internal);
atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) - 1,
ATOMIC_RELAXED);
}
void
hook_remove(tsdn_t *tsdn, void *opaque) {
if (config_debug) {
char *hooks_begin = (char *)&hooks[0];
char *hooks_end = (char *)&hooks[HOOK_MAX];
char *hook = (char *)opaque;
assert(hooks_begin <= hook && hook < hooks_end
&& (hook - hooks_begin) % sizeof(seq_hooks_t) == 0);
}
malloc_mutex_lock(tsdn, &hooks_mu);
hook_remove_locked((seq_hooks_t *)opaque);
tsd_global_slow_dec(tsdn);
malloc_mutex_unlock(tsdn, &hooks_mu);
}
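/*
 * Usage sketch (hypothetical): installing and then removing a no-op
 * allocation hook.  The hooks_t field names (alloc_hook, extra) are inferred
 * from the invocation code further below; the example_* names are invented.
 */
static void
example_alloc_hook(void *extra, hook_alloc_t type, void *result,
    uintptr_t result_raw, uintptr_t args_raw[3]) {
	/* Must not allocate here; just observe (type, result). */
	(void)extra; (void)type; (void)result; (void)result_raw; (void)args_raw;
}

static void
example_hook_roundtrip(tsdn_t *tsdn) {
	hooks_t h = {0};
	h.alloc_hook = example_alloc_hook;
	void *handle = hook_install(tsdn, &h);
	if (handle != NULL) {
		hook_remove(tsdn, handle);
	}
}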
#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr) \
for (int for_each_hook_counter = 0; \
for_each_hook_counter < HOOK_MAX; \
for_each_hook_counter++) { \
bool for_each_hook_success = seq_try_load_hooks( \
(hooks_internal_ptr), &hooks[for_each_hook_counter]); \
if (!for_each_hook_success) { \
continue; \
} \
if (!(hooks_internal_ptr)->in_use) { \
continue; \
}
#define FOR_EACH_HOOK_END \
}
static bool *
hook_reentrantp() {
/*
* We prevent user reentrancy within hooks. This is basically just a
* thread-local bool that triggers an early-exit.
*
* We don't fold in_hook into reentrancy. There are two reasons for
* this:
* - Right now, we turn on reentrancy during things like extent hook
* execution. Allocating during extent hooks is not officially
* supported, but we don't want to break it for the time being. These
* sorts of allocations should probably still be hooked, though.
* - If a hook allocates, we may want it to be relatively fast (after
* all, it executes on every allocator operation). Turning on
* reentrancy is a fairly heavyweight mode (disabling tcache,
* redirecting to arena 0, etc.). It's possible we may one day want
* to turn on reentrant mode here, if it proves too difficult to keep
* this working. But that's fairly easy for us to see; OTOH, people
* not using hooks because they're too slow is easy for us to miss.
*
* The tricky part is that this code might get invoked even if we don't
* have access to tsd.
* This function mimics getting a pointer to thread-local data, except
* that it might secretly return a pointer to some global data if we
* know that the caller will take the early-exit path.
* If we return a bool that indicates that we are reentrant, then the
* caller will go down the early exit path, leaving the global
* untouched.
*/
static bool in_hook_global = true;
tsdn_t *tsdn = tsdn_fetch();
tcache_t *tcache = tsdn_tcachep_get(tsdn);
if (tcache != NULL) {
return &tcache->in_hook;
}
return &in_hook_global;
}
#define HOOK_PROLOGUE \
if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \
return; \
} \
bool *in_hook = hook_reentrantp(); \
if (*in_hook) { \
return; \
} \
*in_hook = true;
#define HOOK_EPILOGUE \
*in_hook = false;
void
hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
uintptr_t args_raw[3]) {
HOOK_PROLOGUE
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_alloc h = hook.hooks.alloc_hook;
if (h != NULL) {
h(hook.hooks.extra, type, result, result_raw, args_raw);
}
FOR_EACH_HOOK_END
HOOK_EPILOGUE
}
void
hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
HOOK_PROLOGUE
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_dalloc h = hook.hooks.dalloc_hook;
if (h != NULL) {
h(hook.hooks.extra, type, address, args_raw);
}
FOR_EACH_HOOK_END
HOOK_EPILOGUE
}
void
hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
HOOK_PROLOGUE
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_expand h = hook.hooks.expand_hook;
if (h != NULL) {
h(hook.hooks.extra, type, address, old_usize, new_usize,
result_raw, args_raw);
}
FOR_EACH_HOOK_END
HOOK_EPILOGUE
}
...@@ -7,12 +7,14 @@ ...@@ -7,12 +7,14 @@
#include "jemalloc/internal/ctl.h" #include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/log.h" #include "jemalloc/internal/log.h"
#include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h" #include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/spin.h" #include "jemalloc/internal/spin.h"
#include "jemalloc/internal/sz.h" #include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h" #include "jemalloc/internal/ticker.h"
...@@ -41,6 +43,8 @@ bool opt_abort_conf = ...@@ -41,6 +43,8 @@ bool opt_abort_conf =
false false
#endif #endif
; ;
/* Intentionally default off, even with debug builds. */
bool opt_confirm_conf = false;
const char *opt_junk = const char *opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
"true" "true"
...@@ -85,8 +89,10 @@ malloc_mutex_t arenas_lock; ...@@ -85,8 +89,10 @@ malloc_mutex_t arenas_lock;
JEMALLOC_ALIGNED(CACHELINE) JEMALLOC_ALIGNED(CACHELINE)
atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; atomic_p_t arenas[MALLOCX_ARENA_LIMIT];
static atomic_u_t narenas_total; /* Use narenas_total_*(). */ static atomic_u_t narenas_total; /* Use narenas_total_*(). */
static arena_t *a0; /* arenas[0]; read-only after initialization. */ /* Below three are read-only after initialization. */
unsigned narenas_auto; /* Read-only after initialization. */ static arena_t *a0; /* arenas[0]. */
unsigned narenas_auto;
unsigned manual_arena_base;
typedef enum { typedef enum {
malloc_init_uninitialized = 3, malloc_init_uninitialized = 3,
...@@ -326,7 +332,7 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { ...@@ -326,7 +332,7 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
*/ */
arena = arena_get(tsdn, ind, false); arena = arena_get(tsdn, ind, false);
if (arena != NULL) { if (arena != NULL) {
assert(ind < narenas_auto); assert(arena_is_auto(arena));
return arena; return arena;
} }
...@@ -341,12 +347,12 @@ arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { ...@@ -341,12 +347,12 @@ arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
if (ind == 0) { if (ind == 0) {
return; return;
} }
if (have_background_thread) { /*
bool err; * Avoid creating a new background thread just for the huge arena, which
malloc_mutex_lock(tsdn, &background_thread_lock); * purges eagerly by default.
err = background_thread_create(tsdn_tsd(tsdn), ind); */
malloc_mutex_unlock(tsdn, &background_thread_lock); if (have_background_thread && !arena_is_huge(ind)) {
if (err) { if (background_thread_create(tsdn_tsd(tsdn), ind)) {
malloc_printf("<jemalloc>: error in background thread " malloc_printf("<jemalloc>: error in background thread "
"creation for arena %u. Abort.\n", ind); "creation for arena %u. Abort.\n", ind);
abort(); abort();
...@@ -376,6 +382,14 @@ arena_bind(tsd_t *tsd, unsigned ind, bool internal) { ...@@ -376,6 +382,14 @@ arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
tsd_iarena_set(tsd, arena); tsd_iarena_set(tsd, arena);
} else { } else {
tsd_arena_set(tsd, arena); tsd_arena_set(tsd, arena);
unsigned shard = atomic_fetch_add_u(&arena->binshard_next, 1,
ATOMIC_RELAXED);
tsd_binshards_t *bins = tsd_binshardsp_get(tsd);
for (unsigned i = 0; i < SC_NBINS; i++) {
assert(bin_infos[i].n_shards > 0 &&
bin_infos[i].n_shards <= BIN_SHARDS_MAX);
bins->binshard[i] = shard % bin_infos[i].n_shards;
}
} }
} }
...@@ -761,6 +775,50 @@ init_opt_stats_print_opts(const char *v, size_t vlen) { ...@@ -761,6 +775,50 @@ init_opt_stats_print_opts(const char *v, size_t vlen) {
assert(opts_len == strlen(opt_stats_print_opts)); assert(opts_len == strlen(opt_stats_print_opts));
} }
/* Reads the next "start-end:value" segment in a multi-sized option. */
static bool
malloc_conf_multi_sizes_next(const char **slab_size_segment_cur,
size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *new_size) {
const char *cur = *slab_size_segment_cur;
char *end;
uintmax_t um;
set_errno(0);
/* First number, then '-' */
um = malloc_strtoumax(cur, &end, 0);
if (get_errno() != 0 || *end != '-') {
return true;
}
*slab_start = (size_t)um;
cur = end + 1;
/* Second number, then ':' */
um = malloc_strtoumax(cur, &end, 0);
if (get_errno() != 0 || *end != ':') {
return true;
}
*slab_end = (size_t)um;
cur = end + 1;
/* Last number */
um = malloc_strtoumax(cur, &end, 0);
if (get_errno() != 0) {
return true;
}
*new_size = (size_t)um;
/* Consume the separator if there is one. */
if (*end == '|') {
end++;
}
*vlen_left -= end - *slab_size_segment_cur;
*slab_size_segment_cur = end;
return false;
}
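/*
 * Format sketch: the parser above consumes "<start>-<end>:<value>" segments
 * optionally chained with '|', e.g. a (hypothetical) value such as
 * "1-160:4|161-1024:2".  A minimal driver loop:
 */
static void
example_multi_sizes_walk(const char *v, size_t vlen) {
	size_t start, end, value;
	while (vlen > 0 && !malloc_conf_multi_sizes_next(&v, &vlen, &start,
	    &end, &value)) {
		/* Use (start, end, value) for this segment. */
	}
}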
static bool static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
char const **v_p, size_t *vlen_p) { char const **v_p, size_t *vlen_p) {
...@@ -850,6 +908,11 @@ malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, ...@@ -850,6 +908,11 @@ malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
(int)vlen, v); (int)vlen, v);
/* If abort_conf is set, error out after processing all options. */ /* If abort_conf is set, error out after processing all options. */
const char *experimental = "experimental_";
if (strncmp(k, experimental, strlen(experimental)) == 0) {
/* However, tolerate experimental features. */
return;
}
had_conf_error = true; had_conf_error = true;
} }
...@@ -868,88 +931,141 @@ malloc_slow_flag_init(void) { ...@@ -868,88 +931,141 @@ malloc_slow_flag_init(void) {
malloc_slow = (malloc_slow_flags != 0); malloc_slow = (malloc_slow_flags != 0);
} }
static void /* Number of sources for initializing malloc_conf */
malloc_conf_init(void) { #define MALLOC_CONF_NSOURCES 4
unsigned i;
char buf[PATH_MAX + 1];
const char *opts, *k, *v;
size_t klen, vlen;
for (i = 0; i < 4; i++) { static const char *
/* Get runtime configuration. */ obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) {
switch (i) { if (config_debug) {
case 0: static unsigned read_source = 0;
opts = config_malloc_conf; /*
break; * Each source should only be read once, to minimize # of
case 1: * syscalls on init.
if (je_malloc_conf != NULL) { */
/* assert(read_source++ == which_source);
* Use options that were compiled into the }
* program. assert(which_source < MALLOC_CONF_NSOURCES);
*/
opts = je_malloc_conf; const char *ret;
} else { switch (which_source) {
/* No configuration specified. */ case 0:
buf[0] = '\0'; ret = config_malloc_conf;
opts = buf; break;
} case 1:
break; if (je_malloc_conf != NULL) {
case 2: { /* Use options that were compiled into the program. */
ssize_t linklen = 0; ret = je_malloc_conf;
} else {
/* No configuration specified. */
ret = NULL;
}
break;
case 2: {
ssize_t linklen = 0;
#ifndef _WIN32 #ifndef _WIN32
int saved_errno = errno; int saved_errno = errno;
const char *linkname = const char *linkname =
# ifdef JEMALLOC_PREFIX # ifdef JEMALLOC_PREFIX
"/etc/"JEMALLOC_PREFIX"malloc.conf" "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else # else
"/etc/malloc.conf" "/etc/malloc.conf"
# endif # endif
; ;
/* /*
* Try to use the contents of the "/etc/malloc.conf" * Try to use the contents of the "/etc/malloc.conf" symbolic
* symbolic link's name. * link's name.
*/ */
linklen = readlink(linkname, buf, sizeof(buf) - 1); #ifndef JEMALLOC_READLINKAT
if (linklen == -1) { linklen = readlink(linkname, buf, PATH_MAX);
/* No configuration specified. */ #else
linklen = 0; linklen = readlinkat(AT_FDCWD, linkname, buf, PATH_MAX);
/* Restore errno. */
set_errno(saved_errno);
}
#endif #endif
buf[linklen] = '\0'; if (linklen == -1) {
opts = buf; /* No configuration specified. */
break; linklen = 0;
} case 3: { /* Restore errno. */
const char *envname = set_errno(saved_errno);
}
#endif
buf[linklen] = '\0';
ret = buf;
break;
} case 3: {
const char *envname =
#ifdef JEMALLOC_PREFIX #ifdef JEMALLOC_PREFIX
JEMALLOC_CPREFIX"MALLOC_CONF" JEMALLOC_CPREFIX"MALLOC_CONF"
#else #else
"MALLOC_CONF" "MALLOC_CONF"
#endif #endif
; ;
if ((opts = jemalloc_secure_getenv(envname)) != NULL) { if ((ret = jemalloc_secure_getenv(envname)) != NULL) {
/* /*
* Do nothing; opts is already initialized to * Do nothing; opts is already initialized to the value
* the value of the MALLOC_CONF environment * of the MALLOC_CONF environment variable.
* variable. */
*/ } else {
} else { /* No configuration specified. */
/* No configuration specified. */ ret = NULL;
buf[0] = '\0'; }
opts = buf; break;
} } default:
break; not_reached();
} default: ret = NULL;
not_reached(); }
buf[0] = '\0'; return ret;
opts = buf; }
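/*
 * Usage sketch (hypothetical values) for the four sources read above: source
 * 1 is the string the application compiles in, e.g.
 *
 *	const char *malloc_conf = "narenas:4,tcache:false";
 *
 * while sources 2 and 3 correspond to the name of the /etc/malloc.conf
 * symbolic link and the MALLOC_CONF environment variable (prefixed builds use
 * the JEMALLOC_PREFIX / JEMALLOC_CPREFIX variants), and source 0 is the
 * configure-time --with-malloc-conf string.
 */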
static void
malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
bool initial_call, const char *opts_cache[MALLOC_CONF_NSOURCES],
char buf[PATH_MAX + 1]) {
static const char *opts_explain[MALLOC_CONF_NSOURCES] = {
"string specified via --with-malloc-conf",
"string pointed to by the global variable malloc_conf",
"\"name\" of the file referenced by the symbolic link named "
"/etc/malloc.conf",
"value of the environment variable MALLOC_CONF"
};
unsigned i;
const char *opts, *k, *v;
size_t klen, vlen;
for (i = 0; i < MALLOC_CONF_NSOURCES; i++) {
/* Get runtime configuration. */
if (initial_call) {
opts_cache[i] = obtain_malloc_conf(i, buf);
}
opts = opts_cache[i];
if (!initial_call && opt_confirm_conf) {
malloc_printf(
"<jemalloc>: malloc_conf #%u (%s): \"%s\"\n",
i + 1, opts_explain[i], opts != NULL ? opts : "");
}
if (opts == NULL) {
continue;
} }
while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
&vlen)) { &vlen)) {
#define CONF_ERROR(msg, k, klen, v, vlen) \
if (!initial_call) { \
malloc_conf_error( \
msg, k, klen, v, vlen); \
cur_opt_valid = false; \
}
#define CONF_CONTINUE { \
if (!initial_call && opt_confirm_conf \
&& cur_opt_valid) { \
malloc_printf("<jemalloc>: -- " \
"Set conf value: %.*s:%.*s" \
"\n", (int)klen, k, \
(int)vlen, v); \
} \
continue; \
}
#define CONF_MATCH(n) \ #define CONF_MATCH(n) \
(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_MATCH_VALUE(n) \ #define CONF_MATCH_VALUE(n) \
...@@ -961,16 +1077,23 @@ malloc_conf_init(void) { ...@@ -961,16 +1077,23 @@ malloc_conf_init(void) {
} else if (CONF_MATCH_VALUE("false")) { \ } else if (CONF_MATCH_VALUE("false")) { \
o = false; \ o = false; \
} else { \ } else { \
malloc_conf_error( \ CONF_ERROR("Invalid conf value",\
"Invalid conf value", \
k, klen, v, vlen); \ k, klen, v, vlen); \
} \ } \
continue; \ CONF_CONTINUE; \
} }
#define CONF_MIN_no(um, min) false /*
#define CONF_MIN_yes(um, min) ((um) < (min)) * One of the CONF_MIN macros below expands, in one of the use points,
#define CONF_MAX_no(um, max) false * to "unsigned integer < 0", which is always false, triggering the
#define CONF_MAX_yes(um, max) ((um) > (max)) * GCC -Wtype-limits warning, which we disable here and re-enable below.
*/
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
#define CONF_DONT_CHECK_MIN(um, min) false
#define CONF_CHECK_MIN(um, min) ((um) < (min))
#define CONF_DONT_CHECK_MAX(um, max) false
#define CONF_CHECK_MAX(um, max) ((um) > (max))
#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
if (CONF_MATCH(n)) { \ if (CONF_MATCH(n)) { \
uintmax_t um; \ uintmax_t um; \
...@@ -980,26 +1103,21 @@ malloc_conf_init(void) { ...@@ -980,26 +1103,21 @@ malloc_conf_init(void) {
um = malloc_strtoumax(v, &end, 0); \ um = malloc_strtoumax(v, &end, 0); \
if (get_errno() != 0 || (uintptr_t)end -\ if (get_errno() != 0 || (uintptr_t)end -\
(uintptr_t)v != vlen) { \ (uintptr_t)v != vlen) { \
malloc_conf_error( \ CONF_ERROR("Invalid conf value",\
"Invalid conf value", \
k, klen, v, vlen); \ k, klen, v, vlen); \
} else if (clip) { \ } else if (clip) { \
if (CONF_MIN_##check_min(um, \ if (check_min(um, (t)(min))) { \
(t)(min))) { \
o = (t)(min); \ o = (t)(min); \
} else if ( \ } else if ( \
CONF_MAX_##check_max(um, \ check_max(um, (t)(max))) { \
(t)(max))) { \
o = (t)(max); \ o = (t)(max); \
} else { \ } else { \
o = (t)um; \ o = (t)um; \
} \ } \
} else { \ } else { \
if (CONF_MIN_##check_min(um, \ if (check_min(um, (t)(min)) || \
(t)(min)) || \ check_max(um, (t)(max))) { \
CONF_MAX_##check_max(um, \ CONF_ERROR( \
(t)(max))) { \
malloc_conf_error( \
"Out-of-range " \ "Out-of-range " \
"conf value", \ "conf value", \
k, klen, v, vlen); \ k, klen, v, vlen); \
...@@ -1007,7 +1125,7 @@ malloc_conf_init(void) { ...@@ -1007,7 +1125,7 @@ malloc_conf_init(void) {
o = (t)um; \ o = (t)um; \
} \ } \
} \ } \
continue; \ CONF_CONTINUE; \
} }
#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
clip) \ clip) \
...@@ -1025,18 +1143,17 @@ malloc_conf_init(void) { ...@@ -1025,18 +1143,17 @@ malloc_conf_init(void) {
l = strtol(v, &end, 0); \ l = strtol(v, &end, 0); \
if (get_errno() != 0 || (uintptr_t)end -\ if (get_errno() != 0 || (uintptr_t)end -\
(uintptr_t)v != vlen) { \ (uintptr_t)v != vlen) { \
malloc_conf_error( \ CONF_ERROR("Invalid conf value",\
"Invalid conf value", \
k, klen, v, vlen); \ k, klen, v, vlen); \
} else if (l < (ssize_t)(min) || l > \ } else if (l < (ssize_t)(min) || l > \
(ssize_t)(max)) { \ (ssize_t)(max)) { \
malloc_conf_error( \ CONF_ERROR( \
"Out-of-range conf value", \ "Out-of-range conf value", \
k, klen, v, vlen); \ k, klen, v, vlen); \
} else { \ } else { \
o = l; \ o = l; \
} \ } \
continue; \ CONF_CONTINUE; \
} }
#define CONF_HANDLE_CHAR_P(o, n, d) \ #define CONF_HANDLE_CHAR_P(o, n, d) \
if (CONF_MATCH(n)) { \ if (CONF_MATCH(n)) { \
...@@ -1045,7 +1162,14 @@ malloc_conf_init(void) { ...@@ -1045,7 +1162,14 @@ malloc_conf_init(void) {
sizeof(o)-1; \ sizeof(o)-1; \
strncpy(o, v, cpylen); \ strncpy(o, v, cpylen); \
o[cpylen] = '\0'; \ o[cpylen] = '\0'; \
continue; \ CONF_CONTINUE; \
}
bool cur_opt_valid = true;
CONF_HANDLE_BOOL(opt_confirm_conf, "confirm_conf")
if (initial_call) {
continue;
} }
CONF_HANDLE_BOOL(opt_abort, "abort") CONF_HANDLE_BOOL(opt_abort, "abort")
...@@ -1062,10 +1186,10 @@ malloc_conf_init(void) { ...@@ -1062,10 +1186,10 @@ malloc_conf_init(void) {
} }
} }
if (!match) { if (!match) {
malloc_conf_error("Invalid conf value", CONF_ERROR("Invalid conf value",
k, klen, v, vlen); k, klen, v, vlen);
} }
continue; CONF_CONTINUE;
} }
CONF_HANDLE_BOOL(opt_retain, "retain") CONF_HANDLE_BOOL(opt_retain, "retain")
if (strncmp("dss", k, klen) == 0) { if (strncmp("dss", k, klen) == 0) {
...@@ -1075,7 +1199,7 @@ malloc_conf_init(void) { ...@@ -1075,7 +1199,7 @@ malloc_conf_init(void) {
if (strncmp(dss_prec_names[i], v, vlen) if (strncmp(dss_prec_names[i], v, vlen)
== 0) { == 0) {
if (extent_dss_prec_set(i)) { if (extent_dss_prec_set(i)) {
malloc_conf_error( CONF_ERROR(
"Error setting dss", "Error setting dss",
k, klen, v, vlen); k, klen, v, vlen);
} else { } else {
...@@ -1087,13 +1211,36 @@ malloc_conf_init(void) { ...@@ -1087,13 +1211,36 @@ malloc_conf_init(void) {
} }
} }
if (!match) { if (!match) {
malloc_conf_error("Invalid conf value", CONF_ERROR("Invalid conf value",
k, klen, v, vlen); k, klen, v, vlen);
} }
continue; CONF_CONTINUE;
} }
CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
UINT_MAX, yes, no, false) UINT_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
false)
if (CONF_MATCH("bin_shards")) {
const char *bin_shards_segment_cur = v;
size_t vlen_left = vlen;
do {
size_t size_start;
size_t size_end;
size_t nshards;
bool err = malloc_conf_multi_sizes_next(
&bin_shards_segment_cur, &vlen_left,
&size_start, &size_end, &nshards);
if (err || bin_update_shard_size(
bin_shard_sizes, size_start,
size_end, nshards)) {
CONF_ERROR(
"Invalid settings for "
"bin_shards", k, klen, v,
vlen);
break;
}
} while (vlen_left > 0);
CONF_CONTINUE;
}
CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
"dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
...@@ -1105,7 +1252,7 @@ malloc_conf_init(void) { ...@@ -1105,7 +1252,7 @@ malloc_conf_init(void) {
CONF_HANDLE_BOOL(opt_stats_print, "stats_print") CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
if (CONF_MATCH("stats_print_opts")) { if (CONF_MATCH("stats_print_opts")) {
init_opt_stats_print_opts(v, vlen); init_opt_stats_print_opts(v, vlen);
continue; CONF_CONTINUE;
} }
if (config_fill) { if (config_fill) {
if (CONF_MATCH("junk")) { if (CONF_MATCH("junk")) {
...@@ -1126,11 +1273,11 @@ malloc_conf_init(void) { ...@@ -1126,11 +1273,11 @@ malloc_conf_init(void) {
opt_junk_alloc = false; opt_junk_alloc = false;
opt_junk_free = true; opt_junk_free = true;
} else { } else {
malloc_conf_error( CONF_ERROR(
"Invalid conf value", k, "Invalid conf value",
klen, v, vlen); k, klen, v, vlen);
} }
continue; CONF_CONTINUE;
} }
CONF_HANDLE_BOOL(opt_zero, "zero") CONF_HANDLE_BOOL(opt_zero, "zero")
} }
...@@ -1141,11 +1288,25 @@ malloc_conf_init(void) { ...@@ -1141,11 +1288,25 @@ malloc_conf_init(void) {
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
} }
CONF_HANDLE_BOOL(opt_tcache, "tcache") CONF_HANDLE_BOOL(opt_tcache, "tcache")
CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
"lg_extent_max_active_fit", 0,
(sizeof(size_t) << 3), yes, yes, false)
CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
-1, (sizeof(size_t) << 3) - 1) -1, (sizeof(size_t) << 3) - 1)
/*
* The runtime option of oversize_threshold remains
* undocumented. It may be tweaked in the next major
* release (6.0). The default value 8M is rather
* conservative / safe. Tuning it further down may
* reduce fragmentation a bit more, but may also cause
* contention on the huge arena.
*/
CONF_HANDLE_SIZE_T(opt_oversize_threshold,
"oversize_threshold", 0, SC_LARGE_MAXCLASS,
CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, false)
CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
"lg_extent_max_active_fit", 0,
(sizeof(size_t) << 3), CONF_DONT_CHECK_MIN,
CONF_CHECK_MAX, false)
if (strncmp("percpu_arena", k, klen) == 0) { if (strncmp("percpu_arena", k, klen) == 0) {
bool match = false; bool match = false;
for (int i = percpu_arena_mode_names_base; i < for (int i = percpu_arena_mode_names_base; i <
...@@ -1153,7 +1314,7 @@ malloc_conf_init(void) { ...@@ -1153,7 +1314,7 @@ malloc_conf_init(void) {
if (strncmp(percpu_arena_mode_names[i], if (strncmp(percpu_arena_mode_names[i],
v, vlen) == 0) { v, vlen) == 0) {
if (!have_percpu_arena) { if (!have_percpu_arena) {
malloc_conf_error( CONF_ERROR(
"No getcpu support", "No getcpu support",
k, klen, v, vlen); k, klen, v, vlen);
} }
...@@ -1163,17 +1324,42 @@ malloc_conf_init(void) { ...@@ -1163,17 +1324,42 @@ malloc_conf_init(void) {
} }
} }
if (!match) { if (!match) {
malloc_conf_error("Invalid conf value", CONF_ERROR("Invalid conf value",
k, klen, v, vlen); k, klen, v, vlen);
} }
continue; CONF_CONTINUE;
} }
CONF_HANDLE_BOOL(opt_background_thread, CONF_HANDLE_BOOL(opt_background_thread,
"background_thread"); "background_thread");
CONF_HANDLE_SIZE_T(opt_max_background_threads, CONF_HANDLE_SIZE_T(opt_max_background_threads,
"max_background_threads", 1, "max_background_threads", 1,
opt_max_background_threads, yes, yes, opt_max_background_threads,
CONF_CHECK_MIN, CONF_CHECK_MAX,
true); true);
if (CONF_MATCH("slab_sizes")) {
bool err;
const char *slab_size_segment_cur = v;
size_t vlen_left = vlen;
do {
size_t slab_start;
size_t slab_end;
size_t pgs;
err = malloc_conf_multi_sizes_next(
&slab_size_segment_cur,
&vlen_left, &slab_start, &slab_end,
&pgs);
if (!err) {
sc_data_update_slab_size(
sc_data, slab_start,
slab_end, (int)pgs);
} else {
CONF_ERROR("Invalid settings "
"for slab_sizes",
k, klen, v, vlen);
}
} while (!err && vlen_left > 0);
CONF_CONTINUE;
}
if (config_prof) { if (config_prof) {
CONF_HANDLE_BOOL(opt_prof, "prof") CONF_HANDLE_BOOL(opt_prof, "prof")
CONF_HANDLE_CHAR_P(opt_prof_prefix, CONF_HANDLE_CHAR_P(opt_prof_prefix,
...@@ -1183,7 +1369,8 @@ malloc_conf_init(void) { ...@@ -1183,7 +1369,8 @@ malloc_conf_init(void) {
"prof_thread_active_init") "prof_thread_active_init")
CONF_HANDLE_SIZE_T(opt_lg_prof_sample, CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
"lg_prof_sample", 0, (sizeof(uint64_t) << 3) "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
- 1, no, yes, true) - 1, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX,
true)
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
"lg_prof_interval", -1, "lg_prof_interval", -1,
...@@ -1191,6 +1378,7 @@ malloc_conf_init(void) { ...@@ -1191,6 +1378,7 @@ malloc_conf_init(void) {
CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
CONF_HANDLE_BOOL(opt_prof_final, "prof_final") CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
CONF_HANDLE_BOOL(opt_prof_log, "prof_log")
} }
if (config_log) { if (config_log) {
if (CONF_MATCH("log")) { if (CONF_MATCH("log")) {
...@@ -1199,7 +1387,7 @@ malloc_conf_init(void) { ...@@ -1199,7 +1387,7 @@ malloc_conf_init(void) {
vlen : sizeof(log_var_names) - 1); vlen : sizeof(log_var_names) - 1);
strncpy(log_var_names, v, cpylen); strncpy(log_var_names, v, cpylen);
log_var_names[cpylen] = '\0'; log_var_names[cpylen] = '\0';
continue; CONF_CONTINUE;
} }
} }
if (CONF_MATCH("thp")) { if (CONF_MATCH("thp")) {
...@@ -1208,7 +1396,7 @@ malloc_conf_init(void) { ...@@ -1208,7 +1396,7 @@ malloc_conf_init(void) {
if (strncmp(thp_mode_names[i],v, vlen) if (strncmp(thp_mode_names[i],v, vlen)
== 0) { == 0) {
if (!have_madvise_huge) { if (!have_madvise_huge) {
malloc_conf_error( CONF_ERROR(
"No THP support", "No THP support",
k, klen, v, vlen); k, klen, v, vlen);
} }
...@@ -1218,25 +1406,28 @@ malloc_conf_init(void) { ...@@ -1218,25 +1406,28 @@ malloc_conf_init(void) {
} }
} }
if (!match) { if (!match) {
malloc_conf_error("Invalid conf value", CONF_ERROR("Invalid conf value",
k, klen, v, vlen); k, klen, v, vlen);
} }
continue; CONF_CONTINUE;
} }
malloc_conf_error("Invalid conf pair", k, klen, v, CONF_ERROR("Invalid conf pair", k, klen, v, vlen);
vlen); #undef CONF_ERROR
#undef CONF_CONTINUE
#undef CONF_MATCH #undef CONF_MATCH
#undef CONF_MATCH_VALUE #undef CONF_MATCH_VALUE
#undef CONF_HANDLE_BOOL #undef CONF_HANDLE_BOOL
#undef CONF_MIN_no #undef CONF_DONT_CHECK_MIN
#undef CONF_MIN_yes #undef CONF_CHECK_MIN
#undef CONF_MAX_no #undef CONF_DONT_CHECK_MAX
#undef CONF_MAX_yes #undef CONF_CHECK_MAX
#undef CONF_HANDLE_T_U #undef CONF_HANDLE_T_U
#undef CONF_HANDLE_UNSIGNED #undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P #undef CONF_HANDLE_CHAR_P
/* Re-enable diagnostic "-Wtype-limits" */
JEMALLOC_DIAGNOSTIC_POP
} }
if (opt_abort_conf && had_conf_error) { if (opt_abort_conf && had_conf_error) {
malloc_abort_invalid_conf(); malloc_abort_invalid_conf();
...@@ -1245,6 +1436,19 @@ malloc_conf_init(void) { ...@@ -1245,6 +1436,19 @@ malloc_conf_init(void) {
atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
} }
static void
malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL};
char buf[PATH_MAX + 1];
/* The first call only sets the confirm_conf option and fills opts_cache. */
malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf);
malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache,
NULL);
}
#undef MALLOC_CONF_NSOURCES
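/*
 * Flow sketch: with the two-pass scheme above, a setting such as
 * MALLOC_CONF="confirm_conf:true,narenas:4" (values hypothetical) first sets
 * opt_confirm_conf while caching all four source strings, and the second
 * pass then echoes each source and applies the remaining options.
 */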
static bool static bool
malloc_init_hard_needed(void) { malloc_init_hard_needed(void) {
if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
...@@ -1275,10 +1479,33 @@ static bool ...@@ -1275,10 +1479,33 @@ static bool
malloc_init_hard_a0_locked() { malloc_init_hard_a0_locked() {
malloc_initializer = INITIALIZER; malloc_initializer = INITIALIZER;
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
sc_data_t sc_data = {0};
JEMALLOC_DIAGNOSTIC_POP
/*
* Ordering here is somewhat tricky; we need sc_boot() first, since that
* determines what the size classes will be, and then
* malloc_conf_init(), since any slab size tweaking will need to be done
* before sz_boot and bin_boot, which assume that the values they read
* out of sc_data_global are final.
*/
sc_boot(&sc_data);
unsigned bin_shard_sizes[SC_NBINS];
bin_shard_sizes_boot(bin_shard_sizes);
/*
* prof_boot0 only initializes opt_prof_prefix. We need to do it before
* we parse malloc_conf options, in case malloc_conf parsing overwrites
* it.
*/
if (config_prof) { if (config_prof) {
prof_boot0(); prof_boot0();
} }
malloc_conf_init(); malloc_conf_init(&sc_data, bin_shard_sizes);
sz_boot(&sc_data);
bin_boot(&sc_data, bin_shard_sizes);
if (opt_stats_print) { if (opt_stats_print) {
/* Print statistics at exit. */ /* Print statistics at exit. */
if (atexit(stats_print_atexit) != 0) { if (atexit(stats_print_atexit) != 0) {
...@@ -1303,7 +1530,7 @@ malloc_init_hard_a0_locked() { ...@@ -1303,7 +1530,7 @@ malloc_init_hard_a0_locked() {
if (config_prof) { if (config_prof) {
prof_boot1(); prof_boot1();
} }
arena_boot(); arena_boot(&sc_data);
if (tcache_boot(TSDN_NULL)) { if (tcache_boot(TSDN_NULL)) {
return true; return true;
} }
...@@ -1311,11 +1538,13 @@ malloc_init_hard_a0_locked() { ...@@ -1311,11 +1538,13 @@ malloc_init_hard_a0_locked() {
malloc_mutex_rank_exclusive)) { malloc_mutex_rank_exclusive)) {
return true; return true;
} }
hook_boot();
/* /*
* Create enough scaffolding to allow recursive allocation in * Create enough scaffolding to allow recursive allocation in
* malloc_ncpus(). * malloc_ncpus().
*/ */
narenas_auto = 1; narenas_auto = 1;
manual_arena_base = narenas_auto + 1;
memset(arenas, 0, sizeof(arena_t *) * narenas_auto); memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
/* /*
* Initialize one arena here. The rest are lazily created in * Initialize one arena here. The rest are lazily created in
...@@ -1463,6 +1692,10 @@ malloc_init_narenas(void) { ...@@ -1463,6 +1692,10 @@ malloc_init_narenas(void) {
narenas_auto); narenas_auto);
} }
narenas_total_set(narenas_auto); narenas_total_set(narenas_auto);
if (arena_init_huge()) {
narenas_total_inc();
}
manual_arena_base = narenas_total_get();
return false; return false;
} }
...@@ -1560,11 +1793,7 @@ malloc_init_hard(void) { ...@@ -1560,11 +1793,7 @@ malloc_init_hard(void) {
* sets isthreaded) needs to be called without holding any lock. * sets isthreaded) needs to be called without holding any lock.
*/ */
background_thread_ctl_init(tsd_tsdn(tsd)); background_thread_ctl_init(tsd_tsdn(tsd));
if (background_thread_create(tsd, 0)) {
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
bool err = background_thread_create(tsd, 0);
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
if (err) {
return true; return true;
} }
} }
...@@ -1587,8 +1816,12 @@ typedef struct static_opts_s static_opts_t; ...@@ -1587,8 +1816,12 @@ typedef struct static_opts_s static_opts_t;
struct static_opts_s { struct static_opts_s {
/* Whether or not allocation size may overflow. */ /* Whether or not allocation size may overflow. */
bool may_overflow; bool may_overflow;
/* Whether or not allocations of size 0 should be treated as size 1. */
bool bump_empty_alloc; /*
* Whether or not allocations (with alignment) of size 0 should be
* treated as size 1.
*/
bool bump_empty_aligned_alloc;
/* /*
* Whether to assert that allocations are not of size 0 (after any * Whether to assert that allocations are not of size 0 (after any
* bumping). * bumping).
...@@ -1621,12 +1854,16 @@ struct static_opts_s { ...@@ -1621,12 +1854,16 @@ struct static_opts_s {
* initialization) options. * initialization) options.
*/ */
bool slow; bool slow;
/*
* Return size.
*/
bool usize;
}; };
JEMALLOC_ALWAYS_INLINE void JEMALLOC_ALWAYS_INLINE void
static_opts_init(static_opts_t *static_opts) { static_opts_init(static_opts_t *static_opts) {
static_opts->may_overflow = false; static_opts->may_overflow = false;
static_opts->bump_empty_alloc = false; static_opts->bump_empty_aligned_alloc = false;
static_opts->assert_nonempty_alloc = false; static_opts->assert_nonempty_alloc = false;
static_opts->null_out_result_on_error = false; static_opts->null_out_result_on_error = false;
static_opts->set_errno_on_error = false; static_opts->set_errno_on_error = false;
...@@ -1634,6 +1871,7 @@ static_opts_init(static_opts_t *static_opts) { ...@@ -1634,6 +1871,7 @@ static_opts_init(static_opts_t *static_opts) {
static_opts->oom_string = ""; static_opts->oom_string = "";
static_opts->invalid_alignment_string = ""; static_opts->invalid_alignment_string = "";
static_opts->slow = false; static_opts->slow = false;
static_opts->usize = false;
} }
/* /*
...@@ -1648,6 +1886,7 @@ static_opts_init(static_opts_t *static_opts) { ...@@ -1648,6 +1886,7 @@ static_opts_init(static_opts_t *static_opts) {
typedef struct dynamic_opts_s dynamic_opts_t; typedef struct dynamic_opts_s dynamic_opts_t;
struct dynamic_opts_s { struct dynamic_opts_s {
void **result; void **result;
size_t usize;
size_t num_items; size_t num_items;
size_t item_size; size_t item_size;
size_t alignment; size_t alignment;
...@@ -1659,6 +1898,7 @@ struct dynamic_opts_s { ...@@ -1659,6 +1898,7 @@ struct dynamic_opts_s {
JEMALLOC_ALWAYS_INLINE void JEMALLOC_ALWAYS_INLINE void
dynamic_opts_init(dynamic_opts_t *dynamic_opts) { dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
dynamic_opts->result = NULL; dynamic_opts->result = NULL;
dynamic_opts->usize = 0;
dynamic_opts->num_items = 0; dynamic_opts->num_items = 0;
dynamic_opts->item_size = 0; dynamic_opts->item_size = 0;
dynamic_opts->alignment = 0; dynamic_opts->alignment = 0;
...@@ -1722,12 +1962,13 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, ...@@ -1722,12 +1962,13 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
szind_t ind_large; szind_t ind_large;
size_t bumped_usize = usize; size_t bumped_usize = usize;
if (usize <= SMALL_MAXCLASS) { if (usize <= SC_SMALL_MAXCLASS) {
assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) : assert(((dopts->alignment == 0) ?
sz_sa2u(LARGE_MINCLASS, dopts->alignment)) sz_s2u(SC_LARGE_MINCLASS) :
== LARGE_MINCLASS); sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment))
ind_large = sz_size2index(LARGE_MINCLASS); == SC_LARGE_MINCLASS);
bumped_usize = sz_s2u(LARGE_MINCLASS); ind_large = sz_size2index(SC_LARGE_MINCLASS);
bumped_usize = sz_s2u(SC_LARGE_MINCLASS);
ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
bumped_usize, ind_large); bumped_usize, ind_large);
if (unlikely(ret == NULL)) { if (unlikely(ret == NULL)) {
...@@ -1810,17 +2051,6 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { ...@@ -1810,17 +2051,6 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
goto label_oom; goto label_oom;
} }
/* Validate the user input. */
if (sopts->bump_empty_alloc) {
if (unlikely(size == 0)) {
size = 1;
}
}
if (sopts->assert_nonempty_alloc) {
assert (size != 0);
}
if (unlikely(dopts->alignment < sopts->min_alignment if (unlikely(dopts->alignment < sopts->min_alignment
|| (dopts->alignment & (dopts->alignment - 1)) != 0)) { || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
goto label_invalid_alignment; goto label_invalid_alignment;
...@@ -1830,19 +2060,32 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { ...@@ -1830,19 +2060,32 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
if (dopts->alignment == 0) { if (dopts->alignment == 0) {
ind = sz_size2index(size); ind = sz_size2index(size);
if (unlikely(ind >= NSIZES)) { if (unlikely(ind >= SC_NSIZES)) {
goto label_oom; goto label_oom;
} }
if (config_stats || (config_prof && opt_prof)) { if (config_stats || (config_prof && opt_prof) || sopts->usize) {
usize = sz_index2size(ind); usize = sz_index2size(ind);
assert(usize > 0 && usize <= LARGE_MAXCLASS); dopts->usize = usize;
assert(usize > 0 && usize
<= SC_LARGE_MAXCLASS);
} }
} else { } else {
if (sopts->bump_empty_aligned_alloc) {
if (unlikely(size == 0)) {
size = 1;
}
}
usize = sz_sa2u(size, dopts->alignment); usize = sz_sa2u(size, dopts->alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { dopts->usize = usize;
if (unlikely(usize == 0
|| usize > SC_LARGE_MAXCLASS)) {
goto label_oom; goto label_oom;
} }
} }
/* Validate the user input. */
if (sopts->assert_nonempty_alloc) {
assert (size != 0);
}
check_entry_exit_locking(tsd_tsdn(tsd)); check_entry_exit_locking(tsd_tsdn(tsd));
...@@ -1875,7 +2118,8 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { ...@@ -1875,7 +2118,8 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
alloc_ctx_t alloc_ctx; alloc_ctx_t alloc_ctx;
if (likely((uintptr_t)tctx == (uintptr_t)1U)) { if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
alloc_ctx.slab = (usize <= SMALL_MAXCLASS); alloc_ctx.slab = (usize
<= SC_SMALL_MAXCLASS);
allocation = imalloc_no_sample( allocation = imalloc_no_sample(
sopts, dopts, tsd, usize, usize, ind); sopts, dopts, tsd, usize, usize, ind);
} else if ((uintptr_t)tctx > (uintptr_t)1U) { } else if ((uintptr_t)tctx > (uintptr_t)1U) {
...@@ -1980,9 +2224,8 @@ label_invalid_alignment: ...@@ -1980,9 +2224,8 @@ label_invalid_alignment:
return EINVAL; return EINVAL;
} }
/* Returns the errno-style error code of the allocation. */ JEMALLOC_ALWAYS_INLINE bool
JEMALLOC_ALWAYS_INLINE int imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) {
imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
if (config_xmalloc && unlikely(opt_xmalloc)) { if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write(sopts->oom_string); malloc_write(sopts->oom_string);
...@@ -1992,6 +2235,16 @@ imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { ...@@ -1992,6 +2235,16 @@ imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
set_errno(ENOMEM); set_errno(ENOMEM);
*dopts->result = NULL; *dopts->result = NULL;
return false;
}
return true;
}
/* Returns the errno-style error code of the allocation. */
JEMALLOC_ALWAYS_INLINE int
imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
if (tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) {
return ENOMEM; return ENOMEM;
} }
...@@ -2004,19 +2257,18 @@ imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { ...@@ -2004,19 +2257,18 @@ imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
sopts->slow = false; sopts->slow = false;
return imalloc_body(sopts, dopts, tsd); return imalloc_body(sopts, dopts, tsd);
} else { } else {
if (!tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) {
return ENOMEM;
}
sopts->slow = true; sopts->slow = true;
return imalloc_body(sopts, dopts, tsd); return imalloc_body(sopts, dopts, tsd);
} }
} }
/******************************************************************************/
/*
* Begin malloc(3)-compatible functions.
*/
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN JEMALLOC_NOINLINE
void JEMALLOC_NOTHROW * void *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) malloc_default(size_t size) {
je_malloc(size_t size) {
void *ret; void *ret;
static_opts_t sopts; static_opts_t sopts;
dynamic_opts_t dopts; dynamic_opts_t dopts;
...@@ -2026,7 +2278,6 @@ je_malloc(size_t size) { ...@@ -2026,7 +2278,6 @@ je_malloc(size_t size) {
static_opts_init(&sopts); static_opts_init(&sopts);
dynamic_opts_init(&dopts); dynamic_opts_init(&dopts);
sopts.bump_empty_alloc = true;
sopts.null_out_result_on_error = true; sopts.null_out_result_on_error = true;
sopts.set_errno_on_error = true; sopts.set_errno_on_error = true;
sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n"; sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
...@@ -2036,12 +2287,107 @@ je_malloc(size_t size) { ...@@ -2036,12 +2287,107 @@ je_malloc(size_t size) {
dopts.item_size = size; dopts.item_size = size;
imalloc(&sopts, &dopts); imalloc(&sopts, &dopts);
/*
* Note that this branch gets optimized away -- it immediately follows
* the check on tsd_fast that sets sopts.slow.
*/
if (sopts.slow) {
uintptr_t args[3] = {size};
hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args);
}
LOG("core.malloc.exit", "result: %p", ret); LOG("core.malloc.exit", "result: %p", ret);
return ret; return ret;
} }
/******************************************************************************/
/*
* Begin malloc(3)-compatible functions.
*/
/*
* malloc() fastpath.
*
* Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit
* tcache. If either of these is false, we tail-call to the slowpath,
* malloc_default(). Tail-calling is used to avoid any caller-saved
* registers.
*
* fastpath supports ticker and profiling, both of which will also
* tail-call to the slowpath if they fire.
*/
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size) {
LOG("core.malloc.entry", "size: %zu", size);
if (tsd_get_allocates() && unlikely(!malloc_initialized())) {
return malloc_default(size);
}
tsd_t *tsd = tsd_get(false);
if (unlikely(!tsd || !tsd_fast(tsd) || (size > SC_LOOKUP_MAXCLASS))) {
return malloc_default(size);
}
tcache_t *tcache = tsd_tcachep_get(tsd);
if (unlikely(ticker_trytick(&tcache->gc_ticker))) {
return malloc_default(size);
}
szind_t ind = sz_size2index_lookup(size);
size_t usize;
if (config_stats || config_prof) {
usize = sz_index2size(ind);
}
/* Fast path relies on size being a bin. I.e. SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS */
assert(ind < SC_NBINS);
assert(size <= SC_SMALL_MAXCLASS);
if (config_prof) {
int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
bytes_until_sample -= usize;
tsd_bytes_until_sample_set(tsd, bytes_until_sample);
if (unlikely(bytes_until_sample < 0)) {
/*
* Avoid a prof_active check on the fastpath.
* If prof_active is false, set bytes_until_sample to
* a large value. If prof_active is set to true,
* bytes_until_sample will be reset.
*/
if (!prof_active) {
tsd_bytes_until_sample_set(tsd, SSIZE_MAX);
}
return malloc_default(size);
}
}
cache_bin_t *bin = tcache_small_bin_get(tcache, ind);
bool tcache_success;
void* ret = cache_bin_alloc_easy(bin, &tcache_success);
if (tcache_success) {
if (config_stats) {
*tsd_thread_allocatedp_get(tsd) += usize;
bin->tstats.nrequests++;
}
if (config_prof) {
tcache->prof_accumbytes += usize;
}
LOG("core.malloc.exit", "result: %p", ret);
/* Fastpath success */
return ret;
}
return malloc_default(size);
}
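The fastpath above tail-calls malloc_default() whenever thread state is not fast, the size is above SC_LOOKUP_MAXCLASS, the GC ticker fires, a profiling sample is due, or the cache bin is empty. Below is a minimal standalone sketch of that fast/slow split (not jemalloc code; FAST_MAX and cache_slot are hypothetical stand-ins for SC_LOOKUP_MAXCLASS and a tcache bin): the hot routine handles only the cached small-size case and tail-calls a noinline slow routine for everything else, keeping the fast path's register and stack footprint small.

    #include <stddef.h>
    #include <stdlib.h>

    #define FAST_MAX 4096                 /* stand-in for SC_LOOKUP_MAXCLASS */

    static __thread void *cache_slot;     /* stand-in for a thread-local cache bin */

    __attribute__((noinline))
    static void *alloc_slow(size_t size) {
        /* Bootstrap, large sizes, sampling and cache GC would live here. */
        return malloc(size);
    }

    static inline void *alloc_fast(size_t size) {
        if (size > FAST_MAX || cache_slot == NULL) {
            return alloc_slow(size);      /* tail call to the out-of-line slow path */
        }
        void *ret = cache_slot;           /* serve from the cache (size-class
                                             bookkeeping and refill omitted) */
        cache_slot = NULL;
        return ret;
    }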
JEMALLOC_EXPORT int JEMALLOC_NOTHROW JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1)) JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size) { je_posix_memalign(void **memptr, size_t alignment, size_t size) {
...@@ -2055,7 +2401,7 @@ je_posix_memalign(void **memptr, size_t alignment, size_t size) { ...@@ -2055,7 +2401,7 @@ je_posix_memalign(void **memptr, size_t alignment, size_t size) {
static_opts_init(&sopts); static_opts_init(&sopts);
dynamic_opts_init(&dopts); dynamic_opts_init(&dopts);
sopts.bump_empty_alloc = true; sopts.bump_empty_aligned_alloc = true;
sopts.min_alignment = sizeof(void *); sopts.min_alignment = sizeof(void *);
sopts.oom_string = sopts.oom_string =
"<jemalloc>: Error allocating aligned memory: out of memory\n"; "<jemalloc>: Error allocating aligned memory: out of memory\n";
...@@ -2068,6 +2414,12 @@ je_posix_memalign(void **memptr, size_t alignment, size_t size) { ...@@ -2068,6 +2414,12 @@ je_posix_memalign(void **memptr, size_t alignment, size_t size) {
dopts.alignment = alignment; dopts.alignment = alignment;
ret = imalloc(&sopts, &dopts); ret = imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {(uintptr_t)memptr, (uintptr_t)alignment,
(uintptr_t)size};
hook_invoke_alloc(hook_alloc_posix_memalign, *memptr,
(uintptr_t)ret, args);
}
LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret, LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
*memptr); *memptr);
...@@ -2090,7 +2442,7 @@ je_aligned_alloc(size_t alignment, size_t size) { ...@@ -2090,7 +2442,7 @@ je_aligned_alloc(size_t alignment, size_t size) {
static_opts_init(&sopts); static_opts_init(&sopts);
dynamic_opts_init(&dopts); dynamic_opts_init(&dopts);
sopts.bump_empty_alloc = true; sopts.bump_empty_aligned_alloc = true;
sopts.null_out_result_on_error = true; sopts.null_out_result_on_error = true;
sopts.set_errno_on_error = true; sopts.set_errno_on_error = true;
sopts.min_alignment = 1; sopts.min_alignment = 1;
...@@ -2105,6 +2457,11 @@ je_aligned_alloc(size_t alignment, size_t size) { ...@@ -2105,6 +2457,11 @@ je_aligned_alloc(size_t alignment, size_t size) {
dopts.alignment = alignment; dopts.alignment = alignment;
imalloc(&sopts, &dopts); imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {(uintptr_t)alignment, (uintptr_t)size};
hook_invoke_alloc(hook_alloc_aligned_alloc, ret,
(uintptr_t)ret, args);
}
LOG("core.aligned_alloc.exit", "result: %p", ret); LOG("core.aligned_alloc.exit", "result: %p", ret);
...@@ -2125,7 +2482,6 @@ je_calloc(size_t num, size_t size) { ...@@ -2125,7 +2482,6 @@ je_calloc(size_t num, size_t size) {
dynamic_opts_init(&dopts); dynamic_opts_init(&dopts);
sopts.may_overflow = true; sopts.may_overflow = true;
sopts.bump_empty_alloc = true;
sopts.null_out_result_on_error = true; sopts.null_out_result_on_error = true;
sopts.set_errno_on_error = true; sopts.set_errno_on_error = true;
sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n"; sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
...@@ -2136,6 +2492,10 @@ je_calloc(size_t num, size_t size) { ...@@ -2136,6 +2492,10 @@ je_calloc(size_t num, size_t size) {
dopts.zero = true; dopts.zero = true;
imalloc(&sopts, &dopts); imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size};
hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args);
}
LOG("core.calloc.exit", "result: %p", ret); LOG("core.calloc.exit", "result: %p", ret);
...@@ -2144,20 +2504,22 @@ je_calloc(size_t num, size_t size) { ...@@ -2144,20 +2504,22 @@ je_calloc(size_t num, size_t size) {
static void * static void *
irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
prof_tctx_t *tctx) { prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) {
void *p; void *p;
if (tctx == NULL) { if (tctx == NULL) {
return NULL; return NULL;
} }
if (usize <= SMALL_MAXCLASS) { if (usize <= SC_SMALL_MAXCLASS) {
p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); p = iralloc(tsd, old_ptr, old_usize,
SC_LARGE_MINCLASS, 0, false, hook_args);
if (p == NULL) { if (p == NULL) {
return NULL; return NULL;
} }
arena_prof_promote(tsd_tsdn(tsd), p, usize); arena_prof_promote(tsd_tsdn(tsd), p, usize);
} else { } else {
p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); p = iralloc(tsd, old_ptr, old_usize, usize, 0, false,
hook_args);
} }
return p; return p;
...@@ -2165,7 +2527,7 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, ...@@ -2165,7 +2527,7 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
JEMALLOC_ALWAYS_INLINE void * JEMALLOC_ALWAYS_INLINE void *
irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
alloc_ctx_t *alloc_ctx) { alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) {
void *p; void *p;
bool prof_active; bool prof_active;
prof_tctx_t *old_tctx, *tctx; prof_tctx_t *old_tctx, *tctx;
...@@ -2174,9 +2536,11 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, ...@@ -2174,9 +2536,11 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
tctx = prof_alloc_prep(tsd, usize, prof_active, true); tctx = prof_alloc_prep(tsd, usize, prof_active, true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx,
hook_args);
} else { } else {
p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); p = iralloc(tsd, old_ptr, old_usize, usize, 0, false,
hook_args);
} }
if (unlikely(p == NULL)) { if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true); prof_alloc_rollback(tsd, tctx, true);
...@@ -2205,7 +2569,7 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { ...@@ -2205,7 +2569,7 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES); assert(alloc_ctx.szind != SC_NSIZES);
size_t usize; size_t usize;
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
...@@ -2286,11 +2650,12 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { ...@@ -2286,11 +2650,12 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW * void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2) JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size) { je_realloc(void *ptr, size_t arg_size) {
void *ret; void *ret;
tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0; size_t old_usize = 0;
size_t size = arg_size;
LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
...@@ -2305,6 +2670,10 @@ je_realloc(void *ptr, size_t size) { ...@@ -2305,6 +2670,10 @@ je_realloc(void *ptr, size_t size) {
} else { } else {
tcache = NULL; tcache = NULL;
} }
uintptr_t args[3] = {(uintptr_t)ptr, size};
hook_invoke_dalloc(hook_dalloc_realloc, ptr, args);
ifree(tsd, ptr, tcache, true); ifree(tsd, ptr, tcache, true);
LOG("core.realloc.exit", "result: %p", NULL); LOG("core.realloc.exit", "result: %p", NULL);
...@@ -2319,29 +2688,58 @@ je_realloc(void *ptr, size_t size) { ...@@ -2319,29 +2688,58 @@ je_realloc(void *ptr, size_t size) {
check_entry_exit_locking(tsd_tsdn(tsd)); check_entry_exit_locking(tsd_tsdn(tsd));
hook_ralloc_args_t hook_args = {true, {(uintptr_t)ptr,
(uintptr_t)arg_size, 0, 0}};
alloc_ctx_t alloc_ctx; alloc_ctx_t alloc_ctx;
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES); assert(alloc_ctx.szind != SC_NSIZES);
old_usize = sz_index2size(alloc_ctx.szind); old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
usize = sz_s2u(size); usize = sz_s2u(size);
ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ? if (unlikely(usize == 0
NULL : irealloc_prof(tsd, ptr, old_usize, usize, || usize > SC_LARGE_MAXCLASS)) {
&alloc_ctx); ret = NULL;
} else {
ret = irealloc_prof(tsd, ptr, old_usize, usize,
&alloc_ctx, &hook_args);
}
} else { } else {
if (config_stats) { if (config_stats) {
usize = sz_s2u(size); usize = sz_s2u(size);
} }
ret = iralloc(tsd, ptr, old_usize, size, 0, false); ret = iralloc(tsd, ptr, old_usize, size, 0, false,
&hook_args);
} }
tsdn = tsd_tsdn(tsd); tsdn = tsd_tsdn(tsd);
} else { } else {
/* realloc(NULL, size) is equivalent to malloc(size). */ /* realloc(NULL, size) is equivalent to malloc(size). */
void *ret = je_malloc(size); static_opts_t sopts;
LOG("core.realloc.exit", "result: %p", ret); dynamic_opts_t dopts;
static_opts_init(&sopts);
dynamic_opts_init(&dopts);
sopts.null_out_result_on_error = true;
sopts.set_errno_on_error = true;
sopts.oom_string =
"<jemalloc>: Error in realloc(): out of memory\n";
dopts.result = &ret;
dopts.num_items = 1;
dopts.item_size = size;
imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {(uintptr_t)ptr, arg_size};
hook_invoke_alloc(hook_alloc_realloc, ret,
(uintptr_t)ret, args);
}
return ret; return ret;
} }
...@@ -2368,10 +2766,9 @@ je_realloc(void *ptr, size_t size) { ...@@ -2368,10 +2766,9 @@ je_realloc(void *ptr, size_t size) {
return ret; return ret;
} }
JEMALLOC_EXPORT void JEMALLOC_NOTHROW JEMALLOC_NOINLINE
je_free(void *ptr) { void
LOG("core.free.entry", "ptr: %p", ptr); free_default(void *ptr) {
UTRACE(ptr, 0, 0); UTRACE(ptr, 0, 0);
if (likely(ptr != NULL)) { if (likely(ptr != NULL)) {
/* /*
...@@ -2397,10 +2794,79 @@ je_free(void *ptr) { ...@@ -2397,10 +2794,79 @@ je_free(void *ptr) {
} else { } else {
tcache = NULL; tcache = NULL;
} }
uintptr_t args_raw[3] = {(uintptr_t)ptr};
hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw);
ifree(tsd, ptr, tcache, true); ifree(tsd, ptr, tcache, true);
} }
check_entry_exit_locking(tsd_tsdn(tsd)); check_entry_exit_locking(tsd_tsdn(tsd));
} }
}
JEMALLOC_ALWAYS_INLINE
bool free_fastpath(void *ptr, size_t size, bool size_hint) {
tsd_t *tsd = tsd_get(false);
if (unlikely(!tsd || !tsd_fast(tsd))) {
return false;
}
tcache_t *tcache = tsd_tcachep_get(tsd);
alloc_ctx_t alloc_ctx;
/*
* If !config_cache_oblivious, we can check PAGE alignment to
* detect sampled objects. Otherwise addresses are
* randomized, and we have to look it up in the rtree anyway.
* See also isfree().
*/
if (!size_hint || config_cache_oblivious) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd), &extents_rtree,
rtree_ctx, (uintptr_t)ptr,
&alloc_ctx.szind, &alloc_ctx.slab);
/* Note: profiled objects will have alloc_ctx.slab set */
if (!res || !alloc_ctx.slab) {
return false;
}
assert(alloc_ctx.szind != SC_NSIZES);
} else {
/*
* Check for both sizes that are too large, and for sampled objects.
* Sampled objects are always page-aligned. The sampled object check
* will also check for null ptr.
*/
if (size > SC_LOOKUP_MAXCLASS || (((uintptr_t)ptr & PAGE_MASK) == 0)) {
return false;
}
alloc_ctx.szind = sz_size2index_lookup(size);
}
if (unlikely(ticker_trytick(&tcache->gc_ticker))) {
return false;
}
cache_bin_t *bin = tcache_small_bin_get(tcache, alloc_ctx.szind);
cache_bin_info_t *bin_info = &tcache_bin_info[alloc_ctx.szind];
if (!cache_bin_dalloc_easy(bin, bin_info, ptr)) {
return false;
}
if (config_stats) {
size_t usize = sz_index2size(alloc_ctx.szind);
*tsd_thread_deallocatedp_get(tsd) += usize;
}
return true;
}
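The size-hinted branch above relies on two facts stated in its comments: sampled (promoted) allocations are always page-aligned, and only lookup-table size classes (at most SC_LOOKUP_MAXCLASS) qualify for the fast path. A small illustrative check follows (not jemalloc code; PAGE_SIZE_ and LOOKUP_MAX_ are assumed stand-ins) showing how a page-aligned pointer or an over-large size hint forces the slow path:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define PAGE_SIZE_  4096u   /* assumed page size */
    #define LOOKUP_MAX_ 4096u   /* stand-in for SC_LOOKUP_MAXCLASS */

    /* Returns true when a size-hinted free may stay on the fast path: the hint
     * must be a lookup-table size and the pointer must not be page-aligned
     * (page alignment means "possibly sampled" and also catches NULL). */
    static bool free_hint_fastpath_ok(const void *ptr, size_t size_hint) {
        if (size_hint > LOOKUP_MAX_) {
            return false;
        }
        return ((uintptr_t)ptr & (PAGE_SIZE_ - 1)) != 0;
    }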
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr) {
LOG("core.free.entry", "ptr: %p", ptr);
if (!free_fastpath(ptr, 0, false)) {
free_default(ptr);
}
LOG("core.free.exit", ""); LOG("core.free.exit", "");
} }
...@@ -2427,7 +2893,6 @@ je_memalign(size_t alignment, size_t size) { ...@@ -2427,7 +2893,6 @@ je_memalign(size_t alignment, size_t size) {
static_opts_init(&sopts); static_opts_init(&sopts);
dynamic_opts_init(&dopts); dynamic_opts_init(&dopts);
sopts.bump_empty_alloc = true;
sopts.min_alignment = 1; sopts.min_alignment = 1;
sopts.oom_string = sopts.oom_string =
"<jemalloc>: Error allocating aligned memory: out of memory\n"; "<jemalloc>: Error allocating aligned memory: out of memory\n";
...@@ -2441,6 +2906,11 @@ je_memalign(size_t alignment, size_t size) { ...@@ -2441,6 +2906,11 @@ je_memalign(size_t alignment, size_t size) {
dopts.alignment = alignment; dopts.alignment = alignment;
imalloc(&sopts, &dopts); imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {alignment, size};
hook_invoke_alloc(hook_alloc_memalign, ret, (uintptr_t)ret,
args);
}
LOG("core.memalign.exit", "result: %p", ret); LOG("core.memalign.exit", "result: %p", ret);
return ret; return ret;
...@@ -2462,7 +2932,6 @@ je_valloc(size_t size) { ...@@ -2462,7 +2932,6 @@ je_valloc(size_t size) {
static_opts_init(&sopts); static_opts_init(&sopts);
dynamic_opts_init(&dopts); dynamic_opts_init(&dopts);
sopts.bump_empty_alloc = true;
sopts.null_out_result_on_error = true; sopts.null_out_result_on_error = true;
sopts.min_alignment = PAGE; sopts.min_alignment = PAGE;
sopts.oom_string = sopts.oom_string =
...@@ -2476,6 +2945,10 @@ je_valloc(size_t size) { ...@@ -2476,6 +2945,10 @@ je_valloc(size_t size) {
dopts.alignment = PAGE; dopts.alignment = PAGE;
imalloc(&sopts, &dopts); imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {size};
hook_invoke_alloc(hook_alloc_valloc, ret, (uintptr_t)ret, args);
}
LOG("core.valloc.exit", "result: %p\n", ret); LOG("core.valloc.exit", "result: %p\n", ret);
return ret; return ret;
...@@ -2543,6 +3016,82 @@ int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); ...@@ -2543,6 +3016,82 @@ int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
* Begin non-standard functions. * Begin non-standard functions.
*/ */
#ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API
#define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y
#define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y) \
JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y)
typedef struct {
void *ptr;
size_t size;
} smallocx_return_t;
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
smallocx_return_t JEMALLOC_NOTHROW
/*
* The attribute JEMALLOC_ATTR(malloc) cannot be used due to:
* - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86488
*/
JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT)
(size_t size, int flags) {
/*
* Note: the attribute JEMALLOC_ALLOC_SIZE(1) cannot be
* used here because it makes writing beyond the `size`
* of the `ptr` undefined behavior, but the objective
* of this function is to allow writing beyond `size`
* up to `smallocx_return_t::size`.
*/
smallocx_return_t ret;
static_opts_t sopts;
dynamic_opts_t dopts;
LOG("core.smallocx.entry", "size: %zu, flags: %d", size, flags);
static_opts_init(&sopts);
dynamic_opts_init(&dopts);
sopts.assert_nonempty_alloc = true;
sopts.null_out_result_on_error = true;
sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
sopts.usize = true;
dopts.result = &ret.ptr;
dopts.num_items = 1;
dopts.item_size = size;
if (unlikely(flags != 0)) {
if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
}
dopts.zero = MALLOCX_ZERO_GET(flags);
if ((flags & MALLOCX_TCACHE_MASK) != 0) {
if ((flags & MALLOCX_TCACHE_MASK)
== MALLOCX_TCACHE_NONE) {
dopts.tcache_ind = TCACHE_IND_NONE;
} else {
dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
}
} else {
dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
}
if ((flags & MALLOCX_ARENA_MASK) != 0)
dopts.arena_ind = MALLOCX_ARENA_GET(flags);
}
imalloc(&sopts, &dopts);
assert(dopts.usize == je_nallocx(size, flags));
ret.size = dopts.usize;
LOG("core.smallocx.exit", "result: %p, size: %zu", ret.ptr, ret.size);
return ret;
}
#undef JEMALLOC_SMALLOCX_CONCAT_HELPER
#undef JEMALLOC_SMALLOCX_CONCAT_HELPER2
#endif
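A hypothetical usage sketch of the experimental smallocx entry point defined above (the exported symbol is suffixed with the build's version GID, so smallocx() below is only a stand-in for that mangled name). It illustrates the contract described in the comments: the caller may write past the requested size, up to the returned usable size, which is why JEMALLOC_ALLOC_SIZE(1) cannot be applied.

    #include <stddef.h>
    #include <string.h>

    typedef struct { void *ptr; size_t size; } smallocx_return_t;
    smallocx_return_t smallocx(size_t size, int flags);  /* stand-in declaration */

    static void smallocx_example(void) {
        smallocx_return_t r = smallocx(100, 0);
        if (r.ptr != NULL) {
            /* Use the full usable size, not just the 100 bytes requested. */
            memset(r.ptr, 0, r.size);
        }
    }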
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW * void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
...@@ -2586,6 +3135,11 @@ je_mallocx(size_t size, int flags) { ...@@ -2586,6 +3135,11 @@ je_mallocx(size_t size, int flags) {
} }
imalloc(&sopts, &dopts); imalloc(&sopts, &dopts);
if (sopts.slow) {
uintptr_t args[3] = {size, flags};
hook_invoke_alloc(hook_alloc_mallocx, ret, (uintptr_t)ret,
args);
}
LOG("core.mallocx.exit", "result: %p", ret); LOG("core.mallocx.exit", "result: %p", ret);
return ret; return ret;
...@@ -2594,22 +3148,23 @@ je_mallocx(size_t size, int flags) { ...@@ -2594,22 +3148,23 @@ je_mallocx(size_t size, int flags) {
static void * static void *
irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
prof_tctx_t *tctx) { prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) {
void *p; void *p;
if (tctx == NULL) { if (tctx == NULL) {
return NULL; return NULL;
} }
if (usize <= SMALL_MAXCLASS) { if (usize <= SC_SMALL_MAXCLASS) {
p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS, p = iralloct(tsdn, old_ptr, old_usize,
alignment, zero, tcache, arena); SC_LARGE_MINCLASS, alignment, zero, tcache,
arena, hook_args);
if (p == NULL) { if (p == NULL) {
return NULL; return NULL;
} }
arena_prof_promote(tsdn, p, usize); arena_prof_promote(tsdn, p, usize);
} else { } else {
p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
tcache, arena); tcache, arena, hook_args);
} }
return p; return p;
...@@ -2618,7 +3173,7 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, ...@@ -2618,7 +3173,7 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
JEMALLOC_ALWAYS_INLINE void * JEMALLOC_ALWAYS_INLINE void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
size_t alignment, size_t *usize, bool zero, tcache_t *tcache, size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
arena_t *arena, alloc_ctx_t *alloc_ctx) { arena_t *arena, alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) {
void *p; void *p;
bool prof_active; bool prof_active;
prof_tctx_t *old_tctx, *tctx; prof_tctx_t *old_tctx, *tctx;
...@@ -2628,10 +3183,10 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, ...@@ -2628,10 +3183,10 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
tctx = prof_alloc_prep(tsd, *usize, prof_active, false); tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
*usize, alignment, zero, tcache, arena, tctx); *usize, alignment, zero, tcache, arena, tctx, hook_args);
} else { } else {
p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
zero, tcache, arena); zero, tcache, arena, hook_args);
} }
if (unlikely(p == NULL)) { if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, false); prof_alloc_rollback(tsd, tctx, false);
...@@ -2702,23 +3257,27 @@ je_rallocx(void *ptr, size_t size, int flags) { ...@@ -2702,23 +3257,27 @@ je_rallocx(void *ptr, size_t size, int flags) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES); assert(alloc_ctx.szind != SC_NSIZES);
old_usize = sz_index2size(alloc_ctx.szind); old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
hook_ralloc_args_t hook_args = {false, {(uintptr_t)ptr, size, flags,
0}};
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
usize = (alignment == 0) ? usize = (alignment == 0) ?
sz_s2u(size) : sz_sa2u(size, alignment); sz_s2u(size) : sz_sa2u(size, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { if (unlikely(usize == 0
|| usize > SC_LARGE_MAXCLASS)) {
goto label_oom; goto label_oom;
} }
p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
zero, tcache, arena, &alloc_ctx); zero, tcache, arena, &alloc_ctx, &hook_args);
if (unlikely(p == NULL)) { if (unlikely(p == NULL)) {
goto label_oom; goto label_oom;
} }
} else { } else {
p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
zero, tcache, arena); zero, tcache, arena, &hook_args);
if (unlikely(p == NULL)) { if (unlikely(p == NULL)) {
goto label_oom; goto label_oom;
} }
...@@ -2752,14 +3311,14 @@ label_oom: ...@@ -2752,14 +3311,14 @@ label_oom:
JEMALLOC_ALWAYS_INLINE size_t JEMALLOC_ALWAYS_INLINE size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
size_t extra, size_t alignment, bool zero) { size_t extra, size_t alignment, bool zero) {
size_t usize; size_t newsize;
if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) { if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero,
&newsize)) {
return old_usize; return old_usize;
} }
usize = isalloc(tsdn, ptr);
return usize; return newsize;
} }
static size_t static size_t
...@@ -2793,17 +3352,19 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, ...@@ -2793,17 +3352,19 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
*/ */
if (alignment == 0) { if (alignment == 0) {
usize_max = sz_s2u(size+extra); usize_max = sz_s2u(size+extra);
assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS); assert(usize_max > 0
&& usize_max <= SC_LARGE_MAXCLASS);
} else { } else {
usize_max = sz_sa2u(size+extra, alignment); usize_max = sz_sa2u(size+extra, alignment);
if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) { if (unlikely(usize_max == 0
|| usize_max > SC_LARGE_MAXCLASS)) {
/* /*
* usize_max is out of range, and chances are that * usize_max is out of range, and chances are that
* allocation will fail, but use the maximum possible * allocation will fail, but use the maximum possible
* value and carry on with prof_alloc_prep(), just in * value and carry on with prof_alloc_prep(), just in
* case allocation succeeds. * case allocation succeeds.
*/ */
usize_max = LARGE_MAXCLASS; usize_max = SC_LARGE_MAXCLASS;
} }
} }
tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
...@@ -2846,24 +3407,24 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) { ...@@ -2846,24 +3407,24 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES); assert(alloc_ctx.szind != SC_NSIZES);
old_usize = sz_index2size(alloc_ctx.szind); old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
/* /*
* The API explicitly absolves itself of protecting against (size + * The API explicitly absolves itself of protecting against (size +
* extra) numerical overflow, but we may need to clamp extra to avoid * extra) numerical overflow, but we may need to clamp extra to avoid
* exceeding LARGE_MAXCLASS. * exceeding SC_LARGE_MAXCLASS.
* *
* Ordinarily, size limit checking is handled deeper down, but here we * Ordinarily, size limit checking is handled deeper down, but here we
* have to check as part of (size + extra) clamping, since we need the * have to check as part of (size + extra) clamping, since we need the
* clamped value in the above helper functions. * clamped value in the above helper functions.
*/ */
if (unlikely(size > LARGE_MAXCLASS)) { if (unlikely(size > SC_LARGE_MAXCLASS)) {
usize = old_usize; usize = old_usize;
goto label_not_resized; goto label_not_resized;
} }
if (unlikely(LARGE_MAXCLASS - size < extra)) { if (unlikely(SC_LARGE_MAXCLASS - size < extra)) {
extra = LARGE_MAXCLASS - size; extra = SC_LARGE_MAXCLASS - size;
} }
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
...@@ -2882,6 +3443,12 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) { ...@@ -2882,6 +3443,12 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
*tsd_thread_deallocatedp_get(tsd) += old_usize; *tsd_thread_deallocatedp_get(tsd) += old_usize;
} }
label_not_resized: label_not_resized:
if (unlikely(!tsd_fast(tsd))) {
uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags};
hook_invoke_expand(hook_expand_xallocx, ptr, old_usize,
usize, (uintptr_t)usize, args);
}
UTRACE(ptr, size, ptr); UTRACE(ptr, size, ptr);
check_entry_exit_locking(tsd_tsdn(tsd)); check_entry_exit_locking(tsd_tsdn(tsd));
...@@ -2891,7 +3458,7 @@ label_not_resized: ...@@ -2891,7 +3458,7 @@ label_not_resized:
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure) JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, UNUSED int flags) { je_sallocx(const void *ptr, int flags) {
size_t usize; size_t usize;
tsdn_t *tsdn; tsdn_t *tsdn;
...@@ -2954,6 +3521,8 @@ je_dallocx(void *ptr, int flags) { ...@@ -2954,6 +3521,8 @@ je_dallocx(void *ptr, int flags) {
tsd_assert_fast(tsd); tsd_assert_fast(tsd);
ifree(tsd, ptr, tcache, false); ifree(tsd, ptr, tcache, false);
} else { } else {
uintptr_t args_raw[3] = {(uintptr_t)ptr, flags};
hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw);
ifree(tsd, ptr, tcache, true); ifree(tsd, ptr, tcache, true);
} }
check_entry_exit_locking(tsd_tsdn(tsd)); check_entry_exit_locking(tsd_tsdn(tsd));
...@@ -2975,14 +3544,11 @@ inallocx(tsdn_t *tsdn, size_t size, int flags) { ...@@ -2975,14 +3544,11 @@ inallocx(tsdn_t *tsdn, size_t size, int flags) {
return usize; return usize;
} }
JEMALLOC_EXPORT void JEMALLOC_NOTHROW JEMALLOC_NOINLINE void
je_sdallocx(void *ptr, size_t size, int flags) { sdallocx_default(void *ptr, size_t size, int flags) {
assert(ptr != NULL); assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER); assert(malloc_initialized() || IS_INITIALIZER);
LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
size, flags);
tsd_t *tsd = tsd_fetch(); tsd_t *tsd = tsd_fetch();
bool fast = tsd_fast(tsd); bool fast = tsd_fast(tsd);
size_t usize = inallocx(tsd_tsdn(tsd), size, flags); size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
...@@ -3016,10 +3582,35 @@ je_sdallocx(void *ptr, size_t size, int flags) { ...@@ -3016,10 +3582,35 @@ je_sdallocx(void *ptr, size_t size, int flags) {
tsd_assert_fast(tsd); tsd_assert_fast(tsd);
isfree(tsd, ptr, usize, tcache, false); isfree(tsd, ptr, usize, tcache, false);
} else { } else {
uintptr_t args_raw[3] = {(uintptr_t)ptr, size, flags};
hook_invoke_dalloc(hook_dalloc_sdallocx, ptr, args_raw);
isfree(tsd, ptr, usize, tcache, true); isfree(tsd, ptr, usize, tcache, true);
} }
check_entry_exit_locking(tsd_tsdn(tsd)); check_entry_exit_locking(tsd_tsdn(tsd));
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_sdallocx(void *ptr, size_t size, int flags) {
LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
size, flags);
if (flags !=0 || !free_fastpath(ptr, size, true)) {
sdallocx_default(ptr, size, flags);
}
LOG("core.sdallocx.exit", "");
}
void JEMALLOC_NOTHROW
je_sdallocx_noflags(void *ptr, size_t size) {
LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr,
size);
if (!free_fastpath(ptr, size, true)) {
sdallocx_default(ptr, size, 0);
}
LOG("core.sdallocx.exit", ""); LOG("core.sdallocx.exit", "");
} }
...@@ -3040,7 +3631,7 @@ je_nallocx(size_t size, int flags) { ...@@ -3040,7 +3631,7 @@ je_nallocx(size_t size, int flags) {
check_entry_exit_locking(tsdn); check_entry_exit_locking(tsdn);
usize = inallocx(tsdn, size, flags); usize = inallocx(tsdn, size, flags);
if (unlikely(usize > LARGE_MAXCLASS)) { if (unlikely(usize > SC_LARGE_MAXCLASS)) {
LOG("core.nallocx.exit", "result: %zu", ZU(0)); LOG("core.nallocx.exit", "result: %zu", ZU(0));
return 0; return 0;
} }
...@@ -3256,6 +3847,7 @@ _malloc_prefork(void) ...@@ -3256,6 +3847,7 @@ _malloc_prefork(void)
} }
} }
prof_prefork1(tsd_tsdn(tsd)); prof_prefork1(tsd_tsdn(tsd));
tsd_prefork(tsd);
} }
#ifndef JEMALLOC_MUTEX_INIT_CB #ifndef JEMALLOC_MUTEX_INIT_CB
...@@ -3278,6 +3870,8 @@ _malloc_postfork(void) ...@@ -3278,6 +3870,8 @@ _malloc_postfork(void)
tsd = tsd_fetch(); tsd = tsd_fetch();
tsd_postfork_parent(tsd);
witness_postfork_parent(tsd_witness_tsdp_get(tsd)); witness_postfork_parent(tsd_witness_tsdp_get(tsd));
/* Release all mutexes, now that fork() has completed. */ /* Release all mutexes, now that fork() has completed. */
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
...@@ -3305,6 +3899,8 @@ jemalloc_postfork_child(void) { ...@@ -3305,6 +3899,8 @@ jemalloc_postfork_child(void) {
tsd = tsd_fetch(); tsd = tsd_fetch();
tsd_postfork_child(tsd);
witness_postfork_child(tsd_witness_tsdp_get(tsd)); witness_postfork_child(tsd_witness_tsdp_get(tsd));
/* Release all mutexes, now that fork() has completed. */ /* Release all mutexes, now that fork() has completed. */
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
......
...@@ -128,14 +128,14 @@ operator delete(void *ptr, std::size_t size) noexcept { ...@@ -128,14 +128,14 @@ operator delete(void *ptr, std::size_t size) noexcept {
if (unlikely(ptr == nullptr)) { if (unlikely(ptr == nullptr)) {
return; return;
} }
je_sdallocx(ptr, size, /*flags=*/0); je_sdallocx_noflags(ptr, size);
} }
void operator delete[](void *ptr, std::size_t size) noexcept { void operator delete[](void *ptr, std::size_t size) noexcept {
if (unlikely(ptr == nullptr)) { if (unlikely(ptr == nullptr)) {
return; return;
} }
je_sdallocx(ptr, size, /*flags=*/0); je_sdallocx_noflags(ptr, size);
} }
#endif // __cpp_sized_deallocation #endif // __cpp_sized_deallocation
...@@ -28,7 +28,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, ...@@ -28,7 +28,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
assert(!tsdn_null(tsdn) || arena != NULL); assert(!tsdn_null(tsdn) || arena != NULL);
ausize = sz_sa2u(usize, alignment); ausize = sz_sa2u(usize, alignment);
if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) { if (unlikely(ausize == 0 || ausize > SC_LARGE_MAXCLASS)) {
return NULL; return NULL;
} }
...@@ -42,7 +42,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, ...@@ -42,7 +42,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
*/ */
is_zeroed = zero; is_zeroed = zero;
if (likely(!tsdn_null(tsdn))) { if (likely(!tsdn_null(tsdn))) {
arena = arena_choose(tsdn_tsd(tsdn), arena); arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
} }
if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn, if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
arena, usize, alignment, &is_zeroed)) == NULL) { arena, usize, alignment, &is_zeroed)) == NULL) {
...@@ -109,7 +109,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) { ...@@ -109,7 +109,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
if (diff != 0) { if (diff != 0) {
extent_t *trail = extent_split_wrapper(tsdn, arena, extent_t *trail = extent_split_wrapper(tsdn, arena,
&extent_hooks, extent, usize + sz_large_pad, &extent_hooks, extent, usize + sz_large_pad,
sz_size2index(usize), false, diff, NSIZES, false); sz_size2index(usize), false, diff, SC_NSIZES, false);
if (trail == NULL) { if (trail == NULL) {
return true; return true;
} }
...@@ -154,17 +154,17 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, ...@@ -154,17 +154,17 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
bool new_mapping; bool new_mapping;
if ((trail = extents_alloc(tsdn, arena, &extent_hooks, if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_dirty, extent_past_get(extent), trailsize, 0, &arena->extents_dirty, extent_past_get(extent), trailsize, 0,
CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL
|| (trail = extents_alloc(tsdn, arena, &extent_hooks, || (trail = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_muzzy, extent_past_get(extent), trailsize, 0, &arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) { CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL) {
if (config_stats) { if (config_stats) {
new_mapping = false; new_mapping = false;
} }
} else { } else {
if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks, if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, 0, CACHELINE, false, extent_past_get(extent), trailsize, 0, CACHELINE, false,
NSIZES, &is_zeroed_trail, &commit)) == NULL) { SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) {
return true; return true;
} }
if (config_stats) { if (config_stats) {
...@@ -221,9 +221,10 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, ...@@ -221,9 +221,10 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
size_t oldusize = extent_usize_get(extent); size_t oldusize = extent_usize_get(extent);
/* The following should have been caught by callers. */ /* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS); assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
/* Both allocation sizes must be large to avoid a move. */ /* Both allocation sizes must be large to avoid a move. */
assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS); assert(oldusize >= SC_LARGE_MINCLASS
&& usize_max >= SC_LARGE_MINCLASS);
if (usize_max > oldusize) { if (usize_max > oldusize) {
/* Attempt to expand the allocation in-place. */ /* Attempt to expand the allocation in-place. */
...@@ -270,17 +271,23 @@ large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, ...@@ -270,17 +271,23 @@ large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
} }
void * void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache) { size_t alignment, bool zero, tcache_t *tcache,
size_t oldusize = extent_usize_get(extent); hook_ralloc_args_t *hook_args) {
extent_t *extent = iealloc(tsdn, ptr);
size_t oldusize = extent_usize_get(extent);
/* The following should have been caught by callers. */ /* The following should have been caught by callers. */
assert(usize > 0 && usize <= LARGE_MAXCLASS); assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
/* Both allocation sizes must be large to avoid a move. */ /* Both allocation sizes must be large to avoid a move. */
assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS); assert(oldusize >= SC_LARGE_MINCLASS
&& usize >= SC_LARGE_MINCLASS);
/* Try to avoid moving the allocation. */ /* Try to avoid moving the allocation. */
if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) { if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
hook_invoke_expand(hook_args->is_realloc
? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize,
usize, (uintptr_t)ptr, hook_args->args);
return extent_addr_get(extent); return extent_addr_get(extent);
} }
...@@ -295,6 +302,12 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, ...@@ -295,6 +302,12 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
return NULL; return NULL;
} }
hook_invoke_alloc(hook_args->is_realloc
? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
hook_args->args);
hook_invoke_dalloc(hook_args->is_realloc
? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
size_t copysize = (usize < oldusize) ? usize : oldusize; size_t copysize = (usize < oldusize) ? usize : oldusize;
memcpy(ret, extent_addr_get(extent), copysize); memcpy(ret, extent_addr_get(extent), copysize);
isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true); isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
...@@ -318,8 +331,9 @@ large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent, ...@@ -318,8 +331,9 @@ large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
large_dalloc_maybe_junk(extent_addr_get(extent), large_dalloc_maybe_junk(extent_addr_get(extent),
extent_usize_get(extent)); extent_usize_get(extent));
} else { } else {
malloc_mutex_assert_owner(tsdn, &arena->large_mtx); /* Only hold the large_mtx if necessary. */
if (!arena_is_auto(arena)) { if (!arena_is_auto(arena)) {
malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
extent_list_remove(&arena->large, extent); extent_list_remove(&arena->large, extent);
} }
} }
...@@ -369,3 +383,13 @@ void ...@@ -369,3 +383,13 @@ void
large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) { large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U); large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
} }
nstime_t
large_prof_alloc_time_get(const extent_t *extent) {
return extent_prof_alloc_time_get(extent);
}
void
large_prof_alloc_time_set(extent_t *extent, nstime_t t) {
extent_prof_alloc_time_set(extent, t);
}