Unverified commit c4b4b6c0 authored by Oran Agra, committed by GitHub

Merge pull request #9623 from yoav-steinberg/upgrade_jemalloc_5.2.1

Upgraded to jemalloc 5.2.1 from 5.1.0.
Cherry-picked all relevant fixes (found by diffing our 5.1.0 against upstream 5.1.0 and identifying the relevant commits).
Details of what was done:

[cherry-picked] fd7d51c3 2021-05-03 Resolve nonsense static analysis warnings (Oran Agra)
[cherry-picked] 448c435b 2020-09-29 Fix compilation warnings in Lua and jemalloc dependencies (#7785) (YoongHM)
[skipped - already in upstream] 9216b96b 2020-09-21 Fix compilation warning in jemalloc's malloc_vsnprintf (#7789) (YoongHM)
[cherry-picked] 88d71f47 2020-05-20 fix a rare active defrag edge case bug leading to stagnation (Oran Agra)
[skipped - already in upstream] 2fec7d9c 2019-05-30 Jemalloc: Avoid blocking on background thread lock for stats.
[cherry-picked] 920158ec 2018-07-11 Active defrag fixes for 32bit builds (again) (Oran Agra)
[cherry-picked] e8099cab 2018-06-26 add defrag hint support into jemalloc 5 (Oran Agra)
[re-done] 4e729fcd 2018-05-24 Generate configure for Jemalloc. (antirez)

Additionally had to do this:
7727cc2 2021-10-10 Fix defrag to support sharded bins in arena (added in v5.2.1) (Yoav Steinberg)

When reviewing, please look at all commits except the first one, which just replaces the 5.1.0 sources with 5.2.1.
Also, I think we should merge this without squashing in order to preserve the changes we made to jemalloc.
parents 276b460e 85737e67
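
A possible way for reviewers to walk the individual (non-squashed) commits, using the parent hashes listed above (a sketch only; it assumes both parents are fetched locally):

```sh
# List only the commits introduced by this pull request.
git log --oneline 276b460e..85737e67

# Inspect every commit after the first one (the bulk 5.1.0 -> 5.2.1 source import).
git log --reverse --format=%H 276b460e..85737e67 | tail -n +2 |
  while read c; do git show --stat "$c"; done
```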
......@@ -88,7 +88,7 @@ JEMALLOC_LDFLAGS= $(LDFLAGS)
jemalloc: .make-prerequisites
@printf '%b %b\n' $(MAKECOLOR)MAKE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR)
cd jemalloc && ./configure --with-version=5.1.0-0-g0 --with-lg-quantum=3 --with-jemalloc-prefix=je_ CFLAGS="$(JEMALLOC_CFLAGS)" LDFLAGS="$(JEMALLOC_LDFLAGS)"
cd jemalloc && ./configure --with-version=5.2.1-0-g0 --with-lg-quantum=3 --with-jemalloc-prefix=je_ CFLAGS="$(JEMALLOC_CFLAGS)" LDFLAGS="$(JEMALLOC_LDFLAGS)"
cd jemalloc && $(MAKE) CFLAGS="$(JEMALLOC_CFLAGS)" LDFLAGS="$(JEMALLOC_LDFLAGS)" lib/libjemalloc.a
.PHONY: jemalloc
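
Not part of the diff, but as a quick sanity check that the version bump above took effect (assumes a local rebuild of Redis against the bundled allocator):

```sh
# Rebuild from scratch so the bundled jemalloc 5.2.1 is actually picked up.
make distclean && make MALLOC=jemalloc

# With a server running, the allocator version is reported by INFO memory.
redis-cli info memory | grep mem_allocator    # e.g. mem_allocator:jemalloc-5.2.1
```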
......@@ -41,6 +41,22 @@ the following additional steps:
changed, otherwise you could just copy the old implementation if you are
upgrading just to a similar version of Jemalloc.
#### Updating/upgrading jemalloc
The jemalloc directory is pulled as a subtree from the upstream jemalloc GitHub repo. To update it, run the following from the project root:
1. `git subtree pull --prefix deps/jemalloc https://github.com/jemalloc/jemalloc.git <version-tag> --squash`<br>
This should hopefully merge the local changes into the new version.
2. In case any conflicts arise (due to our changes) you'll need to resolve them and commit.
3. Reconfigure jemalloc:<br>
```sh
rm deps/jemalloc/VERSION deps/jemalloc/configure
cd deps/jemalloc
./autogen.sh --with-version=<version-tag>-0-g0
```
4. Update jemalloc's version in `deps/Makefile`: search for "`--with-version=<old-version-tag>-0-g0`" and update it accordingly (see the sketch after this list).
5. Commit the changes (VERSION, configure, Makefile).
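
For step 4, a minimal sketch of the edit (assumes GNU `sed -i`; the version strings are placeholders matching the `deps/Makefile` hunk at the top of this diff):

```sh
# Bump the --with-version argument passed to jemalloc's configure.
sed -i 's/--with-version=5\.1\.0-0-g0/--with-version=5.2.1-0-g0/' deps/Makefile

# Step 5: commit the regenerated files together with the Makefile change.
git add deps/jemalloc/VERSION deps/jemalloc/configure deps/Makefile
git commit -m "Reconfigure jemalloc after upgrade"
```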
Hiredis
---
......@@ -78,5 +94,3 @@ and our version:
1. Makefile is modified to allow a different compiler than GCC.
2. We have the implementation source code, and directly link to the following external libraries: `lua_cjson.o`, `lua_struct.o`, `lua_cmsgpack.o` and `lua_bit.o`.
3. There is a security fix in `ldo.c`, line 498: The check for `LUA_SIGNATURE[0]` is removed in order to avoid direct bytecode execution.
......@@ -5,27 +5,27 @@ environment:
- MSYSTEM: MINGW64
CPU: x86_64
MSVC: amd64
CONFIG_FLAGS: --enable-debug
- MSYSTEM: MINGW64
CPU: x86_64
CONFIG_FLAGS: --enable-debug
- MSYSTEM: MINGW32
CPU: i686
MSVC: x86
- MSYSTEM: MINGW64
CPU: x86_64
CONFIG_FLAGS: --enable-debug
- MSYSTEM: MINGW32
CPU: i686
CONFIG_FLAGS: --enable-debug
- MSYSTEM: MINGW64
CPU: x86_64
MSVC: amd64
CONFIG_FLAGS: --enable-debug
- MSYSTEM: MINGW64
CPU: x86_64
- MSYSTEM: MINGW32
CPU: i686
MSVC: x86
CONFIG_FLAGS: --enable-debug
- MSYSTEM: MINGW64
CPU: x86_64
CONFIG_FLAGS: --enable-debug
- MSYSTEM: MINGW32
CPU: i686
CONFIG_FLAGS: --enable-debug
install:
- set PATH=c:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH%
......
env:
CIRRUS_CLONE_DEPTH: 1
ARCH: amd64
task:
freebsd_instance:
matrix:
image: freebsd-12-0-release-amd64
image: freebsd-11-2-release-amd64
install_script:
- sed -i.bak -e 's,pkg+http://pkg.FreeBSD.org/\${ABI}/quarterly,pkg+http://pkg.FreeBSD.org/\${ABI}/latest,' /etc/pkg/FreeBSD.conf
- pkg upgrade -y
- pkg install -y autoconf gmake
script:
- autoconf
#- ./configure ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS" } $CONFIGURE_FLAGS
- ./configure
- export JFLAG=`sysctl -n kern.smp.cpus`
- gmake -j${JFLAG}
- gmake -j${JFLAG} tests
- gmake check
......@@ -30,7 +30,6 @@
/include/jemalloc/internal/public_namespace.h
/include/jemalloc/internal/public_symbols.txt
/include/jemalloc/internal/public_unnamespace.h
/include/jemalloc/internal/size_classes.h
/include/jemalloc/jemalloc.h
/include/jemalloc/jemalloc_defs.h
/include/jemalloc/jemalloc_macros.h
......
......@@ -11,7 +11,7 @@ matrix:
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons:
addons: &gcc_multilib
apt:
packages:
- gcc-multilib
......@@ -21,6 +21,10 @@ matrix:
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
......@@ -37,20 +41,25 @@ matrix:
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: osx
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: osx
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: osx
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: osx
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons:
apt:
packages:
- gcc-multilib
addons: *gcc_multilib
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
......@@ -61,50 +70,39 @@ matrix:
env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons:
apt:
packages:
- gcc-multilib
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons:
apt:
packages:
- gcc-multilib
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons:
apt:
packages:
- gcc-multilib
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons:
apt:
packages:
- gcc-multilib
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons:
apt:
packages:
- gcc-multilib
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons:
apt:
packages:
- gcc-multilib
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
addons:
apt:
packages:
- gcc-multilib
addons: *gcc_multilib
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
......@@ -115,6 +113,10 @@ matrix:
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
......@@ -123,6 +125,10 @@ matrix:
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
......@@ -131,6 +137,24 @@ matrix:
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
......@@ -143,10 +167,25 @@ matrix:
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
# Development build
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-cache-oblivious --enable-stats --enable-log --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
# --enable-expermental-smallocx:
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-experimental-smallocx --enable-stats --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
# Valgrind
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind"
addons:
apt:
packages:
- valgrind
before_script:
- autoconf
- scripts/gen_travis.py > travis_script && diff .travis.yml travis_script
- ./configure ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS" } $CONFIGURE_FLAGS
- make -j3
- make -j3 tests
......
Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
Copyright (C) 2002-2018 Jason Evans <jasone@canonware.com>.
Copyright (C) 2002-present Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
Copyright (C) 2009-2018 Facebook, Inc. All rights reserved.
Copyright (C) 2009-present Facebook, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
......
......@@ -4,7 +4,143 @@ brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
* 5.1.0 (May 4th, 2018)
* 5.2.1 (August 5, 2019)
This release is primarily about Windows. A critical virtual memory leak is
resolved on all Windows platforms. The regression was present in all releases
since 5.0.0.
Bug fixes:
- Fix a severe virtual memory leak on Windows. This regression was first
released in 5.0.0. (@Ignition, @j0t, @frederik-h, @davidtgoldblatt,
@interwq)
- Fix size 0 handling in posix_memalign(). This regression was first released
in 5.2.0. (@interwq)
- Fix the prof_log unit test which may observe unexpected backtraces from
compiler optimizations. The test was first added in 5.2.0. (@marxin,
@gnzlbg, @interwq)
- Fix the declaration of the extent_avail tree. This regression was first
released in 5.1.0. (@zoulasc)
- Fix an incorrect reference in jeprof. This functionality was first released
in 3.0.0. (@prehistoric-penguin)
- Fix an assertion on the deallocation fast-path. This regression was first
released in 5.2.0. (@yinan1048576)
- Fix the TLS_MODEL attribute in headers. This regression was first released
in 5.0.0. (@zoulasc, @interwq)
Optimizations and refactors:
- Implement opt.retain on Windows and enable by default on 64-bit. (@interwq,
@davidtgoldblatt)
- Optimize away a branch on the operator delete[] path. (@mgrice)
- Add format annotation to the format generator function. (@zoulasc)
- Refactor and improve the size class header generation. (@yinan1048576)
- Remove best fit. (@djwatson)
- Avoid blocking on background thread locks for stats. (@oranagra, @interwq)
* 5.2.0 (April 2, 2019)
This release includes a few notable improvements, which are summarized below:
1) improved fast-path performance from the optimizations by @djwatson; 2)
reduced virtual memory fragmentation and metadata usage; and 3) bug fixes on
setting the number of background threads. In addition, peak / spike memory
usage is improved with certain allocation patterns. As usual, the release and
prior dev versions have gone through large-scale production testing.
New features:
- Implement oversize_threshold, which uses a dedicated arena for allocations
crossing the specified threshold to reduce fragmentation. (@interwq)
- Add extents usage information to stats. (@tyleretzel)
- Log time information for sampled allocations. (@tyleretzel)
- Support 0 size in sdallocx. (@djwatson)
- Output rate for certain counters in malloc_stats. (@zinoale)
- Add configure option --enable-readlinkat, which allows the use of readlinkat
over readlink. (@davidtgoldblatt)
- Add configure options --{enable,disable}-{static,shared} to allow not
building unwanted libraries. (@Ericson2314)
- Add configure option --disable-libdl to enable fully static builds.
(@interwq)
- Add mallctl interfaces:
+ opt.oversize_threshold (@interwq)
+ stats.arenas.<i>.extent_avail (@tyleretzel)
+ stats.arenas.<i>.extents.<j>.n{dirty,muzzy,retained} (@tyleretzel)
+ stats.arenas.<i>.extents.<j>.{dirty,muzzy,retained}_bytes
(@tyleretzel)
Portability improvements:
- Update MSVC builds. (@maksqwe, @rustyx)
- Workaround a compiler optimizer bug on s390x. (@rkmisra)
- Make use of pthread_set_name_np(3) on FreeBSD. (@trasz)
- Implement malloc_getcpu() to enable percpu_arena for windows. (@santagada)
- Link against -pthread instead of -lpthread. (@paravoid)
- Make background_thread not dependent on libdl. (@interwq)
- Add stringify to fix a linker directive issue on MSVC. (@daverigby)
- Detect and fall back when 8-bit atomics are unavailable. (@interwq)
- Fall back to the default pthread_create if dlsym(3) fails. (@interwq)
Optimizations and refactors:
- Refactor the TSD module. (@davidtgoldblatt)
- Avoid taking extents_muzzy mutex when muzzy is disabled. (@interwq)
- Avoid taking large_mtx for auto arenas on the tcache flush path. (@interwq)
- Optimize ixalloc by avoiding a size lookup. (@interwq)
- Implement opt.oversize_threshold which uses a dedicated arena for requests
crossing the threshold, also eagerly purges the oversize extents. Default
the threshold to 8 MiB. (@interwq)
- Clean compilation with -Wextra. (@gnzlbg, @jasone)
- Refactor the size class module. (@davidtgoldblatt)
- Refactor the stats emitter. (@tyleretzel)
- Optimize pow2_ceil. (@rkmisra)
- Avoid runtime detection of lazy purging on FreeBSD. (@trasz)
- Optimize mmap(2) alignment handling on FreeBSD. (@trasz)
- Improve error handling for THP state initialization. (@jsteemann)
- Rework the malloc() fast path. (@djwatson)
- Rework the free() fast path. (@djwatson)
- Refactor and optimize the tcache fill / flush paths. (@djwatson)
- Optimize sync / lwsync on PowerPC. (@chmeeedalf)
- Bypass extent_dalloc() when retain is enabled. (@interwq)
- Optimize the locking on large deallocation. (@interwq)
- Reduce the number of pages committed from sanity checking in debug build.
(@trasz, @interwq)
- Deprecate OSSpinLock. (@interwq)
- Lower the default number of background threads to 4 (when the feature
is enabled). (@interwq)
- Optimize the trylock spin wait. (@djwatson)
- Use arena index for arena-matching checks. (@interwq)
- Avoid forced decay on thread termination when using background threads.
(@interwq)
- Disable muzzy decay by default. (@djwatson, @interwq)
- Only initialize libgcc unwinder when profiling is enabled. (@paravoid,
@interwq)
Bug fixes (all only relevant to jemalloc 5.x):
- Fix background thread index issues with max_background_threads. (@djwatson,
@interwq)
- Fix stats output for opt.lg_extent_max_active_fit. (@interwq)
- Fix opt.prof_prefix initialization. (@davidtgoldblatt)
- Properly trigger decay on tcache destroy. (@interwq, @amosbird)
- Fix tcache.flush. (@interwq)
- Detect whether explicit extent zero out is necessary with huge pages or
custom extent hooks, which may change the purge semantics. (@interwq)
- Fix a side effect caused by extent_max_active_fit combined with decay-based
purging, where freed extents can accumulate and not be reused for an
extended period of time. (@interwq, @mpghf)
- Fix a missing unlock on extent register error handling. (@zoulasc)
Testing:
- Simplify the Travis script output. (@gnzlbg)
- Update the test scripts for FreeBSD. (@devnexen)
- Add unit tests for the producer-consumer pattern. (@interwq)
- Add Cirrus-CI config for FreeBSD builds. (@jasone)
- Add size-matching sanity checks on tcache flush. (@davidtgoldblatt,
@interwq)
Incompatible changes:
- Remove --with-lg-page-sizes. (@davidtgoldblatt)
Documentation:
- Attempt to build docs by default, however skip doc building when xsltproc
is missing. (@interwq, @cmuellner)
* 5.1.0 (May 4, 2018)
This release is primarily about fine-tuning, ranging from several new features
to numerous notable performance and portability enhancements. The release and
......
......@@ -221,13 +221,6 @@ any of the following arguments (not a definitive list) to 'configure':
system page size may change between configuration and execution, e.g. when
cross compiling.
* `--with-lg-page-sizes=<lg-page-sizes>`
Specify the comma-separated base 2 logs of the page sizes to support. This
option may be useful when cross compiling in combination with
`--with-lg-page`, but its primary use case is for integration with FreeBSD's
libc, wherein jemalloc is embedded.
* `--with-lg-hugepage=<lg-hugepage>`
Specify the base 2 log of the system huge page size. This option is useful
......@@ -276,6 +269,11 @@ any of the following arguments (not a definitive list) to 'configure':
in the same process, which will almost certainly result in confusing runtime
crashes if pointers leak from one implementation to the other.
* `--disable-libdl`
Disable the usage of libdl, namely dlsym(3) which is required by the lazy
lock option. This can allow building static binaries.
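
One way to exercise the option described above (an illustrative recipe, not taken from this diff; the static/shared flags are the ones documented elsewhere in this install guide and changelog):

```sh
# Build only the static archive, with the libdl/dlsym dependency removed.
./configure --disable-libdl --disable-shared --enable-static
make lib/libjemalloc.a

# Link an application fully statically against it (pthread is still required).
cc -static -o app app.c lib/libjemalloc.a -lpthread
```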
The following environment variables (not a definitive list) impact configure's
behavior:
......
......@@ -47,6 +47,7 @@ REV := @rev@
install_suffix := @install_suffix@
ABI := @abi@
XSLTPROC := @XSLTPROC@
XSLROOT := @XSLROOT@
AUTOCONF := @AUTOCONF@
_RPATH = @RPATH@
RPATH = $(if $(1),$(call _RPATH,$(1)))
......@@ -55,8 +56,12 @@ cfghdrs_out := @cfghdrs_out@
cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@)
cfgoutputs_out := @cfgoutputs_out@
enable_autogen := @enable_autogen@
enable_doc := @enable_doc@
enable_shared := @enable_shared@
enable_static := @enable_static@
enable_prof := @enable_prof@
enable_zone_allocator := @enable_zone_allocator@
enable_experimental_smallocx := @enable_experimental_smallocx@
MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF
link_whole_archive := @link_whole_archive@
DSO_LDFLAGS = @DSO_LDFLAGS@
......@@ -102,7 +107,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
$(srcroot)src/extent_dss.c \
$(srcroot)src/extent_mmap.c \
$(srcroot)src/hash.c \
$(srcroot)src/hooks.c \
$(srcroot)src/hook.c \
$(srcroot)src/large.c \
$(srcroot)src/log.c \
$(srcroot)src/malloc_io.c \
......@@ -113,9 +118,12 @@ C_SRCS := $(srcroot)src/jemalloc.c \
$(srcroot)src/prng.c \
$(srcroot)src/prof.c \
$(srcroot)src/rtree.c \
$(srcroot)src/safety_check.c \
$(srcroot)src/stats.c \
$(srcroot)src/sc.c \
$(srcroot)src/sz.c \
$(srcroot)src/tcache.c \
$(srcroot)src/test_hooks.c \
$(srcroot)src/ticker.c \
$(srcroot)src/tsd.c \
$(srcroot)src/witness.c
......@@ -165,14 +173,18 @@ TESTS_UNIT := \
$(srcroot)test/unit/background_thread_enable.c \
$(srcroot)test/unit/base.c \
$(srcroot)test/unit/bitmap.c \
$(srcroot)test/unit/bit_util.c \
$(srcroot)test/unit/binshard.c \
$(srcroot)test/unit/ckh.c \
$(srcroot)test/unit/decay.c \
$(srcroot)test/unit/div.c \
$(srcroot)test/unit/emitter.c \
$(srcroot)test/unit/extent_quantize.c \
$(srcroot)test/unit/extent_util.c \
$(srcroot)test/unit/fork.c \
$(srcroot)test/unit/hash.c \
$(srcroot)test/unit/hooks.c \
$(srcroot)test/unit/hook.c \
$(srcroot)test/unit/huge.c \
$(srcroot)test/unit/junk.c \
$(srcroot)test/unit/junk_alloc.c \
$(srcroot)test/unit/junk_free.c \
......@@ -190,6 +202,7 @@ TESTS_UNIT := \
$(srcroot)test/unit/prof_active.c \
$(srcroot)test/unit/prof_gdump.c \
$(srcroot)test/unit/prof_idump.c \
$(srcroot)test/unit/prof_log.c \
$(srcroot)test/unit/prof_reset.c \
$(srcroot)test/unit/prof_tctx.c \
$(srcroot)test/unit/prof_thread_name.c \
......@@ -198,13 +211,17 @@ TESTS_UNIT := \
$(srcroot)test/unit/rb.c \
$(srcroot)test/unit/retained.c \
$(srcroot)test/unit/rtree.c \
$(srcroot)test/unit/safety_check.c \
$(srcroot)test/unit/seq.c \
$(srcroot)test/unit/SFMT.c \
$(srcroot)test/unit/sc.c \
$(srcroot)test/unit/size_classes.c \
$(srcroot)test/unit/slab.c \
$(srcroot)test/unit/smoothstep.c \
$(srcroot)test/unit/spin.c \
$(srcroot)test/unit/stats.c \
$(srcroot)test/unit/stats_print.c \
$(srcroot)test/unit/test_hooks.c \
$(srcroot)test/unit/ticker.c \
$(srcroot)test/unit/nstime.c \
$(srcroot)test/unit/tsd.c \
......@@ -217,15 +234,21 @@ endif
TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
$(srcroot)test/integration/allocated.c \
$(srcroot)test/integration/extent.c \
$(srcroot)test/integration/malloc.c \
$(srcroot)test/integration/mallocx.c \
$(srcroot)test/integration/MALLOCX_ARENA.c \
$(srcroot)test/integration/overflow.c \
$(srcroot)test/integration/posix_memalign.c \
$(srcroot)test/integration/rallocx.c \
$(srcroot)test/integration/sdallocx.c \
$(srcroot)test/integration/slab_sizes.c \
$(srcroot)test/integration/thread_arena.c \
$(srcroot)test/integration/thread_tcache_enabled.c \
$(srcroot)test/integration/xallocx.c
ifeq (@enable_experimental_smallocx@, 1)
TESTS_INTEGRATION += \
$(srcroot)test/integration/smallocx.c
endif
ifeq (@enable_cxx@, 1)
CPP_SRCS := $(srcroot)src/jemalloc_cpp.cpp
TESTS_INTEGRATION_CPP := $(srcroot)test/integration/cpp/basic.cpp
......@@ -233,7 +256,9 @@ else
CPP_SRCS :=
TESTS_INTEGRATION_CPP :=
endif
TESTS_STRESS := $(srcroot)test/stress/microbench.c
TESTS_STRESS := $(srcroot)test/stress/microbench.c \
$(srcroot)test/stress/hookbench.c
TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_INTEGRATION_CPP) $(TESTS_STRESS)
......@@ -274,10 +299,24 @@ all: build_lib
dist: build_doc
$(objroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl
ifneq ($(XSLROOT),)
$(XSLTPROC) -o $@ $(objroot)doc/html.xsl $<
else
ifeq ($(wildcard $(DOCS_HTML)),)
@echo "<p>Missing xsltproc. Doc not built.</p>" > $@
endif
@echo "Missing xsltproc. "$@" not (re)built."
endif
$(objroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl
ifneq ($(XSLROOT),)
$(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $<
else
ifeq ($(wildcard $(DOCS_MAN3)),)
@echo "Missing xsltproc. Doc not built." > $@
endif
@echo "Missing xsltproc. "$@" not (re)built."
endif
build_doc_html: $(DOCS_HTML)
build_doc_man: $(DOCS_MAN3)
......@@ -400,7 +439,7 @@ $(objroot)test/unit/%$(EXE): $(objroot)test/unit/%.$(O) $(C_JET_OBJS) $(C_TESTLI
$(objroot)test/integration/%$(EXE): $(objroot)test/integration/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
@mkdir -p $(@D)
$(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LJEMALLOC) $(LDFLAGS) $(filter-out -lm,$(filter -lrt -lpthread -lstdc++,$(LIBS))) $(LM) $(EXTRA_LDFLAGS)
$(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LJEMALLOC) $(LDFLAGS) $(filter-out -lm,$(filter -lrt -pthread -lstdc++,$(LIBS))) $(LM) $(EXTRA_LDFLAGS)
$(objroot)test/integration/cpp/%$(EXE): $(objroot)test/integration/cpp/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
@mkdir -p $(@D)
......@@ -412,7 +451,12 @@ $(objroot)test/stress/%$(EXE): $(objroot)test/stress/%.$(O) $(C_JET_OBJS) $(C_TE
build_lib_shared: $(DSOS)
build_lib_static: $(STATIC_LIBS)
build_lib: build_lib_shared build_lib_static
ifeq ($(enable_shared), 1)
build_lib: build_lib_shared
endif
ifeq ($(enable_static), 1)
build_lib: build_lib_static
endif
install_bin:
$(INSTALL) -d $(BINDIR)
......@@ -449,7 +493,13 @@ install_lib_pc: $(PC)
$(INSTALL) -m 644 $$l $(LIBDIR)/pkgconfig; \
done
install_lib: install_lib_shared install_lib_static install_lib_pc
ifeq ($(enable_shared), 1)
install_lib: install_lib_shared
endif
ifeq ($(enable_static), 1)
install_lib: install_lib_static
endif
install_lib: install_lib_pc
install_doc_html:
$(INSTALL) -d $(DATADIR)/doc/jemalloc$(install_suffix)
......@@ -465,9 +515,13 @@ install_doc_man:
$(INSTALL) -m 644 $$d $(MANDIR)/man3; \
done
install_doc: install_doc_html install_doc_man
install_doc: build_doc install_doc_html install_doc_man
install: install_bin install_include install_lib
install: install_bin install_include install_lib install_doc
ifeq ($(enable_doc), 1)
install: install_doc
endif
tests_unit: $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%$(EXE))
tests_integration: $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%$(EXE)) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%$(EXE))
......
......@@ -2909,6 +2909,7 @@ sub RemoveUninterestingFrames {
'@JEMALLOC_PREFIX@xallocx',
'@JEMALLOC_PREFIX@dallocx',
'@JEMALLOC_PREFIX@sdallocx',
'@JEMALLOC_PREFIX@sdallocx_noflags',
'tc_calloc',
'tc_cfree',
'tc_malloc',
......@@ -5366,7 +5367,7 @@ sub GetProcedureBoundaries {
my $demangle_flag = "";
my $cppfilt_flag = "";
my $to_devnull = ">$dev_null 2>&1";
if (system(ShellEscape($nm, "--demangle", "image") . $to_devnull) == 0) {
if (system(ShellEscape($nm, "--demangle", $image) . $to_devnull) == 0) {
# In this mode, we do "nm --demangle <foo>"
$demangle_flag = "--demangle";
$cppfilt_flag = "";
......
......@@ -424,7 +424,7 @@ for (i = 0; i < nbins; i++) {
called repeatedly. General information that never changes during
execution can be omitted by specifying <quote>g</quote> as a character
within the <parameter>opts</parameter> string. Note that
<function>malloc_message()</function> uses the
<function>malloc_stats_print()</function> uses the
<function>mallctl*()</function> functions internally, so inconsistent
statistics can be reported if multiple threads use these functions
simultaneously. If <option>--enable-stats</option> is specified during
......@@ -433,10 +433,11 @@ for (i = 0; i < nbins; i++) {
arena statistics, respectively; <quote>b</quote> and <quote>l</quote> can
be specified to omit per size class statistics for bins and large objects,
respectively; <quote>x</quote> can be specified to omit all mutex
statistics. Unrecognized characters are silently ignored. Note that
thread caching may prevent some statistics from being completely up to
date, since extra locking would be required to merge counters that track
thread cache operations.</para>
statistics; <quote>e</quote> can be used to omit extent statistics.
Unrecognized characters are silently ignored. Note that thread caching
may prevent some statistics from being completely up to date, since extra
locking would be required to merge counters that track thread cache
operations.</para>
<para>The <function>malloc_usable_size()</function> function
returns the usable size of the allocation pointed to by
......@@ -903,6 +904,23 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
</para></listitem>
</varlistentry>
<varlistentry id="opt.confirm_conf">
<term>
<mallctl>opt.confirm_conf</mallctl>
(<type>bool</type>)
<literal>r-</literal>
</term>
<listitem><para>Confirm-runtime-options-when-program-starts
enabled/disabled. If true, the string specified via
<option>--with-malloc-conf</option>, the string pointed to by the
global variable <varname>malloc_conf</varname>, the <quote>name</quote>
of the file referenced by the symbolic link named
<filename class="symlink">/etc/malloc.conf</filename>, and the value of
the environment variable <envar>MALLOC_CONF</envar>, will be printed in
order. Then, each option being set will be individually printed. This
option is disabled by default.</para></listitem>
</varlistentry>
<varlistentry id="opt.abort_conf">
<term>
<mallctl>opt.abort_conf</mallctl>
......@@ -943,16 +961,19 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
<citerefentry><refentrytitle>munmap</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> or equivalent (see <link
linkend="stats.retained">stats.retained</link> for related details).
This option is disabled by default unless discarding virtual memory is
known to trigger
platform-specific performance problems, e.g. for [64-bit] Linux, which
has a quirk in its virtual memory allocation algorithm that causes
semi-permanent VM map holes under normal jemalloc operation. Although
<citerefentry><refentrytitle>munmap</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> causes issues on 32-bit Linux as
well, retaining virtual memory for 32-bit Linux is disabled by default
due to the practical possibility of address space exhaustion.
</para></listitem>
It also makes jemalloc use <citerefentry>
<refentrytitle>mmap</refentrytitle><manvolnum>2</manvolnum>
</citerefentry> or equivalent in a more greedy way, mapping larger
chunks in one go. This option is disabled by default unless discarding
virtual memory is known to trigger platform-specific performance
problems, namely 1) for [64-bit] Linux, which has a quirk in its virtual
memory allocation algorithm that causes semi-permanent VM map holes
under normal jemalloc operation; and 2) for [64-bit] Windows, which
disallows split / merged regions with
<parameter><constant>MEM_RELEASE</constant></parameter>. Although the
same issues may present on 32-bit platforms as well, retaining virtual
memory for 32-bit Linux and Windows is disabled by default due to the
practical possibility of address space exhaustion. </para></listitem>
</varlistentry>
<varlistentry id="opt.dss">
......@@ -988,6 +1009,24 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
number of CPUs, or one if there is a single CPU.</para></listitem>
</varlistentry>
<varlistentry id="opt.oversize_threshold">
<term>
<mallctl>opt.oversize_threshold</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
</term>
<listitem><para>The threshold in bytes of which requests are considered
oversize. Allocation requests with greater sizes are fulfilled from a
dedicated arena (automatically managed, however not within
<literal>narenas</literal>), in order to reduce fragmentation by not
mixing huge allocations with small ones. In addition, the decay API
guarantees on the extents greater than the specified threshold may be
overridden. Note that requests with arena index specified via
<constant>MALLOCX_ARENA</constant>, or threads associated with explicit
arenas will not be considered. The default threshold is 8MiB. Values
not within large size classes disables this feature.</para></listitem>
</varlistentry>
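
As an illustration of the new option documented above (the value is arbitrary; opt.* settings are supplied via the usual malloc conf mechanisms at startup, and with a prefixed build such as Redis's the environment variable is prefixed accordingly, e.g. JE_MALLOC_CONF):

```sh
# Raise the oversize threshold to 16 MiB at startup; a value of 0 disables the feature.
MALLOC_CONF="oversize_threshold:16777216" ./app
```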
<varlistentry id="opt.percpu_arena">
<term>
<mallctl>opt.percpu_arena</mallctl>
......@@ -1009,7 +1048,7 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
<varlistentry id="opt.background_thread">
<term>
<mallctl>opt.background_thread</mallctl>
(<type>const bool</type>)
(<type>bool</type>)
<literal>r-</literal>
</term>
<listitem><para>Internal background worker threads enabled/disabled.
......@@ -1024,7 +1063,7 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
<varlistentry id="opt.max_background_threads">
<term>
<mallctl>opt.max_background_threads</mallctl>
(<type>const size_t</type>)
(<type>size_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Maximum number of background threads that will be created
......@@ -1055,7 +1094,11 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
linkend="arena.i.dirty_decay_ms"><mallctl>arena.&lt;i&gt;.dirty_decay_ms</mallctl></link>
for related dynamic control options. See <link
linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
for a description of muzzy pages.</para></listitem>
for a description of muzzy pages. Note
that when the <link
linkend="opt.oversize_threshold"><mallctl>oversize_threshold</mallctl></link>
feature is enabled, the arenas reserved for oversize requests may have
its own default decay settings.</para></listitem>
</varlistentry>
<varlistentry id="opt.muzzy_decay_ms">
......@@ -1763,10 +1806,11 @@ malloc_conf = "xmalloc:true";]]></programlisting>
to control allocation for arenas explicitly created via <link
linkend="arenas.create"><mallctl>arenas.create</mallctl></link> such
that all extents originate from an application-supplied extent allocator
(by specifying the custom extent hook functions during arena creation),
but the automatically created arenas will have already created extents
prior to the application having an opportunity to take over extent
allocation.</para>
(by specifying the custom extent hook functions during arena creation).
However, the API guarantees for the automatically created arenas may be
relaxed -- hooks set there may be called in a "best effort" fashion; in
addition there may be extents created prior to the application having an
opportunity to take over extent allocation.</para>
<programlisting language="C"><![CDATA[
typedef extent_hooks_s extent_hooks_t;
......@@ -2593,6 +2637,17 @@ struct extent_hooks_s {
details.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.extent_avail">
<term>
<mallctl>stats.arenas.&lt;i&gt;.extent_avail</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Number of allocated (but unused) extent structs in this
arena.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.base">
<term>
<mallctl>stats.arenas.&lt;i&gt;.base</mallctl>
......@@ -2760,6 +2815,28 @@ struct extent_hooks_s {
all bin size classes.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.small.nfills">
<term>
<mallctl>stats.arenas.&lt;i&gt;.small.nfills</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of tcache fills by all small size
classes.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.small.nflushes">
<term>
<mallctl>stats.arenas.&lt;i&gt;.small.nflushes</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of tcache flushes by all small size
classes.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.large.allocated">
<term>
<mallctl>stats.arenas.&lt;i&gt;.large.allocated</mallctl>
......@@ -2810,6 +2887,28 @@ struct extent_hooks_s {
all large size classes.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.large.nfills">
<term>
<mallctl>stats.arenas.&lt;i&gt;.large.nfills</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of tcache fills by all large size
classes.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.large.nflushes">
<term>
<mallctl>stats.arenas.&lt;i&gt;.large.nflushes</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of tcache flushes by all large size
classes.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.j.nmalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</mallctl>
......@@ -2909,6 +3008,17 @@ struct extent_hooks_s {
<listitem><para>Current number of slabs.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.j.nonfull_slabs">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nonfull_slabs</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Current number of nonfull slabs.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.mutex">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.mutex.{counter}</mallctl>
......@@ -2922,6 +3032,30 @@ struct extent_hooks_s {
counters</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.extents.n">
<term>
<mallctl>stats.arenas.&lt;i&gt;.extents.&lt;j&gt;.n{extent_type}</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para> Number of extents of the given type in this arena in
the bucket corresponding to page size index &lt;j&gt;. The extent type
is one of dirty, muzzy, or retained.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.extents.bytes">
<term>
<mallctl>stats.arenas.&lt;i&gt;.extents.&lt;j&gt;.{extent_type}_bytes</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para> Sum of the bytes managed by extents of the given type
in this arena in the bucket corresponding to page size index &lt;j&gt;.
The extent type is one of dirty, muzzy, or retained.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.lextents.j.nmalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.nmalloc</mallctl>
......
......@@ -3,8 +3,8 @@
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
extern ssize_t opt_dirty_decay_ms;
......@@ -16,13 +16,17 @@ extern const char *percpu_arena_mode_names[];
extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
extern malloc_mutex_t arenas_lock;
extern size_t opt_oversize_threshold;
extern size_t oversize_threshold;
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
bin_stats_t *bstats, arena_stats_large_t *lstats);
bin_stats_t *bstats, arena_stats_large_t *lstats,
arena_stats_extents_t *estats);
void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
#ifdef JEMALLOC_JET
......@@ -56,16 +60,17 @@ void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
szind_t ind, bool zero);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize);
void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
bool slow_path);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, void *ptr);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, extent_t *extent, void *ptr);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
size_t extra, bool zero, size_t *newsize);
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache);
size_t size, size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args);
dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_dirty_decay_ms_default_get(void);
......@@ -79,7 +84,12 @@ void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
size_t arena_extent_sn_next(arena_t *arena);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void arena_boot(void);
bool arena_init_huge(void);
bool arena_is_huge(unsigned arena_ind);
arena_t *arena_choose_huge(tsd_t *tsd);
bin_t *arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
unsigned *binshard);
void arena_boot(sc_data_t *sc_data);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
......
......@@ -4,10 +4,36 @@
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
JEMALLOC_ALWAYS_INLINE bool
arena_has_default_hooks(arena_t *arena) {
return (extent_hooks_get(arena) == &extent_hooks_default);
}
JEMALLOC_ALWAYS_INLINE arena_t *
arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
if (arena != NULL) {
return arena;
}
/*
* For huge allocations, use the dedicated huge arena if both are true:
* 1) is using auto arena selection (i.e. arena == NULL), and 2) the
* thread is not assigned to a manual arena.
*/
if (unlikely(size >= oversize_threshold)) {
arena_t *tsd_arena = tsd_arena_get(tsd);
if (tsd_arena == NULL || arena_is_auto(tsd_arena)) {
return arena_choose_huge(tsd);
}
}
return arena_choose(tsd, NULL);
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
cassert(config_prof);
......@@ -28,7 +54,7 @@ arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
}
JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize,
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
......@@ -47,7 +73,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize,
}
static inline void
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, UNUSED prof_tctx_t *tctx) {
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
......@@ -57,6 +83,32 @@ arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, UNUSED prof_tctx_t *tctx) {
large_prof_tctx_reset(tsdn, extent);
}
JEMALLOC_ALWAYS_INLINE nstime_t
arena_prof_alloc_time_get(tsdn_t *tsdn, const void *ptr,
alloc_ctx_t *alloc_ctx) {
cassert(config_prof);
assert(ptr != NULL);
extent_t *extent = iealloc(tsdn, ptr);
/*
* Unlike arena_prof_prof_tctx_{get, set}, we only call this once we're
* sure we have a sampled allocation.
*/
assert(!extent_slab_get(extent));
return large_prof_alloc_time_get(extent);
}
JEMALLOC_ALWAYS_INLINE void
arena_prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
nstime_t t) {
cassert(config_prof);
assert(ptr != NULL);
extent_t *extent = iealloc(tsdn, ptr);
assert(!extent_slab_get(extent));
large_prof_alloc_time_set(extent, t);
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
tsd_t *tsd;
......@@ -83,14 +135,33 @@ arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
arena_decay_ticks(tsdn, arena, 1);
}
/* Purge a single extent to retained / unmapped directly. */
JEMALLOC_ALWAYS_INLINE void
arena_decay_extent(tsdn_t *tsdn,arena_t *arena, extent_hooks_t **r_extent_hooks,
extent_t *extent) {
size_t extent_size = extent_size_get(extent);
extent_dalloc_wrapper(tsdn, arena,
r_extent_hooks, extent);
if (config_stats) {
/* Update stats accordingly. */
arena_stats_lock(tsdn, &arena->stats);
arena_stats_add_u64(tsdn, &arena->stats,
&arena->decay_dirty.stats->nmadvise, 1);
arena_stats_add_u64(tsdn, &arena->stats,
&arena->decay_dirty.stats->purged, extent_size >> LG_PAGE);
arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
extent_size);
arena_stats_unlock(tsdn, &arena->stats);
}
}
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(size != 0);
if (likely(tcache != NULL)) {
if (likely(size <= SMALL_MAXCLASS)) {
if (likely(size <= SC_SMALL_MAXCLASS)) {
return tcache_alloc_small(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
}
......@@ -119,7 +190,7 @@ arena_salloc(tsdn_t *tsdn, const void *ptr) {
szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
assert(szind != NSIZES);
assert(szind != SC_NSIZES);
return sz_index2size(szind);
}
......@@ -152,11 +223,21 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
/* Only slab members should be looked up via interior pointers. */
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
assert(szind != NSIZES);
assert(szind != SC_NSIZES);
return sz_index2size(szind);
}
static inline void
arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
if (config_prof && unlikely(szind < SC_NBINS)) {
arena_dalloc_promoted(tsdn, ptr, NULL, true);
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
}
}
static inline void
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
assert(ptr != NULL);
......@@ -173,13 +254,28 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(szind < NSIZES);
assert(szind < SC_NSIZES);
assert(slab == extent_slab_get(extent));
}
if (likely(slab)) {
/* Small allocation. */
arena_dalloc_small(tsdn, ptr);
} else {
arena_dalloc_large_no_tcache(tsdn, ptr, szind);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
bool slow_path) {
if (szind < nhbins) {
if (config_prof && unlikely(szind < SC_NBINS)) {
arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
} else {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind,
slow_path);
}
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
......@@ -203,7 +299,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
if (alloc_ctx != NULL) {
szind = alloc_ctx->szind;
slab = alloc_ctx->slab;
assert(szind != NSIZES);
assert(szind != SC_NSIZES);
} else {
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
......@@ -215,7 +311,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(szind < NSIZES);
assert(szind < SC_NSIZES);
assert(slab == extent_slab_get(extent));
}
......@@ -224,25 +320,14 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
slow_path);
} else {
if (szind < nhbins) {
if (config_prof && unlikely(szind < NBINS)) {
arena_dalloc_promoted(tsdn, ptr, tcache,
slow_path);
} else {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
szind, slow_path);
}
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
}
arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
}
}
static inline void
arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
assert(ptr != NULL);
assert(size <= LARGE_MAXCLASS);
assert(size <= SC_LARGE_MAXCLASS);
szind_t szind;
bool slab;
......@@ -252,7 +337,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
* object, so base szind and slab on the given size.
*/
szind = sz_size2index(size);
slab = (szind < NBINS);
slab = (szind < SC_NBINS);
}
if ((config_prof && opt_prof) || config_debug) {
......@@ -264,7 +349,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
(uintptr_t)ptr, true, &szind, &slab);
assert(szind == sz_size2index(size));
assert((config_prof && opt_prof) || slab == (szind < NBINS));
assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));
if (config_debug) {
extent_t *extent = rtree_extent_read(tsdn,
......@@ -278,8 +363,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
/* Small allocation. */
arena_dalloc_small(tsdn, ptr);
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
arena_dalloc_large_no_tcache(tsdn, ptr, szind);
}
}
......@@ -288,7 +372,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
alloc_ctx_t *alloc_ctx, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
assert(size <= LARGE_MAXCLASS);
assert(size <= SC_LARGE_MAXCLASS);
if (unlikely(tcache == NULL)) {
arena_sdalloc_no_tcache(tsdn, ptr, size);
......@@ -297,7 +381,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
szind_t szind;
bool slab;
UNUSED alloc_ctx_t local_ctx;
alloc_ctx_t local_ctx;
if (config_prof && opt_prof) {
if (alloc_ctx == NULL) {
/* Uncommon case and should be a static check. */
......@@ -318,7 +402,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
* object, so base szind and slab on the given size.
*/
szind = sz_size2index(size);
slab = (szind < NBINS);
slab = (szind < SC_NBINS);
}
if (config_debug) {
......@@ -336,18 +420,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
slow_path);
} else {
if (szind < nhbins) {
if (config_prof && unlikely(szind < NBINS)) {
arena_dalloc_promoted(tsdn, ptr, tcache,
slow_path);
} else {
tcache_dalloc_large(tsdn_tsd(tsdn),
tcache, ptr, szind, slow_path);
}
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
}
arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
}
}
......
......@@ -4,7 +4,9 @@
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
/*
* In those architectures that support 64-bit atomics, we use atomic updates for
......@@ -33,6 +35,13 @@ struct arena_stats_large_s {
* periodically merges into this counter.
*/
arena_stats_u64_t nrequests; /* Partially derived. */
/*
* Number of tcache fills / flushes for large (similarly, periodically
* merged). Note that there is no large tcache batch-fill currently
* (i.e. only fill 1 at a time); however flush may be batched.
*/
arena_stats_u64_t nfills; /* Partially derived. */
arena_stats_u64_t nflushes; /* Partially derived. */
/* Current number of allocations of this size class. */
size_t curlextents; /* Derived. */
......@@ -48,6 +57,22 @@ struct arena_stats_decay_s {
arena_stats_u64_t purged;
};
typedef struct arena_stats_extents_s arena_stats_extents_t;
struct arena_stats_extents_s {
/*
* Stats for a given index in the range [0, SC_NPSIZES] in an extents_t.
* We track both bytes and # of extents: two extents in the same bucket
* may have different sizes if adjacent size classes differ by more than
* a page, so bytes cannot always be derived from # of extents.
*/
atomic_zu_t ndirty;
atomic_zu_t dirty_bytes;
atomic_zu_t nmuzzy;
atomic_zu_t muzzy_bytes;
atomic_zu_t nretained;
atomic_zu_t retained_bytes;
};
/*
* Arena stats. Note that fields marked "derived" are not directly maintained
* within the arena code; rather their values are derived during stats merge
......@@ -69,6 +94,9 @@ struct arena_stats_s {
*/
atomic_zu_t retained; /* Derived. */
/* Number of extent_t structs allocated by base, but not being used. */
atomic_zu_t extent_avail;
arena_stats_decay_t decay_dirty;
arena_stats_decay_t decay_muzzy;
......@@ -80,22 +108,27 @@ struct arena_stats_s {
atomic_zu_t allocated_large; /* Derived. */
arena_stats_u64_t nmalloc_large; /* Derived. */
arena_stats_u64_t ndalloc_large; /* Derived. */
arena_stats_u64_t nfills_large; /* Derived. */
arena_stats_u64_t nflushes_large; /* Derived. */
arena_stats_u64_t nrequests_large; /* Derived. */
/* VM space had to be leaked (undocumented). Normally 0. */
atomic_zu_t abandoned_vm;
/* Number of bytes cached in tcache associated with this arena. */
atomic_zu_t tcache_bytes; /* Derived. */
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
/* One element for each large size class. */
arena_stats_large_t lstats[NSIZES - NBINS];
arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
/* Arena uptime. */
nstime_t uptime;
};
static inline bool
arena_stats_init(UNUSED tsdn_t *tsdn, arena_stats_t *arena_stats) {
arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
if (config_debug) {
for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
assert(((char *)arena_stats)[i] == 0);
......@@ -147,11 +180,11 @@ arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
#endif
}
UNUSED static inline void
static inline void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
assert(r - x <= r);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
......@@ -176,7 +209,8 @@ arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
}
static inline size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_zu(p, ATOMIC_RELAXED);
#else
......@@ -186,8 +220,8 @@ arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
}
static inline void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
size_t x) {
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else
......@@ -198,10 +232,10 @@ arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
}
static inline void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
size_t x) {
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
assert(r - x <= r);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
......@@ -218,11 +252,12 @@ arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
}
static inline void
arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
szind_t szind, uint64_t nrequests) {
arena_stats_lock(tsdn, arena_stats);
arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
NBINS].nrequests, nrequests);
arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
arena_stats_add_u64(tsdn, arena_stats, &lstats->nrequests, nrequests);
arena_stats_add_u64(tsdn, arena_stats, &lstats->nflushes, 1);
arena_stats_unlock(tsdn, arena_stats);
}
......@@ -233,5 +268,4 @@ arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
arena_stats_unlock(tsdn, arena_stats);
}
#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
......@@ -10,7 +10,7 @@
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/ticker.h"
......@@ -90,6 +90,9 @@ struct arena_s {
*/
atomic_u_t nthreads[2];
/* Next bin shard for binding new threads. Synchronization: atomic. */
atomic_u_t binshard_next;
/*
* When percpu_arena is enabled, to amortize the cost of reading /
* updating the current CPU id, track the most recent thread accessing
......@@ -113,7 +116,6 @@ struct arena_s {
/* Synchronization: internal. */
prof_accum_t prof_accum;
uint64_t prof_accumbytes;
/*
* PRNG state for cache index randomization of large allocation base
......@@ -196,6 +198,7 @@ struct arena_s {
* Synchronization: extent_avail_mtx.
*/
extent_tree_t extent_avail;
atomic_zu_t extent_avail_cnt;
malloc_mutex_t extent_avail_mtx;
/*
......@@ -203,7 +206,7 @@ struct arena_s {
*
* Synchronization: internal.
*/
bin_t bins[NBINS];
bins_t bins[SC_NBINS];
/*
* Base allocator, from which arena metadata are allocated.
......
#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
#define JEMALLOC_INTERNAL_ARENA_TYPES_H
#include "jemalloc/internal/sc.h"
/* Maximum number of regions in one slab. */
#define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN)
#define LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
/* Default decay times in milliseconds. */
#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
#define MUZZY_DECAY_MS_DEFAULT ZD(10 * 1000)
#define MUZZY_DECAY_MS_DEFAULT (0)
/* Number of event ticks between time checks. */
#define DECAY_NTICKS_PER_UPDATE 1000
......@@ -40,4 +42,10 @@ typedef enum {
#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base)
#define PERCPU_ARENA_DEFAULT percpu_arena_disabled
/*
* When allocation_size >= oversize_threshold, use the dedicated huge arena
* (unless have explicitly spicified arena index). 0 disables the feature.
*/
#define OVERSIZE_THRESHOLD_DEFAULT (8 << 20)
#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */