ruanhaishen / redis · commit b8beda3c
Authored May 01, 2023 by Oran Agra

    Merge commit jemalloc 5.3.0

Parents: d659c734, 6d23d3ac
Changes: 195 files. Too many changes to show; to preserve performance only 195 of 195+ files are displayed.
deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters

@@ -16,15 +16,39 @@
New <ClCompile> entries (each filtered under "Source Files", like the surrounding base.c, bitmap.c, ckh.c, ctl.c and extent.c entries) are added for:
+ ..\..\..\..\src\bin.c
+ ..\..\..\..\src\buf_writer.c
+ ..\..\..\..\src\cache_bin.c
+ ..\..\..\..\src\counter.c
+ ..\..\..\..\src\decay.c
+ ..\..\..\..\src\div.c
+ ..\..\..\..\src\emap.c
+ ..\..\..\..\src\exp_grow.c

@@ -34,45 +58,93 @@
Between the existing extent_mmap.c, hook.c, jemalloc.c, large.c, malloc_io.c, mutex.c, nstime.c, pages.c, prof.c, rtree.c, sc.c and stats.c entries, the following entries are removed and added:
- ..\..\..\..\src\hash.c
- ..\..\..\..\src\mutex_pool.c
- ..\..\..\..\src\prng.c
+ ..\..\..\..\src\fxp.c
+ ..\..\..\..\src\hpa.c
+ ..\..\..\..\src\hpa_hooks.c
+ ..\..\..\..\src\hpdata.c
+ ..\..\..\..\src\inspect.c
+ ..\..\..\..\src\log.c
+ ..\..\..\..\src\pa.c
+ ..\..\..\..\src\pa_extra.c
+ ..\..\..\..\src\pai.c
+ ..\..\..\..\src\pac.c
+ ..\..\..\..\src\peak_event.c
+ ..\..\..\..\src\prof_data.c
+ ..\..\..\..\src\prof_log.c
+ ..\..\..\..\src\prof_recent.c
+ ..\..\..\..\src\prof_stats.c
+ ..\..\..\..\src\prof_sys.c
+ ..\..\..\..\src\psset.c
+ ..\..\..\..\src\safety_check.c
+ ..\..\..\..\src\sec.c

@@ -82,6 +154,12 @@
Between the existing tcache.c and ticker.c entries:
+ ..\..\..\..\src\test_hooks.c
+ ..\..\..\..\src\thread_event.c

@@ -91,19 +169,28 @@
After the existing witness.c entry and before </ItemGroup>, the old trailing log.c, bin.c, div.c, test_hooks.c and safety_check.c entries (now listed alphabetically above) are replaced by:
+ ..\..\..\..\src\bin_info.c
+ ..\..\..\..\src\ecache.c
+ ..\..\..\..\src\edata.c
+ ..\..\..\..\src\edata_cache.c
+ ..\..\..\..\src\ehooks.c
+ ..\..\..\..\src\eset.c
+ ..\..\..\..\src\san.c
+ ..\..\..\..\src\san_bump.c
deps/jemalloc/msvc/test_threads/test_threads.cpp

@@ -9,6 +9,7 @@
 #include <thread>
 #include <vector>
 #include <stdio.h>
+#define JEMALLOC_NO_DEMANGLE
 #include <jemalloc/jemalloc.h>

 using std::vector;
deps/jemalloc/scripts/check-formatting.sh (new file, mode 100755)

#!/bin/bash

# The files that need to be properly formatted. We'll grow this incrementally
# until it includes all the jemalloc source files (as we convert things over),
# and then just replace it with
# find -name '*.c' -o -name '*.h' -o -name '*.cpp
FILES=(
)

if command -v clang-format &> /dev/null; then
    CLANG_FORMAT="clang-format"
elif command -v clang-format-8 &> /dev/null; then
    CLANG_FORMAT="clang-format-8"
else
    echo "Couldn't find clang-format."
fi

if ! $CLANG_FORMAT -version | grep "version 8\." &> /dev/null; then
    echo "clang-format is the wrong version."
    exit 1
fi

for file in ${FILES[@]}; do
    if ! cmp --silent $file <($CLANG_FORMAT $file) &> /dev/null; then
        echo "Error: $file is not clang-formatted"
        exit 1
    fi
done
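For comparison only, a minimal Python sketch of the same check as the shell loop above: run clang-format over each listed file and fail if its output differs from the file on disk. The file list and the clang-format binary name here are placeholders, not values from the commit.

#!/usr/bin/env python3
# Illustrative re-implementation of the clang-format check above; the FILES
# list and CLANG_FORMAT name are invented placeholders.
import subprocess
import sys

FILES = ["src/example.c"]        # assumption: files to check
CLANG_FORMAT = "clang-format"    # assumption: formatter binary on PATH

def is_formatted(path: str) -> bool:
    """Return True if clang-format would leave `path` unchanged."""
    formatted = subprocess.run([CLANG_FORMAT, path],
                               capture_output=True, text=True,
                               check=True).stdout
    with open(path, "r") as f:
        return f.read() == formatted

if __name__ == "__main__":
    for f in FILES:
        if not is_formatted(f):
            print(f"Error: {f} is not clang-formatted")
            sys.exit(1)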
deps/jemalloc/scripts/freebsd/before_install.sh (new file, mode 100644)

#!/bin/tcsh

su -m root -c 'pkg install -y git'
deps/jemalloc/scripts/freebsd/before_script.sh (new file, mode 100644)

#!/bin/tcsh

autoconf
# We don't perfectly track freebsd stdlib.h definitions. This is fine when
# we count as a system header, but breaks otherwise, like during these
# tests.
./configure --with-jemalloc-prefix=ci_ ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS"} $CONFIGURE_FLAGS
JE_NCPUS=`sysctl -n kern.smp.cpus`
gmake -j${JE_NCPUS}
gmake -j${JE_NCPUS} tests
deps/jemalloc/scripts/freebsd/script.sh (new file, mode 100644)

#!/bin/tcsh

gmake check
deps/jemalloc/scripts/gen_run_tests.py

-#!/usr/bin/env python
+#!/usr/bin/env python3

 import sys
 from itertools import combinations

@@ -14,14 +14,14 @@ nparallel = cpu_count() * 2
 uname = uname()[0]

-if "BSD" in uname:
+if call("command -v gmake", shell=True) == 0:
     make_cmd = 'gmake'
 else:
     make_cmd = 'make'

 def powerset(items):
     result = []
-    for i in xrange(len(items) + 1):
+    for i in range(len(items) + 1):
         result += combinations(items, i)
     return result

@@ -41,6 +41,7 @@ possible_config_opts = [
     '--enable-prof',
     '--disable-stats',
     '--enable-opt-safety-checks',
+    '--with-lg-page=16',
 ]
 if bits_64:
     possible_config_opts.append('--with-lg-vaddr=56')

@@ -52,19 +53,20 @@ possible_malloc_conf_opts = [
     'background_thread:true',
 ]

-print 'set -e'
-print 'if [ -f Makefile ] ; then %(make_cmd)s relclean ; fi' % {'make_cmd': make_cmd}
-print 'autoconf'
-print 'rm -rf run_tests.out'
-print 'mkdir run_tests.out'
-print 'cd run_tests.out'
+print('set -e')
+print('if [ -f Makefile ] ; then %(make_cmd)s relclean ; fi' % {'make_cmd':
+    make_cmd})
+print('autoconf')
+print('rm -rf run_tests.out')
+print('mkdir run_tests.out')
+print('cd run_tests.out')

 ind = 0
 for cc, cxx in possible_compilers:
   for compiler_opts in powerset(possible_compiler_opts):
     for config_opts in powerset(possible_config_opts):
       for malloc_conf_opts in powerset(possible_malloc_conf_opts):
-        if cc is 'clang' \
+        if cc == 'clang' \
           and '-m32' in possible_compiler_opts \
           and '--enable-prof' in config_opts:
           continue

@@ -92,7 +94,7 @@ for cc, cxx in possible_compilers:
         if (uname == 'Linux' and linux_supported) \
           or (not linux_supported and (uname != 'Darwin' or \
           not darwin_unsupported)):
-          print """cat <<EOF > run_test_%(ind)d.sh
+          print("""cat <<EOF > run_test_%(ind)d.sh
 #!/bin/sh
 set -e

@@ -120,7 +122,9 @@ run_cmd %(make_cmd)s all tests
 run_cmd %(make_cmd)s check
 run_cmd %(make_cmd)s distclean
 EOF
-chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line, 'make_cmd': make_cmd}
+chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line,
+    'make_cmd': make_cmd})
           ind += 1

-print 'for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs -P %(nparallel)d -n 1 sh' % {'last_ind': ind - 1, 'nparallel': nparallel}
+print('for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs'
+    ' -P %(nparallel)d -n 1 sh' % {'last_ind': ind - 1, 'nparallel': nparallel})
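As a side note on the powerset() helper this script keeps (only its xrange call changed above), here is a small self-contained sketch of what it produces; the option values are invented for illustration.

#!/usr/bin/env python3
# Demonstrates the powerset() helper used by gen_run_tests.py; the option
# list below is a made-up example, not the script's real configuration.
from itertools import combinations

def powerset(items):
    result = []
    for i in range(len(items) + 1):
        result += combinations(items, i)
    return result

opts = ['--enable-debug', '--enable-prof']
# Prints: (), ('--enable-debug',), ('--enable-prof',),
#         ('--enable-debug', '--enable-prof')
for combo in powerset(opts):
    print(combo)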
deps/jemalloc/scripts/gen_travis.py

The generator is rewritten for Python 3 (#!/usr/bin/env python3; from itertools import combinations, chain; from enum import Enum, auto) and restructured around typed options and per-platform job generators.

The fixed travis_template (language: generic, dist: precise, a matrix/include section filled by %s, and inline autoconf, ./configure ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS"} $CONFIGURE_FLAGS, make -j3, make -j3 tests and make check steps) is replaced by OS and arch constants (LINUX, OSX, WINDOWS, FREEBSD; AMD64, ARM64, PPC64LE) and a TRAVIS_TEMPLATE that declares "language: minimal" and "dist: focal" (generic makes Windows VMs hang at startup and provides software the tests do not need), fills a {jobs} block, and has the before_install, before_script and script stages source ./scripts/$TRAVIS_OS_NAME/before_install.sh, before_script.sh and script.sh when present, falling back to the old gen_travis.py diff check, autoconf, configure, make -j3, make -j3 tests and make check steps otherwise.

The plain strings os_default/os_unusual, compilers_default/compilers_unusual and compiler_flag_unusuals ('-m32') are replaced by an Option class (Type values COMPILER, COMPILER_FLAG, CONFIGURE_FLAG, MALLOC_CONF and FEATURE via auto(), as_compiler/as_compiler_flag/as_configure_flag/as_malloc_conf/as_feature constructors, and a value-based __eq__), compiler options GCC ('CC=gcc CXX=g++'), CLANG ('CC=clang CXX=clang++') and CL ('CC=cl.exe CXX=cl.exe'), and a CROSS_COMPILE_32BIT feature. configure_flag_unusuals gains '--with-lg-page=16' alongside --enable-debug, --enable-prof, --disable-stats, --disable-libdl and --enable-opt-safety-checks; malloc_conf_unusuals (tcache:false, dss:primary, percpu_arena:percpu, background_thread:true) are wrapped in Option.as_malloc_conf; MAX_UNUSUAL_OPTIONS stays at 2, and all_unusuals becomes compilers_unusual + feature_unusuals + configure_flag_unusuals + malloc_conf_unusuals.

The module-level xrange loop that built unusual_combinations_to_test, the gcc_multilib addons handling and the old format_job(combination) are replaced by get_extra_cflags(os, compiler) (no extra flags on FreeBSD; '-fcommon' for non-CL compilers on Windows because weak 'malloc_conf' symbols do not work with MinGW-GCC; otherwise '-Werror -Wno-array-bounds', plus -Wno-unknown-warning-option and -Wno-ignored-attributes for clang or OS X, and -Wno-deprecated-declarations on OS X), format_job(os, arch, combination) (emits "- os:", "arch:" and "env:" lines with the compiler, FEATURE=yes settings, COMPILER_FLAGS, CONFIGURE_FLAGS including --with-malloc-conf=..., and EXTRA_CFLAGS, appending '-m32' for CROSS_COMPILE_32BIT on Linux), generate_unusual_combinations(unusuals, max_unusual_opts) built on chain.from_iterable(combinations(...)), included(combination, exclude), and generate_jobs(os, arch, exclude, max_unusual_opts, unusuals=all_unusuals).

Per-platform generators are added: generate_linux (max_unusual_opts is MAX_UNUSUAL_OPTIONS on AMD64, 1 otherwise; excludes CROSS_COMPILE_32BIT and CLANG on PPC64LE), generate_macos (max 1; excludes dss:primary, percpu_arena:percpu, background_thread:true, --enable-prof and CLANG), generate_windows (--enable-debug, CL, CROSS_COMPILE_32BIT; max 3) and generate_freebsd (--enable-debug, '--enable-prof --enable-prof-libunwind', '--with-lg-page=16 --with-malloc-conf=tcache:false', CROSS_COMPILE_32BIT; max 4).

get_manual_jobs() keeps the hand-written development build (--enable-debug --disable-cache-oblivious --enable-stats --enable-log --enable-prof) and the --enable-experimental-smallocx build, both with EXTRA_CFLAGS="-Werror -Wno-array-bounds"; the old Valgrind build-bot entries (JEMALLOC_TEST_PREFIX="valgrind" plus the commented macOS brew install valgrind variant, disabled because of https://github.com/jemalloc/jemalloc/issues/1274) are dropped. main() joins generate_windows(AMD64), generate_freebsd(AMD64), generate_linux(AMD64), generate_linux(PPC64LE), generate_macos(AMD64) and get_manual_jobs(), then prints TRAVIS_TEMPLATE.format(jobs=jobs); the script now ends with an if __name__ == '__main__': main() guard instead of print travis_template % include_rows.
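A runnable sketch of the combination logic described above: it mirrors the generate_unusual_combinations() and included() helpers, but the option names and exclusion list are invented and it is not the script itself.

#!/usr/bin/env python3
# Sketch of the job-matrix combination logic in the rewritten gen_travis.py;
# option names and the exclusion list are illustrative assumptions.
from itertools import chain, combinations

MAX_UNUSUAL_OPTIONS = 2

def generate_unusual_combinations(unusuals, max_unusual_opts):
    # Every combination of at most max_unusual_opts "unusual" settings.
    return chain.from_iterable(
        combinations(unusuals, i) for i in range(max_unusual_opts + 1))

def included(combination, exclude):
    # Skip combinations containing any excluded option.
    return not any(excluded in combination for excluded in exclude)

unusuals = ['--enable-debug', '--enable-prof', 'tcache:false']  # invented
exclude = ['--enable-prof']                                     # invented

for combo in generate_unusual_combinations(unusuals, MAX_UNUSUAL_OPTIONS):
    if included(combo, exclude):
        print(combo)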
deps/jemalloc/scripts/linux/before_install.sh (new file, mode 100644)

#!/bin/bash

set -ev

if [[ "$TRAVIS_OS_NAME" != "linux" ]]; then
    echo "Incorrect \$TRAVIS_OS_NAME: expected linux, got $TRAVIS_OS_NAME"
    exit 1
fi

if [[ "$CROSS_COMPILE_32BIT" == "yes" ]]; then
    sudo apt-get update
    sudo apt-get -y install gcc-multilib g++-multilib
fi
deps/jemalloc/scripts/windows/before_install.sh (new file, mode 100644)

#!/bin/bash

set -e

# The purpose of this script is to install build dependencies and set
# $build_env to a function that sets appropriate environment variables,
# to enable (mingw32|mingw64) environment if we want to compile with gcc, or
# (mingw32|mingw64) + vcvarsall.bat if we want to compile with cl.exe

if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
    echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
    exit 1
fi

[[ ! -f C:/tools/msys64/msys2_shell.cmd ]] && rm -rf C:/tools/msys64
choco uninstall -y mingw
choco upgrade --no-progress -y msys2

msys_shell_cmd="cmd //C RefreshEnv.cmd && set MSYS=winsymlinks:nativestrict && C:\\tools\\msys64\\msys2_shell.cmd"

msys2() { $msys_shell_cmd -defterm -no-start -msys2 -c "$*"; }
mingw32() { $msys_shell_cmd -defterm -no-start -mingw32 -c "$*"; }
mingw64() { $msys_shell_cmd -defterm -no-start -mingw64 -c "$*"; }

if [[ "$CROSS_COMPILE_32BIT" == "yes" ]]; then
    mingw=mingw32
    mingw_gcc_package_arch=i686
else
    mingw=mingw64
    mingw_gcc_package_arch=x86_64
fi

if [[ "$CC" == *"gcc"* ]]; then
    $mingw pacman -S --noconfirm --needed \
        autotools \
        git \
        mingw-w64-${mingw_gcc_package_arch}-make \
        mingw-w64-${mingw_gcc_package_arch}-gcc \
        mingw-w64-${mingw_gcc_package_arch}-binutils
    build_env=$mingw
elif [[ "$CC" == *"cl"* ]]; then
    $mingw pacman -S --noconfirm --needed \
        autotools \
        git \
        mingw-w64-${mingw_gcc_package_arch}-make \
        mingw-w64-${mingw_gcc_package_arch}-binutils

    # In order to use MSVC compiler (cl.exe), we need to correctly set some environment
    # variables, namely PATH, INCLUDE, LIB and LIBPATH. The correct values of these
    # variables are set by a batch script "vcvarsall.bat". The code below generates
    # a batch script that calls "vcvarsall.bat" and prints the environment variables.
    #
    # Then, those environment variables are transformed from cmd to bash format and put
    # into a script $apply_vsenv. If cl.exe needs to be used from bash, one can
    # 'source $apply_vsenv' and it will apply the environment variables needed for cl.exe
    # to be located and function correctly.
    #
    # At last, a function "mingw_with_msvc_vars" is generated which forwards user input
    # into a correct mingw (32 or 64) subshell that automatically performs 'source $apply_vsenv',
    # making it possible for autotools to discover and use cl.exe.
    vcvarsall="vcvarsall.tmp.bat"
    echo "@echo off" > $vcvarsall
    echo "call \"c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\\\vcvarsall.bat\" $USE_MSVC" >> $vcvarsall
    echo "set" >> $vcvarsall

    apply_vsenv="./apply_vsenv.sh"
    cmd //C $vcvarsall | grep -E "^PATH=" | sed -n -e 's/\(.*\)=\(.*\)/export \1=$PATH:"\2"/g' \
        -e 's/\([a-zA-Z]\):[\\\/]/\/\1\//g' \
        -e 's/\\/\//g' \
        -e 's/;\//:\//gp' > $apply_vsenv
    cmd //C $vcvarsall | grep -E "^(INCLUDE|LIB|LIBPATH)=" | sed -n -e 's/\(.*\)=\(.*\)/export \1="\2"/gp' >> $apply_vsenv

    cat $apply_vsenv
    mingw_with_msvc_vars() { $msys_shell_cmd -defterm -no-start -$mingw -c "source $apply_vsenv && ""$*"; }
    build_env=mingw_with_msvc_vars

    rm -f $vcvarsall
else
    echo "Unknown C compiler: $CC"
    exit 1
fi

echo "Build environment function: $build_env"
deps/jemalloc/scripts/windows/before_script.sh (new file, mode 100644)

#!/bin/bash

set -e

if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
    echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
    exit 1
fi

$build_env autoconf
$build_env ./configure $CONFIGURE_FLAGS
# mingw32-make simply means "make", unrelated to mingw32 vs mingw64.
# Simply disregard the prefix and treat is as "make".
$build_env mingw32-make -j3
# At the moment, it's impossible to make tests in parallel,
# seemingly due to concurrent writes to '.pdb' file. I don't know why
# that happens, because we explicitly supply '/Fs' to the compiler.
# Until we figure out how to fix it, we should build tests sequentially
# on Windows.
$build_env mingw32-make tests
deps/jemalloc/scripts/windows/script.sh (new file, mode 100644)

#!/bin/bash

set -e

if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
    echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
    exit 1
fi

$build_env mingw32-make -k check
deps/jemalloc/src/arena.c
View file @
b8beda3c
#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/decay.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/safety_check.h"
...
@@ -35,34 +36,37 @@ ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
...
@@ -35,34 +36,37 @@ ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
static
atomic_zd_t
dirty_decay_ms_default
;
static
atomic_zd_t
dirty_decay_ms_default
;
static
atomic_zd_t
muzzy_decay_ms_default
;
static
atomic_zd_t
muzzy_decay_ms_default
;
const
uint64_t
h_steps
[
SMOOTHSTEP_NSTEPS
]
=
{
emap_t
arena_emap_global
;
#define STEP(step, h, x, y) \
pa_central_t
arena_pa_central_global
;
h,
SMOOTHSTEP
#undef STEP
};
static
div_info_t
arena_binind_div_info
[
SC_NBINS
];
div_info_t
arena_binind_div_info
[
SC_NBINS
];
size_t
opt_oversize_threshold
=
OVERSIZE_THRESHOLD_DEFAULT
;
size_t
opt_oversize_threshold
=
OVERSIZE_THRESHOLD_DEFAULT
;
size_t
oversize_threshold
=
OVERSIZE_THRESHOLD_DEFAULT
;
size_t
oversize_threshold
=
OVERSIZE_THRESHOLD_DEFAULT
;
uint32_t
arena_bin_offsets
[
SC_NBINS
];
static
unsigned
nbins_total
;
static
unsigned
huge_arena_ind
;
static
unsigned
huge_arena_ind
;
const
arena_config_t
arena_config_default
=
{
/* .extent_hooks = */
(
extent_hooks_t
*
)
&
ehooks_default_extent_hooks
,
/* .metadata_use_hooks = */
true
,
};
/******************************************************************************/
/******************************************************************************/
/*
/*
* Function prototypes for static functions that are referenced prior to
* Function prototypes for static functions that are referenced prior to
* definition.
* definition.
*/
*/
static
void
arena_decay_to_limit
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_decay_t
*
decay
,
extents_t
*
extents
,
bool
all
,
size_t
npages_limit
,
size_t
npages_decay_max
,
bool
is_background_thread
);
static
bool
arena_decay_dirty
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
static
bool
arena_decay_dirty
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
bool
is_background_thread
,
bool
all
);
bool
is_background_thread
,
bool
all
);
static
void
arena_dalloc_bin_slab
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_t
*
slab
,
static
void
arena_bin_lower_slab
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
edata_t
*
slab
,
bin_t
*
bin
);
static
void
arena_bin_lower_slab
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_t
*
slab
,
bin_t
*
bin
);
bin_t
*
bin
);
static
void
arena_maybe_do_deferred_work
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
decay_t
*
decay
,
size_t
npages_new
);
/******************************************************************************/
/******************************************************************************/
...
@@ -72,19 +76,17 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
...
@@ -72,19 +76,17 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
size_t
*
nactive
,
size_t
*
ndirty
,
size_t
*
nmuzzy
)
{
size_t
*
nactive
,
size_t
*
ndirty
,
size_t
*
nmuzzy
)
{
*
nthreads
+=
arena_nthreads_get
(
arena
,
false
);
*
nthreads
+=
arena_nthreads_get
(
arena
,
false
);
*
dss
=
dss_prec_names
[
arena_dss_prec_get
(
arena
)];
*
dss
=
dss_prec_names
[
arena_dss_prec_get
(
arena
)];
*
dirty_decay_ms
=
arena_dirty_decay_ms_get
(
arena
);
*
dirty_decay_ms
=
arena_decay_ms_get
(
arena
,
extent_state_dirty
);
*
muzzy_decay_ms
=
arena_muzzy_decay_ms_get
(
arena
);
*
muzzy_decay_ms
=
arena_decay_ms_get
(
arena
,
extent_state_muzzy
);
*
nactive
+=
atomic_load_zu
(
&
arena
->
nactive
,
ATOMIC_RELAXED
);
pa_shard_basic_stats_merge
(
&
arena
->
pa_shard
,
nactive
,
ndirty
,
nmuzzy
);
*
ndirty
+=
extents_npages_get
(
&
arena
->
extents_dirty
);
*
nmuzzy
+=
extents_npages_get
(
&
arena
->
extents_muzzy
);
}
}
void
void
arena_stats_merge
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
unsigned
*
nthreads
,
arena_stats_merge
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
unsigned
*
nthreads
,
const
char
**
dss
,
ssize_t
*
dirty_decay_ms
,
ssize_t
*
muzzy_decay_ms
,
const
char
**
dss
,
ssize_t
*
dirty_decay_ms
,
ssize_t
*
muzzy_decay_ms
,
size_t
*
nactive
,
size_t
*
ndirty
,
size_t
*
nmuzzy
,
arena_stats_t
*
astats
,
size_t
*
nactive
,
size_t
*
ndirty
,
size_t
*
nmuzzy
,
arena_stats_t
*
astats
,
bin_stats_t
*
bstats
,
arena_stats_large_t
*
lstats
,
bin_stats_
data_
t
*
bstats
,
arena_stats_large_t
*
lstats
,
arena_stats_exten
ts_t
*
e
stats
)
{
pac_estats_t
*
estats
,
hpa_shard_stats_t
*
hpastats
,
sec_sta
ts_t
*
sec
stats
)
{
cassert
(
config_stats
);
cassert
(
config_stats
);
arena_basic_stats_merge
(
tsdn
,
arena
,
nthreads
,
dss
,
dirty_decay_ms
,
arena_basic_stats_merge
(
tsdn
,
arena
,
nthreads
,
dss
,
dirty_decay_ms
,
...
@@ -93,122 +95,74 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
...
@@ -93,122 +95,74 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
size_t
base_allocated
,
base_resident
,
base_mapped
,
metadata_thp
;
size_t
base_allocated
,
base_resident
,
base_mapped
,
metadata_thp
;
base_stats_get
(
tsdn
,
arena
->
base
,
&
base_allocated
,
&
base_resident
,
base_stats_get
(
tsdn
,
arena
->
base
,
&
base_allocated
,
&
base_resident
,
&
base_mapped
,
&
metadata_thp
);
&
base_mapped
,
&
metadata_thp
);
size_t
pac_mapped_sz
=
pac_mapped
(
&
arena
->
pa_shard
.
pac
);
astats
->
mapped
+=
base_mapped
+
pac_mapped_sz
;
astats
->
resident
+=
base_resident
;
arena_stats_lock
(
tsdn
,
&
arena
->
stats
);
LOCKEDINT_MTX_LOCK
(
tsdn
,
arena
->
stats
.
mtx
);
arena_stats_accum_zu
(
&
astats
->
mapped
,
base_mapped
+
arena_stats_read_zu
(
tsdn
,
&
arena
->
stats
,
&
arena
->
stats
.
mapped
));
arena_stats_accum_zu
(
&
astats
->
retained
,
extents_npages_get
(
&
arena
->
extents_retained
)
<<
LG_PAGE
);
atomic_store_zu
(
&
astats
->
extent_avail
,
atomic_load_zu
(
&
arena
->
extent_avail_cnt
,
ATOMIC_RELAXED
),
ATOMIC_RELAXED
);
arena_stats_accum_u64
(
&
astats
->
decay_dirty
.
npurge
,
astats
->
base
+=
base_allocated
;
arena_stats_read_u64
(
tsdn
,
&
arena
->
stats
,
atomic_load_add_store_zu
(
&
astats
->
internal
,
arena_internal_get
(
arena
));
&
arena
->
stats
.
decay_dirty
.
npurge
));
astats
->
metadata_thp
+=
metadata_thp
;
arena_stats_accum_u64
(
&
astats
->
decay_dirty
.
nmadvise
,
arena_stats_read_u64
(
tsdn
,
&
arena
->
stats
,
&
arena
->
stats
.
decay_dirty
.
nmadvise
));
arena_stats_accum_u64
(
&
astats
->
decay_dirty
.
purged
,
arena_stats_read_u64
(
tsdn
,
&
arena
->
stats
,
&
arena
->
stats
.
decay_dirty
.
purged
));
arena_stats_accum_u64
(
&
astats
->
decay_muzzy
.
npurge
,
arena_stats_read_u64
(
tsdn
,
&
arena
->
stats
,
&
arena
->
stats
.
decay_muzzy
.
npurge
));
arena_stats_accum_u64
(
&
astats
->
decay_muzzy
.
nmadvise
,
arena_stats_read_u64
(
tsdn
,
&
arena
->
stats
,
&
arena
->
stats
.
decay_muzzy
.
nmadvise
));
arena_stats_accum_u64
(
&
astats
->
decay_muzzy
.
purged
,
arena_stats_read_u64
(
tsdn
,
&
arena
->
stats
,
&
arena
->
stats
.
decay_muzzy
.
purged
));
arena_stats_accum_zu
(
&
astats
->
base
,
base_allocated
);
arena_stats_accum_zu
(
&
astats
->
internal
,
arena_internal_get
(
arena
));
arena_stats_accum_zu
(
&
astats
->
metadata_thp
,
metadata_thp
);
arena_stats_accum_zu
(
&
astats
->
resident
,
base_resident
+
(((
atomic_load_zu
(
&
arena
->
nactive
,
ATOMIC_RELAXED
)
+
extents_npages_get
(
&
arena
->
extents_dirty
)
+
extents_npages_get
(
&
arena
->
extents_muzzy
))
<<
LG_PAGE
)));
arena_stats_accum_zu
(
&
astats
->
abandoned_vm
,
atomic_load_zu
(
&
arena
->
stats
.
abandoned_vm
,
ATOMIC_RELAXED
));
for
(
szind_t
i
=
0
;
i
<
SC_NSIZES
-
SC_NBINS
;
i
++
)
{
for
(
szind_t
i
=
0
;
i
<
SC_NSIZES
-
SC_NBINS
;
i
++
)
{
uint64_t
nmalloc
=
arena_stats_read_u64
(
tsdn
,
&
arena
->
stats
,
uint64_t
nmalloc
=
locked_read_u64
(
tsdn
,
LOCKEDINT_MTX
(
arena
->
stats
.
mtx
),
&
arena
->
stats
.
lstats
[
i
].
nmalloc
);
&
arena
->
stats
.
lstats
[
i
].
nmalloc
);
arena_stats_accum_u64
(
&
lstats
[
i
].
nmalloc
,
nmalloc
);
locked_inc_u64_unsynchronized
(
&
lstats
[
i
].
nmalloc
,
nmalloc
);
arena_stats_accum_u64
(
&
astats
->
nmalloc_large
,
nmalloc
)
;
astats
->
nmalloc_large
+=
nmalloc
;
uint64_t
ndalloc
=
arena_stats_read_u64
(
tsdn
,
&
arena
->
stats
,
uint64_t
ndalloc
=
locked_read_u64
(
tsdn
,
LOCKEDINT_MTX
(
arena
->
stats
.
mtx
),
&
arena
->
stats
.
lstats
[
i
].
ndalloc
);
&
arena
->
stats
.
lstats
[
i
].
ndalloc
);
arena_stats_accum_u64
(
&
lstats
[
i
].
ndalloc
,
ndalloc
);
locked_inc_u64_unsynchronized
(
&
lstats
[
i
].
ndalloc
,
ndalloc
);
arena_stats_accum_u64
(
&
astats
->
ndalloc_large
,
ndalloc
)
;
astats
->
ndalloc_large
+=
ndalloc
;
uint64_t
nrequests
=
arena_stats_read_u64
(
tsdn
,
&
arena
->
stats
,
uint64_t
nrequests
=
locked_read_u64
(
tsdn
,
LOCKEDINT_MTX
(
arena
->
stats
.
mtx
),
&
arena
->
stats
.
lstats
[
i
].
nrequests
);
&
arena
->
stats
.
lstats
[
i
].
nrequests
);
arena_stats_accum_u64
(
&
lstats
[
i
].
nrequests
,
locked_inc_u64_unsynchronized
(
&
lstats
[
i
].
nrequests
,
nmalloc
+
nrequests
);
arena_stats_accum_u64
(
&
astats
->
nrequests_large
,
nmalloc
+
nrequests
);
nmalloc
+
nrequests
);
astats
->
nrequests_large
+=
nmalloc
+
nrequests
;
/* nfill == nmalloc for large currently. */
/* nfill == nmalloc for large currently. */
arena_stats_accum_u64
(
&
lstats
[
i
].
nfills
,
nmalloc
);
locked_inc_u64_unsynchronized
(
&
lstats
[
i
].
nfills
,
nmalloc
);
arena_stats_accum_u64
(
&
astats
->
nfills_large
,
nmalloc
)
;
astats
->
nfills_large
+=
nmalloc
;
uint64_t
nflush
=
arena_stats_read_u64
(
tsdn
,
&
arena
->
stats
,
uint64_t
nflush
=
locked_read_u64
(
tsdn
,
LOCKEDINT_MTX
(
arena
->
stats
.
mtx
),
&
arena
->
stats
.
lstats
[
i
].
nflushes
);
&
arena
->
stats
.
lstats
[
i
].
nflushes
);
arena_stats_accum_u64
(
&
lstats
[
i
].
nflushes
,
nflush
);
locked_inc_u64_unsynchronized
(
&
lstats
[
i
].
nflushes
,
nflush
);
arena_stats_accum_u64
(
&
astats
->
nflushes_large
,
nflush
)
;
astats
->
nflushes_large
+=
nflush
;
assert
(
nmalloc
>=
ndalloc
);
assert
(
nmalloc
>=
ndalloc
);
assert
(
nmalloc
-
ndalloc
<=
SIZE_T_MAX
);
assert
(
nmalloc
-
ndalloc
<=
SIZE_T_MAX
);
size_t
curlextents
=
(
size_t
)(
nmalloc
-
ndalloc
);
size_t
curlextents
=
(
size_t
)(
nmalloc
-
ndalloc
);
lstats
[
i
].
curlextents
+=
curlextents
;
lstats
[
i
].
curlextents
+=
curlextents
;
arena_stats_accum_zu
(
&
astats
->
allocated_large
,
astats
->
allocated_large
+=
curlextents
*
sz_index2size
(
SC_NBINS
+
i
));
curlextents
*
sz_index2size
(
SC_NBINS
+
i
);
}
for
(
pszind_t
i
=
0
;
i
<
SC_NPSIZES
;
i
++
)
{
size_t
dirty
,
muzzy
,
retained
,
dirty_bytes
,
muzzy_bytes
,
retained_bytes
;
dirty
=
extents_nextents_get
(
&
arena
->
extents_dirty
,
i
);
muzzy
=
extents_nextents_get
(
&
arena
->
extents_muzzy
,
i
);
retained
=
extents_nextents_get
(
&
arena
->
extents_retained
,
i
);
dirty_bytes
=
extents_nbytes_get
(
&
arena
->
extents_dirty
,
i
);
muzzy_bytes
=
extents_nbytes_get
(
&
arena
->
extents_muzzy
,
i
);
retained_bytes
=
extents_nbytes_get
(
&
arena
->
extents_retained
,
i
);
atomic_store_zu
(
&
estats
[
i
].
ndirty
,
dirty
,
ATOMIC_RELAXED
);
atomic_store_zu
(
&
estats
[
i
].
nmuzzy
,
muzzy
,
ATOMIC_RELAXED
);
atomic_store_zu
(
&
estats
[
i
].
nretained
,
retained
,
ATOMIC_RELAXED
);
atomic_store_zu
(
&
estats
[
i
].
dirty_bytes
,
dirty_bytes
,
ATOMIC_RELAXED
);
atomic_store_zu
(
&
estats
[
i
].
muzzy_bytes
,
muzzy_bytes
,
ATOMIC_RELAXED
);
atomic_store_zu
(
&
estats
[
i
].
retained_bytes
,
retained_bytes
,
ATOMIC_RELAXED
);
}
}
arena_stats_unlock
(
tsdn
,
&
arena
->
stats
);
pa_shard_stats_merge
(
tsdn
,
&
arena
->
pa_shard
,
&
astats
->
pa_shard_stats
,
estats
,
hpastats
,
secstats
,
&
astats
->
resident
);
LOCKEDINT_MTX_UNLOCK
(
tsdn
,
arena
->
stats
.
mtx
);
/* tcache_bytes counts currently cached bytes. */
/* Currently cached bytes and sanitizer-stashed bytes in tcache. */
atomic_store_zu
(
&
astats
->
tcache_bytes
,
0
,
ATOMIC_RELAXED
);
astats
->
tcache_bytes
=
0
;
astats
->
tcache_stashed_bytes
=
0
;
malloc_mutex_lock
(
tsdn
,
&
arena
->
tcache_ql_mtx
);
malloc_mutex_lock
(
tsdn
,
&
arena
->
tcache_ql_mtx
);
cache_bin_array_descriptor_t
*
descriptor
;
cache_bin_array_descriptor_t
*
descriptor
;
ql_foreach
(
descriptor
,
&
arena
->
cache_bin_array_descriptor_ql
,
link
)
{
ql_foreach
(
descriptor
,
&
arena
->
cache_bin_array_descriptor_ql
,
link
)
{
szind_t
i
=
0
;
for
(
szind_t
i
=
0
;
i
<
nhbins
;
i
++
)
{
for
(;
i
<
SC_NBINS
;
i
++
)
{
cache_bin_t
*
cache_bin
=
&
descriptor
->
bins
[
i
];
cache_bin_t
*
tbin
=
&
descriptor
->
bins_small
[
i
];
cache_bin_sz_t
ncached
,
nstashed
;
arena_stats_accum_zu
(
&
astats
->
tcache_bytes
,
cache_bin_nitems_get_remote
(
cache_bin
,
tbin
->
ncached
*
sz_index2size
(
i
));
&
tcache_bin_info
[
i
],
&
ncached
,
&
nstashed
);
}
for
(;
i
<
nhbins
;
i
++
)
{
astats
->
tcache_bytes
+=
ncached
*
sz_index2size
(
i
);
cache_bin_t
*
tbin
=
&
descriptor
->
bins_large
[
i
];
astats
->
tcache_stashed_bytes
+=
nstashed
*
arena_stats_accum_zu
(
&
astats
->
tcache_bytes
,
sz_index2size
(
i
);
tbin
->
ncached
*
sz_index2size
(
i
));
}
}
}
}
malloc_mutex_prof_read
(
tsdn
,
malloc_mutex_prof_read
(
tsdn
,
...
@@ -224,21 +178,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
...
@@ -224,21 +178,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
/* Gather per arena mutex profiling data. */
/* Gather per arena mutex profiling data. */
READ_ARENA_MUTEX_PROF_DATA
(
large_mtx
,
arena_prof_mutex_large
);
READ_ARENA_MUTEX_PROF_DATA
(
large_mtx
,
arena_prof_mutex_large
);
READ_ARENA_MUTEX_PROF_DATA
(
extent_avail_mtx
,
arena_prof_mutex_extent_avail
)
READ_ARENA_MUTEX_PROF_DATA
(
extents_dirty
.
mtx
,
arena_prof_mutex_extents_dirty
)
READ_ARENA_MUTEX_PROF_DATA
(
extents_muzzy
.
mtx
,
arena_prof_mutex_extents_muzzy
)
READ_ARENA_MUTEX_PROF_DATA
(
extents_retained
.
mtx
,
arena_prof_mutex_extents_retained
)
READ_ARENA_MUTEX_PROF_DATA
(
decay_dirty
.
mtx
,
arena_prof_mutex_decay_dirty
)
READ_ARENA_MUTEX_PROF_DATA
(
decay_muzzy
.
mtx
,
arena_prof_mutex_decay_muzzy
)
READ_ARENA_MUTEX_PROF_DATA
(
base
->
mtx
,
READ_ARENA_MUTEX_PROF_DATA
(
base
->
mtx
,
arena_prof_mutex_base
)
arena_prof_mutex_base
)
;
#undef READ_ARENA_MUTEX_PROF_DATA
#undef READ_ARENA_MUTEX_PROF_DATA
pa_shard_mtx_stats_read
(
tsdn
,
&
arena
->
pa_shard
,
astats
->
mutex_prof_data
);
nstime_copy
(
&
astats
->
uptime
,
&
arena
->
create_time
);
nstime_copy
(
&
astats
->
uptime
,
&
arena
->
create_time
);
nstime_update
(
&
astats
->
uptime
);
nstime_update
(
&
astats
->
uptime
);
...
@@ -247,55 +191,67 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
...
@@ -247,55 +191,67 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
for
(
szind_t
i
=
0
;
i
<
SC_NBINS
;
i
++
)
{
for
(
szind_t
i
=
0
;
i
<
SC_NBINS
;
i
++
)
{
for
(
unsigned
j
=
0
;
j
<
bin_infos
[
i
].
n_shards
;
j
++
)
{
for
(
unsigned
j
=
0
;
j
<
bin_infos
[
i
].
n_shards
;
j
++
)
{
bin_stats_merge
(
tsdn
,
&
bstats
[
i
],
bin_stats_merge
(
tsdn
,
&
bstats
[
i
],
&
arena
->
bins
[
i
].
bin_shards
[
j
]
);
arena
_get_bin
(
arena
,
i
,
j
)
);
}
}
}
}
}
}
void
static
void
arena_extents_dirty_dalloc
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_background_thread_inactivity_check
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_hooks_t
**
r_extent_hooks
,
extent_t
*
extent
)
{
bool
is_background_thread
)
{
if
(
!
background_thread_enabled
()
||
is_background_thread
)
{
return
;
}
background_thread_info_t
*
info
=
arena_background_thread_info_get
(
arena
);
if
(
background_thread_indefinite_sleep
(
info
))
{
arena_maybe_do_deferred_work
(
tsdn
,
arena
,
&
arena
->
pa_shard
.
pac
.
decay_dirty
,
0
);
}
}
/*
* React to deferred work generated by a PAI function.
*/
void
arena_handle_deferred_work
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
witness_assert_depth_to_rank
(
tsdn_witness_tsdp_get
(
tsdn
),
witness_assert_depth_to_rank
(
tsdn_witness_tsdp_get
(
tsdn
),
WITNESS_RANK_CORE
,
0
);
WITNESS_RANK_CORE
,
0
);
extents_dalloc
(
tsdn
,
arena
,
r_extent_hooks
,
&
arena
->
extents_dirty
,
if
(
decay_immediately
(
&
arena
->
pa_shard
.
pac
.
decay_dirty
))
{
extent
);
if
(
arena_dirty_decay_ms_get
(
arena
)
==
0
)
{
arena_decay_dirty
(
tsdn
,
arena
,
false
,
true
);
arena_decay_dirty
(
tsdn
,
arena
,
false
,
true
);
}
else
{
arena_background_thread_inactivity_check
(
tsdn
,
arena
,
false
);
}
}
arena_background_thread_inactivity_check
(
tsdn
,
arena
,
false
);
}
}
static
void
*
static
void
*
arena_slab_reg_alloc
(
e
xtent
_t
*
slab
,
const
bin_info_t
*
bin_info
)
{
arena_slab_reg_alloc
(
e
data
_t
*
slab
,
const
bin_info_t
*
bin_info
)
{
void
*
ret
;
void
*
ret
;
arena_
slab_data_t
*
slab_data
=
e
xtent
_slab_data_get
(
slab
);
slab_data_t
*
slab_data
=
e
data
_slab_data_get
(
slab
);
size_t
regind
;
size_t
regind
;
assert
(
e
xtent
_nfree_get
(
slab
)
>
0
);
assert
(
e
data
_nfree_get
(
slab
)
>
0
);
assert
(
!
bitmap_full
(
slab_data
->
bitmap
,
&
bin_info
->
bitmap_info
));
assert
(
!
bitmap_full
(
slab_data
->
bitmap
,
&
bin_info
->
bitmap_info
));
regind
=
bitmap_sfu
(
slab_data
->
bitmap
,
&
bin_info
->
bitmap_info
);
regind
=
bitmap_sfu
(
slab_data
->
bitmap
,
&
bin_info
->
bitmap_info
);
ret
=
(
void
*
)((
uintptr_t
)
e
xtent
_addr_get
(
slab
)
+
ret
=
(
void
*
)((
uintptr_t
)
e
data
_addr_get
(
slab
)
+
(
uintptr_t
)(
bin_info
->
reg_size
*
regind
));
(
uintptr_t
)(
bin_info
->
reg_size
*
regind
));
e
xtent
_nfree_dec
(
slab
);
e
data
_nfree_dec
(
slab
);
return
ret
;
return
ret
;
}
}
static
void
static
void
arena_slab_reg_alloc_batch
(
e
xtent
_t
*
slab
,
const
bin_info_t
*
bin_info
,
arena_slab_reg_alloc_batch
(
e
data
_t
*
slab
,
const
bin_info_t
*
bin_info
,
unsigned
cnt
,
void
**
ptrs
)
{
unsigned
cnt
,
void
**
ptrs
)
{
arena_
slab_data_t
*
slab_data
=
e
xtent
_slab_data_get
(
slab
);
slab_data_t
*
slab_data
=
e
data
_slab_data_get
(
slab
);
assert
(
e
xtent
_nfree_get
(
slab
)
>=
cnt
);
assert
(
e
data
_nfree_get
(
slab
)
>=
cnt
);
assert
(
!
bitmap_full
(
slab_data
->
bitmap
,
&
bin_info
->
bitmap_info
));
assert
(
!
bitmap_full
(
slab_data
->
bitmap
,
&
bin_info
->
bitmap_info
));
#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
for
(
unsigned
i
=
0
;
i
<
cnt
;
i
++
)
{
for
(
unsigned
i
=
0
;
i
<
cnt
;
i
++
)
{
size_t
regind
=
bitmap_sfu
(
slab_data
->
bitmap
,
size_t
regind
=
bitmap_sfu
(
slab_data
->
bitmap
,
&
bin_info
->
bitmap_info
);
&
bin_info
->
bitmap_info
);
*
(
ptrs
+
i
)
=
(
void
*
)((
uintptr_t
)
e
xtent
_addr_get
(
slab
)
+
*
(
ptrs
+
i
)
=
(
void
*
)((
uintptr_t
)
e
data
_addr_get
(
slab
)
+
(
uintptr_t
)(
bin_info
->
reg_size
*
regind
));
(
uintptr_t
)(
bin_info
->
reg_size
*
regind
));
}
}
#else
#else
...
@@ -316,7 +272,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
...
@@ -316,7 +272,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
* Load from memory locations only once, outside the
* Load from memory locations only once, outside the
* hot loop below.
* hot loop below.
*/
*/
uintptr_t
base
=
(
uintptr_t
)
e
xtent
_addr_get
(
slab
);
uintptr_t
base
=
(
uintptr_t
)
e
data
_addr_get
(
slab
);
uintptr_t
regsize
=
(
uintptr_t
)
bin_info
->
reg_size
;
uintptr_t
regsize
=
(
uintptr_t
)
bin_info
->
reg_size
;
while
(
pop
--
)
{
while
(
pop
--
)
{
size_t
bit
=
cfs_lu
(
&
g
);
size_t
bit
=
cfs_lu
(
&
g
);
...
@@ -328,56 +284,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
...
@@ -328,56 +284,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
slab_data
->
bitmap
[
group
]
=
g
;
slab_data
->
bitmap
[
group
]
=
g
;
}
}
#endif
#endif
extent_nfree_sub
(
slab
,
cnt
);
edata_nfree_sub
(
slab
,
cnt
);
}
#ifndef JEMALLOC_JET
static
#endif
size_t
arena_slab_regind
(
extent_t
*
slab
,
szind_t
binind
,
const
void
*
ptr
)
{
size_t
diff
,
regind
;
/* Freeing a pointer outside the slab can cause assertion failure. */
assert
((
uintptr_t
)
ptr
>=
(
uintptr_t
)
extent_addr_get
(
slab
));
assert
((
uintptr_t
)
ptr
<
(
uintptr_t
)
extent_past_get
(
slab
));
/* Freeing an interior pointer can cause assertion failure. */
assert
(((
uintptr_t
)
ptr
-
(
uintptr_t
)
extent_addr_get
(
slab
))
%
(
uintptr_t
)
bin_infos
[
binind
].
reg_size
==
0
);
diff
=
(
size_t
)((
uintptr_t
)
ptr
-
(
uintptr_t
)
extent_addr_get
(
slab
));
/* Avoid doing division with a variable divisor. */
regind
=
div_compute
(
&
arena_binind_div_info
[
binind
],
diff
);
assert
(
regind
<
bin_infos
[
binind
].
nregs
);
return
regind
;
}
static
void
arena_slab_reg_dalloc
(
extent_t
*
slab
,
arena_slab_data_t
*
slab_data
,
void
*
ptr
)
{
szind_t
binind
=
extent_szind_get
(
slab
);
const
bin_info_t
*
bin_info
=
&
bin_infos
[
binind
];
size_t
regind
=
arena_slab_regind
(
slab
,
binind
,
ptr
);
assert
(
extent_nfree_get
(
slab
)
<
bin_info
->
nregs
);
/* Freeing an unallocated pointer can cause assertion failure. */
assert
(
bitmap_get
(
slab_data
->
bitmap
,
&
bin_info
->
bitmap_info
,
regind
));
bitmap_unset
(
slab_data
->
bitmap
,
&
bin_info
->
bitmap_info
,
regind
);
extent_nfree_inc
(
slab
);
}
static
void
arena_nactive_add
(
arena_t
*
arena
,
size_t
add_pages
)
{
atomic_fetch_add_zu
(
&
arena
->
nactive
,
add_pages
,
ATOMIC_RELAXED
);
}
static
void
arena_nactive_sub
(
arena_t
*
arena
,
size_t
sub_pages
)
{
assert
(
atomic_load_zu
(
&
arena
->
nactive
,
ATOMIC_RELAXED
)
>=
sub_pages
);
atomic_fetch_sub_zu
(
&
arena
->
nactive
,
sub_pages
,
ATOMIC_RELAXED
);
}
}
static
void
static
void
...
@@ -392,7 +299,7 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
...
@@ -392,7 +299,7 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
index
=
sz_size2index
(
usize
);
index
=
sz_size2index
(
usize
);
hindex
=
(
index
>=
SC_NBINS
)
?
index
-
SC_NBINS
:
0
;
hindex
=
(
index
>=
SC_NBINS
)
?
index
-
SC_NBINS
:
0
;
arena_stats_add_u64
(
tsdn
,
&
arena
->
stats
,
locked_inc_u64
(
tsdn
,
LOCKEDINT_MTX
(
arena
->
stats
.
mtx
)
,
&
arena
->
stats
.
lstats
[
hindex
].
nmalloc
,
1
);
&
arena
->
stats
.
lstats
[
hindex
].
nmalloc
,
1
);
}
}
...
@@ -408,627 +315,284 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
...
@@ -408,627 +315,284 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
index
=
sz_size2index
(
usize
);
index
=
sz_size2index
(
usize
);
hindex
=
(
index
>=
SC_NBINS
)
?
index
-
SC_NBINS
:
0
;
hindex
=
(
index
>=
SC_NBINS
)
?
index
-
SC_NBINS
:
0
;
arena_stats_add_u64
(
tsdn
,
&
arena
->
stats
,
locked_inc_u64
(
tsdn
,
LOCKEDINT_MTX
(
arena
->
stats
.
mtx
)
,
&
arena
->
stats
.
lstats
[
hindex
].
ndalloc
,
1
);
&
arena
->
stats
.
lstats
[
hindex
].
ndalloc
,
1
);
}
}
static
void
static
void
arena_large_ralloc_stats_update
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
size_t
oldusize
,
arena_large_ralloc_stats_update
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
size_t
oldusize
,
size_t
usize
)
{
size_t
usize
)
{
arena_large_dalloc_stats_update
(
tsdn
,
arena
,
oldusize
);
arena_large_malloc_stats_update
(
tsdn
,
arena
,
usize
);
arena_large_malloc_stats_update
(
tsdn
,
arena
,
usize
);
arena_large_dalloc_stats_update
(
tsdn
,
arena
,
oldusize
);
}
}
-static bool
-arena_may_have_muzzy(arena_t *arena) {
-	return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0));
-}
-
-extent_t *
+edata_t *
 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool *zero) {
-	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-
-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
-	    WITNESS_RANK_CORE, 0);
-
+    size_t alignment, bool zero) {
+	bool deferred_work_generated = false;
 	szind_t szind = sz_size2index(usize);
-	size_t mapped_add;
-	bool commit = true;
-	extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
-	    &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
-	    szind, zero, &commit);
-	if (extent == NULL && arena_may_have_muzzy(arena)) {
-		extent = extents_alloc(tsdn, arena, &extent_hooks,
-		    &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
-		    false, szind, zero, &commit);
-	}
-	size_t size = usize + sz_large_pad;
-	if (extent == NULL) {
-		extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
-		    usize, sz_large_pad, alignment, false, szind, zero,
-		    &commit);
-		if (config_stats) {
-			/*
-			 * extent may be NULL on OOM, but in that case
-			 * mapped_add isn't used below, so there's no need to
-			 * conditionlly set it to 0 here.
-			 */
-			mapped_add = size;
-		}
-	} else if (config_stats) {
-		mapped_add = 0;
-	}
-	if (extent != NULL) {
+	size_t esize = usize + sz_large_pad;
+
+	bool guarded = san_large_extent_decide_guard(tsdn,
+	    arena_get_ehooks(arena), esize, alignment);
+	edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
+	    /* slab */ false, szind, zero, guarded, &deferred_work_generated);
+	assert(deferred_work_generated == false);
+
+	if (edata != NULL) {
 		if (config_stats) {
-			arena_stats_lock(tsdn, &arena->stats);
+			LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
 			arena_large_malloc_stats_update(tsdn, arena, usize);
-			if (mapped_add != 0) {
-				arena_stats_add_zu(tsdn, &arena->stats,
-				    &arena->stats.mapped, mapped_add);
-			}
-			arena_stats_unlock(tsdn, &arena->stats);
+			LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
 		}
-		arena_nactive_add(arena, size >> LG_PAGE);
 	}
 
-	return extent;
+	if (edata != NULL && sz_large_pad != 0) {
+		arena_cache_oblivious_randomize(tsdn, arena, edata, alignment);
+	}
+
+	return edata;
 }
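In the new version of arena_extent_alloc_large above, allocation funnels through pa_alloc(), which reports through an out-parameter whether any background ("deferred") work was generated. A minimal, hedged sketch of that out-parameter convention, with hypothetical names only:

#include <stdbool.h>
#include <stdlib.h>

static void *
sketch_alloc(size_t size, bool *deferred_work_generated) {
    void *p = malloc(size);
    /* A real allocator would set this when purge work was queued. */
    *deferred_work_generated = false;
    return p;
}

static void *
alloc_and_handle(size_t size) {
    bool deferred = false;
    void *p = sketch_alloc(size, &deferred);
    if (deferred) {
        /* e.g. notify a background worker; elided in this sketch */
    }
    return p;
}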
 void
-arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
 	if (config_stats) {
-		arena_stats_lock(tsdn, &arena->stats);
+		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
 		arena_large_dalloc_stats_update(tsdn, arena,
-		    extent_usize_get(extent));
-		arena_stats_unlock(tsdn, &arena->stats);
+		    edata_usize_get(edata));
+		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
 	}
-	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
 }
 void
-arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
     size_t oldusize) {
-	size_t usize = extent_usize_get(extent);
-	size_t udiff = oldusize - usize;
+	size_t usize = edata_usize_get(edata);
 
 	if (config_stats) {
-		arena_stats_lock(tsdn, &arena->stats);
+		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
 		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
-		arena_stats_unlock(tsdn, &arena->stats);
+		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
 	}
-	arena_nactive_sub(arena, udiff >> LG_PAGE);
 }
 void
-arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
     size_t oldusize) {
-	size_t usize = extent_usize_get(extent);
-	size_t udiff = usize - oldusize;
+	size_t usize = edata_usize_get(edata);
 
 	if (config_stats) {
-		arena_stats_lock(tsdn, &arena->stats);
+		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
 		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
-		arena_stats_unlock(tsdn, &arena->stats);
+		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
 	}
-	arena_nactive_add(arena, udiff >> LG_PAGE);
 }
-
-static ssize_t
-arena_decay_ms_read(arena_decay_t *decay) {
-	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
-}
void
/*
arena_decay_ms_write
(
arena_decay_t
*
decay
,
ssize_t
decay_ms
)
{
* In situations where we're not forcing a decay (i.e. because the user
atomic_store_zd
(
&
decay
->
time_ms
,
decay_ms
,
ATOMIC_RELAXED
);
* specifically requested it), should we purge ourselves, or wait for the
}
* background thread to get to it.
static
void
arena_decay_deadline_init
(
arena_decay_t
*
decay
)
{
/*
* Generate a new deadline that is uniformly random within the next
* epoch after the current one.
*/
*/
nstime_copy
(
&
decay
->
deadline
,
&
decay
->
epoch
);
static
pac_purge_eagerness_t
nstime_add
(
&
decay
->
deadline
,
&
decay
->
interval
);
arena_decide_unforced_purge_eagerness
(
bool
is_background_thread
)
{
if
(
arena_decay_ms_read
(
decay
)
>
0
)
{
if
(
is_background_thread
)
{
nstime_t
jitter
;
return
PAC_PURGE_ALWAYS
;
}
else
if
(
!
is_background_thread
&&
background_thread_enabled
())
{
nstime_init
(
&
jitter
,
prng_range_u64
(
&
decay
->
jitter_state
,
return
PAC_PURGE_NEVER
;
nstime_ns
(
&
decay
->
interval
)));
}
else
{
nstime_add
(
&
decay
->
deadline
,
&
jitter
)
;
return
PAC_PURGE_ON_EPOCH_ADVANCE
;
}
}
}
}
static
bool
bool
arena_decay_deadline_reached
(
const
arena_decay_t
*
decay
,
const
nstime_t
*
time
)
{
arena_decay_ms_set
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_state_t
state
,
return
(
nstime_compare
(
&
decay
->
deadline
,
time
)
<=
0
);
ssize_t
decay_ms
)
{
}
pac_purge_eagerness_t
eagerness
=
arena_decide_unforced_purge_eagerness
(
/* is_background_thread */
false
);
static
size_t
return
pa_decay_ms_set
(
tsdn
,
&
arena
->
pa_shard
,
state
,
decay_ms
,
arena_decay_backlog_npages_limit
(
const
arena_decay_t
*
decay
)
{
eagerness
);
uint64_t
sum
;
size_t
npages_limit_backlog
;
unsigned
i
;
/*
* For each element of decay_backlog, multiply by the corresponding
* fixed-point smoothstep decay factor. Sum the products, then divide
* to round down to the nearest whole number of pages.
*/
sum
=
0
;
for
(
i
=
0
;
i
<
SMOOTHSTEP_NSTEPS
;
i
++
)
{
sum
+=
decay
->
backlog
[
i
]
*
h_steps
[
i
];
}
npages_limit_backlog
=
(
size_t
)(
sum
>>
SMOOTHSTEP_BFP
);
return
npages_limit_backlog
;
}
}
static
void
ssize_t
arena_decay_backlog_update_last
(
arena_decay_t
*
decay
,
size_t
current_npages
)
{
arena_decay_ms_get
(
arena_t
*
arena
,
extent_state_t
state
)
{
size_t
npages_delta
=
(
current_npages
>
decay
->
nunpurged
)
?
return
pa_decay_ms_get
(
&
arena
->
pa_shard
,
state
);
current_npages
-
decay
->
nunpurged
:
0
;
decay
->
backlog
[
SMOOTHSTEP_NSTEPS
-
1
]
=
npages_delta
;
if
(
config_debug
)
{
if
(
current_npages
>
decay
->
ceil_npages
)
{
decay
->
ceil_npages
=
current_npages
;
}
size_t
npages_limit
=
arena_decay_backlog_npages_limit
(
decay
);
assert
(
decay
->
ceil_npages
>=
npages_limit
);
if
(
decay
->
ceil_npages
>
npages_limit
)
{
decay
->
ceil_npages
=
npages_limit
;
}
}
}
}
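The removed arena_decay_backlog_npages_limit() above computes a fixed-point weighted sum of the per-epoch backlog. A self-contained sketch of the same computation shape follows; the step count, weights, and shift below are illustrative stand-ins, not the real SMOOTHSTEP_* values:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NSTEPS 4
#define FIXED_POINT_SHIFT 24	/* weights are scaled by 2^24 */

static size_t
backlog_limit(const size_t backlog[NSTEPS], const uint64_t weights[NSTEPS]) {
    uint64_t sum = 0;
    for (unsigned i = 0; i < NSTEPS; i++) {
        /* weights[] decay from ~1.0 toward 0.0 in fixed point. */
        sum += (uint64_t)backlog[i] * weights[i];
    }
    /* Divide by 2^FIXED_POINT_SHIFT, rounding down to whole pages. */
    return (size_t)(sum >> FIXED_POINT_SHIFT);
}

int
main(void) {
    size_t backlog[NSTEPS] = {100, 80, 40, 10};
    uint64_t weights[NSTEPS] = {1u << 24, 1u << 23, 1u << 22, 1u << 20};
    printf("limit = %zu pages\n", backlog_limit(backlog, weights));
    return 0;
}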
-static void
-arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
-    size_t current_npages) {
-	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
-		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
-		    sizeof(size_t));
-	} else {
-		size_t nadvance_z = (size_t)nadvance_u64;
-
-		assert((uint64_t)nadvance_z == nadvance_u64);
-
-		memmove(decay->backlog, &decay->backlog[nadvance_z],
-		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
-		if (nadvance_z > 1) {
-			memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
-			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
-		}
-	}
-
-	arena_decay_backlog_update_last(decay, current_npages);
-}
-
-static void
-arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, size_t current_npages, size_t npages_limit,
-    bool is_background_thread) {
-	if (current_npages > npages_limit) {
-		arena_decay_to_limit(tsdn, arena, decay, extents, false,
-		    npages_limit, current_npages - npages_limit,
-		    is_background_thread);
-	}
-}
-
-static void
-arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
-    size_t current_npages) {
-	assert(arena_decay_deadline_reached(decay, time));
-
-	nstime_t delta;
-	nstime_copy(&delta, time);
-	nstime_subtract(&delta, &decay->epoch);
-
-	uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
-	assert(nadvance_u64 > 0);
-
-	/* Add nadvance_u64 decay intervals to epoch. */
-	nstime_copy(&delta, &decay->interval);
-	nstime_imultiply(&delta, nadvance_u64);
-	nstime_add(&decay->epoch, &delta);
-
-	/* Set a new deadline. */
-	arena_decay_deadline_init(decay);
-
-	/* Update the backlog. */
-	arena_decay_backlog_update(decay, nadvance_u64, current_npages);
-}
-
-static void
-arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, const nstime_t *time, bool is_background_thread) {
-	size_t current_npages = extents_npages_get(extents);
-	arena_decay_epoch_advance_helper(decay, time, current_npages);
-
-	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
-	/* We may unlock decay->mtx when try_purge(). Finish logging first. */
-	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
-	    current_npages;
-
-	if (!background_thread_enabled() || is_background_thread) {
-		arena_decay_try_purge(tsdn, arena, decay, extents,
-		    current_npages, npages_limit, is_background_thread);
-	}
-}
-
-static void
-arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
-	arena_decay_ms_write(decay, decay_ms);
-	if (decay_ms > 0) {
-		nstime_init(&decay->interval, (uint64_t)decay_ms *
-		    KQU(1000000));
-		nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
-	}
-
-	nstime_init(&decay->epoch, 0);
-	nstime_update(&decay->epoch);
-	decay->jitter_state = (uint64_t)(uintptr_t)decay;
-	arena_decay_deadline_init(decay);
-	decay->nunpurged = 0;
-	memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
-}
-
-static bool
-arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
-    arena_stats_decay_t *stats) {
-	if (config_debug) {
-		for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
-			assert(((char *)decay)[i] == 0);
-		}
-		decay->ceil_npages = 0;
-	}
-	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
-	    malloc_mutex_rank_exclusive)) {
-		return true;
-	}
-	decay->purging = false;
-	arena_decay_reinit(decay, decay_ms);
-	/* Memory is zeroed, so there is no need to clear stats. */
-	if (config_stats) {
-		decay->stats = stats;
-	}
-	return false;
-}
+static bool
+arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
+    pac_decay_stats_t *decay_stats, ecache_t *ecache,
+    bool is_background_thread, bool all) {
+	if (all) {
+		malloc_mutex_lock(tsdn, &decay->mtx);
+		pac_decay_all(tsdn, &arena->pa_shard.pac, decay, decay_stats,
+		    ecache, /* fully_decay */ all);
+		malloc_mutex_unlock(tsdn, &decay->mtx);
+		return false;
+	}
+
+	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+		/* No need to wait if another thread is in progress. */
+		return true;
+	}
+	pac_purge_eagerness_t eagerness =
+	    arena_decide_unforced_purge_eagerness(is_background_thread);
+	bool epoch_advanced = pac_maybe_decay_purge(tsdn,
+	    &arena->pa_shard.pac, decay, decay_stats, ecache, eagerness);
+	size_t npages_new;
+	if (epoch_advanced) {
+		/* Backlog is updated on epoch advance. */
+		npages_new = decay_epoch_npages_delta(decay);
+	}
+	malloc_mutex_unlock(tsdn, &decay->mtx);
+
+	if (have_background_thread && background_thread_enabled() &&
+	    epoch_advanced && !is_background_thread) {
+		arena_maybe_do_deferred_work(tsdn, arena, decay, npages_new);
+	}
+
+	return false;
+}
-static bool
-arena_decay_ms_valid(ssize_t decay_ms) {
-	if (decay_ms < -1) {
-		return false;
-	}
-	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
-	    KQU(1000)) {
-		return true;
-	}
-	return false;
-}
-
-static bool
-arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, bool is_background_thread) {
-	malloc_mutex_assert_owner(tsdn, &decay->mtx);
-
-	/* Purge all or nothing if the option is disabled. */
-	ssize_t decay_ms = arena_decay_ms_read(decay);
-	if (decay_ms <= 0) {
-		if (decay_ms == 0) {
-			arena_decay_to_limit(tsdn, arena, decay, extents, false,
-			    0, extents_npages_get(extents),
-			    is_background_thread);
-		}
-		return false;
-	}
-
-	nstime_t time;
-	nstime_init(&time, 0);
-	nstime_update(&time);
-	if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
-	    > 0)) {
-		/*
-		 * Time went backwards.  Move the epoch back in time and
-		 * generate a new deadline, with the expectation that time
-		 * typically flows forward for long enough periods of time that
-		 * epochs complete.  Unfortunately, this strategy is susceptible
-		 * to clock jitter triggering premature epoch advances, but
-		 * clock jitter estimation and compensation isn't feasible here
-		 * because calls into this code are event-driven.
-		 */
-		nstime_copy(&decay->epoch, &time);
-		arena_decay_deadline_init(decay);
-	} else {
-		/* Verify that time does not go backwards. */
-		assert(nstime_compare(&decay->epoch, &time) <= 0);
-	}
-
-	/*
-	 * If the deadline has been reached, advance to the current epoch and
-	 * purge to the new limit if necessary.  Note that dirty pages created
-	 * during the current epoch are not subject to purge until a future
-	 * epoch, so as a result purging only happens during epoch advances, or
-	 * being triggered by background threads (scheduled event).
-	 */
-	bool advance_epoch = arena_decay_deadline_reached(decay, &time);
-	if (advance_epoch) {
-		arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
-		    is_background_thread);
-	} else if (is_background_thread) {
-		arena_decay_try_purge(tsdn, arena, decay, extents,
-		    extents_npages_get(extents),
-		    arena_decay_backlog_npages_limit(decay),
-		    is_background_thread);
-	}
-
-	return advance_epoch;
-}
-
-static ssize_t
-arena_decay_ms_get(arena_decay_t *decay) {
-	return arena_decay_ms_read(decay);
-}
-
-ssize_t
-arena_dirty_decay_ms_get(arena_t *arena) {
-	return arena_decay_ms_get(&arena->decay_dirty);
-}
-
-ssize_t
-arena_muzzy_decay_ms_get(arena_t *arena) {
-	return arena_decay_ms_get(&arena->decay_muzzy);
-}
+static bool
+arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+    bool all) {
+	return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
+	    &arena->pa_shard.pac.stats->decay_dirty,
+	    &arena->pa_shard.pac.ecache_dirty, is_background_thread, all);
+}
+
+static bool
+arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+    bool all) {
+	if (pa_shard_dont_decay_muzzy(&arena->pa_shard)) {
+		return false;
+	}
+	return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
+	    &arena->pa_shard.pac.stats->decay_muzzy,
+	    &arena->pa_shard.pac.ecache_muzzy, is_background_thread, all);
+}
+
+void
+arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
+	if (all) {
+		/*
+		 * We should take a purge of "all" to mean "save as much memory
+		 * as possible", including flushing any caches (for situations
+		 * like thread death, or manual purge calls).
+		 */
+		sec_flush(tsdn, &arena->pa_shard.hpa_sec);
+	}
+	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
+		return;
+	}
+	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
+}
-static bool
-arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, ssize_t decay_ms) {
-	if (!arena_decay_ms_valid(decay_ms)) {
-		return true;
-	}
-
-	malloc_mutex_lock(tsdn, &decay->mtx);
-	/*
-	 * Restart decay backlog from scratch, which may cause many dirty pages
-	 * to be immediately purged.  It would conceptually be possible to map
-	 * the old backlog onto the new backlog, but there is no justification
-	 * for such complexity since decay_ms changes are intended to be
-	 * infrequent, either between the {-1, 0, >0} states, or a one-time
-	 * arbitrary change during initial arena configuration.
-	 */
-	arena_decay_reinit(decay, decay_ms);
-	arena_maybe_decay(tsdn, arena, decay, extents, false);
-	malloc_mutex_unlock(tsdn, &decay->mtx);
-
-	return false;
-}
-
-bool
-arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms) {
-	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
-	    &arena->extents_dirty, decay_ms);
-}
-
-bool
-arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms) {
-	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
-	    &arena->extents_muzzy, decay_ms);
-}
-
-static size_t
-arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
-    size_t npages_decay_max, extent_list_t *decay_extents) {
-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
-	    WITNESS_RANK_CORE, 0);
-
-	/* Stash extents according to npages_limit. */
-	size_t nstashed = 0;
-	extent_t *extent;
-	while (nstashed < npages_decay_max &&
-	    (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
-	    npages_limit)) != NULL) {
-		extent_list_append(decay_extents, extent);
-		nstashed += extent_size_get(extent) >> LG_PAGE;
-	}
-	return nstashed;
-}
-
-static size_t
-arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
-    bool all, extent_list_t *decay_extents, bool is_background_thread) {
-	size_t nmadvise, nunmapped;
-	size_t npurged;
-
-	if (config_stats) {
-		nmadvise = 0;
-		nunmapped = 0;
-	}
-	npurged = 0;
-
-	ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
-	for (extent_t *extent = extent_list_first(decay_extents); extent !=
-	    NULL; extent = extent_list_first(decay_extents)) {
-		if (config_stats) {
-			nmadvise++;
-		}
-		size_t npages = extent_size_get(extent) >> LG_PAGE;
-		npurged += npages;
-		extent_list_remove(decay_extents, extent);
-		switch (extents_state_get(extents)) {
-		case extent_state_active:
-			not_reached();
-		case extent_state_dirty:
-			if (!all && muzzy_decay_ms != 0 &&
-			    !extent_purge_lazy_wrapper(tsdn, arena,
-			    r_extent_hooks, extent, 0,
-			    extent_size_get(extent))) {
-				extents_dalloc(tsdn, arena, r_extent_hooks,
-				    &arena->extents_muzzy, extent);
-				arena_background_thread_inactivity_check(tsdn,
-				    arena, is_background_thread);
-				break;
-			}
-			/* Fall through. */
-		case extent_state_muzzy:
-			extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
-			    extent);
-			if (config_stats) {
-				nunmapped += npages;
-			}
-			break;
-		case extent_state_retained:
-		default:
-			not_reached();
-		}
-	}
-
-	if (config_stats) {
-		arena_stats_lock(tsdn, &arena->stats);
-		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
-		    1);
-		arena_stats_add_u64(tsdn, &arena->stats,
-		    &decay->stats->nmadvise, nmadvise);
-		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
-		    npurged);
-		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
-		    nunmapped << LG_PAGE);
-		arena_stats_unlock(tsdn, &arena->stats);
-	}
-
-	return npurged;
-}
+static bool
+arena_should_decay_early(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
+    background_thread_info_t *info, nstime_t *remaining_sleep,
+    size_t npages_new) {
+	malloc_mutex_assert_owner(tsdn, &info->mtx);
+
+	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+		return false;
+	}
+
+	if (!decay_gradually(decay)) {
+		malloc_mutex_unlock(tsdn, &decay->mtx);
+		return false;
+	}
+
+	nstime_init(remaining_sleep, background_thread_wakeup_time_get(info));
+	if (nstime_compare(remaining_sleep, &decay->epoch) <= 0) {
+		malloc_mutex_unlock(tsdn, &decay->mtx);
+		return false;
+	}
+	nstime_subtract(remaining_sleep, &decay->epoch);
+	if (npages_new > 0) {
+		uint64_t npurge_new = decay_npages_purge_in(decay,
+		    remaining_sleep, npages_new);
+		info->npages_to_purge_new += npurge_new;
+	}
+	malloc_mutex_unlock(tsdn, &decay->mtx);
+	return info->npages_to_purge_new > ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD;
+}
-/*
- * npages_limit: Decay at most npages_decay_max pages without violating the
- * invariant: (extents_npages_get(extents) >= npages_limit).  We need an upper
- * bound on number of pages in order to prevent unbounded growth (namely in
- * stashed), otherwise unbounded new pages could be added to extents during the
- * current decay run, so that the purging thread never finishes.
- */
-static void
-arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
-    bool is_background_thread) {
-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
-	    WITNESS_RANK_CORE, 1);
-	malloc_mutex_assert_owner(tsdn, &decay->mtx);
-
-	if (decay->purging) {
-		return;
-	}
-	decay->purging = true;
-	malloc_mutex_unlock(tsdn, &decay->mtx);
-
-	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
-
-	extent_list_t decay_extents;
-	extent_list_init(&decay_extents);
-
-	size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
-	    npages_limit, npages_decay_max, &decay_extents);
-	if (npurge != 0) {
-		size_t npurged = arena_decay_stashed(tsdn, arena,
-		    &extent_hooks, decay, extents, all, &decay_extents,
-		    is_background_thread);
-		assert(npurged == npurge);
-	}
-
-	malloc_mutex_lock(tsdn, &decay->mtx);
-	decay->purging = false;
-}
-
-static bool
-arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, bool is_background_thread, bool all) {
-	if (all) {
-		malloc_mutex_lock(tsdn, &decay->mtx);
-		arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
-		    extents_npages_get(extents), is_background_thread);
-		malloc_mutex_unlock(tsdn, &decay->mtx);
-		return false;
-	}
-
-	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
-		/* No need to wait if another thread is in progress. */
-		return true;
-	}
-
-	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
-	    is_background_thread);
-	size_t npages_new;
-	if (epoch_advanced) {
-		/* Backlog is updated on epoch advance. */
-		npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
-	}
-	malloc_mutex_unlock(tsdn, &decay->mtx);
-
-	if (have_background_thread && background_thread_enabled() &&
-	    epoch_advanced && !is_background_thread) {
-		background_thread_interval_check(tsdn, arena, decay,
-		    npages_new);
-	}
-
-	return false;
-}
-
-static bool
-arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
-    bool all) {
-	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
-	    &arena->extents_dirty, is_background_thread, all);
-}
+/*
+ * Check if deferred work needs to be done sooner than planned.
+ * For decay we might want to wake up earlier because of an influx of dirty
+ * pages. Rather than waiting for previously estimated time, we proactively
+ * purge those pages.
+ * If background thread sleeps indefinitely, always wake up because some
+ * deferred work has been generated.
+ */
+static void
+arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
+    size_t npages_new) {
+	background_thread_info_t *info = arena_background_thread_info_get(
+	    arena);
+	if (malloc_mutex_trylock(tsdn, &info->mtx)) {
+		/*
+		 * Background thread may hold the mutex for a long period of
+		 * time.  We'd like to avoid the variance on application
+		 * threads.  So keep this non-blocking, and leave the work to a
+		 * future epoch.
+		 */
+		return;
+	}
+
+	if (!background_thread_is_started(info)) {
+		goto label_done;
+	}
+
+	nstime_t remaining_sleep;
+	if (background_thread_indefinite_sleep(info)) {
+		background_thread_wakeup_early(info, NULL);
+	} else if (arena_should_decay_early(tsdn, arena, decay, info,
+	    &remaining_sleep, npages_new)) {
+		info->npages_to_purge_new = 0;
+		background_thread_wakeup_early(info, &remaining_sleep);
+	}
+label_done:
+	malloc_mutex_unlock(tsdn, &info->mtx);
+}
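The comment above describes when the purger should be woken ahead of schedule: always if it is sleeping indefinitely, otherwise only when the newly accumulated dirty pages cross a threshold. A hedged, self-contained sketch of that decision; the names and threshold value are illustrative, not jemalloc's:

#include <stdbool.h>
#include <stdint.h>

#define PURGE_EARLY_THRESHOLD 1024	/* pages; made-up value */

static bool
should_wake_purger_early(bool sleeping_indefinitely,
    uint64_t npages_due_before_wakeup) {
    if (sleeping_indefinitely) {
        /* Any deferred work justifies a wakeup. */
        return true;
    }
    return npages_due_before_wakeup > PURGE_EARLY_THRESHOLD;
}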
-static bool
-arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
-    bool all) {
-	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
-	    &arena->extents_muzzy, is_background_thread, all);
-}
-
-void
-arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
-	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
-		return;
-	}
-	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
-}
+/* Called from background threads. */
+void
+arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena) {
+	arena_decay(tsdn, arena, true, false);
+	pa_shard_do_deferred_work(tsdn, &arena->pa_shard);
+}
+
+void
+arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
+	bool deferred_work_generated = false;
+	pa_dalloc(tsdn, &arena->pa_shard, slab, &deferred_work_generated);
+	if (deferred_work_generated) {
+		arena_handle_deferred_work(tsdn, arena);
+	}
+}
-static void
-arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
-	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
-
-	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
-}
-
-static void
-arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
-	assert(extent_nfree_get(slab) > 0);
-	extent_heap_insert(&bin->slabs_nonfull, slab);
+static void
+arena_bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab) {
+	assert(edata_nfree_get(slab) > 0);
+	edata_heap_insert(&bin->slabs_nonfull, slab);
 	if (config_stats) {
 		bin->stats.nonfull_slabs++;
 	}
 }

 static void
-arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
-	extent_heap_remove(&bin->slabs_nonfull, slab);
+arena_bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab) {
+	edata_heap_remove(&bin->slabs_nonfull, slab);
 	if (config_stats) {
 		bin->stats.nonfull_slabs--;
 	}
 }

-static extent_t *
+static edata_t *
 arena_bin_slabs_nonfull_tryget(bin_t *bin) {
-	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
+	edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull);
 	if (slab == NULL) {
 		return NULL;
 	}
...
@@ -1040,30 +604,30 @@ arena_bin_slabs_nonfull_tryget(bin_t *bin) {
 }

 static void
-arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
-	assert(extent_nfree_get(slab) == 0);
+arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) {
+	assert(edata_nfree_get(slab) == 0);
 	/*
 	 * Tracking extents is required by arena_reset, which is not allowed
-	 * for auto arenas.  Bypass this step to avoid touching the extent
+	 * for auto arenas.  Bypass this step to avoid touching the edata
 	 * linkage (often results in cache misses) for auto arenas.
 	 */
 	if (arena_is_auto(arena)) {
 		return;
 	}
-	extent_list_append(&bin->slabs_full, slab);
+	edata_list_active_append(&bin->slabs_full, slab);
 }

 static void
-arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
+arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) {
 	if (arena_is_auto(arena)) {
 		return;
 	}
-	extent_list_remove(&bin->slabs_full, slab);
+	edata_list_active_remove(&bin->slabs_full, slab);
 }

 static void
 arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
-	extent_t *slab;
+	edata_t *slab;

 	malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
 	if (bin->slabcur != NULL) {
...
@@ -1073,13 +637,13 @@ arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
 		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
 	}
-	while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
+	while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
 		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
 	}
-	for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
-	    slab = extent_list_first(&bin->slabs_full)) {
+	for (slab = edata_list_active_first(&bin->slabs_full); slab != NULL;
+	    slab = edata_list_active_first(&bin->slabs_full)) {
 		arena_bin_slabs_full_remove(arena, bin, slab);
 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
 		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
...
@@ -1111,16 +675,15 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 	/* Large allocations. */
 	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
-	for (extent_t *extent = extent_list_first(&arena->large); extent !=
-	    NULL; extent = extent_list_first(&arena->large)) {
-		void *ptr = extent_base_get(extent);
+	for (edata_t *edata = edata_list_active_first(&arena->large);
+	    edata != NULL; edata = edata_list_active_first(&arena->large)) {
+		void *ptr = edata_base_get(edata);
 		size_t usize;

 		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
-		alloc_ctx_t alloc_ctx;
-		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
-		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
-		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+		emap_alloc_ctx_t alloc_ctx;
+		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+		    &alloc_ctx);
 		assert(alloc_ctx.szind != SC_NSIZES);

 		if (config_stats || (config_prof && opt_prof)) {
...
@@ -1131,7 +694,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 		if (config_prof && opt_prof) {
 			prof_free(tsd, ptr, usize, &alloc_ctx);
 		}
-		large_dalloc(tsd_tsdn(tsd), extent);
+		large_dalloc(tsd_tsdn(tsd), edata);
 		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
 	}
 	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
...
@@ -1139,32 +702,95 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 	/* Bins. */
 	for (unsigned i = 0; i < SC_NBINS; i++) {
 		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
-			arena_bin_reset(tsd, arena,
-			    &arena->bins[i].bin_shards[j]);
+			arena_bin_reset(tsd, arena,
+			    arena_get_bin(arena, i, j));
 		}
 	}
-
-	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
+	pa_shard_reset(tsd_tsdn(tsd), &arena->pa_shard);
 }

-static void
-arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
-	/*
-	 * Iterate over the retained extents and destroy them.  This gives the
-	 * extent allocator underlying the extent hooks an opportunity to unmap
-	 * all retained memory without having to keep its own metadata
-	 * structures.  In practice, virtual memory for dss-allocated extents is
-	 * leaked here, so best practice is to avoid dss for arenas to be
-	 * destroyed, or provide custom extent hooks that track retained
-	 * dss-based extents for later reuse.
-	 */
-	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
-	extent_t *extent;
-	while ((extent = extents_evict(tsdn, arena, &extent_hooks,
-	    &arena->extents_retained, 0)) != NULL) {
-		extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
-	}
-}
+static void
+arena_prepare_base_deletion_sync_finish(tsd_t *tsd, malloc_mutex_t **mutexes,
+    unsigned n_mtx) {
+	for (unsigned i = 0; i < n_mtx; i++) {
+		malloc_mutex_lock(tsd_tsdn(tsd), mutexes[i]);
+		malloc_mutex_unlock(tsd_tsdn(tsd), mutexes[i]);
+	}
+}
+
+#define ARENA_DESTROY_MAX_DELAYED_MTX 32
+static void
+arena_prepare_base_deletion_sync(tsd_t *tsd, malloc_mutex_t *mtx,
+    malloc_mutex_t **delayed_mtx, unsigned *n_delayed) {
+	if (!malloc_mutex_trylock(tsd_tsdn(tsd), mtx)) {
+		/* No contention. */
+		malloc_mutex_unlock(tsd_tsdn(tsd), mtx);
+		return;
+	}
+	unsigned n = *n_delayed;
+	assert(n < ARENA_DESTROY_MAX_DELAYED_MTX);
+	/* Add another to the batch. */
+	delayed_mtx[n++] = mtx;
+	if (n == ARENA_DESTROY_MAX_DELAYED_MTX) {
+		arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n);
+		n = 0;
+	}
+	*n_delayed = n;
+}
+
+static void
+arena_prepare_base_deletion(tsd_t *tsd, base_t *base_to_destroy) {
+	/*
+	 * In order to coalesce, emap_try_acquire_edata_neighbor will attempt to
+	 * check neighbor edata's state to determine eligibility.  This means
+	 * under certain conditions, the metadata from an arena can be accessed
+	 * w/o holding any locks from that arena.  In order to guarantee safe
+	 * memory access, the metadata and the underlying base allocator needs
+	 * to be kept alive, until all pending accesses are done.
+	 *
+	 * 1) with opt_retain, the arena boundary implies the is_head state
+	 * (tracked in the rtree leaf), and the coalesce flow will stop at the
+	 * head state branch.  Therefore no cross arena metadata access
+	 * possible.
+	 *
+	 * 2) w/o opt_retain, the arena id needs to be read from the edata_t,
+	 * meaning read only cross-arena metadata access is possible.  The
+	 * coalesce attempt will stop at the arena_id mismatch, and is always
+	 * under one of the ecache locks.  To allow safe passthrough of such
+	 * metadata accesses, the loop below will iterate through all manual
+	 * arenas' ecache locks.  As all the metadata from this base allocator
+	 * have been unlinked from the rtree, after going through all the
+	 * relevant ecache locks, it's safe to say that a) pending accesses are
+	 * all finished, and b) no new access will be generated.
+	 */
+	if (opt_retain) {
+		return;
+	}
+	unsigned destroy_ind = base_ind_get(base_to_destroy);
+	assert(destroy_ind >= manual_arena_base);
+
+	tsdn_t *tsdn = tsd_tsdn(tsd);
+	malloc_mutex_t *delayed_mtx[ARENA_DESTROY_MAX_DELAYED_MTX];
+	unsigned n_delayed = 0, total = narenas_total_get();
+	for (unsigned i = 0; i < total; i++) {
+		if (i == destroy_ind) {
+			continue;
+		}
+		arena_t *arena = arena_get(tsdn, i, false);
+		if (arena == NULL) {
+			continue;
+		}
+		pac_t *pac = &arena->pa_shard.pac;
+		arena_prepare_base_deletion_sync(tsd, &pac->ecache_dirty.mtx,
+		    delayed_mtx, &n_delayed);
+		arena_prepare_base_deletion_sync(tsd, &pac->ecache_muzzy.mtx,
+		    delayed_mtx, &n_delayed);
+		arena_prepare_base_deletion_sync(tsd, &pac->ecache_retained.mtx,
+		    delayed_mtx, &n_delayed);
+	}
+	arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n_delayed);
+}
+#undef ARENA_DESTROY_MAX_DELAYED_MTX
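The prepare-deletion helpers added above rely on a common quiescence idiom: acquiring and immediately releasing a mutex guarantees that any critical section that began before the acquire has already finished. A standalone pthread-based sketch of that idiom (illustrative only, not jemalloc code):

#include <pthread.h>

/* Wait for every in-flight critical section protected by these mutexes. */
static void
drain_pending_readers(pthread_mutex_t **mutexes, unsigned n) {
    for (unsigned i = 0; i < n; i++) {
        pthread_mutex_lock(mutexes[i]);
        pthread_mutex_unlock(mutexes[i]);
    }
}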
 void
 arena_destroy(tsd_t *tsd, arena_t *arena) {
...
@@ -1175,13 +801,10 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
 	/*
 	 * No allocations have occurred since arena_reset() was called.
 	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
-	 * extents, so only retained extents may remain.
+	 * extents, so only retained extents may remain and it's safe to call
+	 * pa_shard_destroy_retained.
 	 */
-	assert(extents_npages_get(&arena->extents_dirty) == 0);
-	assert(extents_npages_get(&arena->extents_muzzy) == 0);
-
-	/* Deallocate retained memory. */
-	arena_destroy_retained(tsd_tsdn(tsd), arena);
+	pa_shard_destroy(tsd_tsdn(tsd), &arena->pa_shard);

 	/*
 	 * Remove the arena pointer from the arenas array.  We rely on the fact
...
@@ -1197,316 +820,370 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
 	/*
 	 * Destroy the base allocator, which manages all metadata ever mapped by
-	 * this arena.
+	 * this arena.  The prepare function will make sure no pending access to
+	 * the metadata in this base anymore.
 	 */
+	arena_prepare_base_deletion(tsd, arena->base);
 	base_delete(tsd_tsdn(tsd), arena->base);
 }
-static extent_t *
-arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
-    szind_t szind) {
-	extent_t *slab;
-	bool zero, commit;
-
-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
-	    WITNESS_RANK_CORE, 0);
-
-	zero = false;
-	commit = true;
-	slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
-	    bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
-
-	if (config_stats && slab != NULL) {
-		arena_stats_mapped_add(tsdn, &arena->stats,
-		    bin_info->slab_size);
-	}
-
-	return slab;
-}
-
-static extent_t *
+static edata_t *
 arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
     const bin_info_t *bin_info) {
+	bool deferred_work_generated = false;
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
-	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-	szind_t szind = sz_size2index(bin_info->reg_size);
-	bool zero = false;
-	bool commit = true;
-	extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
-	    &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
-	    binind, &zero, &commit);
-	if (slab == NULL && arena_may_have_muzzy(arena)) {
-		slab = extents_alloc(tsdn, arena, &extent_hooks,
-		    &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
-		    true, binind, &zero, &commit);
-	}
-	if (slab == NULL) {
-		slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
-		    bin_info, szind);
-		if (slab == NULL) {
-			return NULL;
-		}
-	}
-	assert(extent_slab_get(slab));
-
-	/* Initialize slab internals. */
-	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
-	extent_nfree_binshard_set(slab, bin_info->nregs, binshard);
-	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
-
-	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
+	bool guarded = san_slab_extent_decide_guard(tsdn,
+	    arena_get_ehooks(arena));
+	edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
+	    /* alignment */ PAGE, /* slab */ true, /* szind */ binind,
+	    /* zero */ false, guarded, &deferred_work_generated);
+
+	if (deferred_work_generated) {
+		arena_handle_deferred_work(tsdn, arena);
+	}
+
+	if (slab == NULL) {
+		return NULL;
+	}
+
+	assert(edata_slab_get(slab));
+
+	/* Initialize slab internals. */
+	slab_data_t *slab_data = edata_slab_data_get(slab);
+	edata_nfree_binshard_set(slab, bin_info->nregs, binshard);
+	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
 
 	return slab;
 }
-static extent_t *
-arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-    szind_t binind, unsigned binshard) {
-	extent_t *slab;
-	const bin_info_t *bin_info;
-
-	/* Look for a usable slab. */
-	slab = arena_bin_slabs_nonfull_tryget(bin);
-	if (slab != NULL) {
-		return slab;
-	}
-	/* No existing slabs have any space available. */
-
-	bin_info = &bin_infos[binind];
-
-	/* Allocate a new slab. */
-	malloc_mutex_unlock(tsdn, &bin->lock);
-	/******************************/
-	slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info);
-	/********************************/
-	malloc_mutex_lock(tsdn, &bin->lock);
-	if (slab != NULL) {
-		if (config_stats) {
-			bin->stats.nslabs++;
-			bin->stats.curslabs++;
-		}
-		return slab;
-	}
-
-	/*
-	 * arena_slab_alloc() failed, but another thread may have made
-	 * sufficient memory available while this one dropped bin->lock above,
-	 * so search one more time.
-	 */
-	slab = arena_bin_slabs_nonfull_tryget(bin);
-	if (slab != NULL) {
-		return slab;
-	}
-
-	return NULL;
-}
+/*
+ * Before attempting the _with_fresh_slab approaches below, the _no_fresh_slab
+ * variants (i.e. through slabcur and nonfull) must be tried first.
+ */
+static void
+arena_bin_refill_slabcur_with_fresh_slab(tsdn_t *tsdn, arena_t *arena,
+    bin_t *bin, szind_t binind, edata_t *fresh_slab) {
+	malloc_mutex_assert_owner(tsdn, &bin->lock);
+	/* Only called after slabcur and nonfull both failed. */
+	assert(bin->slabcur == NULL);
+	assert(edata_heap_first(&bin->slabs_nonfull) == NULL);
+	assert(fresh_slab != NULL);
+
+	/* A new slab from arena_slab_alloc() */
+	assert(edata_nfree_get(fresh_slab) == bin_infos[binind].nregs);
+	if (config_stats) {
+		bin->stats.nslabs++;
+		bin->stats.curslabs++;
+	}
+	bin->slabcur = fresh_slab;
+}
+
+/* Refill slabcur and then alloc using the fresh slab */
+static void *
+arena_bin_malloc_with_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+    szind_t binind, edata_t *fresh_slab) {
+	malloc_mutex_assert_owner(tsdn, &bin->lock);
+	arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena, bin, binind,
+	    fresh_slab);
+
+	return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
+}
+/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
+static bool
+arena_bin_refill_slabcur_no_fresh_slab(tsdn_t *tsdn, arena_t *arena,
+    bin_t *bin) {
+	malloc_mutex_assert_owner(tsdn, &bin->lock);
+	/* Only called after arena_slab_reg_alloc[_batch] failed. */
+	assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0);
+
+	if (bin->slabcur != NULL) {
+		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
+	}
+
+	/* Look for a usable slab. */
+	bin->slabcur = arena_bin_slabs_nonfull_tryget(bin);
+	assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) > 0);
+
+	return (bin->slabcur == NULL);
+}
-static void *
-arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-    szind_t binind, unsigned binshard) {
-	const bin_info_t *bin_info;
-	extent_t *slab;
-
-	bin_info = &bin_infos[binind];
-	if (!arena_is_auto(arena) && bin->slabcur != NULL) {
-		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
-		bin->slabcur = NULL;
-	}
-	slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard);
-	if (bin->slabcur != NULL) {
-		/*
-		 * Another thread updated slabcur while this one ran without the
-		 * bin lock in arena_bin_nonfull_slab_get().
-		 */
-		if (extent_nfree_get(bin->slabcur) > 0) {
-			void *ret = arena_slab_reg_alloc(bin->slabcur,
-			    bin_info);
-			if (slab != NULL) {
-				/*
-				 * arena_slab_alloc() may have allocated slab,
-				 * or it may have been pulled from
-				 * slabs_nonfull.  Therefore it is unsafe to
-				 * make any assumptions about how slab has
-				 * previously been used, and
-				 * arena_bin_lower_slab() must be called, as if
-				 * a region were just deallocated from the slab.
-				 */
-				if (extent_nfree_get(slab) == bin_info->nregs) {
-					arena_dalloc_bin_slab(tsdn, arena, slab,
-					    bin);
-				} else {
-					arena_bin_lower_slab(tsdn, arena, slab,
-					    bin);
-				}
-			}
-			return ret;
-		}
-
-		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
-		bin->slabcur = NULL;
-	}
-
-	if (slab == NULL) {
-		return NULL;
-	}
-	bin->slabcur = slab;
-
-	assert(extent_nfree_get(bin->slabcur) > 0);
-
-	return arena_slab_reg_alloc(slab, bin_info);
-}
-
-/* Choose a bin shard and return the locked bin. */
 bin_t *
-arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
-    unsigned *binshard) {
-	bin_t *bin;
+arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+    unsigned *binshard_p) {
+	unsigned binshard;
 	if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
-		*binshard = 0;
+		binshard = 0;
 	} else {
-		*binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
+		binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
 	}
-	assert(*binshard < bin_infos[binind].n_shards);
-	bin = &arena->bins[binind].bin_shards[*binshard];
-	malloc_mutex_lock(tsdn, &bin->lock);
-
-	return bin;
+	assert(binshard < bin_infos[binind].n_shards);
+	if (binshard_p != NULL) {
+		*binshard_p = binshard;
+	}
+	return arena_get_bin(arena, binind, binshard);
 }
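arena_bin_choose() above maps the calling thread to one of a size class's bin shards so that concurrent threads spread across different bin locks. As a hedged, standalone illustration of that idea (the hashing below is a made-up stand-in for jemalloc's per-thread binshard assignment stored in TSD):

#include <stdint.h>

static unsigned
choose_bin_shard(uint32_t thread_id, unsigned n_shards) {
    if (n_shards <= 1) {
        return 0;
    }
    /* Cheap multiplicative hash to spread thread ids over shards. */
    uint32_t h = thread_id * 2654435761u;
    return (unsigned)(h % n_shards);
}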
-void
-arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
-    cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
-	unsigned i, nfill, cnt;
-
-	assert(tbin->ncached == 0);
-
-	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
-		prof_idump(tsdn);
-	}
-
-	unsigned binshard;
-	bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
-
-	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
-	    tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
-		extent_t *slab;
-		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
-		    0) {
-			unsigned tofill = nfill - i;
-			cnt = tofill < extent_nfree_get(slab) ?
-				tofill : extent_nfree_get(slab);
-			arena_slab_reg_alloc_batch(
-			   slab, &bin_infos[binind], cnt,
-			   tbin->avail - nfill + i);
-		} else {
-			cnt = 1;
-			void *ptr = arena_bin_malloc_hard(tsdn, arena, bin,
-			    binind, binshard);
-			/*
-			 * OOM.  tbin->avail isn't yet filled down to its first
-			 * element, so the successful allocations (if any) must
-			 * be moved just before tbin->avail before bailing out.
-			 */
-			if (ptr == NULL) {
-				if (i > 0) {
-					memmove(tbin->avail - i,
-						tbin->avail - nfill,
-						i * sizeof(void *));
-				}
-				break;
-			}
-			/* Insert such that low regions get used first. */
-			*(tbin->avail - nfill + i) = ptr;
-		}
-		if (config_fill && unlikely(opt_junk_alloc)) {
-			for (unsigned j = 0; j < cnt; j++) {
-				void *ptr = *(tbin->avail - nfill + i + j);
-				arena_alloc_junk_small(ptr, &bin_infos[binind],
-				    true);
-			}
-		}
-	}
-	if (config_stats) {
-		bin->stats.nmalloc += i;
-		bin->stats.nrequests += tbin->tstats.nrequests;
-		bin->stats.curregs += i;
-		bin->stats.nfills++;
-		tbin->tstats.nrequests = 0;
-	}
-	malloc_mutex_unlock(tsdn, &bin->lock);
-	tbin->ncached = i;
-	arena_decay_tick(tsdn, arena);
-}
+void
+arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
+    cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
+    const unsigned nfill) {
+	assert(cache_bin_ncached_get_local(cache_bin, cache_bin_info) == 0);
+
+	const bin_info_t *bin_info = &bin_infos[binind];
+
+	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill);
+	cache_bin_init_ptr_array_for_fill(cache_bin, cache_bin_info, &ptrs,
+	    nfill);
+	/*
+	 * Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull
+	 * slabs.  After both are exhausted, new slabs will be allocated through
+	 * arena_slab_alloc().
+	 *
+	 * Bin lock is only taken / released right before / after the while(...)
+	 * refill loop, with new slab allocation (which has its own locking)
+	 * kept outside of the loop.  This setup facilitates flat combining, at
+	 * the cost of the nested loop (through goto label_refill).
+	 *
+	 * To optimize for cases with contention and limited resources
+	 * (e.g. hugepage-backed or non-overcommit arenas), each fill-iteration
+	 * gets one chance of slab_alloc, and a retry of bin local resources
+	 * after the slab allocation (regardless if slab_alloc failed, because
+	 * the bin lock is dropped during the slab allocation).
+	 *
+	 * In other words, new slab allocation is allowed, as long as there was
+	 * progress since the previous slab_alloc.  This is tracked with
+	 * made_progress below, initialized to true to jump start the first
+	 * iteration.
+	 *
+	 * In other words (again), the loop will only terminate early (i.e. stop
+	 * with filled < nfill) after going through the three steps: a) bin
+	 * local exhausted, b) unlock and slab_alloc returns null, c) re-lock
+	 * and bin local fails again.
+	 */
+	bool made_progress = true;
+	edata_t *fresh_slab = NULL;
+	bool alloc_and_retry = false;
+	unsigned filled = 0;
+	unsigned binshard;
+	bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
+
+label_refill:
+	malloc_mutex_lock(tsdn, &bin->lock);
+
+	while (filled < nfill) {
+		/* Try batch-fill from slabcur first. */
+		edata_t *slabcur = bin->slabcur;
+		if (slabcur != NULL && edata_nfree_get(slabcur) > 0) {
+			unsigned tofill = nfill - filled;
+			unsigned nfree = edata_nfree_get(slabcur);
+			unsigned cnt = tofill < nfree ? tofill : nfree;
+
+			arena_slab_reg_alloc_batch(slabcur, bin_info, cnt,
+			    &ptrs.ptr[filled]);
+			made_progress = true;
+			filled += cnt;
+			continue;
+		}
+		/* Next try refilling slabcur from nonfull slabs. */
+		if (!arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
+			assert(bin->slabcur != NULL);
+			continue;
+		}
+
+		/* Then see if a new slab was reserved already. */
+		if (fresh_slab != NULL) {
+			arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena,
+			    bin, binind, fresh_slab);
+			assert(bin->slabcur != NULL);
+			fresh_slab = NULL;
+			continue;
+		}
+
+		/* Try slab_alloc if made progress (or never did slab_alloc). */
+		if (made_progress) {
+			assert(bin->slabcur == NULL);
+			assert(fresh_slab == NULL);
+			alloc_and_retry = true;
+			/* Alloc a new slab then come back. */
+			break;
+		}
+
+		/* OOM. */
+
+		assert(fresh_slab == NULL);
+		assert(!alloc_and_retry);
+		break;
+	} /* while (filled < nfill) loop. */
+
+	if (config_stats && !alloc_and_retry) {
+		bin->stats.nmalloc += filled;
+		bin->stats.nrequests += cache_bin->tstats.nrequests;
+		bin->stats.curregs += filled;
+		bin->stats.nfills++;
+		cache_bin->tstats.nrequests = 0;
+	}
+	malloc_mutex_unlock(tsdn, &bin->lock);
+
+	if (alloc_and_retry) {
+		assert(fresh_slab == NULL);
+		assert(filled < nfill);
+		assert(made_progress);
+
+		fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
+		    bin_info);
+		/* fresh_slab NULL case handled in the for loop. */
+
+		alloc_and_retry = false;
+		made_progress = false;
+		goto label_refill;
+	}
+	assert(filled == nfill || (fresh_slab == NULL && !made_progress));
+
+	/* Release if allocated but not used. */
+	if (fresh_slab != NULL) {
+		assert(edata_nfree_get(fresh_slab) == bin_info->nregs);
+		arena_slab_dalloc(tsdn, arena, fresh_slab);
+		fresh_slab = NULL;
+	}
+
+	cache_bin_finish_fill(cache_bin, cache_bin_info, &ptrs, filled);
+	arena_decay_tick(tsdn, arena);
+}
+
+size_t
+arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+    void **ptrs, size_t nfill, bool zero) {
+	assert(binind < SC_NBINS);
+	const bin_info_t *bin_info = &bin_infos[binind];
+	const size_t nregs = bin_info->nregs;
+	assert(nregs > 0);
+	const size_t usize = bin_info->reg_size;
+
+	const bool manual_arena = !arena_is_auto(arena);
+	unsigned binshard;
+	bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
+
+	size_t nslab = 0;
+	size_t filled = 0;
+	edata_t *slab = NULL;
+	edata_list_active_t fulls;
+	edata_list_active_init(&fulls);
+
+	while (filled < nfill && (slab = arena_slab_alloc(tsdn, arena, binind,
+	    binshard, bin_info)) != NULL) {
+		assert((size_t)edata_nfree_get(slab) == nregs);
+		++nslab;
+		size_t batch = nfill - filled;
+		if (batch > nregs) {
+			batch = nregs;
+		}
+		assert(batch > 0);
+		arena_slab_reg_alloc_batch(slab, bin_info, (unsigned)batch,
+		    &ptrs[filled]);
+		assert(edata_addr_get(slab) == ptrs[filled]);
+		if (zero) {
+			memset(ptrs[filled], 0, batch * usize);
+		}
+		filled += batch;
+		if (batch == nregs) {
+			if (manual_arena) {
+				edata_list_active_append(&fulls, slab);
+			}
+			slab = NULL;
+		}
+	}
+
+	malloc_mutex_lock(tsdn, &bin->lock);
+	/*
+	 * Only the last slab can be non-empty, and the last slab is non-empty
+	 * iff slab != NULL.
+	 */
+	if (slab != NULL) {
+		arena_bin_lower_slab(tsdn, arena, slab, bin);
+	}
+	if (manual_arena) {
+		edata_list_active_concat(&bin->slabs_full, &fulls);
+	}
+	assert(edata_list_active_empty(&fulls));
+	if (config_stats) {
+		bin->stats.nslabs += nslab;
+		bin->stats.curslabs += nslab;
+		bin->stats.nmalloc += filled;
+		bin->stats.nrequests += filled;
+		bin->stats.curregs += filled;
+	}
+	malloc_mutex_unlock(tsdn, &bin->lock);
+
+	arena_decay_tick(tsdn, arena);
+	return filled;
+}
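The long comment inside arena_cache_bin_fill_small() above describes a refill strategy: consume bin-local resources under the lock, drop the lock to allocate a fresh slab, and allow one more local retry per allocation as long as progress was made. A self-contained toy sketch of that control flow follows; everything here is illustrative, not jemalloc code, and the "slab allocation" is simulated by topping up a counter:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t lock;
    unsigned local_avail;	/* stand-in for slabcur + nonfull slabs */
} toy_bin_t;

/* Stand-in for arena_slab_alloc(); always succeeds in this toy. */
static void
toy_slab_alloc(toy_bin_t *bin, unsigned nregs) {
    pthread_mutex_lock(&bin->lock);
    bin->local_avail += nregs;
    pthread_mutex_unlock(&bin->lock);
}

static unsigned
toy_refill(toy_bin_t *bin, unsigned nfill) {
    unsigned filled = 0;
    bool made_progress = true;	/* jump-starts the first slab_alloc */

    while (filled < nfill) {
        pthread_mutex_lock(&bin->lock);
        unsigned want = nfill - filled;
        unsigned take = bin->local_avail < want ? bin->local_avail : want;
        bin->local_avail -= take;
        filled += take;
        pthread_mutex_unlock(&bin->lock);

        if (take > 0) {
            made_progress = true;
            continue;
        }
        if (!made_progress) {
            break;	/* locals failed twice in a row: give up */
        }
        /* Allocate with the bin lock dropped, then retry locals once. */
        made_progress = false;
        toy_slab_alloc(bin, /* nregs */ 8);
    }
    return filled;
}

int
main(void) {
    toy_bin_t bin = { PTHREAD_MUTEX_INITIALIZER, 3 };
    printf("filled %u of 20\n", toy_refill(&bin, 20));
    return 0;
}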
-void
-arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
-	if (!zero) {
-		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
-	}
-}
-
-static void
-arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
-	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
-}
-arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
-    arena_dalloc_junk_small_impl;
+/*
+ * Without allocating a new slab, try arena_slab_reg_alloc() and re-fill
+ * bin->slabcur if necessary.
+ */
+static void *
+arena_bin_malloc_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+    szind_t binind) {
+	malloc_mutex_assert_owner(tsdn, &bin->lock);
+	if (bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0) {
+		if (arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
+			return NULL;
+		}
+	}
+
+	assert(bin->slabcur != NULL && edata_nfree_get(bin->slabcur) > 0);
+	return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
+}
 static void *
 arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
-	void *ret;
-	bin_t *bin;
-	size_t usize;
-	extent_t *slab;
-
 	assert(binind < SC_NBINS);
-	usize = sz_index2size(binind);
+	const bin_info_t *bin_info = &bin_infos[binind];
+	size_t usize = sz_index2size(binind);
 	unsigned binshard;
-	bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
-
-	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
-		ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
-	} else {
-		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
-	}
-
+	bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
+
+	malloc_mutex_lock(tsdn, &bin->lock);
+	edata_t *fresh_slab = NULL;
+	void *ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
 	if (ret == NULL) {
 		malloc_mutex_unlock(tsdn, &bin->lock);
-		return NULL;
+		/******************************/
+		fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
+		    bin_info);
+		/********************************/
+		malloc_mutex_lock(tsdn, &bin->lock);
+		/* Retry since the lock was dropped. */
+		ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
+		if (ret == NULL) {
+			if (fresh_slab == NULL) {
+				/* OOM */
+				malloc_mutex_unlock(tsdn, &bin->lock);
+				return NULL;
+			}
+			ret = arena_bin_malloc_with_fresh_slab(tsdn, arena,
+			    bin, binind, fresh_slab);
+			fresh_slab = NULL;
+		}
 	}
-
 	if (config_stats) {
 		bin->stats.nmalloc++;
 		bin->stats.nrequests++;
 		bin->stats.curregs++;
 	}
+
 	malloc_mutex_unlock(tsdn, &bin->lock);
-	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
-		prof_idump(tsdn);
-	}
-
-	if (!zero) {
-		if (config_fill) {
-			if (unlikely(opt_junk_alloc)) {
-				arena_alloc_junk_small(ret,
-				    &bin_infos[binind], false);
-			} else if (unlikely(opt_zero)) {
-				memset(ret, 0, usize);
-			}
-		}
-	} else {
-		if (config_fill && unlikely(opt_junk_alloc)) {
-			arena_alloc_junk_small(ret, &bin_infos[binind], true);
-		}
+	if (fresh_slab != NULL) {
+		arena_slab_dalloc(tsdn, arena, fresh_slab);
+	}
+	if (zero) {
 		memset(ret, 0, usize);
 	}
-
 	arena_decay_tick(tsdn, arena);
+
 	return ret;
 }
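Aside: a minimal, self-contained sketch (not jemalloc code) of the locking pattern the new arena_malloc_small() uses above: drop the bin lock before the expensive slab allocation, retake it, then retry the fast path in case another thread refilled in the meantime. All names (toy_bin_t, toy_pop, toy_alloc) are hypothetical.

/*
 * Sketch only: "unlock, allocate, relock, retry" with a toy free list.
 */
#include <pthread.h>
#include <stdlib.h>

typedef struct toy_node { struct toy_node *next; } toy_node_t;

typedef struct {
    pthread_mutex_t lock;
    toy_node_t *free_head;  /* protected by lock */
} toy_bin_t;

/* Fast path: pop a cached node; caller must hold bin->lock. */
static toy_node_t *
toy_pop(toy_bin_t *bin) {
    toy_node_t *n = bin->free_head;
    if (n != NULL) {
        bin->free_head = n->next;
    }
    return n;
}

static toy_node_t *
toy_alloc(toy_bin_t *bin) {
    pthread_mutex_lock(&bin->lock);
    toy_node_t *n = toy_pop(bin);
    if (n == NULL) {
        /* Slow path: allocate outside the lock to keep it short. */
        pthread_mutex_unlock(&bin->lock);
        toy_node_t *fresh = malloc(sizeof(*fresh));
        pthread_mutex_lock(&bin->lock);
        /* Retry: another thread may have refilled while unlocked. */
        n = toy_pop(bin);
        if (n == NULL) {
            n = fresh;              /* consume the fresh block */
            fresh = NULL;
        } else if (fresh != NULL) {
            /* Lost the race; keep the surplus block for later. */
            fresh->next = bin->free_head;
            bin->free_head = fresh;
        }
    }
    pthread_mutex_unlock(&bin->lock);
    return n;   /* may be NULL on OOM */
}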
...
@@ -1533,10 +1210,17 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     bool zero, tcache_t *tcache) {
 	void *ret;
 
-	if (usize <= SC_SMALL_MAXCLASS
-	    && (alignment < PAGE
-	    || (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
+	if (usize <= SC_SMALL_MAXCLASS) {
 		/* Small; alignment doesn't require special slab placement. */
+
+		/* usize should be a result of sz_sa2u() */
+		assert((usize & (alignment - 1)) == 0);
+
+		/*
+		 * Small usize can't come from an alignment larger than a page.
+		 */
+		assert(alignment <= PAGE);
+
 		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
 		    zero, tcache, true);
 	} else {
...
@@ -1560,33 +1244,22 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
 		safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
 	}
 
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-	extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
-	    (uintptr_t)ptr, true);
-	arena_t *arena = extent_arena_get(extent);
+	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
 
 	szind_t szind = sz_size2index(usize);
-	extent_szind_set(extent, szind);
-	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
-	    szind, false);
-
-	prof_accum_cancel(tsdn, &arena->prof_accum, usize);
+	edata_szind_set(edata, szind);
+	emap_remap(tsdn, &arena_emap_global, edata, szind, /* slab */ false);
 
 	assert(isalloc(tsdn, ptr) == usize);
 }
 
 static size_t
-arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
+arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
 	cassert(config_prof);
 	assert(ptr != NULL);
 
-	extent_szind_set(extent, SC_NBINS);
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
-	    SC_NBINS, false);
+	edata_szind_set(edata, SC_NBINS);
+	emap_remap(tsdn, &arena_emap_global, edata, SC_NBINS, /* slab */ false);
 
 	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
...
@@ -1599,9 +1272,9 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 	cassert(config_prof);
 	assert(opt_prof);
 
-	extent_t *extent = iealloc(tsdn, ptr);
-	size_t usize = extent_usize_get(extent);
-	size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr);
+	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+	size_t usize = edata_usize_get(edata);
+	size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr);
 	if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
 		/*
 		 * Currently, we only do redzoning for small sampled
...
@@ -1614,17 +1287,17 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
 		    sz_size2index(bumped_usize), slow_path);
 	} else {
-		large_dalloc(tsdn, extent);
+		large_dalloc(tsdn, edata);
 	}
 }
 static void
-arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
+arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) {
 	/* Dissociate slab from bin. */
 	if (slab == bin->slabcur) {
 		bin->slabcur = NULL;
 	} else {
-		szind_t binind = extent_szind_get(slab);
+		szind_t binind = edata_szind_get(slab);
 		const bin_info_t *bin_info = &bin_infos[binind];
 
 		/*
...
@@ -1641,24 +1314,9 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
 }
 
 static void
-arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-    bin_t *bin) {
-	assert(slab != bin->slabcur);
-
-	malloc_mutex_unlock(tsdn, &bin->lock);
-	/******************************/
-	arena_slab_dalloc(tsdn, arena, slab);
-	/****************************/
-	malloc_mutex_lock(tsdn, &bin->lock);
-	if (config_stats) {
-		bin->stats.curslabs--;
-	}
-}
-
-static void
-arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
     bin_t *bin) {
-	assert(extent_nfree_get(slab) > 0);
+	assert(edata_nfree_get(slab) > 0);
 
 	/*
 	 * Make sure that if bin->slabcur is non-NULL, it refers to the
...
@@ -1666,9 +1324,9 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
 	 * than proactively keeping it pointing at the oldest/lowest non-full
 	 * slab.
 	 */
-	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
+	if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) {
 		/* Switch slabcur. */
-		if (extent_nfree_get(bin->slabcur) > 0) {
+		if (edata_nfree_get(bin->slabcur) > 0) {
 			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
 		} else {
 			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
...
@@ -1683,56 +1341,54 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
 }
 
 static void
-arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-    szind_t binind, extent_t *slab, void *ptr, bool junked) {
-	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
-	const bin_info_t *bin_info = &bin_infos[binind];
-
-	if (!junked && config_fill && unlikely(opt_junk_free)) {
-		arena_dalloc_junk_small(ptr, bin_info);
-	}
-
-	arena_slab_reg_dalloc(slab, slab_data, ptr);
-	unsigned nfree = extent_nfree_get(slab);
-	if (nfree == bin_info->nregs) {
-		arena_dissociate_bin_slab(arena, slab, bin);
-		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
-	} else if (nfree == 1 && slab != bin->slabcur) {
-		arena_bin_slabs_full_remove(arena, bin, slab);
-		arena_bin_lower_slab(tsdn, arena, slab, bin);
-	}
-
-	if (config_stats) {
-		bin->stats.ndalloc++;
-		bin->stats.curregs--;
-	}
-}
-
-void
-arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-    szind_t binind, extent_t *extent, void *ptr) {
-	arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
-	    true);
+arena_dalloc_bin_slab_prepare(tsdn_t *tsdn, edata_t *slab, bin_t *bin) {
+	malloc_mutex_assert_owner(tsdn, &bin->lock);
+
+	assert(slab != bin->slabcur);
+	if (config_stats) {
+		bin->stats.curslabs--;
+	}
+}
+
+void
+arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
+    edata_t *slab, bin_t *bin) {
+	arena_dissociate_bin_slab(arena, slab, bin);
+	arena_dalloc_bin_slab_prepare(tsdn, slab, bin);
+}
+
+void
+arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
+    edata_t *slab, bin_t *bin) {
+	arena_bin_slabs_full_remove(arena, bin, slab);
+	arena_bin_lower_slab(tsdn, arena, slab, bin);
 }
 
 static void
-arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
-	szind_t binind = extent_szind_get(extent);
-	unsigned binshard = extent_binshard_get(extent);
-	bin_t *bin = &arena->bins[binind].bin_shards[binshard];
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
+	szind_t binind = edata_szind_get(edata);
+	unsigned binshard = edata_binshard_get(edata);
+	bin_t *bin = arena_get_bin(arena, binind, binshard);
 
 	malloc_mutex_lock(tsdn, &bin->lock);
-	arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
-	    false);
+	arena_dalloc_bin_locked_info_t info;
+	arena_dalloc_bin_locked_begin(&info, binind);
+	bool ret = arena_dalloc_bin_locked_step(tsdn, arena, bin,
+	    &info, binind, edata, ptr);
+	arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
 	malloc_mutex_unlock(tsdn, &bin->lock);
+
+	if (ret) {
+		arena_slab_dalloc(tsdn, arena, edata);
+	}
 }
 
 void
 arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
-	extent_t *extent = iealloc(tsdn, ptr);
-	arena_t *arena = extent_arena_get(extent);
+	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+	arena_t *arena = arena_get_from_edata(edata);
 
-	arena_dalloc_bin(tsdn, arena, extent, ptr);
+	arena_dalloc_bin(tsdn, arena, edata, ptr);
 	arena_decay_tick(tsdn, arena);
 }
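Aside: a minimal sketch (not jemalloc code) of the shape of the new deallocation path above, where the bin lock only covers cheap bookkeeping and the result signals whether expensive teardown should run after the unlock. All names are hypothetical.

/*
 * Sketch only: decide under the lock, release heavy resources after it.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
    pthread_mutex_t lock;
    size_t live_objects;    /* protected by lock */
    void *backing;          /* freed only when live_objects hits zero */
} toy_slab_t;

static void
toy_dalloc(toy_slab_t *slab) {
    pthread_mutex_lock(&slab->lock);
    /* "locked step": cheap bookkeeping only. */
    bool release_backing = (--slab->live_objects == 0);
    pthread_mutex_unlock(&slab->lock);

    if (release_backing) {
        /* Expensive work (free/munmap) runs with no lock held. */
        free(slab->backing);
        slab->backing = NULL;
    }
}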
...
@@ -1743,7 +1399,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
 	/* Calls with non-zero extra had to clamp extra. */
 	assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
 
-	extent_t *extent = iealloc(tsdn, ptr);
+	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
 	if (unlikely(size > SC_LARGE_MAXCLASS)) {
 		ret = true;
 		goto done;
...
@@ -1766,18 +1422,19 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
 			goto done;
 		}
 
-		arena_decay_tick(tsdn, extent_arena_get(extent));
+		arena_t *arena = arena_get_from_edata(edata);
+		arena_decay_tick(tsdn, arena);
 		ret = false;
 	} else if (oldsize >= SC_LARGE_MINCLASS
 	    && usize_max >= SC_LARGE_MINCLASS) {
-		ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
+		ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max,
 		    zero);
 	} else {
 		ret = true;
 	}
 done:
-	assert(extent == iealloc(tsdn, ptr));
-	*newsize = extent_usize_get(extent);
+	assert(edata == emap_edata_lookup(tsdn, &arena_emap_global, ptr));
+	*newsize = edata_usize_get(edata);
 
 	return ret;
 }
...
@@ -1800,7 +1457,7 @@ void *
 arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
     size_t size, size_t alignment, bool zero, tcache_t *tcache,
     hook_ralloc_args_t *hook_args) {
-	size_t usize = sz_s2u(size);
+	size_t usize = alignment == 0 ? sz_s2u(size) : sz_sa2u(size, alignment);
 	if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
 		return NULL;
 	}
...
@@ -1850,6 +1507,29 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
 	return ret;
 }
 
+ehooks_t *
+arena_get_ehooks(arena_t *arena) {
+	return base_ehooks_get(arena->base);
+}
+
+extent_hooks_t *
+arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
+    extent_hooks_t *extent_hooks) {
+	background_thread_info_t *info;
+	if (have_background_thread) {
+		info = arena_background_thread_info_get(arena);
+		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+	}
+	/* No using the HPA now that we have the custom hooks. */
+	pa_shard_disable_hpa(tsd_tsdn(tsd), &arena->pa_shard);
+	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
+	if (have_background_thread) {
+		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+	}
+
+	return ret;
+}
+
 dss_prec_t
 arena_dss_prec_get(arena_t *arena) {
 	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
...
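Aside: a sketch of how an application reaches the new arena_set_extent_hooks() above. Custom extent hooks go in through the public mallctl interface ("arena.<i>.extent_hooks"); the hook table itself is elided here, and the example assumes an unprefixed jemalloc build where the symbol is mallctl rather than je_mallctl. Error handling is deliberately minimal.

/*
 * Sketch only: read the extent hooks of a freshly created arena.  Writing a
 * custom extent_hooks_t through newp/newlen would land in
 * arena_set_extent_hooks() and disable the HPA for that arena.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
    unsigned arena_ind;
    size_t sz = sizeof(arena_ind);
    /* Create a fresh arena to inspect. */
    if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
        return 1;
    }

    char cmd[64];
    snprintf(cmd, sizeof(cmd), "arena.%u.extent_hooks", arena_ind);

    /* Read back the currently installed hooks (the defaults). */
    extent_hooks_t *hooks;
    size_t hooks_sz = sizeof(hooks);
    if (mallctl(cmd, &hooks, &hooks_sz, NULL, 0) != 0) {
        return 1;
    }
    printf("arena %u hooks at %p\n", arena_ind, (void *)hooks);
    return 0;
}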
@@ -1871,7 +1551,7 @@ arena_dirty_decay_ms_default_get(void) {
 bool
 arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
-	if (!arena_decay_ms_valid(decay_ms)) {
+	if (!decay_ms_valid(decay_ms)) {
 		return true;
 	}
 	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
...
@@ -1885,7 +1565,7 @@ arena_muzzy_decay_ms_default_get(void) {
 bool
 arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
-	if (!arena_decay_ms_valid(decay_ms)) {
+	if (!decay_ms_valid(decay_ms)) {
 		return true;
 	}
 	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
...
@@ -1896,26 +1576,8 @@ bool
 arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
     size_t *new_limit) {
 	assert(opt_retain);
 
-	pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
-	if (new_limit != NULL) {
-		size_t limit = *new_limit;
-		/* Grow no more than the new limit. */
-		if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
-			return true;
-		}
-	}
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
-	if (old_limit != NULL) {
-		*old_limit = sz_pind2sz(arena->retain_grow_limit);
-	}
-	if (new_limit != NULL) {
-		arena->retain_grow_limit = new_ind;
-	}
-	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
-
-	return false;
+	return pac_retain_grow_limit_get_set(tsd_tsdn(tsd),
+	    &arena->pa_shard.pac, old_limit, new_limit);
 }
 
 unsigned
...
@@ -1933,13 +1595,8 @@ arena_nthreads_dec(arena_t *arena, bool internal) {
 	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
 }
 
-size_t
-arena_extent_sn_next(arena_t *arena) {
-	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
-}
-
 arena_t *
-arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
 	arena_t *arena;
 	base_t *base;
 	unsigned i;
...
@@ -1947,16 +1604,13 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	if (ind == 0) {
 		base = b0get();
 	} else {
-		base = base_new(tsdn, ind, extent_hooks);
+		base = base_new(tsdn, ind, config->extent_hooks,
+		    config->metadata_use_hooks);
 		if (base == NULL) {
 			return NULL;
 		}
 	}
 
-	unsigned nbins_total = 0;
-	for (i = 0; i < SC_NBINS; i++) {
-		nbins_total += bin_infos[i].n_shards;
-	}
 	size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
 	arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
 	if (arena == NULL) {
...
@@ -1980,110 +1634,56 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 		}
 	}
 
-	if (config_prof) {
-		if (prof_accum_init(tsdn, &arena->prof_accum)) {
-			goto label_error;
-		}
-	}
-
-	if (config_cache_oblivious) {
-		/*
-		 * A nondeterministic seed based on the address of arena reduces
-		 * the likelihood of lockstep non-uniform cache index
-		 * utilization among identical concurrent processes, but at the
-		 * cost of test repeatability.  For debug builds, instead use a
-		 * deterministic seed.
-		 */
-		atomic_store_zu(&arena->offset_state, config_debug ? ind :
-		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
-	}
-
-	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
-
 	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
 	    ATOMIC_RELAXED);
 
-	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
-
-	extent_list_init(&arena->large);
+	edata_list_active_init(&arena->large);
 	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
 	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
 		goto label_error;
 	}
 
-	/*
-	 * Delay coalescing for dirty extents despite the disruptive effect on
-	 * memory layout for best-fit extent allocation, since cached extents
-	 * are likely to be reused soon after deallocation, and the cost of
-	 * merging/splitting extents is non-trivial.
-	 */
-	if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
-	    true)) {
-		goto label_error;
-	}
-	/*
-	 * Coalesce muzzy extents immediately, because operations on them are in
-	 * the critical path much less often than for dirty extents.
-	 */
-	if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
-	    false)) {
-		goto label_error;
-	}
-	/*
-	 * Coalesce retained extents immediately, in part because they will
-	 * never be evicted (and therefore there's no opportunity for delayed
-	 * coalescing), but also because operations on retained extents are not
-	 * in the critical path.
-	 */
-	if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
-	    false)) {
-		goto label_error;
-	}
-
-	if (arena_decay_init(&arena->decay_dirty,
-	    arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
-		goto label_error;
-	}
-	if (arena_decay_init(&arena->decay_muzzy,
-	    arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
-		goto label_error;
-	}
-
-	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
-	arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS);
-	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
-	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
-		goto label_error;
-	}
-
-	extent_avail_new(&arena->extent_avail);
-	if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
-	    WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
+	nstime_t cur_time;
+	nstime_init_update(&cur_time);
+	if (pa_shard_init(tsdn, &arena->pa_shard, &arena_pa_central_global,
+	    &arena_emap_global, base, ind, &arena->stats.pa_shard_stats,
+	    LOCKEDINT_MTX(arena->stats.mtx), &cur_time, oversize_threshold,
+	    arena_dirty_decay_ms_default_get(),
+	    arena_muzzy_decay_ms_default_get())) {
 		goto label_error;
 	}
 
 	/* Initialize bins. */
-	uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t);
 	atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
-	for (i = 0; i < SC_NBINS; i++) {
-		unsigned nshards = bin_infos[i].n_shards;
-		arena->bins[i].bin_shards = (bin_t *)bin_addr;
-		bin_addr += nshards * sizeof(bin_t);
-		for (unsigned j = 0; j < nshards; j++) {
-			bool err = bin_init(&arena->bins[i].bin_shards[j]);
-			if (err) {
-				goto label_error;
-			}
-		}
-	}
-	assert(bin_addr == (uintptr_t)arena + arena_size);
+	for (i = 0; i < nbins_total; i++) {
+		bool err = bin_init(&arena->bins[i]);
+		if (err) {
+			goto label_error;
+		}
+	}
 
 	arena->base = base;
 	/* Set arena before creating background threads. */
 	arena_set(ind, arena);
+	arena->ind = ind;
 
-	nstime_init(&arena->create_time, 0);
-	nstime_update(&arena->create_time);
+	nstime_init_update(&arena->create_time);
+
+	/*
+	 * We turn on the HPA if set to.  There are two exceptions:
+	 * - Custom extent hooks (we should only return memory allocated from
+	 *   them in that case).
+	 * - Arena 0 initialization.  In this case, we're mid-bootstrapping, and
+	 *   so arena_hpa_global is not yet initialized.
+	 */
+	if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) &&
+	    ind != 0) {
+		hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
+		hpa_shard_opts.deferral_allowed = background_thread_enabled();
+		if (pa_shard_enable_hpa(tsdn, &arena->pa_shard,
+		    &hpa_shard_opts, &opt_hpa_sec_opts)) {
+			goto label_error;
+		}
+	}
 
 	/* We don't support reentrancy for arena 0 bootstrapping. */
 	if (ind != 0) {
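Aside: a tiny standalone sketch of the seeding policy described in the removed config_cache_oblivious comment above. Debug builds favor repeatability (seed from the arena index); release builds favor decorrelating identical concurrent processes (seed from the arena's own address). The helper name is hypothetical.

/*
 * Sketch only: deterministic vs address-derived offset seed.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

static size_t
choose_offset_seed(const void *arena, unsigned ind, bool debug_build) {
    /* Deterministic in debug builds so test runs are reproducible. */
    if (debug_build) {
        return (size_t)ind;
    }
    /* Otherwise derive the seed from the arena's own address. */
    return (size_t)(uintptr_t)arena;
}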
...
@@ -2129,10 +1729,12 @@ arena_choose_huge(tsd_t *tsd) {
 	 * expected for huge allocations.
 	 */
 	if (arena_dirty_decay_ms_default_get() > 0) {
-		arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
+		arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
+		    extent_state_dirty, 0);
 	}
 	if (arena_muzzy_decay_ms_default_get() > 0) {
-		arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
+		arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
+		    extent_state_muzzy, 0);
 	}
 }
...
@@ -2167,8 +1769,8 @@ arena_is_huge(unsigned arena_ind) {
 	return (arena_ind == huge_arena_ind);
 }
 
-void
-arena_boot(sc_data_t *sc_data) {
+bool
+arena_boot(sc_data_t *sc_data, base_t *base, bool hpa) {
 	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
 	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
 	for (unsigned i = 0; i < SC_NBINS; i++) {
...
@@ -2176,12 +1778,20 @@ arena_boot(sc_data_t *sc_data) {
 		div_init(&arena_binind_div_info[i],
 		    (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
 	}
+
+	uint32_t cur_offset = (uint32_t)offsetof(arena_t, bins);
+	for (szind_t i = 0; i < SC_NBINS; i++) {
+		arena_bin_offsets[i] = cur_offset;
+		nbins_total += bin_infos[i].n_shards;
+		cur_offset += (uint32_t)(bin_infos[i].n_shards * sizeof(bin_t));
+	}
+	return pa_central_init(&arena_pa_central_global, base, hpa,
+	    &hpa_hooks_default);
 }
 void
 arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
-	malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
-	malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
+	pa_shard_prefork0(tsdn, &arena->pa_shard);
 }
 
 void
...
@@ -2193,59 +1803,50 @@ arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
 void
 arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
-	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
+	pa_shard_prefork2(tsdn, &arena->pa_shard);
 }
 
 void
 arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
-	extents_prefork(tsdn, &arena->extents_dirty);
-	extents_prefork(tsdn, &arena->extents_muzzy);
-	extents_prefork(tsdn, &arena->extents_retained);
+	pa_shard_prefork3(tsdn, &arena->pa_shard);
 }
 
 void
 arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
-	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
+	pa_shard_prefork4(tsdn, &arena->pa_shard);
 }
 
 void
 arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
-	base_prefork(tsdn, arena->base);
+	pa_shard_prefork5(tsdn, &arena->pa_shard);
 }
 
 void
 arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
-	malloc_mutex_prefork(tsdn, &arena->large_mtx);
+	base_prefork(tsdn, arena->base);
 }
 
 void
 arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
-	for (unsigned i = 0; i < SC_NBINS; i++) {
-		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
-			bin_prefork(tsdn, &arena->bins[i].bin_shards[j]);
-		}
-	}
+	malloc_mutex_prefork(tsdn, &arena->large_mtx);
+}
+
+void
+arena_prefork8(tsdn_t *tsdn, arena_t *arena) {
+	for (unsigned i = 0; i < nbins_total; i++) {
+		bin_prefork(tsdn, &arena->bins[i]);
+	}
 }
 void
 arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
-	unsigned i;
-
-	for (i = 0; i < SC_NBINS; i++) {
-		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
-			bin_postfork_parent(tsdn,
-			    &arena->bins[i].bin_shards[j]);
-		}
-	}
+	for (unsigned i = 0; i < nbins_total; i++) {
+		bin_postfork_parent(tsdn, &arena->bins[i]);
+	}
+
 	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
 	base_postfork_parent(tsdn, arena->base);
-	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
-	extents_postfork_parent(tsdn, &arena->extents_dirty);
-	extents_postfork_parent(tsdn, &arena->extents_muzzy);
-	extents_postfork_parent(tsdn, &arena->extents_retained);
-	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
-	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
-	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
+	pa_shard_postfork_parent(tsdn, &arena->pa_shard);
 	if (config_stats) {
 		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
 	}
...
@@ -2253,8 +1854,6 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
 void
 arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
-	unsigned i;
-
 	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
 	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
 	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
...
@@ -2266,32 +1865,26 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
 	if (config_stats) {
 		ql_new(&arena->tcache_ql);
 		ql_new(&arena->cache_bin_array_descriptor_ql);
-		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
-		if (tcache != NULL && tcache->arena == arena) {
-			ql_elm_new(tcache, link);
-			ql_tail_insert(&arena->tcache_ql, tcache, link);
+		tcache_slow_t *tcache_slow = tcache_slow_get(tsdn_tsd(tsdn));
+		if (tcache_slow != NULL && tcache_slow->arena == arena) {
+			tcache_t *tcache = tcache_slow->tcache;
+			ql_elm_new(tcache_slow, link);
+			ql_tail_insert(&arena->tcache_ql, tcache_slow, link);
 			cache_bin_array_descriptor_init(
-			    &tcache->cache_bin_array_descriptor,
-			    tcache->bins_small, tcache->bins_large);
+			    &tcache_slow->cache_bin_array_descriptor,
+			    tcache->bins);
 			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
-			    &tcache->cache_bin_array_descriptor, link);
+			    &tcache_slow->cache_bin_array_descriptor, link);
 		}
 	}
 
-	for (i = 0; i < SC_NBINS; i++) {
-		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
-			bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]);
-		}
-	}
+	for (unsigned i = 0; i < nbins_total; i++) {
+		bin_postfork_child(tsdn, &arena->bins[i]);
+	}
+
 	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
 	base_postfork_child(tsdn, arena->base);
-	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
-	extents_postfork_child(tsdn, &arena->extents_dirty);
-	extents_postfork_child(tsdn, &arena->extents_muzzy);
-	extents_postfork_child(tsdn, &arena->extents_retained);
-	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
-	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
-	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
+	pa_shard_postfork_child(tsdn, &arena->pa_shard);
 	if (config_stats) {
 		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
 	}
...

deps/jemalloc/src/background_thread.c  View file @ b8beda3c

-#define JEMALLOC_BACKGROUND_THREAD_C_
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/jemalloc_internal_includes.h"
...
@@ -54,8 +53,9 @@ pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
 bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
 bool background_threads_enable(tsd_t *tsd) NOT_REACHED
 bool background_threads_disable(tsd_t *tsd) NOT_REACHED
-void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
-    arena_decay_t *decay, size_t npages_new) NOT_REACHED
+bool background_thread_is_started(background_thread_info_t *info) NOT_REACHED
+void background_thread_wakeup_early(background_thread_info_t *info,
+    nstime_t *remaining_sleep) NOT_REACHED
 void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
 void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
 void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
...
@@ -74,7 +74,7 @@ background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
 	info->npages_to_purge_new = 0;
 	if (config_stats) {
 		info->tot_n_runs = 0;
-		nstime_init(&info->tot_sleep_time, 0);
+		nstime_init_zero(&info->tot_sleep_time);
 	}
 }
...
@@ -82,136 +82,40 @@ static inline bool
 set_current_thread_affinity(int cpu) {
 #if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
 	cpu_set_t cpuset;
+#else
+#  ifndef __NetBSD__
+	cpuset_t cpuset;
+#  else
+	cpuset_t *cpuset;
+#  endif
+#endif
+
+#ifndef __NetBSD__
 	CPU_ZERO(&cpuset);
 	CPU_SET(cpu, &cpuset);
-	int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
-	return (ret != 0);
+#else
+	cpuset = cpuset_create();
+#endif
+
+#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
+	return (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0);
 #else
-	return false;
+#  ifndef __NetBSD__
+	int ret = pthread_setaffinity_np(pthread_self(), sizeof(cpuset_t),
+	    &cpuset);
+#  else
+	int ret = pthread_setaffinity_np(pthread_self(), cpuset_size(cpuset),
+	    cpuset);
+	cpuset_destroy(cpuset);
+#  endif
+	return ret != 0;
 #endif
 }
-/* Threshold for determining when to wake up the background thread. */
-#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
 #define BILLION UINT64_C(1000000000)
 /* Minimal sleep interval 100 ms. */
 #define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
 
-static inline size_t
-decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
-	size_t i;
-	uint64_t sum = 0;
-	for (i = 0; i < interval; i++) {
-		sum += decay->backlog[i] * h_steps[i];
-	}
-	for (; i < SMOOTHSTEP_NSTEPS; i++) {
-		sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
-	}
-
-	return (size_t)(sum >> SMOOTHSTEP_BFP);
-}
-
-static uint64_t
-arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
-    extents_t *extents) {
-	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
-		/* Use minimal interval if decay is contended. */
-		return BACKGROUND_THREAD_MIN_INTERVAL_NS;
-	}
-
-	uint64_t interval;
-	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
-	if (decay_time <= 0) {
-		/* Purging is eagerly done or disabled currently. */
-		interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
-		goto label_done;
-	}
-
-	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
-	assert(decay_interval_ns > 0);
-	size_t npages = extents_npages_get(extents);
-	if (npages == 0) {
-		unsigned i;
-		for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
-			if (decay->backlog[i] > 0) {
-				break;
-			}
-		}
-		if (i == SMOOTHSTEP_NSTEPS) {
-			/* No dirty pages recorded.  Sleep indefinitely. */
-			interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
-			goto label_done;
-		}
-	}
-	if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
-		/* Use max interval. */
-		interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
-		goto label_done;
-	}
-
-	size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
-	size_t ub = SMOOTHSTEP_NSTEPS;
-	/* Minimal 2 intervals to ensure reaching next epoch deadline. */
-	lb = (lb < 2) ? 2 : lb;
-	if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
-	    (lb + 2 > ub)) {
-		interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
-		goto label_done;
-	}
-
-	assert(lb + 2 <= ub);
-	size_t npurge_lb, npurge_ub;
-	npurge_lb = decay_npurge_after_interval(decay, lb);
-	if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
-		interval = decay_interval_ns * lb;
-		goto label_done;
-	}
-	npurge_ub = decay_npurge_after_interval(decay, ub);
-	if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
-		interval = decay_interval_ns * ub;
-		goto label_done;
-	}
-
-	unsigned n_search = 0;
-	size_t target, npurge;
-	while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub) &&
-	    (lb + 2 < ub)) {
-		target = (lb + ub) / 2;
-		npurge = decay_npurge_after_interval(decay, target);
-		if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
-			ub = target;
-			npurge_ub = npurge;
-		} else {
-			lb = target;
-			npurge_lb = npurge;
-		}
-		assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
-	}
-	interval = decay_interval_ns * (ub + lb) / 2;
-label_done:
-	interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
-	    BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
-	malloc_mutex_unlock(tsdn, &decay->mtx);
-
-	return interval;
-}
-
-/* Compute purge interval for background threads. */
-static uint64_t
-arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
-	uint64_t i1, i2;
-	i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
-	    &arena->extents_dirty);
-	if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
-		return i1;
-	}
-	i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
-	    &arena->extents_muzzy);
-
-	return i1 < i2 ? i1 : i2;
-}
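Aside: the removed arena_decay_compute_purge_interval_impl() above bisects over the decay backlog, looking for the shortest sleep interval whose expected purge count reaches the page threshold. A reduced sketch of that bisection follows; npurge_after() is a toy stand-in for decay_npurge_after_interval() and only needs to be monotone in the interval. All names are hypothetical and the caller is assumed to supply lb/ub such that npurge_after(lb) <= threshold < npurge_after(ub).

/*
 * Sketch only: bisection for the smallest interval that purges "enough".
 */
#include <stddef.h>
#include <stdint.h>

static size_t
npurge_after(const uint64_t *backlog, size_t nsteps, size_t interval) {
    /* Toy monotone model: pages scheduled within the next `interval`
     * epochs; non-decreasing in `interval`. */
    uint64_t sum = 0;
    for (size_t i = 0; i < interval && i < nsteps; i++) {
        sum += backlog[i];
    }
    return (size_t)sum;
}

/* Smallest interval in (lb, ub] whose purge count exceeds `threshold`. */
static size_t
find_purge_interval(const uint64_t *backlog, size_t nsteps,
    size_t lb, size_t ub, size_t threshold) {
    while (lb + 1 < ub) {
        size_t mid = lb + (ub - lb) / 2;
        if (npurge_after(backlog, nsteps, mid) > threshold) {
            ub = mid;   /* mid already purges enough */
        } else {
            lb = mid;   /* need a longer interval */
        }
    }
    return ub;
}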
 static void
 background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
     uint64_t interval) {
...
@@ -228,7 +132,8 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
 	int ret;
 	if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
-		assert(background_thread_indefinite_sleep(info));
+		background_thread_wakeup_time_set(tsdn, info,
+		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
 		ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
 		assert(ret == 0);
 	} else {
...
@@ -236,8 +141,7 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
 		    interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
 		/* We need malloc clock (can be different from tv). */
 		nstime_t next_wakeup;
-		nstime_init(&next_wakeup, 0);
-		nstime_update(&next_wakeup);
+		nstime_init_update(&next_wakeup);
 		nstime_iadd(&next_wakeup, interval);
 		assert(nstime_ns(&next_wakeup) <
 		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
...
@@ -254,8 +158,6 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
 		assert(!background_thread_indefinite_sleep(info));
 		ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
 		assert(ret == ETIMEDOUT || ret == 0);
-		background_thread_wakeup_time_set(tsdn, info,
-		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
 	}
 	if (config_stats) {
 		gettimeofday(&tv, NULL);
...
@@ -283,28 +185,48 @@ background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
 }
 
 static inline void
-background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
-    unsigned ind) {
-	uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
+background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
+    unsigned ind) {
+	uint64_t ns_until_deferred = BACKGROUND_THREAD_DEFERRED_MAX;
 	unsigned narenas = narenas_total_get();
+	bool slept_indefinitely = background_thread_indefinite_sleep(info);
 
 	for (unsigned i = ind; i < narenas; i += max_background_threads) {
 		arena_t *arena = arena_get(tsdn, i, false);
 		if (!arena) {
 			continue;
 		}
-		arena_decay(tsdn, arena, true, false);
-		if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
+		/*
+		 * If thread was woken up from the indefinite sleep, don't
+		 * do the work instantly, but rather check when the deferred
+		 * work that caused this thread to wake up is scheduled for.
+		 */
+		if (!slept_indefinitely) {
+			arena_do_deferred_work(tsdn, arena);
+		}
+		if (ns_until_deferred <= BACKGROUND_THREAD_MIN_INTERVAL_NS) {
 			/* Min interval will be used. */
 			continue;
 		}
-		uint64_t interval = arena_decay_compute_purge_interval(tsdn,
-		    arena);
-		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
-		if (min_interval > interval) {
-			min_interval = interval;
+		uint64_t ns_arena_deferred = pa_shard_time_until_deferred_work(
+		    tsdn, &arena->pa_shard);
+		if (ns_arena_deferred < ns_until_deferred) {
+			ns_until_deferred = ns_arena_deferred;
 		}
 	}
-	background_thread_sleep(tsdn, info, min_interval);
+
+	uint64_t sleep_ns;
+	if (ns_until_deferred == BACKGROUND_THREAD_DEFERRED_MAX) {
+		sleep_ns = BACKGROUND_THREAD_INDEFINITE_SLEEP;
+	} else {
+		sleep_ns =
+		    (ns_until_deferred < BACKGROUND_THREAD_MIN_INTERVAL_NS)
+		    ? BACKGROUND_THREAD_MIN_INTERVAL_NS
+		    : ns_until_deferred;
+	}
+
+	background_thread_sleep(tsdn, info, sleep_ns);
 }
 
 static bool
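Aside: the sleep-time choice at the end of background_work_sleep_once() above has three cases (no deferred work anywhere, work due very soon, work due later). A tiny sketch isolating that clamp as a pure function; the constants are stand-ins, not jemalloc's.

/*
 * Sketch only: clamp the next background-thread sleep.
 */
#include <stdint.h>

#define SKETCH_DEFERRED_MAX    UINT64_MAX
#define SKETCH_INDEFINITE      UINT64_MAX
#define SKETCH_MIN_INTERVAL_NS (UINT64_C(1000000000) / 10) /* 100 ms */

static uint64_t
sleep_ns_for_deferred_work(uint64_t ns_until_deferred) {
    if (ns_until_deferred == SKETCH_DEFERRED_MAX) {
        /* Nothing scheduled anywhere: sleep until signalled. */
        return SKETCH_INDEFINITE;
    }
    /* Never wake up more often than the minimum interval. */
    return ns_until_deferred < SKETCH_MIN_INTERVAL_NS
        ? SKETCH_MIN_INTERVAL_NS : ns_until_deferred;
}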
...
@@ -508,7 +430,7 @@ background_thread_entry(void *ind_arg) {
 	assert(thread_ind < max_background_threads);
 #ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
 	pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
-#elif defined(__FreeBSD__)
+#elif defined(__FreeBSD__) || defined(__DragonFly__)
 	pthread_set_name_np(pthread_self(), "jemalloc_bg_thd");
 #endif
 	if (opt_percpu_arena != percpu_arena_disabled) {
...
@@ -608,16 +530,16 @@ background_threads_enable(tsd_t *tsd) {
 	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
 	VARIABLE_ARRAY(bool, marked, max_background_threads);
-	unsigned i, nmarked;
-	for (i = 0; i < max_background_threads; i++) {
+	unsigned nmarked;
+	for (unsigned i = 0; i < max_background_threads; i++) {
 		marked[i] = false;
 	}
 	nmarked = 0;
 	/* Thread 0 is required and created at the end. */
 	marked[0] = true;
 	/* Mark the threads we need to create for thread 0. */
-	unsigned n = narenas_total_get();
-	for (i = 1; i < n; i++) {
+	unsigned narenas = narenas_total_get();
+	for (unsigned i = 1; i < narenas; i++) {
 		if (marked[i % max_background_threads] ||
 		    arena_get(tsd_tsdn(tsd), i, false) == NULL) {
 			continue;
...
@@ -634,7 +556,18 @@ background_threads_enable(tsd_t *tsd) {
 		}
 	}
 
-	return background_thread_create_locked(tsd, 0);
+	bool err = background_thread_create_locked(tsd, 0);
+	if (err) {
+		return true;
+	}
+	for (unsigned i = 0; i < narenas; i++) {
+		arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);
+		if (arena != NULL) {
+			pa_shard_set_deferral_allowed(tsd_tsdn(tsd),
+			    &arena->pa_shard, true);
+		}
+	}
+	return false;
 }
 
 bool
...
@@ -648,92 +581,36 @@ background_threads_disable(tsd_t *tsd) {
 		return true;
 	}
 	assert(n_background_threads == 0);
+	unsigned narenas = narenas_total_get();
+	for (unsigned i = 0; i < narenas; i++) {
+		arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);
+		if (arena != NULL) {
+			pa_shard_set_deferral_allowed(tsd_tsdn(tsd),
+			    &arena->pa_shard, false);
+		}
+	}
 
 	return false;
 }
 
-/* Check if we need to signal the background thread early. */
+bool
+background_thread_is_started(background_thread_info_t *info) {
+	return info->state == background_thread_started;
+}
+
 void
-background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
-    arena_decay_t *decay, size_t npages_new) {
-	background_thread_info_t *info = arena_background_thread_info_get(
-	    arena);
-	if (malloc_mutex_trylock(tsdn, &info->mtx)) {
-		/*
-		 * Background thread may hold the mutex for a long period of
-		 * time.  We'd like to avoid the variance on application
-		 * threads.  So keep this non-blocking, and leave the work to a
-		 * future epoch.
-		 */
+background_thread_wakeup_early(background_thread_info_t *info,
+    nstime_t *remaining_sleep) {
+	/*
+	 * This is an optimization to increase batching.  At this point
+	 * we know that background thread wakes up soon, so the time to cache
+	 * the just freed memory is bounded and low.
+	 */
+	if (remaining_sleep != NULL && nstime_ns(remaining_sleep) <
+	    BACKGROUND_THREAD_MIN_INTERVAL_NS) {
 		return;
 	}
-
-	if (info->state != background_thread_started) {
-		goto label_done;
-	}
-	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
-		goto label_done;
-	}
-
-	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
-	if (decay_time <= 0) {
-		/* Purging is eagerly done or disabled currently. */
-		goto label_done_unlock2;
-	}
-
-	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
-	assert(decay_interval_ns > 0);
-
-	nstime_t diff;
-	nstime_init(&diff, background_thread_wakeup_time_get(info));
-	if (nstime_compare(&diff, &decay->epoch) <= 0) {
-		goto label_done_unlock2;
-	}
-	nstime_subtract(&diff, &decay->epoch);
-	if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
-		goto label_done_unlock2;
-	}
-
-	if (npages_new > 0) {
-		size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
-		/*
-		 * Compute how many new pages we would need to purge by the next
-		 * wakeup, which is used to determine if we should signal the
-		 * background thread.
-		 */
-		uint64_t npurge_new;
-		if (n_epoch >= SMOOTHSTEP_NSTEPS) {
-			npurge_new = npages_new;
-		} else {
-			uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
-			assert(h_steps_max >=
-			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
-			npurge_new = npages_new * (h_steps_max -
-			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
-			npurge_new >>= SMOOTHSTEP_BFP;
-		}
-		info->npages_to_purge_new += npurge_new;
-	}
-
-	bool should_signal;
-	if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
-		should_signal = true;
-	} else if (unlikely(background_thread_indefinite_sleep(info)) &&
-	    (extents_npages_get(&arena->extents_dirty) > 0 ||
-	    extents_npages_get(&arena->extents_muzzy) > 0 ||
-	    info->npages_to_purge_new > 0)) {
-		should_signal = true;
-	} else {
-		should_signal = false;
-	}
-
-	if (should_signal) {
-		info->npages_to_purge_new = 0;
-		pthread_cond_signal(&info->cond);
-	}
-label_done_unlock2:
-	malloc_mutex_unlock(tsdn, &decay->mtx);
-label_done:
-	malloc_mutex_unlock(tsdn, &info->mtx);
+	pthread_cond_signal(&info->cond);
 }
 
 void
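Aside: the removed background_thread_interval_check() above estimates how many of the newly dirtied pages will be due for purging after n_epoch decay epochs, using the fixed-point h_steps[] smoothstep table. A reduced sketch of that arithmetic follows, substituting the closed-form smoothstep curve in floating point, so the numbers are only approximate; all names and the step count are hypothetical.

/*
 * Sketch only: approximate npurge_new for freshly dirtied pages.
 */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_NSTEPS 200   /* stand-in for SMOOTHSTEP_NSTEPS */

/* smoothstep on [0, 1]: 3x^2 - 2x^3, monotonically increasing. */
static double
smoothstep(double x) {
    return x * x * (3.0 - 2.0 * x);
}

static uint64_t
npurge_new_estimate(uint64_t npages_new, size_t n_epoch) {
    if (n_epoch >= SKETCH_NSTEPS) {
        /* The whole decay horizon has passed: purge everything new. */
        return npages_new;
    }
    /* Fraction of the decay curve already consumed after n_epoch steps. */
    double consumed = smoothstep(1.0) -
        smoothstep((double)(SKETCH_NSTEPS - 1 - n_epoch) /
        (SKETCH_NSTEPS - 1));
    return (uint64_t)((double)npages_new * consumed);
}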
...
@@ -794,9 +671,11 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
 		return true;
 	}
-	stats->num_threads = n_background_threads;
+
+	nstime_init_zero(&stats->run_interval);
+	memset(&stats->max_counter_per_bg_thd, 0, sizeof(mutex_prof_data_t));
+
 	uint64_t num_runs = 0;
-	nstime_init(&stats->run_interval, 0);
+	stats->num_threads = n_background_threads;
 	for (unsigned i = 0; i < max_background_threads; i++) {
 		background_thread_info_t *info = &background_thread_info[i];
 		if (malloc_mutex_trylock(tsdn, &info->mtx)) {
...
@@ -809,6 +688,8 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
 		if (info->state != background_thread_stopped) {
 			num_runs += info->tot_n_runs;
 			nstime_add(&stats->run_interval, &info->tot_sleep_time);
+			malloc_mutex_prof_max_update(tsdn,
+			    &stats->max_counter_per_bg_thd, &info->mtx);
 		}
 		malloc_mutex_unlock(tsdn, &info->mtx);
 	}
...
@@ -892,7 +773,7 @@ background_thread_boot0(void) {
 }
 
 bool
-background_thread_boot1(tsdn_t *tsdn) {
+background_thread_boot1(tsdn_t *tsdn, base_t *base) {
 #ifdef JEMALLOC_BACKGROUND_THREAD
 	assert(have_background_thread);
 	assert(narenas_total_get() > 0);
...
@@ -911,7 +792,7 @@ background_thread_boot1(tsdn_t *tsdn) {
 	}
 
 	background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
-	    b0get(), opt_max_background_threads *
+	    base, opt_max_background_threads *
 	    sizeof(background_thread_info_t), CACHELINE);
 	if (background_thread_info == NULL) {
 		return true;
...

deps/jemalloc/src/base.c  View file @ b8beda3c

-#define JEMALLOC_BASE_C_
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/jemalloc_internal_includes.h"
...
@@ -7,6 +6,15 @@
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/sz.h"
 
+/*
+ * In auto mode, arenas switch to huge pages for the base allocator on the
+ * second base block.  a0 switches to thp on the 5th block (after 20 megabytes
+ * of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
+ */
+#define BASE_AUTO_THP_THRESHOLD    2
+#define BASE_AUTO_THP_THRESHOLD_A0 5
+
 /******************************************************************************/
 /* Data. */
...
@@ -29,7 +37,7 @@ metadata_thp_madvise(void) {
 }
 
 static void *
-base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
+base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) {
 	void *addr;
 	bool zero = true;
 	bool commit = true;
...
@@ -37,22 +45,21 @@ base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size)
 	/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
 	assert(size == HUGEPAGE_CEILING(size));
 	size_t alignment = HUGEPAGE;
-	if (extent_hooks == &extent_hooks_default) {
+	if (ehooks_are_default(ehooks)) {
 		addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
+		if (have_madvise_huge && addr) {
+			pages_set_thp_state(addr, size);
+		}
 	} else {
-		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
-		pre_reentrancy(tsd, NULL);
-		addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
-		    &zero, &commit, ind);
-		post_reentrancy(tsd);
+		/* No arena context as we are creating new arenas. */
+		addr = ehooks_alloc(tsdn, ehooks, NULL, size, alignment, &zero,
+		    &commit);
 	}
 
 	return addr;
 }
 
 static void
-base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
+base_unmap(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr,
     size_t size) {
 	/*
 	 * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
...
@@ -64,7 +71,7 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
 	 * may in fact want the end state of all associated virtual memory to be
 	 * in some consistent-but-allocated state.
 	 */
-	if (extent_hooks == &extent_hooks_default) {
+	if (ehooks_are_default(ehooks)) {
 		if (!extent_dalloc_mmap(addr, size)) {
 			goto label_done;
 		}
...
@@ -80,31 +87,19 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
 		/* Nothing worked.  This should never happen. */
 		not_reached();
 	} else {
-		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
-		pre_reentrancy(tsd, NULL);
-		if (extent_hooks->dalloc != NULL &&
-		    !extent_hooks->dalloc(extent_hooks, addr, size, true,
-		    ind)) {
-			goto label_post_reentrancy;
-		}
-		if (extent_hooks->decommit != NULL &&
-		    !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
-		    ind)) {
-			goto label_post_reentrancy;
-		}
-		if (extent_hooks->purge_forced != NULL &&
-		    !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
-		    size, ind)) {
-			goto label_post_reentrancy;
-		}
-		if (extent_hooks->purge_lazy != NULL &&
-		    !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
-		    ind)) {
-			goto label_post_reentrancy;
+		if (!ehooks_dalloc(tsdn, ehooks, addr, size, true)) {
+			goto label_done;
+		}
+		if (!ehooks_decommit(tsdn, ehooks, addr, size, 0, size)) {
+			goto label_done;
+		}
+		if (!ehooks_purge_forced(tsdn, ehooks, addr, size, 0, size)) {
+			goto label_done;
+		}
+		if (!ehooks_purge_lazy(tsdn, ehooks, addr, size, 0, size)) {
+			goto label_done;
 		}
 		/* Nothing worked.  That's the application's problem. */
-	label_post_reentrancy:
-		post_reentrancy(tsd);
 	}
 label_done:
 	if (metadata_thp_madvise()) {
...
@@ -116,14 +111,14 @@ label_done:
...
@@ -116,14 +111,14 @@ label_done:
}
}
static
void
static
void
base_e
xtent
_init
(
size_t
*
extent_sn_next
,
e
xtent_t
*
extent
,
void
*
addr
,
base_e
data
_init
(
size_t
*
extent_sn_next
,
e
data_t
*
edata
,
void
*
addr
,
size_t
size
)
{
size_t
size
)
{
size_t
sn
;
size_t
sn
;
sn
=
*
extent_sn_next
;
sn
=
*
extent_sn_next
;
(
*
extent_sn_next
)
++
;
(
*
extent_sn_next
)
++
;
e
xtent
_binit
(
e
xtent
,
addr
,
size
,
sn
);
e
data
_binit
(
e
data
,
addr
,
size
,
sn
);
}
}
static
size_t
static
size_t
...
@@ -169,7 +164,7 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
...
@@ -169,7 +164,7 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
pages_huge
(
block
,
block
->
size
);
pages_huge
(
block
,
block
->
size
);
if
(
config_stats
)
{
if
(
config_stats
)
{
base
->
n_thp
+=
HUGEPAGE_CEILING
(
block
->
size
-
base
->
n_thp
+=
HUGEPAGE_CEILING
(
block
->
size
-
e
xtent
_bsize_get
(
&
block
->
e
xtent
))
>>
LG_HUGEPAGE
;
e
data
_bsize_get
(
&
block
->
e
data
))
>>
LG_HUGEPAGE
;
}
}
block
=
block
->
next
;
block
=
block
->
next
;
assert
(
block
==
NULL
||
(
base_ind_get
(
base
)
==
0
));
assert
(
block
==
NULL
||
(
base_ind_get
(
base
)
==
0
));
...
@@ -177,34 +172,34 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
...
@@ -177,34 +172,34 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
}
}
static
void
*
static
void
*
base_extent_bump_alloc_helper
(
e
xtent_t
*
extent
,
size_t
*
gap_size
,
size_t
size
,
base_extent_bump_alloc_helper
(
e
data_t
*
edata
,
size_t
*
gap_size
,
size_t
size
,
size_t
alignment
)
{
size_t
alignment
)
{
void
*
ret
;
void
*
ret
;
assert
(
alignment
==
ALIGNMENT_CEILING
(
alignment
,
QUANTUM
));
assert
(
alignment
==
ALIGNMENT_CEILING
(
alignment
,
QUANTUM
));
assert
(
size
==
ALIGNMENT_CEILING
(
size
,
alignment
));
assert
(
size
==
ALIGNMENT_CEILING
(
size
,
alignment
));
*
gap_size
=
ALIGNMENT_CEILING
((
uintptr_t
)
e
xtent
_addr_get
(
e
xtent
),
*
gap_size
=
ALIGNMENT_CEILING
((
uintptr_t
)
e
data
_addr_get
(
e
data
),
alignment
)
-
(
uintptr_t
)
e
xtent
_addr_get
(
e
xtent
);
alignment
)
-
(
uintptr_t
)
e
data
_addr_get
(
e
data
);
ret
=
(
void
*
)((
uintptr_t
)
e
xtent
_addr_get
(
e
xtent
)
+
*
gap_size
);
ret
=
(
void
*
)((
uintptr_t
)
e
data
_addr_get
(
e
data
)
+
*
gap_size
);
assert
(
e
xtent
_bsize_get
(
e
xtent
)
>=
*
gap_size
+
size
);
assert
(
e
data
_bsize_get
(
e
data
)
>=
*
gap_size
+
size
);
e
xtent
_binit
(
e
xtent
,
(
void
*
)((
uintptr_t
)
e
xtent
_addr_get
(
e
xtent
)
+
e
data
_binit
(
e
data
,
(
void
*
)((
uintptr_t
)
e
data
_addr_get
(
e
data
)
+
*
gap_size
+
size
),
e
xtent
_bsize_get
(
e
xtent
)
-
*
gap_size
-
size
,
*
gap_size
+
size
),
e
data
_bsize_get
(
e
data
)
-
*
gap_size
-
size
,
e
xtent
_sn_get
(
e
xtent
));
e
data
_sn_get
(
e
data
));
return
ret
;
return
ret
;
}
}
static
void
static
void
base_extent_bump_alloc_post
(
base_t
*
base
,
e
xtent_t
*
extent
,
size_t
gap_size
,
base_extent_bump_alloc_post
(
base_t
*
base
,
e
data_t
*
edata
,
size_t
gap_size
,
void
*
addr
,
size_t
size
)
{
void
*
addr
,
size_t
size
)
{
if
(
e
xtent
_bsize_get
(
e
xtent
)
>
0
)
{
if
(
e
data
_bsize_get
(
e
data
)
>
0
)
{
/*
/*
* Compute the index for the largest size class that does not
* Compute the index for the largest size class that does not
* exceed extent's size.
* exceed extent's size.
*/
*/
szind_t
index_floor
=
szind_t
index_floor
=
sz_size2index
(
e
xtent
_bsize_get
(
e
xtent
)
+
1
)
-
1
;
sz_size2index
(
e
data
_bsize_get
(
e
data
)
+
1
)
-
1
;
e
xtent
_heap_insert
(
&
base
->
avail
[
index_floor
],
e
xtent
);
e
data
_heap_insert
(
&
base
->
avail
[
index_floor
],
e
data
);
}
}
if
(
config_stats
)
{
if
(
config_stats
)
{
...
@@ -229,13 +224,13 @@ base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
...
@@ -229,13 +224,13 @@ base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
}
}
static
void
*
static
void
*
base_extent_bump_alloc
(
base_t
*
base
,
e
xtent_t
*
extent
,
size_t
size
,
base_extent_bump_alloc
(
base_t
*
base
,
e
data_t
*
edata
,
size_t
size
,
size_t
alignment
)
{
size_t
alignment
)
{
void
*
ret
;
void
*
ret
;
size_t
gap_size
;
size_t
gap_size
;
ret
=
base_extent_bump_alloc_helper
(
e
xtent
,
&
gap_size
,
size
,
alignment
);
ret
=
base_extent_bump_alloc_helper
(
e
data
,
&
gap_size
,
size
,
alignment
);
base_extent_bump_alloc_post
(
base
,
e
xtent
,
gap_size
,
ret
,
size
);
base_extent_bump_alloc_post
(
base
,
e
data
,
gap_size
,
ret
,
size
);
return
ret
;
return
ret
;
}
}
...
@@ -245,8 +240,8 @@ base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
...
@@ -245,8 +240,8 @@ base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
* On success a pointer to the initialized base_block_t header is returned.
* On success a pointer to the initialized base_block_t header is returned.
*/
*/
static
base_block_t
*
static
base_block_t
*
base_block_alloc
(
tsdn_t
*
tsdn
,
base_t
*
base
,
e
xtent_
hooks_t
*
e
xtent_hooks
,
base_block_alloc
(
tsdn_t
*
tsdn
,
base_t
*
base
,
ehooks_t
*
e
hooks
,
unsigned
ind
,
unsigned
ind
,
pszind_t
*
pind_last
,
size_t
*
extent_sn_next
,
size_t
size
,
pszind_t
*
pind_last
,
size_t
*
extent_sn_next
,
size_t
size
,
size_t
alignment
)
{
size_t
alignment
)
{
alignment
=
ALIGNMENT_CEILING
(
alignment
,
QUANTUM
);
alignment
=
ALIGNMENT_CEILING
(
alignment
,
QUANTUM
);
size_t
usize
=
ALIGNMENT_CEILING
(
size
,
alignment
);
size_t
usize
=
ALIGNMENT_CEILING
(
size
,
alignment
);
...
@@ -267,7 +262,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
...
@@ -267,7 +262,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
size_t
next_block_size
=
HUGEPAGE_CEILING
(
sz_pind2sz
(
pind_next
));
size_t
next_block_size
=
HUGEPAGE_CEILING
(
sz_pind2sz
(
pind_next
));
size_t
block_size
=
(
min_block_size
>
next_block_size
)
?
min_block_size
size_t
block_size
=
(
min_block_size
>
next_block_size
)
?
min_block_size
:
next_block_size
;
:
next_block_size
;
base_block_t
*
block
=
(
base_block_t
*
)
base_map
(
tsdn
,
e
xtent_
hooks
,
ind
,
base_block_t
*
block
=
(
base_block_t
*
)
base_map
(
tsdn
,
ehooks
,
ind
,
block_size
);
block_size
);
if
(
block
==
NULL
)
{
if
(
block
==
NULL
)
{
return
NULL
;
return
NULL
;
...
@@ -295,7 +290,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
...
@@ -295,7 +290,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
block
->
size
=
block_size
;
block
->
size
=
block_size
;
block
->
next
=
NULL
;
block
->
next
=
NULL
;
assert
(
block_size
>=
header_size
);
assert
(
block_size
>=
header_size
);
base_e
xtent
_init
(
extent_sn_next
,
&
block
->
e
xtent
,
base_e
data
_init
(
extent_sn_next
,
&
block
->
e
data
,
(
void
*
)((
uintptr_t
)
block
+
header_size
),
block_size
-
header_size
);
(
void
*
)((
uintptr_t
)
block
+
header_size
),
block_size
-
header_size
);
return
block
;
return
block
;
}
}
...
@@ -304,17 +299,17 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
...
@@ -304,17 +299,17 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
* Allocate an extent that is at least as large as specified size, with
* Allocate an extent that is at least as large as specified size, with
* specified alignment.
* specified alignment.
*/
*/
static
e
xtent
_t
*
static
e
data
_t
*
base_extent_alloc
(
tsdn_t
*
tsdn
,
base_t
*
base
,
size_t
size
,
size_t
alignment
)
{
base_extent_alloc
(
tsdn_t
*
tsdn
,
base_t
*
base
,
size_t
size
,
size_t
alignment
)
{
malloc_mutex_assert_owner
(
tsdn
,
&
base
->
mtx
);
malloc_mutex_assert_owner
(
tsdn
,
&
base
->
mtx
);
e
xtent_
hooks_t
*
e
xtent_
hooks
=
base_e
xtent_
hooks_get
(
base
);
ehooks_t
*
ehooks
=
base_ehooks_get
_for_metadata
(
base
);
/*
/*
* Drop mutex during base_block_alloc(), because an extent hook will be
* Drop mutex during base_block_alloc(), because an extent hook will be
* called.
* called.
*/
*/
malloc_mutex_unlock
(
tsdn
,
&
base
->
mtx
);
malloc_mutex_unlock
(
tsdn
,
&
base
->
mtx
);
base_block_t
*
block
=
base_block_alloc
(
tsdn
,
base
,
e
xtent_
hooks
,
base_block_t
*
block
=
base_block_alloc
(
tsdn
,
base
,
ehooks
,
base_ind_get
(
base
),
&
base
->
pind_last
,
&
base
->
extent_sn_next
,
size
,
base_ind_get
(
base
),
&
base
->
pind_last
,
&
base
->
extent_sn_next
,
size
,
alignment
);
alignment
);
malloc_mutex_lock
(
tsdn
,
&
base
->
mtx
);
malloc_mutex_lock
(
tsdn
,
&
base
->
mtx
);
...
@@ -338,7 +333,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
...
@@ -338,7 +333,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
assert
(
base
->
resident
<=
base
->
mapped
);
assert
(
base
->
resident
<=
base
->
mapped
);
assert
(
base
->
n_thp
<<
LG_HUGEPAGE
<=
base
->
mapped
);
assert
(
base
->
n_thp
<<
LG_HUGEPAGE
<=
base
->
mapped
);
}
}
return
&
block
->
e
xtent
;
return
&
block
->
e
data
;
}
}
base_t
*
base_t
*
...
@@ -347,10 +342,22 @@ b0get(void) {
...
@@ -347,10 +342,22 @@ b0get(void) {
}
}
base_t
*
base_t
*
base_new
(
tsdn_t
*
tsdn
,
unsigned
ind
,
extent_hooks_t
*
extent_hooks
)
{
base_new
(
tsdn_t
*
tsdn
,
unsigned
ind
,
const
extent_hooks_t
*
extent_hooks
,
bool
metadata_use_hooks
)
{
pszind_t
pind_last
=
0
;
pszind_t
pind_last
=
0
;
size_t
extent_sn_next
=
0
;
size_t
extent_sn_next
=
0
;
base_block_t
*
block
=
base_block_alloc
(
tsdn
,
NULL
,
extent_hooks
,
ind
,
/*
* The base will contain the ehooks eventually, but it itself is
* allocated using them. So we use some stack ehooks to bootstrap its
* memory, and then initialize the ehooks within the base_t.
*/
ehooks_t
fake_ehooks
;
ehooks_init
(
&
fake_ehooks
,
metadata_use_hooks
?
(
extent_hooks_t
*
)
extent_hooks
:
(
extent_hooks_t
*
)
&
ehooks_default_extent_hooks
,
ind
);
base_block_t
*
block
=
base_block_alloc
(
tsdn
,
NULL
,
&
fake_ehooks
,
ind
,
&
pind_last
,
&
extent_sn_next
,
sizeof
(
base_t
),
QUANTUM
);
&
pind_last
,
&
extent_sn_next
,
sizeof
(
base_t
),
QUANTUM
);
if
(
block
==
NULL
)
{
if
(
block
==
NULL
)
{
return
NULL
;
return
NULL
;
...
@@ -359,13 +366,15 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
...
@@ -359,13 +366,15 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
size_t
gap_size
;
size_t
gap_size
;
size_t
base_alignment
=
CACHELINE
;
size_t
base_alignment
=
CACHELINE
;
size_t
base_size
=
ALIGNMENT_CEILING
(
sizeof
(
base_t
),
base_alignment
);
size_t
base_size
=
ALIGNMENT_CEILING
(
sizeof
(
base_t
),
base_alignment
);
base_t
*
base
=
(
base_t
*
)
base_extent_bump_alloc_helper
(
&
block
->
e
xtent
,
base_t
*
base
=
(
base_t
*
)
base_extent_bump_alloc_helper
(
&
block
->
e
data
,
&
gap_size
,
base_size
,
base_alignment
);
&
gap_size
,
base_size
,
base_alignment
);
base
->
ind
=
ind
;
ehooks_init
(
&
base
->
ehooks
,
(
extent_hooks_t
*
)
extent_hooks
,
ind
);
atomic_store_p
(
&
base
->
extent_hooks
,
extent_hooks
,
ATOMIC_RELAXED
);
ehooks_init
(
&
base
->
ehooks_base
,
metadata_use_hooks
?
(
extent_hooks_t
*
)
extent_hooks
:
(
extent_hooks_t
*
)
&
ehooks_default_extent_hooks
,
ind
);
if
(
malloc_mutex_init
(
&
base
->
mtx
,
"base"
,
WITNESS_RANK_BASE
,
if
(
malloc_mutex_init
(
&
base
->
mtx
,
"base"
,
WITNESS_RANK_BASE
,
malloc_mutex_rank_exclusive
))
{
malloc_mutex_rank_exclusive
))
{
base_unmap
(
tsdn
,
extent_
hooks
,
ind
,
block
,
block
->
size
);
base_unmap
(
tsdn
,
&
fake_e
hooks
,
ind
,
block
,
block
->
size
);
return
NULL
;
return
NULL
;
}
}
base
->
pind_last
=
pind_last
;
base
->
pind_last
=
pind_last
;
...
@@ -373,7 +382,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
...
@@ -373,7 +382,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base
->
blocks
=
block
;
base
->
blocks
=
block
;
base
->
auto_thp_switched
=
false
;
base
->
auto_thp_switched
=
false
;
for
(
szind_t
i
=
0
;
i
<
SC_NSIZES
;
i
++
)
{
for
(
szind_t
i
=
0
;
i
<
SC_NSIZES
;
i
++
)
{
e
xtent
_heap_new
(
&
base
->
avail
[
i
]);
e
data
_heap_new
(
&
base
->
avail
[
i
]);
}
}
if
(
config_stats
)
{
if
(
config_stats
)
{
base
->
allocated
=
sizeof
(
base_block_t
);
base
->
allocated
=
sizeof
(
base_block_t
);
...
@@ -386,7 +395,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
...
@@ -386,7 +395,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
assert
(
base
->
resident
<=
base
->
mapped
);
assert
(
base
->
resident
<=
base
->
mapped
);
assert
(
base
->
n_thp
<<
LG_HUGEPAGE
<=
base
->
mapped
);
assert
(
base
->
n_thp
<<
LG_HUGEPAGE
<=
base
->
mapped
);
}
}
base_extent_bump_alloc_post
(
base
,
&
block
->
e
xtent
,
gap_size
,
base
,
base_extent_bump_alloc_post
(
base
,
&
block
->
e
data
,
gap_size
,
base
,
base_size
);
base_size
);
return
base
;
return
base
;
...
@@ -394,26 +403,31 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
...
@@ -394,26 +403,31 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
void
void
base_delete
(
tsdn_t
*
tsdn
,
base_t
*
base
)
{
base_delete
(
tsdn_t
*
tsdn
,
base_t
*
base
)
{
e
xtent_
hooks_t
*
e
xtent_
hooks
=
base_e
xtent_
hooks_get
(
base
);
ehooks_t
*
ehooks
=
base_ehooks_get
_for_metadata
(
base
);
base_block_t
*
next
=
base
->
blocks
;
base_block_t
*
next
=
base
->
blocks
;
do
{
do
{
base_block_t
*
block
=
next
;
base_block_t
*
block
=
next
;
next
=
block
->
next
;
next
=
block
->
next
;
base_unmap
(
tsdn
,
e
xtent_
hooks
,
base_ind_get
(
base
),
block
,
base_unmap
(
tsdn
,
ehooks
,
base_ind_get
(
base
),
block
,
block
->
size
);
block
->
size
);
}
while
(
next
!=
NULL
);
}
while
(
next
!=
NULL
);
}
}
extent_hooks_t
*
ehooks_t
*
base_extent_hooks_get
(
base_t
*
base
)
{
base_ehooks_get
(
base_t
*
base
)
{
return
(
extent_hooks_t
*
)
atomic_load_p
(
&
base
->
extent_hooks
,
return
&
base
->
ehooks
;
ATOMIC_ACQUIRE
);
}
ehooks_t
*
base_ehooks_get_for_metadata
(
base_t
*
base
)
{
return
&
base
->
ehooks_base
;
}
}
extent_hooks_t
*
extent_hooks_t
*
base_extent_hooks_set
(
base_t
*
base
,
extent_hooks_t
*
extent_hooks
)
{
base_extent_hooks_set
(
base_t
*
base
,
extent_hooks_t
*
extent_hooks
)
{
extent_hooks_t
*
old_extent_hooks
=
base_extent_hooks_get
(
base
);
extent_hooks_t
*
old_extent_hooks
=
atomic_store_p
(
&
base
->
extent_hooks
,
extent_hooks
,
ATOMIC_RELEASE
);
ehooks_get_extent_hooks_ptr
(
&
base
->
ehooks
);
ehooks_init
(
&
base
->
ehooks
,
extent_hooks
,
ehooks_ind_get
(
&
base
->
ehooks
));
return
old_extent_hooks
;
return
old_extent_hooks
;
}
}
...
@@ -424,28 +438,28 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
...
@@ -424,28 +438,28 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
size_t
usize
=
ALIGNMENT_CEILING
(
size
,
alignment
);
size_t
usize
=
ALIGNMENT_CEILING
(
size
,
alignment
);
size_t
asize
=
usize
+
alignment
-
QUANTUM
;
size_t
asize
=
usize
+
alignment
-
QUANTUM
;
e
xtent_t
*
extent
=
NULL
;
e
data_t
*
edata
=
NULL
;
malloc_mutex_lock
(
tsdn
,
&
base
->
mtx
);
malloc_mutex_lock
(
tsdn
,
&
base
->
mtx
);
for
(
szind_t
i
=
sz_size2index
(
asize
);
i
<
SC_NSIZES
;
i
++
)
{
for
(
szind_t
i
=
sz_size2index
(
asize
);
i
<
SC_NSIZES
;
i
++
)
{
e
xtent
=
extent
_heap_remove_first
(
&
base
->
avail
[
i
]);
e
data
=
edata
_heap_remove_first
(
&
base
->
avail
[
i
]);
if
(
e
xtent
!=
NULL
)
{
if
(
e
data
!=
NULL
)
{
/* Use existing space. */
/* Use existing space. */
break
;
break
;
}
}
}
}
if
(
e
xtent
==
NULL
)
{
if
(
e
data
==
NULL
)
{
/* Try to allocate more space. */
/* Try to allocate more space. */
e
xtent
=
base_extent_alloc
(
tsdn
,
base
,
usize
,
alignment
);
e
data
=
base_extent_alloc
(
tsdn
,
base
,
usize
,
alignment
);
}
}
void
*
ret
;
void
*
ret
;
if
(
e
xtent
==
NULL
)
{
if
(
e
data
==
NULL
)
{
ret
=
NULL
;
ret
=
NULL
;
goto
label_return
;
goto
label_return
;
}
}
ret
=
base_extent_bump_alloc
(
base
,
e
xtent
,
usize
,
alignment
);
ret
=
base_extent_bump_alloc
(
base
,
e
data
,
usize
,
alignment
);
if
(
esn
!=
NULL
)
{
if
(
esn
!=
NULL
)
{
*
esn
=
extent
_sn_get
(
e
xtent
);
*
esn
=
(
size_t
)
edata
_sn_get
(
e
data
);
}
}
label_return:
label_return:
malloc_mutex_unlock
(
tsdn
,
&
base
->
mtx
);
malloc_mutex_unlock
(
tsdn
,
&
base
->
mtx
);
...
@@ -465,16 +479,16 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
...
@@ -465,16 +479,16 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
return
base_alloc_impl
(
tsdn
,
base
,
size
,
alignment
,
NULL
);
return
base_alloc_impl
(
tsdn
,
base
,
size
,
alignment
,
NULL
);
}
}
e
xtent
_t
*
e
data
_t
*
base_alloc_e
xtent
(
tsdn_t
*
tsdn
,
base_t
*
base
)
{
base_alloc_e
data
(
tsdn_t
*
tsdn
,
base_t
*
base
)
{
size_t
esn
;
size_t
esn
;
e
xtent_t
*
extent
=
base_alloc_impl
(
tsdn
,
base
,
sizeof
(
e
xtent
_t
),
e
data_t
*
edata
=
base_alloc_impl
(
tsdn
,
base
,
sizeof
(
e
data
_t
),
CACHELINE
,
&
esn
);
EDATA_ALIGNMENT
,
&
esn
);
if
(
e
xtent
==
NULL
)
{
if
(
e
data
==
NULL
)
{
return
NULL
;
return
NULL
;
}
}
e
xtent
_esn_set
(
e
xtent
,
esn
);
e
data
_esn_set
(
e
data
,
esn
);
return
e
xtent
;
return
e
data
;
}
}
void
void
...
@@ -509,6 +523,7 @@ base_postfork_child(tsdn_t *tsdn, base_t *base) {
...
@@ -509,6 +523,7 @@ base_postfork_child(tsdn_t *tsdn, base_t *base) {
bool
bool
base_boot
(
tsdn_t
*
tsdn
)
{
base_boot
(
tsdn_t
*
tsdn
)
{
b0
=
base_new
(
tsdn
,
0
,
(
extent_hooks_t
*
)
&
extent_hooks_default
);
b0
=
base_new
(
tsdn
,
0
,
(
extent_hooks_t
*
)
&
ehooks_default_extent_hooks
,
/* metadata_use_hooks */
true
);
return
(
b0
==
NULL
);
return
(
b0
==
NULL
);
}
}
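The base allocator above hands out metadata by bump-allocating out of an edata_t: it rounds the block's current address up to the requested alignment, returns that pointer, and re-inits the edata to the remainder. The following standalone sketch reproduces only that arithmetic with plain integers; the ALIGNMENT_CEILING macro, bump_alloc and the sample addresses are illustrative stand-ins, not jemalloc's API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for jemalloc's ALIGNMENT_CEILING macro. */
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + ((alignment) - 1)) & ~((uintptr_t)(alignment) - 1))

/*
 * Bump-allocate `size` bytes out of a block starting at `*addr` with
 * `*bsize` usable bytes: compute the alignment gap, hand out the aligned
 * pointer, and shrink the block to the remainder (the same arithmetic
 * base_extent_bump_alloc_helper performs on an edata_t).
 */
static void *
bump_alloc(uintptr_t *addr, size_t *bsize, size_t size, size_t alignment) {
	uintptr_t aligned = ALIGNMENT_CEILING(*addr, alignment);
	size_t gap = (size_t)(aligned - *addr);
	assert(*bsize >= gap + size);
	void *ret = (void *)aligned;
	*addr = aligned + size;   /* Remainder starts right after the cut. */
	*bsize -= gap + size;     /* Gap bytes are simply skipped. */
	return ret;
}

int main(void) {
	uintptr_t addr = 0x1003;  /* Deliberately misaligned start. */
	size_t bsize = 4096;
	void *p = bump_alloc(&addr, &bsize, 64, 16);
	printf("allocated %p, remainder %zu bytes at 0x%lx\n",
	    p, bsize, (unsigned long)addr);
	return 0;
}

The only state the sketch carries between calls is the current address and remaining size, which is why the real code can keep the leftover space in a size-class-indexed heap for reuse.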
deps/jemalloc/src/bin.c View file @ b8beda3c
...
@@ -6,26 +6,6 @@
 #include "jemalloc/internal/sc.h"
 #include "jemalloc/internal/witness.h"

-bin_info_t bin_infos[SC_NBINS];
-
-static void
-bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
-    bin_info_t bin_infos[SC_NBINS]) {
-	for (unsigned i = 0; i < SC_NBINS; i++) {
-		bin_info_t *bin_info = &bin_infos[i];
-		sc_t *sc = &sc_data->sc[i];
-		bin_info->reg_size = ((size_t)1U << sc->lg_base)
-		    + ((size_t)sc->ndelta << sc->lg_delta);
-		bin_info->slab_size = (sc->pgs << LG_PAGE);
-		bin_info->nregs =
-		    (uint32_t)(bin_info->slab_size / bin_info->reg_size);
-		bin_info->n_shards = bin_shard_sizes[i];
-		bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
-		    bin_info->nregs);
-		bin_info->bitmap_info = bitmap_info;
-	}
-}
-
 bool
 bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size,
     size_t end_size, size_t nshards) {
...
@@ -58,12 +38,6 @@ bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) {
 	}
 }

-void
-bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
-	assert(sc_data->initialized);
-	bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
-}
-
 bool
 bin_init(bin_t *bin) {
 	if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
...
@@ -71,8 +45,8 @@ bin_init(bin_t *bin) {
 		return true;
 	}
 	bin->slabcur = NULL;
-	extent_heap_new(&bin->slabs_nonfull);
-	extent_list_init(&bin->slabs_full);
+	edata_heap_new(&bin->slabs_nonfull);
+	edata_list_active_init(&bin->slabs_full);
 	if (config_stats) {
 		memset(&bin->stats, 0, sizeof(bin_stats_t));
 	}
...
deps/jemalloc/src/bin_info.c 0 → 100644 View file @ b8beda3c
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/bin_info.h"

bin_info_t bin_infos[SC_NBINS];

static void
bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
    bin_info_t infos[SC_NBINS]) {
	for (unsigned i = 0; i < SC_NBINS; i++) {
		bin_info_t *bin_info = &infos[i];
		sc_t *sc = &sc_data->sc[i];
		bin_info->reg_size = ((size_t)1U << sc->lg_base)
		    + ((size_t)sc->ndelta << sc->lg_delta);
		bin_info->slab_size = (sc->pgs << LG_PAGE);
		bin_info->nregs =
		    (uint32_t)(bin_info->slab_size / bin_info->reg_size);
		bin_info->n_shards = bin_shard_sizes[i];
		bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
		    bin_info->nregs);
		bin_info->bitmap_info = bitmap_info;
	}
}

void
bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
	assert(sc_data->initialized);
	bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
}
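bin_infos_init above derives each bin's geometry purely from the size-class parameters: reg_size is 2^lg_base plus ndelta * 2^lg_delta, slab_size is pgs pages, and nregs is slab_size divided by reg_size. Below is a small self-contained sketch of that arithmetic, assuming 4 KiB pages and a made-up size class; the sc_params struct and its values are hypothetical stand-ins for jemalloc's sc_t, not its real definition.

#include <stdint.h>
#include <stdio.h>

#define LG_PAGE 12   /* Assumed 4 KiB pages, as on most Linux builds. */

/* Hypothetical size-class parameters in the shape of jemalloc's sc_t. */
struct sc_params {
	unsigned lg_base;
	unsigned ndelta;
	unsigned lg_delta;
	unsigned pgs;
};

int main(void) {
	/* Base 2^7 = 128 plus 1 * 2^5 = 32 -> 160-byte regions, 1-page slabs. */
	struct sc_params sc = {7, 1, 5, 1};

	size_t reg_size = ((size_t)1U << sc.lg_base)
	    + ((size_t)sc.ndelta << sc.lg_delta);
	size_t slab_size = (size_t)sc.pgs << LG_PAGE;
	uint32_t nregs = (uint32_t)(slab_size / reg_size);

	/* For this example: reg_size=160, slab_size=4096, nregs=25. */
	printf("reg_size=%zu slab_size=%zu nregs=%u\n",
	    reg_size, slab_size, nregs);
	return 0;
}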
deps/jemalloc/src/bitmap.c View file @ b8beda3c
-#define JEMALLOC_BITMAP_C_
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/jemalloc_internal_includes.h"
...
deps/jemalloc/src/buf_writer.c 0 → 100644 View file @ b8beda3c
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/buf_writer.h"
#include "jemalloc/internal/malloc_io.h"

static void *
buf_writer_allocate_internal_buf(tsdn_t *tsdn, size_t buf_len) {
#ifdef JEMALLOC_JET
	if (buf_len > SC_LARGE_MAXCLASS) {
		return NULL;
	}
#else
	assert(buf_len <= SC_LARGE_MAXCLASS);
#endif
	return iallocztm(tsdn, buf_len, sz_size2index(buf_len), false, NULL,
	    true, arena_get(tsdn, 0, false), true);
}

static void
buf_writer_free_internal_buf(tsdn_t *tsdn, void *buf) {
	if (buf != NULL) {
		idalloctm(tsdn, buf, NULL, NULL, true, true);
	}
}

static void
buf_writer_assert(buf_writer_t *buf_writer) {
	assert(buf_writer != NULL);
	assert(buf_writer->write_cb != NULL);
	if (buf_writer->buf != NULL) {
		assert(buf_writer->buf_size > 0);
	} else {
		assert(buf_writer->buf_size == 0);
		assert(buf_writer->internal_buf);
	}
	assert(buf_writer->buf_end <= buf_writer->buf_size);
}

bool
buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, write_cb_t *write_cb,
    void *cbopaque, char *buf, size_t buf_len) {
	if (write_cb != NULL) {
		buf_writer->write_cb = write_cb;
	} else {
		buf_writer->write_cb = je_malloc_message != NULL ?
		    je_malloc_message : wrtmessage;
	}
	buf_writer->cbopaque = cbopaque;
	assert(buf_len >= 2);
	if (buf != NULL) {
		buf_writer->buf = buf;
		buf_writer->internal_buf = false;
	} else {
		buf_writer->buf = buf_writer_allocate_internal_buf(tsdn,
		    buf_len);
		buf_writer->internal_buf = true;
	}
	if (buf_writer->buf != NULL) {
		buf_writer->buf_size = buf_len - 1; /* Allowing for '\0'. */
	} else {
		buf_writer->buf_size = 0;
	}
	buf_writer->buf_end = 0;
	buf_writer_assert(buf_writer);
	return buf_writer->buf == NULL;
}

void
buf_writer_flush(buf_writer_t *buf_writer) {
	buf_writer_assert(buf_writer);
	if (buf_writer->buf == NULL) {
		return;
	}
	buf_writer->buf[buf_writer->buf_end] = '\0';
	buf_writer->write_cb(buf_writer->cbopaque, buf_writer->buf);
	buf_writer->buf_end = 0;
	buf_writer_assert(buf_writer);
}

void
buf_writer_cb(void *buf_writer_arg, const char *s) {
	buf_writer_t *buf_writer = (buf_writer_t *)buf_writer_arg;
	buf_writer_assert(buf_writer);
	if (buf_writer->buf == NULL) {
		buf_writer->write_cb(buf_writer->cbopaque, s);
		return;
	}
	size_t i, slen, n;
	for (i = 0, slen = strlen(s); i < slen; i += n) {
		if (buf_writer->buf_end == buf_writer->buf_size) {
			buf_writer_flush(buf_writer);
		}
		size_t s_remain = slen - i;
		size_t buf_remain = buf_writer->buf_size - buf_writer->buf_end;
		n = s_remain < buf_remain ? s_remain : buf_remain;
		memcpy(buf_writer->buf + buf_writer->buf_end, s + i, n);
		buf_writer->buf_end += n;
		buf_writer_assert(buf_writer);
	}
	assert(i == slen);
}

void
buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer) {
	buf_writer_assert(buf_writer);
	buf_writer_flush(buf_writer);
	if (buf_writer->internal_buf) {
		buf_writer_free_internal_buf(tsdn, buf_writer->buf);
	}
}

void
buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
    void *read_cbopaque) {
	/*
	 * A tiny local buffer in case the buffered writer failed to allocate
	 * at init.
	 */
	static char backup_buf[16];
	static buf_writer_t backup_buf_writer;

	buf_writer_assert(buf_writer);
	assert(read_cb != NULL);
	if (buf_writer->buf == NULL) {
		buf_writer_init(TSDN_NULL, &backup_buf_writer,
		    buf_writer->write_cb, buf_writer->cbopaque, backup_buf,
		    sizeof(backup_buf));
		buf_writer = &backup_buf_writer;
	}
	assert(buf_writer->buf != NULL);
	ssize_t nread = 0;
	do {
		buf_writer->buf_end += nread;
		buf_writer_assert(buf_writer);
		if (buf_writer->buf_end == buf_writer->buf_size) {
			buf_writer_flush(buf_writer);
		}
		nread = read_cb(read_cbopaque,
		    buf_writer->buf + buf_writer->buf_end,
		    buf_writer->buf_size - buf_writer->buf_end);
	} while (nread > 0);
	buf_writer_flush(buf_writer);
}
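The core idea of buf_writer_cb is a fixed buffer that is filled in chunks and flushed through the write callback whenever it runs out of room, with one byte always reserved for the terminating '\0'. Here is a minimal standalone sketch of that pattern that assumes nothing from jemalloc; toy_buf_writer_t, toy_write, toy_flush and print_cb are illustrative names invented for the example.

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for buf_writer_t and its write callback type. */
typedef void (write_cb_t)(void *cbopaque, const char *s);

typedef struct {
	write_cb_t *write_cb;
	void *cbopaque;
	char *buf;
	size_t buf_size;  /* Capacity minus one, reserving room for '\0'. */
	size_t buf_end;
} toy_buf_writer_t;

static void
toy_flush(toy_buf_writer_t *w) {
	w->buf[w->buf_end] = '\0';
	w->write_cb(w->cbopaque, w->buf);
	w->buf_end = 0;
}

/* Same chunk-copy loop as buf_writer_cb: fill, flush when full, repeat. */
static void
toy_write(toy_buf_writer_t *w, const char *s) {
	size_t slen = strlen(s);
	for (size_t i = 0, n = 0; i < slen; i += n) {
		if (w->buf_end == w->buf_size) {
			toy_flush(w);
		}
		size_t s_remain = slen - i;
		size_t buf_remain = w->buf_size - w->buf_end;
		n = s_remain < buf_remain ? s_remain : buf_remain;
		memcpy(w->buf + w->buf_end, s + i, n);
		w->buf_end += n;
	}
}

static void
print_cb(void *cbopaque, const char *s) {
	(void)cbopaque;
	fputs(s, stdout);
}

int main(void) {
	char storage[8];
	toy_buf_writer_t w = {print_cb, NULL, storage, sizeof(storage) - 1, 0};
	toy_write(&w, "hello, buffered world\n");
	toy_flush(&w);   /* Mirrors buf_writer_terminate's final flush. */
	return 0;
}

The design point this illustrates: callers can emit arbitrarily long strings while the callback is only ever invoked with short, NUL-terminated chunks sized to the caller-provided buffer.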
deps/jemalloc/src/cache_bin.c 0 → 100644 View file @ b8beda3c
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/cache_bin.h"
#include "jemalloc/internal/safety_check.h"

void
cache_bin_info_init(cache_bin_info_t *info, cache_bin_sz_t ncached_max) {
	assert(ncached_max <= CACHE_BIN_NCACHED_MAX);
	size_t stack_size = (size_t)ncached_max * sizeof(void *);
	assert(stack_size < ((size_t)1 << (sizeof(cache_bin_sz_t) * 8)));
	info->ncached_max = (cache_bin_sz_t)ncached_max;
}

void
cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
    size_t *size, size_t *alignment) {
	/* For the total bin stack region (per tcache), reserve 2 more slots so
	 * that
	 * 1) the empty position can be safely read on the fast path before
	 *    checking "is_empty"; and
	 * 2) the cur_ptr can go beyond the empty position by 1 step safely on
	 *    the fast path (i.e. no overflow).
	 */
	*size = sizeof(void *) * 2;
	for (szind_t i = 0; i < ninfos; i++) {
		assert(infos[i].ncached_max > 0);
		*size += infos[i].ncached_max * sizeof(void *);
	}

	/*
	 * Align to at least PAGE, to minimize the # of TLBs needed by the
	 * smaller sizes; also helps if the larger sizes don't get used at all.
	 */
	*alignment = PAGE;
}

void
cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
    size_t *cur_offset) {
	if (config_debug) {
		size_t computed_size;
		size_t computed_alignment;

		/* Pointer should be as aligned as we asked for. */
		cache_bin_info_compute_alloc(infos, ninfos, &computed_size,
		    &computed_alignment);
		assert(((uintptr_t)alloc & (computed_alignment - 1)) == 0);
	}

	*(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
	    cache_bin_preceding_junk;
	*cur_offset += sizeof(void *);
}

void
cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
    size_t *cur_offset) {
	*(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
	    cache_bin_trailing_junk;
	*cur_offset += sizeof(void *);
}

void
cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
    size_t *cur_offset) {
	/*
	 * The full_position points to the lowest available space.  Allocations
	 * will access the slots toward higher addresses (for the benefit of
	 * adjacent prefetch).
	 */
	void *stack_cur = (void *)((uintptr_t)alloc + *cur_offset);
	void *full_position = stack_cur;
	uint16_t bin_stack_size = info->ncached_max * sizeof(void *);

	*cur_offset += bin_stack_size;
	void *empty_position = (void *)((uintptr_t)alloc + *cur_offset);

	/* Init to the empty position. */
	bin->stack_head = (void **)empty_position;
	bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
	bin->low_bits_full = (uint16_t)(uintptr_t)full_position;
	bin->low_bits_empty = (uint16_t)(uintptr_t)empty_position;
	cache_bin_sz_t free_spots = cache_bin_diff(bin,
	    bin->low_bits_full, (uint16_t)(uintptr_t)bin->stack_head,
	    /* racy */ false);
	assert(free_spots == bin_stack_size);
	assert(cache_bin_ncached_get_local(bin, info) == 0);
	assert(cache_bin_empty_position_get(bin) == empty_position);
	assert(bin_stack_size > 0 || empty_position == full_position);
}

bool
cache_bin_still_zero_initialized(cache_bin_t *bin) {
	return bin->stack_head == NULL;
}
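cache_bin_info_compute_alloc sizes one contiguous region per tcache: two extra pointer slots for the preceding and trailing junk sentinels, plus ncached_max pointers for each bin, with the whole region later placed on a PAGE-aligned boundary. A self-contained sketch of just that size computation follows; the per-bin capacities are made up for illustration and are not jemalloc defaults.

#include <stdio.h>

/* Hypothetical per-bin capacities, standing in for cache_bin_info_t. */
static const size_t ncached_max[] = {8, 16, 32};

int main(void) {
	/*
	 * Mirror cache_bin_info_compute_alloc: two extra pointer slots for the
	 * preceding/trailing junk sentinels, then each bin's stack back to
	 * back.
	 */
	size_t size = sizeof(void *) * 2;
	for (size_t i = 0; i < sizeof(ncached_max) / sizeof(ncached_max[0]);
	    i++) {
		size += ncached_max[i] * sizeof(void *);
	}
	printf("total tcache stack region: %zu bytes (PAGE-aligned when placed)\n",
	    size);
	return 0;
}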