ruanhaishen / redis · Commits

Commit d4439bd4, authored May 15, 2023 by Oran Agra

Merge remote-tracking branch 'origin/unstable' into 7.2

Parents: e26a769d, 2ffde15a
Changes: 200 (too many changes to show; to preserve performance only 200 of 200+ files are displayed)
deps/jemalloc/include/jemalloc/jemalloc_protos.h.in
View file @
d4439bd4
...
...
@@ -8,21 +8,22 @@ extern JEMALLOC_EXPORT void (*@je_@malloc_message)(void *cbopaque,
     const char *s);
 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW *@je_@malloc(size_t size)
+    void JEMALLOC_SYS_NOTHROW *@je_@malloc(size_t size)
     JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW *@je_@calloc(size_t num, size_t size)
+    void JEMALLOC_SYS_NOTHROW *@je_@calloc(size_t num, size_t size)
     JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@posix_memalign(void **memptr,
-    size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT int JEMALLOC_SYS_NOTHROW @je_@posix_memalign(
+    void **memptr, size_t alignment, size_t size) JEMALLOC_CXX_THROW
+    JEMALLOC_ATTR(nonnull(1));
 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW *@je_@aligned_alloc(size_t alignment,
+    void JEMALLOC_SYS_NOTHROW *@je_@aligned_alloc(size_t alignment,
     size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
     JEMALLOC_ALLOC_SIZE(2);
 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW *@je_@realloc(void *ptr, size_t size)
+    void JEMALLOC_SYS_NOTHROW *@je_@realloc(void *ptr, size_t size)
     JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@free(void *ptr)
+JEMALLOC_EXPORT void JEMALLOC_SYS_NOTHROW @je_@free(void *ptr)
     JEMALLOC_CXX_THROW;
 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
...
...
@@ -52,15 +53,19 @@ JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@malloc_stats_print(
     const char *opts);
 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_usable_size(
     JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
+#ifdef JEMALLOC_HAVE_MALLOC_SIZE
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_size(
+    const void *ptr);
+#endif

 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW *@je_@memalign(size_t alignment, size_t size)
+    void JEMALLOC_SYS_NOTHROW *@je_@memalign(size_t alignment, size_t size)
     JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
 #endif

 #ifdef JEMALLOC_OVERRIDE_VALLOC
 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-    void JEMALLOC_NOTHROW *@je_@valloc(size_t size) JEMALLOC_CXX_THROW
+    void JEMALLOC_SYS_NOTHROW *@je_@valloc(size_t size) JEMALLOC_CXX_THROW
     JEMALLOC_ATTR(malloc);
 #endif
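For context only (not part of this commit): the prototypes above declare the public allocation entry points with a configurable name prefix, so a caller built against a prefixed jemalloc would use them roughly as in the following minimal C sketch. The je_ prefix is an assumption that the library was configured with --with-jemalloc-prefix=je_.

/* Minimal sketch, not from this commit: exercising the prefixed jemalloc API
 * declared by jemalloc_protos.h.in, assuming a build configured with
 * --with-jemalloc-prefix=je_. */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	void *p = je_malloc(128);  /* declared with JEMALLOC_SYS_NOTHROW after this change */
	if (p == NULL) {
		return 1;
	}
	printf("usable size: %zu\n", je_malloc_usable_size(p));
	je_free(p);
	return 0;
}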
deps/jemalloc/m4/ax_cxx_compile_stdcxx.m4
View file @
d4439bd4
# ===========================================================================
-#   http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
+#   https://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
# ===========================================================================
#
# SYNOPSIS
...
...
@@ -33,21 +33,23 @@
 # Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <sokolov@google.com>
 # Copyright (c) 2015 Paul Norman <penorman@mac.com>
 # Copyright (c) 2015 Moritz Klammler <moritz@klammler.eu>
+# Copyright (c) 2016, 2018 Krzesimir Nowak <qdlacz@gmail.com>
+# Copyright (c) 2019 Enji Cooper <yaneurabeya@gmail.com>
 #
 # Copying and distribution of this file, with or without modification, are
 # permitted in any medium without royalty provided the copyright notice
 # and this notice are preserved.  This file is offered as-is, without any
 # warranty.

-#serial 4
+#serial 11

 dnl  This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro
 dnl  (serial version number 13).

 AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
-  m4_if([$1], [11], [],
-        [$1], [14], [],
-        [$1], [17], [m4_fatal([support for C++17 not yet implemented in AX_CXX_COMPILE_STDCXX])],
+  m4_if([$1], [11], [ax_cxx_compile_alternatives="11 0x"],
+        [$1], [14], [ax_cxx_compile_alternatives="14 1y"],
+        [$1], [17], [ax_cxx_compile_alternatives="17 1z"],
        [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl
  m4_if([$2], [], [],
        [$2], [ext], [],
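For reference (not part of the diff): AX_CXX_COMPILE_STDCXX is called from a configure.ac with a standard version, an ext/noext choice, and a mandatory/optional flag; the ax_cxx_compile_alternatives list introduced above is what lets one invocation fall back from, say, -std=c++17 to -std=c++1z. A hypothetical, minimal configure.ac usage:

dnl Illustrative only; this is not taken from this repository's configure.ac.
AC_INIT([demo], [0.1])
AC_PROG_CXX
dnl Try C++17: "noext" asks for -std=c++17 rather than -std=gnu++17, and
dnl "optional" lets configure continue even if no suitable switch is found.
AX_CXX_COMPILE_STDCXX([17], [noext], [optional])
AC_OUTPUT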
...
...
@@ -59,18 +61,11 @@ AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
        [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])])
  AC_LANG_PUSH([C++])dnl
  ac_success=no
-  AC_CACHE_CHECK(whether $CXX supports C++$1 features by default,
-  ax_cv_cxx_compile_cxx$1,
-  [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
-    [ax_cv_cxx_compile_cxx$1=yes],
-    [ax_cv_cxx_compile_cxx$1=no])])
-  if test x$ax_cv_cxx_compile_cxx$1 = xyes; then
-    ac_success=yes
-  fi
  m4_if([$2], [noext], [], [dnl
  if test x$ac_success = xno; then
-    for switch in -std=gnu++$1 -std=gnu++0x; do
+    for alternative in ${ax_cxx_compile_alternatives}; do
+      switch="-std=gnu++${alternative}"
      cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
      AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
                     $cachevar,
...
...
@@ -96,7 +91,8 @@ AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
    dnl HP's aCC needs +std=c++11 according to:
    dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf
    dnl Cray's crayCC needs "-h std=c++11"
-    for switch in -std=c++$1 -std=c++0x +std=c++$1 "-h std=c++$1"; do
+    for alternative in ${ax_cxx_compile_alternatives}; do
+    for switch in -std=c++${alternative} +std=c++${alternative} "-h std=c++${alternative}"; do
      cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
      AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
                     $cachevar,
...
...
@@ -115,6 +111,10 @@ AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
        break
      fi
    done
+      if test x$ac_success = xyes; then
+        break
+      fi
+    done
  fi])
  AC_LANG_POP([C++])
  if test x$ax_cxx_compile_cxx$1_required = xtrue; then
...
...
@@ -148,6 +148,11 @@ m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14],
  _AX_CXX_COMPILE_STDCXX_testbody_new_in_14
 )

+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_17],
+  _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
+  _AX_CXX_COMPILE_STDCXX_testbody_new_in_14
+  _AX_CXX_COMPILE_STDCXX_testbody_new_in_17
+)
dnl Tests for new features in C++11
...
...
@@ -185,11 +190,13 @@ namespace cxx11
  struct Base
  {
    virtual ~Base() {}
+    virtual void f() {}
  };

  struct Derived : public Base
  {
    virtual ~Derived() override {}
+    virtual void f() override {}
  };
...
...
@@ -518,7 +525,7 @@ namespace cxx14
  }

-  namespace test_digit_seperators
+  namespace test_digit_separators
  {
    constexpr auto ten_million = 100'000'000;
...
...
@@ -560,3 +567,385 @@ namespace cxx14
#endif // __cplusplus >= 201402L
]])
dnl Tests for new features in C++17
m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_17], [[
// If the compiler admits that it is not ready for C++17, why torture it?
// Hopefully, this will speed up the test.
#ifndef __cplusplus
#error "This is not a C++ compiler"
#elif __cplusplus < 201703L
#error "This is not a C++17 compiler"
#else
#include <initializer_list>
#include <utility>
#include <type_traits>
namespace cxx17
{
namespace test_constexpr_lambdas
{
constexpr int foo = [](){return 42;}();
}
namespace test::nested_namespace::definitions
{
}
namespace test_fold_expression
{
template<typename... Args>
int multiply(Args... args)
{
return (args * ... * 1);
}
template<typename... Args>
bool all(Args... args)
{
return (args && ...);
}
}
namespace test_extended_static_assert
{
static_assert (true);
}
namespace test_auto_brace_init_list
{
auto foo = {5};
auto bar {5};
static_assert(std::is_same<std::initializer_list<int>, decltype(foo)>::value);
static_assert(std::is_same<int, decltype(bar)>::value);
}
namespace test_typename_in_template_template_parameter
{
template<template<typename> typename X> struct D;
}
namespace test_fallthrough_nodiscard_maybe_unused_attributes
{
int f1()
{
return 42;
}
[[nodiscard]] int f2()
{
[[maybe_unused]] auto unused = f1();
switch (f1())
{
case 17:
f1();
[[fallthrough]];
case 42:
f1();
}
return f1();
}
}
namespace test_extended_aggregate_initialization
{
struct base1
{
int b1, b2 = 42;
};
struct base2
{
base2() {
b3 = 42;
}
int b3;
};
struct derived : base1, base2
{
int d;
};
derived d1 {{1, 2}, {}, 4}; // full initialization
derived d2 {{}, {}, 4}; // value-initialized bases
}
namespace test_general_range_based_for_loop
{
struct iter
{
int i;
int& operator* ()
{
return i;
}
const int& operator* () const
{
return i;
}
iter& operator++()
{
++i;
return *this;
}
};
struct sentinel
{
int i;
};
bool operator== (const iter& i, const sentinel& s)
{
return i.i == s.i;
}
bool operator!= (const iter& i, const sentinel& s)
{
return !(i == s);
}
struct range
{
iter begin() const
{
return {0};
}
sentinel end() const
{
return {5};
}
};
void f()
{
range r {};
for (auto i : r)
{
[[maybe_unused]] auto v = i;
}
}
}
namespace test_lambda_capture_asterisk_this_by_value
{
struct t
{
int i;
int foo()
{
return [*this]()
{
return i;
}();
}
};
}
namespace test_enum_class_construction
{
enum class byte : unsigned char
{};
byte foo {42};
}
namespace test_constexpr_if
{
template <bool cond>
int f ()
{
if constexpr(cond)
{
return 13;
}
else
{
return 42;
}
}
}
namespace test_selection_statement_with_initializer
{
int f()
{
return 13;
}
int f2()
{
if (auto i = f(); i > 0)
{
return 3;
}
switch (auto i = f(); i + 4)
{
case 17:
return 2;
default:
return 1;
}
}
}
namespace test_template_argument_deduction_for_class_templates
{
template <typename T1, typename T2>
struct pair
{
pair (T1 p1, T2 p2)
: m1 {p1},
m2 {p2}
{}
T1 m1;
T2 m2;
};
void f()
{
[[maybe_unused]] auto p = pair{13, 42u};
}
}
namespace test_non_type_auto_template_parameters
{
template <auto n>
struct B
{};
B<5> b1;
B<'a'> b2;
}
namespace test_structured_bindings
{
int arr[2] = { 1, 2 };
std::pair<int, int> pr = { 1, 2 };
auto f1() -> int(&)[2]
{
return arr;
}
auto f2() -> std::pair<int, int>&
{
return pr;
}
struct S
{
int x1 : 2;
volatile double y1;
};
S f3()
{
return {};
}
auto [ x1, y1 ] = f1();
auto& [ xr1, yr1 ] = f1();
auto [ x2, y2 ] = f2();
auto& [ xr2, yr2 ] = f2();
const auto [ x3, y3 ] = f3();
}
namespace test_exception_spec_type_system
{
struct Good {};
struct Bad {};
void g1() noexcept;
void g2();
template<typename T>
Bad
f(T*, T*);
template<typename T1, typename T2>
Good
f(T1*, T2*);
static_assert (std::is_same_v<Good, decltype(f(g1, g2))>);
}
namespace test_inline_variables
{
template<class T> void f(T)
{}
template<class T> inline T g(T)
{
return T{};
}
template<> inline void f<>(int)
{}
template<> int g<>(int)
{
return 5;
}
}
} // namespace cxx17
#endif // __cplusplus < 201703L
]])
deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
View file @
d4439bd4
...
...
@@ -39,34 +39,64 @@
<ClCompile Include="..\..\..\..\src\background_thread.c" />
<ClCompile Include="..\..\..\..\src\base.c" />
<ClCompile Include="..\..\..\..\src\bin.c" />
<ClCompile Include="..\..\..\..\src\bin_info.c" />
<ClCompile Include="..\..\..\..\src\bitmap.c" />
<ClCompile Include="..\..\..\..\src\buf_writer.c" />
<ClCompile Include="..\..\..\..\src\cache_bin.c" />
<ClCompile Include="..\..\..\..\src\ckh.c" />
<ClCompile Include="..\..\..\..\src\counter.c" />
<ClCompile Include="..\..\..\..\src\ctl.c" />
<ClCompile Include="..\..\..\..\src\decay.c" />
<ClCompile Include="..\..\..\..\src\div.c" />
<ClCompile Include="..\..\..\..\src\ecache.c" />
<ClCompile Include="..\..\..\..\src\edata.c" />
<ClCompile Include="..\..\..\..\src\edata_cache.c" />
<ClCompile Include="..\..\..\..\src\ehooks.c" />
<ClCompile Include="..\..\..\..\src\emap.c" />
<ClCompile Include="..\..\..\..\src\eset.c" />
<ClCompile Include="..\..\..\..\src\exp_grow.c" />
<ClCompile Include="..\..\..\..\src\extent.c" />
<ClCompile Include="..\..\..\..\src\extent_dss.c" />
<ClCompile Include="..\..\..\..\src\extent_mmap.c" />
-<ClCompile Include="..\..\..\..\src\hash.c" />
+<ClCompile Include="..\..\..\..\src\fxp.c" />
<ClCompile Include="..\..\..\..\src\hook.c" />
<ClCompile Include="..\..\..\..\src\hpa.c" />
<ClCompile Include="..\..\..\..\src\hpa_hooks.c" />
<ClCompile Include="..\..\..\..\src\hpdata.c" />
<ClCompile Include="..\..\..\..\src\inspect.c" />
<ClCompile Include="..\..\..\..\src\jemalloc.c" />
<ClCompile Include="..\..\..\..\src\large.c" />
<ClCompile Include="..\..\..\..\src\log.c" />
<ClCompile Include="..\..\..\..\src\malloc_io.c" />
<ClCompile Include="..\..\..\..\src\mutex.c" />
<ClCompile Include="..\..\..\..\src\mutex_pool.c" />
<ClCompile Include="..\..\..\..\src\nstime.c" />
<ClCompile Include="..\..\..\..\src\pa.c" />
<ClCompile Include="..\..\..\..\src\pa_extra.c" />
<ClCompile Include="..\..\..\..\src\pai.c" />
<ClCompile Include="..\..\..\..\src\pac.c" />
<ClCompile Include="..\..\..\..\src\pages.c" />
-<ClCompile Include="..\..\..\..\src\prng.c" />
+<ClCompile Include="..\..\..\..\src\peak_event.c" />
<ClCompile Include="..\..\..\..\src\prof.c" />
<ClCompile Include="..\..\..\..\src\prof_data.c" />
<ClCompile Include="..\..\..\..\src\prof_log.c" />
<ClCompile Include="..\..\..\..\src\prof_recent.c" />
<ClCompile Include="..\..\..\..\src\prof_stats.c" />
<ClCompile Include="..\..\..\..\src\prof_sys.c" />
<ClCompile Include="..\..\..\..\src\psset.c" />
<ClCompile Include="..\..\..\..\src\rtree.c" />
<ClCompile Include="..\..\..\..\src\safety_check.c" />
<ClCompile Include="..\..\..\..\src\san.c" />
<ClCompile Include="..\..\..\..\src\san_bump.c" />
<ClCompile Include="..\..\..\..\src\sc.c" />
<ClCompile Include="..\..\..\..\src\sec.c" />
<ClCompile Include="..\..\..\..\src\stats.c" />
<ClCompile Include="..\..\..\..\src\sz.c" />
<ClCompile Include="..\..\..\..\src\tcache.c" />
<ClCompile Include="..\..\..\..\src\test_hooks.c" />
<ClCompile Include="..\..\..\..\src\thread_event.c" />
<ClCompile Include="..\..\..\..\src\ticker.c" />
<ClCompile Include="..\..\..\..\src\tsd.c" />
<ClCompile Include="..\..\..\..\src\witness.c" />
<ClCompile Include="..\..\..\..\src\safety_check.c" />
</ItemGroup>
<PropertyGroup Label="Globals">
  <ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
...
...
deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
View file @
d4439bd4
...
...
@@ -16,15 +16,39 @@
<ClCompile Include="..\..\..\..\src\base.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\bin.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\bitmap.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\buf_writer.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\cache_bin.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\ckh.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\counter.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\ctl.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\decay.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\div.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\emap.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\exp_grow.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\extent.c"><Filter>Source Files</Filter></ClCompile>
...
...
@@ -34,45 +58,93 @@
<ClCompile Include="..\..\..\..\src\extent_mmap.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\hash.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\fxp.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\hook.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\hpa.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\hpa_hooks.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\hpdata.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\inspect.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\jemalloc.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\large.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\log.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\malloc_io.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\mutex.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\mutex_pool.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\nstime.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\nstime.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\pa.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\pa_extra.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\pai.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\pac.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\pages.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\prng.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\peak_event.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\prof.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\prof_data.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\prof_log.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\prof_recent.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\prof_stats.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\prof_sys.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\psset.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\rtree.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\safety_check.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\sc.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\sec.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\stats.c"><Filter>Source Files</Filter></ClCompile>
...
...
@@ -82,6 +154,12 @@
<ClCompile Include="..\..\..\..\src\tcache.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\test_hooks.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\thread_event.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\ticker.c"><Filter>Source Files</Filter></ClCompile>
...
...
@@ -91,16 +169,28 @@
<ClCompile Include="..\..\..\..\src\witness.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\log.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\bin_info.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\bin.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\ecache.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\div.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\edata.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\safety_check.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\edata_cache.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\ehooks.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\eset.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\san.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\san_bump.c"><Filter>Source Files</Filter></ClCompile>
</ItemGroup>
...
...
deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj
View file @
d4439bd4
...
...
@@ -39,35 +39,64 @@
<ClCompile Include="..\..\..\..\src\background_thread.c" />
<ClCompile Include="..\..\..\..\src\base.c" />
<ClCompile Include="..\..\..\..\src\bin.c" />
<ClCompile Include="..\..\..\..\src\bin_info.c" />
<ClCompile Include="..\..\..\..\src\bitmap.c" />
<ClCompile Include="..\..\..\..\src\buf_writer.c" />
<ClCompile Include="..\..\..\..\src\cache_bin.c" />
<ClCompile Include="..\..\..\..\src\ckh.c" />
<ClCompile Include="..\..\..\..\src\counter.c" />
<ClCompile Include="..\..\..\..\src\ctl.c" />
<ClCompile Include="..\..\..\..\src\decay.c" />
<ClCompile Include="..\..\..\..\src\div.c" />
<ClCompile Include="..\..\..\..\src\ecache.c" />
<ClCompile Include="..\..\..\..\src\edata.c" />
<ClCompile Include="..\..\..\..\src\edata_cache.c" />
<ClCompile Include="..\..\..\..\src\ehooks.c" />
<ClCompile Include="..\..\..\..\src\emap.c" />
<ClCompile Include="..\..\..\..\src\eset.c" />
<ClCompile Include="..\..\..\..\src\exp_grow.c" />
<ClCompile Include="..\..\..\..\src\extent.c" />
<ClCompile Include="..\..\..\..\src\extent_dss.c" />
<ClCompile Include="..\..\..\..\src\extent_mmap.c" />
-<ClCompile Include="..\..\..\..\src\hash.c" />
+<ClCompile Include="..\..\..\..\src\fxp.c" />
<ClCompile Include="..\..\..\..\src\hook.c" />
<ClCompile Include="..\..\..\..\src\hpa.c" />
<ClCompile Include="..\..\..\..\src\hpa_hooks.c" />
<ClCompile Include="..\..\..\..\src\hpdata.c" />
<ClCompile Include="..\..\..\..\src\inspect.c" />
<ClCompile Include="..\..\..\..\src\jemalloc.c" />
<ClCompile Include="..\..\..\..\src\large.c" />
<ClCompile Include="..\..\..\..\src\log.c" />
<ClCompile Include="..\..\..\..\src\malloc_io.c" />
<ClCompile Include="..\..\..\..\src\mutex.c" />
<ClCompile Include="..\..\..\..\src\mutex_pool.c" />
<ClCompile Include="..\..\..\..\src\nstime.c" />
<ClCompile Include="..\..\..\..\src\pa.c" />
<ClCompile Include="..\..\..\..\src\pa_extra.c" />
<ClCompile Include="..\..\..\..\src\pai.c" />
<ClCompile Include="..\..\..\..\src\pac.c" />
<ClCompile Include="..\..\..\..\src\pages.c" />
-<ClCompile Include="..\..\..\..\src\prng.c" />
+<ClCompile Include="..\..\..\..\src\peak_event.c" />
<ClCompile Include="..\..\..\..\src\prof.c" />
<ClCompile Include="..\..\..\..\src\prof_data.c" />
<ClCompile Include="..\..\..\..\src\prof_log.c" />
<ClCompile Include="..\..\..\..\src\prof_recent.c" />
<ClCompile Include="..\..\..\..\src\prof_stats.c" />
<ClCompile Include="..\..\..\..\src\prof_sys.c" />
<ClCompile Include="..\..\..\..\src\psset.c" />
<ClCompile Include="..\..\..\..\src\rtree.c" />
<ClCompile Include="..\..\..\..\src\safety_check.c" />
<ClCompile Include="..\..\..\..\src\san.c" />
<ClCompile Include="..\..\..\..\src\san_bump.c" />
<ClCompile Include="..\..\..\..\src\sc.c" />
<ClCompile Include="..\..\..\..\src\sec.c" />
<ClCompile Include="..\..\..\..\src\stats.c" />
<ClCompile Include="..\..\..\..\src\sz.c" />
<ClCompile Include="..\..\..\..\src\tcache.c" />
<ClCompile Include="..\..\..\..\src\test_hooks.c" />
<ClCompile Include="..\..\..\..\src\thread_event.c" />
<ClCompile Include="..\..\..\..\src\ticker.c" />
<ClCompile Include="..\..\..\..\src\tsd.c" />
<ClCompile Include="..\..\..\..\src\witness.c" />
<ClCompile Include="..\..\..\..\src\safety_check.c" />
</ItemGroup>
<PropertyGroup Label="Globals">
  <ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
...
...
deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters
View file @
d4439bd4
...
...
@@ -16,15 +16,39 @@
<ClCompile Include="..\..\..\..\src\base.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\bin.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\bitmap.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\buf_writer.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\cache_bin.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\ckh.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\counter.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\ctl.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\decay.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\div.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\emap.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\exp_grow.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\extent.c"><Filter>Source Files</Filter></ClCompile>
...
...
@@ -34,45 +58,93 @@
<ClCompile Include="..\..\..\..\src\extent_mmap.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\hash.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\fxp.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\hook.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\hpa.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\hpa_hooks.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\hpdata.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\inspect.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\jemalloc.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\large.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\log.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\malloc_io.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\mutex.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\mutex_pool.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\nstime.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\nstime.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\pa.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\pa_extra.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\pai.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\pac.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\pages.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\prng.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\peak_event.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\prof.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\prof_data.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\prof_log.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\prof_recent.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\prof_stats.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\prof_sys.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\psset.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\rtree.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\safety_check.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\sc.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\sec.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\stats.c"><Filter>Source Files</Filter></ClCompile>
...
...
@@ -82,6 +154,12 @@
<ClCompile Include="..\..\..\..\src\tcache.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\test_hooks.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\thread_event.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\ticker.c"><Filter>Source Files</Filter></ClCompile>
...
...
@@ -91,19 +169,28 @@
<ClCompile Include="..\..\..\..\src\witness.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\log.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\bin_info.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\bin.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\ecache.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\div.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\edata.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\test_hooks.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\edata_cache.c"><Filter>Source Files</Filter></ClCompile>
-<ClCompile Include="..\..\..\..\src\safety_check.c"><Filter>Source Files</Filter></ClCompile>
+<ClCompile Include="..\..\..\..\src\ehooks.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\eset.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\san.c"><Filter>Source Files</Filter></ClCompile>
<ClCompile Include="..\..\..\..\src\san_bump.c"><Filter>Source Files</Filter></ClCompile>
</ItemGroup>
...
...
deps/jemalloc/msvc/test_threads/test_threads.cpp
View file @
d4439bd4
...
...
@@ -9,6 +9,7 @@
 #include <thread>
 #include <vector>
 #include <stdio.h>
+#define JEMALLOC_NO_DEMANGLE
 #include <jemalloc/jemalloc.h>

 using std::vector;
...
...
deps/jemalloc/scripts/check-formatting.sh
0 → 100755
View file @
d4439bd4
#!/bin/bash

# The files that need to be properly formatted. We'll grow this incrementally
# until it includes all the jemalloc source files (as we convert things over),
# and then just replace it with
# find -name '*.c' -o -name '*.h' -o -name '*.cpp
FILES=(
)

if command -v clang-format &> /dev/null; then
  CLANG_FORMAT="clang-format"
elif command -v clang-format-8 &> /dev/null; then
  CLANG_FORMAT="clang-format-8"
else
  echo "Couldn't find clang-format."
fi

if ! $CLANG_FORMAT -version | grep "version 8\." &> /dev/null; then
  echo "clang-format is the wrong version."
  exit 1
fi

for file in ${FILES[@]}; do
  if ! cmp --silent $file <($CLANG_FORMAT $file) &> /dev/null; then
    echo "Error: $file is not clang-formatted"
    exit 1
  fi
done
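A usage sketch (assumption, not part of the commit): the same comparison the loop above performs can be run by hand for a single file to see whether clang-format would change it.

# Hypothetical one-off check mirroring the script's loop body.
CLANG_FORMAT=clang-format
file=src/jemalloc.c
if ! cmp --silent "$file" <($CLANG_FORMAT "$file"); then
  echo "Error: $file is not clang-formatted"
fi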
deps/jemalloc/scripts/freebsd/before_install.sh
0 → 100644
View file @
d4439bd4
#!/bin/tcsh

su -m root -c 'pkg install -y git'
deps/jemalloc/scripts/freebsd/before_script.sh
0 → 100644
View file @
d4439bd4
#!/bin/tcsh

autoconf
# We don't perfectly track freebsd stdlib.h definitions. This is fine when
# we count as a system header, but breaks otherwise, like during these
# tests.
./configure --with-jemalloc-prefix=ci_ ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS"} $CONFIGURE_FLAGS
JE_NCPUS=`sysctl -n kern.smp.cpus`
gmake -j${JE_NCPUS}
gmake -j${JE_NCPUS} tests
deps/jemalloc/scripts/freebsd/script.sh
0 → 100644
View file @
d4439bd4
#!/bin/tcsh
gmake check
deps/jemalloc/scripts/gen_run_tests.py
View file @
d4439bd4
-#!/usr/bin/env python
+#!/usr/bin/env python3

 import sys
 from itertools import combinations
...
...
@@ -14,14 +14,14 @@ nparallel = cpu_count() * 2
 uname = uname()[0]

-if "BSD" in uname:
+if call("command -v gmake", shell=True) == 0:
     make_cmd = 'gmake'
 else:
     make_cmd = 'make'

 def powerset(items):
     result = []
-    for i in xrange(len(items) + 1):
+    for i in range(len(items) + 1):
         result += combinations(items, i)
     return result
...
...
@@ -41,6 +41,7 @@ possible_config_opts = [
     '--enable-prof',
     '--disable-stats',
     '--enable-opt-safety-checks',
+    '--with-lg-page=16',
 ]
 if bits_64:
     possible_config_opts.append('--with-lg-vaddr=56')
...
...
@@ -52,19 +53,20 @@ possible_malloc_conf_opts = [
     'background_thread:true',
 ]

-print 'set -e'
-print 'if [ -f Makefile ] ; then %(make_cmd)s relclean ; fi' % {'make_cmd': make_cmd}
-print 'autoconf'
-print 'rm -rf run_tests.out'
-print 'mkdir run_tests.out'
-print 'cd run_tests.out'
+print('set -e')
+print('if [ -f Makefile ] ; then %(make_cmd)s relclean ; fi' % {'make_cmd': make_cmd})
+print('autoconf')
+print('rm -rf run_tests.out')
+print('mkdir run_tests.out')
+print('cd run_tests.out')

 ind = 0
 for cc, cxx in possible_compilers:
     for compiler_opts in powerset(possible_compiler_opts):
         for config_opts in powerset(possible_config_opts):
             for malloc_conf_opts in powerset(possible_malloc_conf_opts):
-                if cc is 'clang' \
+                if cc == 'clang' \
                   and '-m32' in possible_compiler_opts \
                   and '--enable-prof' in config_opts:
                     continue
...
@@ -92,7 +94,7 @@ for cc, cxx in possible_compilers:
                 if (uname == 'Linux' and linux_supported) \
                   or (not linux_supported and (uname != 'Darwin' or \
                   not darwin_unsupported)):
-                    print """cat <<EOF > run_test_%(ind)d.sh
+                    print("""cat <<EOF > run_test_%(ind)d.sh
 #!/bin/sh
 set -e
...
...
@@ -120,7 +122,9 @@ run_cmd %(make_cmd)s all tests
 run_cmd %(make_cmd)s check
 run_cmd %(make_cmd)s distclean
 EOF
-chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line, 'make_cmd': make_cmd}
+chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line, 'make_cmd': make_cmd})
                     ind += 1

-print 'for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs -P %(nparallel)d -n 1 sh' % {'last_ind': ind - 1, 'nparallel': nparallel}
+print('for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs'
+    ' -P %(nparallel)d -n 1 sh' % {'last_ind': ind - 1, 'nparallel': nparallel})
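For illustration (not part of the commit), the powerset() helper that the hunk above ports to Python 3 simply accumulates itertools.combinations over every subset size; a standalone sketch:

# Standalone sketch of gen_run_tests.py's powerset() helper (Python 3).
from itertools import combinations

def powerset(items):
    result = []
    for i in range(len(items) + 1):
        result += combinations(items, i)
    return result

opts = ['--enable-debug', '--enable-prof', '--disable-stats']
print(len(powerset(opts)))  # 2**3 == 8 subsets, including the empty one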
deps/jemalloc/scripts/gen_travis.py
View file @
d4439bd4
-#!/usr/bin/env python
+#!/usr/bin/env python3
-from itertools import combinations
+from itertools import combinations, chain
+from enum import Enum, auto
-travis_template = """\
-language: generic
-dist: precise
-
-matrix:
+LINUX = 'linux'
+OSX = 'osx'
+WINDOWS = 'windows'
+FREEBSD = 'freebsd'
+
+AMD64 = 'amd64'
+ARM64 = 'arm64'
+PPC64LE = 'ppc64le'
+
+TRAVIS_TEMPLATE = """\
+# This config file is generated by ./scripts/gen_travis.py.
+# Do not edit by hand.
+
+# We use 'minimal', because 'generic' makes Windows VMs hang at startup. Also
+# the software provided by 'generic' is simply not needed for our tests.
+# Differences are explained here:
+# https://docs.travis-ci.com/user/languages/minimal-and-generic/
+language: minimal
+dist: focal
+
+jobs:
   include:
-%s
+{jobs}

+before_install:
+  - |-
+    if test -f "./scripts/$TRAVIS_OS_NAME/before_install.sh"; then
+      source ./scripts/$TRAVIS_OS_NAME/before_install.sh
+    fi
+
 before_script:
-  - autoconf
-  - scripts/gen_travis.py > travis_script && diff .travis.yml travis_script
-  - ./configure ${COMPILER_FLAGS:+ \
-      CC="$CC $COMPILER_FLAGS" \
-      CXX="$CXX $COMPILER_FLAGS" } \
-      $CONFIGURE_FLAGS
-  - make -j3
-  - make -j3 tests
+  - |-
+    if test -f "./scripts/$TRAVIS_OS_NAME/before_script.sh"; then
+      source ./scripts/$TRAVIS_OS_NAME/before_script.sh
+    else
+      scripts/gen_travis.py > travis_script && diff .travis.yml travis_script
+      autoconf
+      # If COMPILER_FLAGS are not empty, add them to CC and CXX
+      ./configure ${{COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" \
+          CXX="$CXX $COMPILER_FLAGS"}} $CONFIGURE_FLAGS
+      make -j3
+      make -j3 tests
+    fi

 script:
-  - make check
+  - |-
+    if test -f "./scripts/$TRAVIS_OS_NAME/script.sh"; then
+      source ./scripts/$TRAVIS_OS_NAME/script.sh
+    else
+      make check
+    fi
 """
class Option(object):
    class Type:
        COMPILER = auto()
        COMPILER_FLAG = auto()
        CONFIGURE_FLAG = auto()
        MALLOC_CONF = auto()
        FEATURE = auto()

    def __init__(self, type, value):
        self.type = type
        self.value = value

    @staticmethod
    def as_compiler(value):
        return Option(Option.Type.COMPILER, value)

    @staticmethod
    def as_compiler_flag(value):
        return Option(Option.Type.COMPILER_FLAG, value)

    @staticmethod
    def as_configure_flag(value):
        return Option(Option.Type.CONFIGURE_FLAG, value)

    @staticmethod
    def as_malloc_conf(value):
        return Option(Option.Type.MALLOC_CONF, value)

    @staticmethod
    def as_feature(value):
        return Option(Option.Type.FEATURE, value)

    def __eq__(self, obj):
        return (isinstance(obj, Option) and obj.type == self.type
            and obj.value == self.value)
# The 'default' configuration is gcc, on linux, with no compiler or configure
# flags. We also test with clang, -m32, --enable-debug, --enable-prof,
# --disable-stats, and --with-malloc-conf=tcache:false. To avoid abusing
# travis though, we don't test all 2**7 = 128 possible combinations of these;
# instead, we only test combinations of up to 2 'unusual' settings, under the
# hope that bugs involving interactions of such settings are rare.
# Things at once, for C(7, 0) + C(7, 1) + C(7, 2) = 29
MAX_UNUSUAL_OPTIONS
=
2
os_default
=
'linux'
os_unusual
=
'osx'
compilers_default
=
'CC=gcc CXX=g++'
compilers_unusual
=
'CC=clang CXX=clang++'
GCC
=
Option
.
as_compiler
(
'CC=gcc CXX=g++'
)
CLANG
=
Option
.
as_compiler
(
'CC=clang CXX=clang++'
)
CL
=
Option
.
as_compiler
(
'CC=cl.exe CXX=cl.exe'
)
compilers_unusual
=
[
CLANG
,]
compiler_flag_unusuals
=
[
'-m32'
]
configure_flag_unusuals
=
[
CROSS_COMPILE_32BIT
=
Option
.
as_feature
(
'CROSS_COMPILE_32BIT'
)
feature_unusuals
=
[
CROSS_COMPILE_32BIT
]
configure_flag_unusuals
=
[
Option
.
as_configure_flag
(
opt
)
for
opt
in
(
'--enable-debug'
,
'--enable-prof'
,
'--disable-stats'
,
'--disable-libdl'
,
'--enable-opt-safety-checks'
,
]
'--with-lg-page=16'
,
)]
malloc_conf_unusuals
=
[
malloc_conf_unusuals
=
[
Option
.
as_malloc_conf
(
opt
)
for
opt
in
(
'tcache:false'
,
'dss:primary'
,
'percpu_arena:percpu'
,
'background_thread:true'
,
]
)
]
all_unusuals
=
(
[
os_unusual
]
+
[
compilers_unusual
]
+
compiler_flag_unusuals
+
configure_flag_unusuals
+
malloc_conf_unusuals
)
unusual_combinations_to_test
=
[]
for
i
in
xrange
(
MAX_UNUSUAL_OPTIONS
+
1
):
unusual_combinations_to_test
+=
combinations
(
all_unusuals
,
i
)
all_unusuals
=
(
compilers_unusual
+
feature_unusuals
+
configure_flag_unusuals
+
malloc_conf_unusuals
)
gcc_multilib_set
=
False
# Formats a job from a combination of flags
def
format_job
(
combination
):
global
gcc_multilib_set
os
=
os_unusual
if
os_unusual
in
combination
else
os_default
compilers
=
compilers_unusual
if
compilers_unusual
in
combination
else
compilers_default
def
get_extra_cflags
(
os
,
compiler
):
if
os
==
FREEBSD
:
return
[]
compiler_flags
=
[
x
for
x
in
combination
if
x
in
compiler_flag_unusuals
]
configure_flags
=
[
x
for
x
in
combination
if
x
in
configure_flag_unusuals
]
malloc_conf
=
[
x
for
x
in
combination
if
x
in
malloc_conf_unusuals
]
if
os
==
WINDOWS
:
# For non-CL compilers under Windows (for now it's only MinGW-GCC),
# -fcommon needs to be specified to correctly handle multiple
# 'malloc_conf' symbols and such, which are declared weak under Linux.
# Weak symbols don't work with MinGW-GCC.
if
compiler
!=
CL
.
value
:
return
[
'-fcommon'
]
else
:
return
[]
# Filter out unsupported configurations on OS X.
if
os
==
'osx'
and
(
'dss:primary'
in
malloc_conf
or
\
'percpu_arena:percpu'
in
malloc_conf
or
'background_thread:true'
\
in
malloc_conf
):
return
""
if
len
(
malloc_conf
)
>
0
:
configure_flags
.
append
(
'--with-malloc-conf='
+
","
.
join
(
malloc_conf
))
# We get some spurious errors when -Warray-bounds is enabled.
extra_cflags
=
[
'-Werror'
,
'-Wno-array-bounds'
]
if
compiler
==
CLANG
.
value
or
os
==
OSX
:
extra_cflags
+=
[
'-Wno-unknown-warning-option'
,
'-Wno-ignored-attributes'
]
if
os
==
OSX
:
extra_cflags
+=
[
'-Wno-deprecated-declarations'
,
]
return
extra_cflags
# Filter out an unsupported configuration - heap profiling on OS X.
if
os
==
'osx'
and
'--enable-prof'
in
configure_flags
:
return
""
# We get some spurious errors when -Warray-bounds is enabled.
env_string
=
(
'{} COMPILER_FLAGS="{}" CONFIGURE_FLAGS="{}" '
'EXTRA_CFLAGS="-Werror -Wno-array-bounds"'
).
format
(
compilers
,
" "
.
join
(
compiler_flags
),
" "
.
join
(
configure_flags
))
job
=
""
job
+=
' - os: %s
\n
'
%
os
job
+=
' env: %s
\n
'
%
env_string
if
'-m32'
in
combination
and
os
==
'linux'
:
job
+=
' addons:'
if
gcc_multilib_set
:
job
+=
' *gcc_multilib
\n
'
# Formats a job from a combination of flags
def
format_job
(
os
,
arch
,
combination
):
compilers
=
[
x
.
value
for
x
in
combination
if
x
.
type
==
Option
.
Type
.
COMPILER
]
assert
(
len
(
compilers
)
<=
1
)
compiler_flags
=
[
x
.
value
for
x
in
combination
if
x
.
type
==
Option
.
Type
.
COMPILER_FLAG
]
configure_flags
=
[
x
.
value
for
x
in
combination
if
x
.
type
==
Option
.
Type
.
CONFIGURE_FLAG
]
malloc_conf
=
[
x
.
value
for
x
in
combination
if
x
.
type
==
Option
.
Type
.
MALLOC_CONF
]
features
=
[
x
.
value
for
x
in
combination
if
x
.
type
==
Option
.
Type
.
FEATURE
]
if
len
(
malloc_conf
)
>
0
:
configure_flags
.
append
(
'--with-malloc-conf='
+
','
.
join
(
malloc_conf
))
if
not
compilers
:
compiler
=
GCC
.
value
else
:
job
+=
' &gcc_multilib
\n
'
job
+=
' apt:
\n
'
job
+=
' packages:
\n
'
job
+=
' - gcc-multilib
\n
'
gcc_multilib_set
=
True
compiler
=
compilers
[
0
]
extra_environment_vars
=
''
cross_compile
=
CROSS_COMPILE_32BIT
.
value
in
features
if
os
==
LINUX
and
cross_compile
:
compiler_flags
.
append
(
'-m32'
)
features_str
=
' '
.
join
([
' {}=yes'
.
format
(
feature
)
for
feature
in
features
])
stringify
=
lambda
arr
,
name
:
' {}="{}"'
.
format
(
name
,
' '
.
join
(
arr
))
if
arr
else
''
env_string
=
'{}{}{}{}{}{}'
.
format
(
compiler
,
features_str
,
stringify
(
compiler_flags
,
'COMPILER_FLAGS'
),
stringify
(
configure_flags
,
'CONFIGURE_FLAGS'
),
stringify
(
get_extra_cflags
(
os
,
compiler
),
'EXTRA_CFLAGS'
),
extra_environment_vars
)
job
=
' - os: {}
\n
'
.
format
(
os
)
job
+=
' arch: {}
\n
'
.
format
(
arch
)
job
+=
' env: {}'
.
format
(
env_string
)
return
job
include_rows
=
""
for
combination
in
unusual_combinations_to_test
:
include_rows
+=
format_job
(
combination
)
# Development build
include_rows
+=
'''
\
def
generate_unusual_combinations
(
unusuals
,
max_unusual_opts
):
"""
Generates different combinations of non-standard compilers, compiler flags,
configure flags and malloc_conf settings.
@param max_unusual_opts: Limit of unusual options per combination.
"""
return
chain
.
from_iterable
(
[
combinations
(
unusuals
,
i
)
for
i
in
range
(
max_unusual_opts
+
1
)])
def
included
(
combination
,
exclude
):
"""
Checks if the combination of options should be included in the Travis
testing matrix.
@param exclude: A list of options to be avoided.
"""
return
not
any
(
excluded
in
combination
for
excluded
in
exclude
)
def
generate_jobs
(
os
,
arch
,
exclude
,
max_unusual_opts
,
unusuals
=
all_unusuals
):
jobs
=
[]
for
combination
in
generate_unusual_combinations
(
unusuals
,
max_unusual_opts
):
if
included
(
combination
,
exclude
):
jobs
.
append
(
format_job
(
os
,
arch
,
combination
))
return
'
\n
'
.
join
(
jobs
)
def
generate_linux
(
arch
):
os
=
LINUX
# Only generate 2 unusual options for AMD64 to reduce matrix size
max_unusual_opts
=
MAX_UNUSUAL_OPTIONS
if
arch
==
AMD64
else
1
exclude
=
[]
if
arch
==
PPC64LE
:
# Avoid 32 bit builds and clang on PowerPC
exclude
=
(
CROSS_COMPILE_32BIT
,
CLANG
,)
return
generate_jobs
(
os
,
arch
,
exclude
,
max_unusual_opts
)
def
generate_macos
(
arch
):
os
=
OSX
max_unusual_opts
=
1
exclude
=
([
Option
.
as_malloc_conf
(
opt
)
for
opt
in
(
'dss:primary'
,
'percpu_arena:percpu'
,
'background_thread:true'
)]
+
[
Option
.
as_configure_flag
(
'--enable-prof'
)]
+
[
CLANG
,])
return
generate_jobs
(
os
,
arch
,
exclude
,
max_unusual_opts
)
def
generate_windows
(
arch
):
os
=
WINDOWS
max_unusual_opts
=
3
unusuals
=
(
Option
.
as_configure_flag
(
'--enable-debug'
),
CL
,
CROSS_COMPILE_32BIT
,
)
return
generate_jobs
(
os
,
arch
,
(),
max_unusual_opts
,
unusuals
)
def
generate_freebsd
(
arch
):
os
=
FREEBSD
max_unusual_opts
=
4
unusuals
=
(
Option
.
as_configure_flag
(
'--enable-debug'
),
Option
.
as_configure_flag
(
'--enable-prof --enable-prof-libunwind'
),
Option
.
as_configure_flag
(
'--with-lg-page=16 --with-malloc-conf=tcache:false'
),
CROSS_COMPILE_32BIT
,
)
return
generate_jobs
(
os
,
arch
,
(),
max_unusual_opts
,
unusuals
)
def
get_manual_jobs
():
return
"""
\
# Development build
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-cache-oblivious --enable-stats --enable-log --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
'''
# Enable-expermental-smallocx
include_rows
+=
'''
\
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug
\
--disable-cache-oblivious --enable-stats --enable-log --enable-prof"
\
EXTRA_CFLAGS="-Werror -Wno-array-bounds"
# --enable-expermental-smallocx:
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-experimental-smallocx --enable-stats --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
'''
env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug
\
--enable-experimental-smallocx --enable-stats --enable-prof"
\
EXTRA_CFLAGS="-Werror -Wno-array-bounds"
"""
# Valgrind build bots
include_rows
+=
'''
# Valgrind
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind"
addons:
apt:
packages:
- valgrind
'''
# To enable valgrind on macosx add:
#
#
- os: osx
# env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind"
#
install: brew install valgrind
#
# It currently fails due to: https://github.com/jemalloc/jemalloc/issues/1274
print
travis_template
%
include_rows
def
main
():
jobs
=
'
\n
'
.
join
((
generate_windows
(
AMD64
),
generate_freebsd
(
AMD64
),
generate_linux
(
AMD64
),
generate_linux
(
PPC64LE
),
generate_macos
(
AMD64
),
get_manual_jobs
(),
))
print
(
TRAVIS_TEMPLATE
.
format
(
jobs
=
jobs
))
if
__name__
==
'__main__'
:
main
()
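For illustration (not part of the commit), the rewritten generator keeps the job matrix small by only combining up to MAX_UNUSUAL_OPTIONS "unusual" settings per job, as the chain/combinations code above does; a standalone sketch of that counting logic:

# Standalone sketch of how gen_travis.py bounds the matrix (Python 3).
from itertools import chain, combinations

MAX_UNUSUAL_OPTIONS = 2

def generate_unusual_combinations(unusuals, max_unusual_opts):
    return chain.from_iterable(
        combinations(unusuals, i) for i in range(max_unusual_opts + 1))

unusuals = ['CC=clang CXX=clang++', 'CROSS_COMPILE_32BIT', '--enable-debug',
            '--enable-prof', '--disable-stats', 'tcache:false', 'dss:primary']
print(sum(1 for _ in generate_unusual_combinations(unusuals, MAX_UNUSUAL_OPTIONS)))
# C(7,0) + C(7,1) + C(7,2) == 1 + 7 + 21 == 29 combinations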
deps/jemalloc/scripts/linux/before_install.sh
0 → 100644
View file @
d4439bd4
#!/bin/bash

set -ev

if [[ "$TRAVIS_OS_NAME" != "linux" ]]; then
  echo "Incorrect \$TRAVIS_OS_NAME: expected linux, got $TRAVIS_OS_NAME"
  exit 1
fi

if [[ "$CROSS_COMPILE_32BIT" == "yes" ]]; then
  sudo apt-get update
  sudo apt-get -y install gcc-multilib g++-multilib
fi
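A quick sanity check (assumption, not part of the commit) that the multilib packages installed above are enough for the -m32 cross builds this job type runs:

# Hypothetical verification that 32-bit compilation works after the install.
echo 'int main(void) { return 0; }' > conftest32.c
gcc -m32 conftest32.c -o conftest32 && file conftest32
rm -f conftest32 conftest32.c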
deps/jemalloc/scripts/windows/before_install.sh
0 → 100644
View file @
d4439bd4
#!/bin/bash

set -e

# The purpose of this script is to install build dependencies and set
# $build_env to a function that sets appropriate environment variables,
# to enable (mingw32|mingw64) environment if we want to compile with gcc, or
# (mingw32|mingw64) + vcvarsall.bat if we want to compile with cl.exe

if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
  echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
  exit 1
fi

[[ ! -f C:/tools/msys64/msys2_shell.cmd ]] && rm -rf C:/tools/msys64
choco uninstall -y mingw
choco upgrade --no-progress -y msys2

msys_shell_cmd="cmd //C RefreshEnv.cmd && set MSYS=winsymlinks:nativestrict && C:\\tools\\msys64\\msys2_shell.cmd"

msys2() { $msys_shell_cmd -defterm -no-start -msys2 -c "$*"; }
mingw32() { $msys_shell_cmd -defterm -no-start -mingw32 -c "$*"; }
mingw64() { $msys_shell_cmd -defterm -no-start -mingw64 -c "$*"; }

if [[ "$CROSS_COMPILE_32BIT" == "yes" ]]; then
  mingw=mingw32
  mingw_gcc_package_arch=i686
else
  mingw=mingw64
  mingw_gcc_package_arch=x86_64
fi

if [[ "$CC" == *"gcc"* ]]; then
  $mingw pacman -S --noconfirm --needed \
    autotools \
    git \
    mingw-w64-${mingw_gcc_package_arch}-make \
    mingw-w64-${mingw_gcc_package_arch}-gcc \
    mingw-w64-${mingw_gcc_package_arch}-binutils
  build_env=$mingw
elif [[ "$CC" == *"cl"* ]]; then
  $mingw pacman -S --noconfirm --needed \
    autotools \
    git \
    mingw-w64-${mingw_gcc_package_arch}-make \
    mingw-w64-${mingw_gcc_package_arch}-binutils

  # In order to use MSVC compiler (cl.exe), we need to correctly set some environment
  # variables, namely PATH, INCLUDE, LIB and LIBPATH. The correct values of these
  # variables are set by a batch script "vcvarsall.bat". The code below generates
  # a batch script that calls "vcvarsall.bat" and prints the environment variables.
  #
  # Then, those environment variables are transformed from cmd to bash format and put
  # into a script $apply_vsenv. If cl.exe needs to be used from bash, one can
  # 'source $apply_vsenv' and it will apply the environment variables needed for cl.exe
  # to be located and function correctly.
  #
  # At last, a function "mingw_with_msvc_vars" is generated which forwards user input
  # into a correct mingw (32 or 64) subshell that automatically performs 'source $apply_vsenv',
  # making it possible for autotools to discover and use cl.exe.
  vcvarsall="vcvarsall.tmp.bat"
  echo "@echo off" > $vcvarsall
  echo "call \"c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\\\vcvarsall.bat\" $USE_MSVC" >> $vcvarsall
  echo "set" >> $vcvarsall

  apply_vsenv="./apply_vsenv.sh"
  cmd //C $vcvarsall | grep -E "^PATH=" | sed -n -e 's/\(.*\)=\(.*\)/export \1=$PATH:"\2"/g' \
    -e 's/\([a-zA-Z]\):[\\\/]/\/\1\//g' \
    -e 's/\\/\//g' \
    -e 's/;\//:\//gp' > $apply_vsenv
  cmd //C $vcvarsall | grep -E "^(INCLUDE|LIB|LIBPATH)=" | sed -n -e 's/\(.*\)=\(.*\)/export \1="\2"/gp' >> $apply_vsenv
  cat $apply_vsenv
  mingw_with_msvc_vars() { $msys_shell_cmd -defterm -no-start -$mingw -c "source $apply_vsenv && ""$*"; }
  build_env=mingw_with_msvc_vars

  rm -f $vcvarsall
else
  echo "Unknown C compiler: $CC"
  exit 1
fi

echo "Build environment function: $build_env"
deps/jemalloc/scripts/windows/before_script.sh
0 → 100644
View file @
d4439bd4
#!/bin/bash

set -e

if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
  echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
  exit 1
fi

$build_env autoconf
$build_env ./configure $CONFIGURE_FLAGS
# mingw32-make simply means "make", unrelated to mingw32 vs mingw64.
# Simply disregard the prefix and treat is as "make".
$build_env mingw32-make -j3
# At the moment, it's impossible to make tests in parallel,
# seemingly due to concurrent writes to '.pdb' file. I don't know why
# that happens, because we explicitly supply '/Fs' to the compiler.
# Until we figure out how to fix it, we should build tests sequentially
# on Windows.
$build_env mingw32-make tests
deps/jemalloc/scripts/windows/script.sh
0 → 100644
View file @
d4439bd4
#!/bin/bash

set -e

if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
  echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
  exit 1
fi

$build_env mingw32-make -k check
deps/jemalloc/src/arena.c
View file @
d4439bd4
#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/decay.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/safety_check.h"
...
...
@@ -35,34 +36,37 @@ ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
static
atomic_zd_t
dirty_decay_ms_default
;
static
atomic_zd_t
muzzy_decay_ms_default
;
const
uint64_t
h_steps
[
SMOOTHSTEP_NSTEPS
]
=
{
#define STEP(step, h, x, y) \
h,
SMOOTHSTEP
#undef STEP
};
emap_t
arena_emap_global
;
pa_central_t
arena_pa_central_global
;
static
div_info_t
arena_binind_div_info
[
SC_NBINS
];
div_info_t
arena_binind_div_info
[
SC_NBINS
];
size_t
opt_oversize_threshold
=
OVERSIZE_THRESHOLD_DEFAULT
;
size_t
oversize_threshold
=
OVERSIZE_THRESHOLD_DEFAULT
;
uint32_t
arena_bin_offsets
[
SC_NBINS
];
static
unsigned
nbins_total
;
static
unsigned
huge_arena_ind
;
const
arena_config_t
arena_config_default
=
{
/* .extent_hooks = */
(
extent_hooks_t
*
)
&
ehooks_default_extent_hooks
,
/* .metadata_use_hooks = */
true
,
};
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
static
void
arena_decay_to_limit
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_decay_t
*
decay
,
extents_t
*
extents
,
bool
all
,
size_t
npages_limit
,
size_t
npages_decay_max
,
bool
is_background_thread
);
static
bool
arena_decay_dirty
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
bool
is_background_thread
,
bool
all
);
static
void
arena_dalloc_bin_slab
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_t
*
slab
,
bin_t
*
bin
);
static
void
arena_bin_lower_slab
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_t
*
slab
,
static
void
arena_bin_lower_slab
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
edata_t
*
slab
,
bin_t
*
bin
);
static
void
arena_maybe_do_deferred_work
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
decay_t
*
decay
,
size_t
npages_new
);
/******************************************************************************/
...
...
@@ -72,19 +76,17 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
size_t
*
nactive
,
size_t
*
ndirty
,
size_t
*
nmuzzy
)
{
*
nthreads
+=
arena_nthreads_get
(
arena
,
false
);
*
dss
=
dss_prec_names
[
arena_dss_prec_get
(
arena
)];
*
dirty_decay_ms
=
arena_dirty_decay_ms_get
(
arena
);
*
muzzy_decay_ms
=
arena_muzzy_decay_ms_get
(
arena
);
*
nactive
+=
atomic_load_zu
(
&
arena
->
nactive
,
ATOMIC_RELAXED
);
*
ndirty
+=
extents_npages_get
(
&
arena
->
extents_dirty
);
*
nmuzzy
+=
extents_npages_get
(
&
arena
->
extents_muzzy
);
*
dirty_decay_ms
=
arena_decay_ms_get
(
arena
,
extent_state_dirty
);
*
muzzy_decay_ms
=
arena_decay_ms_get
(
arena
,
extent_state_muzzy
);
pa_shard_basic_stats_merge
(
&
arena
->
pa_shard
,
nactive
,
ndirty
,
nmuzzy
);
}
void
arena_stats_merge
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
unsigned
*
nthreads
,
const
char
**
dss
,
ssize_t
*
dirty_decay_ms
,
ssize_t
*
muzzy_decay_ms
,
size_t
*
nactive
,
size_t
*
ndirty
,
size_t
*
nmuzzy
,
arena_stats_t
*
astats
,
bin_stats_t
*
bstats
,
arena_stats_large_t
*
lstats
,
arena_stats_exten
ts_t
*
e
stats
)
{
bin_stats_
data_
t
*
bstats
,
arena_stats_large_t
*
lstats
,
pac_estats_t
*
estats
,
hpa_shard_stats_t
*
hpastats
,
sec_sta
ts_t
*
sec
stats
)
{
cassert
(
config_stats
);
arena_basic_stats_merge
(
tsdn
,
arena
,
nthreads
,
dss
,
dirty_decay_ms
,
...
...
@@ -93,122 +95,74 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	size_t base_allocated, base_resident, base_mapped, metadata_thp;
 	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
 	    &base_mapped, &metadata_thp);
+	size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac);
+	astats->mapped += base_mapped + pac_mapped_sz;
+	astats->resident += base_resident;

-	arena_stats_lock(tsdn, &arena->stats);
-
-	arena_stats_accum_zu(&astats->mapped, base_mapped
-	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
-	arena_stats_accum_zu(&astats->retained,
-	    extents_npages_get(&arena->extents_retained) << LG_PAGE);
-
-	atomic_store_zu(&astats->extent_avail,
-	    atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
-	    ATOMIC_RELAXED);
+	LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);

-	arena_stats_accum_u64(&astats->decay_dirty.npurge,
-	    arena_stats_read_u64(tsdn, &arena->stats,
-	    &arena->stats.decay_dirty.npurge));
-	arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
-	    arena_stats_read_u64(tsdn, &arena->stats,
-	    &arena->stats.decay_dirty.nmadvise));
-	arena_stats_accum_u64(&astats->decay_dirty.purged,
-	    arena_stats_read_u64(tsdn, &arena->stats,
-	    &arena->stats.decay_dirty.purged));
-	arena_stats_accum_u64(&astats->decay_muzzy.npurge,
-	    arena_stats_read_u64(tsdn, &arena->stats,
-	    &arena->stats.decay_muzzy.npurge));
-	arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
-	    arena_stats_read_u64(tsdn, &arena->stats,
-	    &arena->stats.decay_muzzy.nmadvise));
-	arena_stats_accum_u64(&astats->decay_muzzy.purged,
-	    arena_stats_read_u64(tsdn, &arena->stats,
-	    &arena->stats.decay_muzzy.purged));
-
-	arena_stats_accum_zu(&astats->base, base_allocated);
-	arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
-	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
-	arena_stats_accum_zu(&astats->resident, base_resident +
-	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
-	    extents_npages_get(&arena->extents_dirty) +
-	    extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
-	arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
-	    &arena->stats.abandoned_vm, ATOMIC_RELAXED));
+	astats->base += base_allocated;
+	atomic_load_add_store_zu(&astats->internal, arena_internal_get(arena));
+	astats->metadata_thp += metadata_thp;

 	for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
-		uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
+		uint64_t nmalloc = locked_read_u64(tsdn,
+		    LOCKEDINT_MTX(arena->stats.mtx),
 		    &arena->stats.lstats[i].nmalloc);
-		arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
-		arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
+		locked_inc_u64_unsynchronized(&lstats[i].nmalloc, nmalloc);
+		astats->nmalloc_large += nmalloc;

-		uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
+		uint64_t ndalloc = locked_read_u64(tsdn,
+		    LOCKEDINT_MTX(arena->stats.mtx),
 		    &arena->stats.lstats[i].ndalloc);
-		arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
-		arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
+		locked_inc_u64_unsynchronized(&lstats[i].ndalloc, ndalloc);
+		astats->ndalloc_large += ndalloc;

-		uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
+		uint64_t nrequests = locked_read_u64(tsdn,
+		    LOCKEDINT_MTX(arena->stats.mtx),
 		    &arena->stats.lstats[i].nrequests);
-		arena_stats_accum_u64(&lstats[i].nrequests,
-		    nmalloc + nrequests);
-		arena_stats_accum_u64(&astats->nrequests_large,
+		locked_inc_u64_unsynchronized(&lstats[i].nrequests,
 		    nmalloc + nrequests);
+		astats->nrequests_large += nmalloc + nrequests;

 		/* nfill == nmalloc for large currently. */
-		arena_stats_accum_u64(&lstats[i].nfills, nmalloc);
-		arena_stats_accum_u64(&astats->nfills_large, nmalloc);
+		locked_inc_u64_unsynchronized(&lstats[i].nfills, nmalloc);
+		astats->nfills_large += nmalloc;

-		uint64_t nflush = arena_stats_read_u64(tsdn, &arena->stats,
+		uint64_t nflush = locked_read_u64(tsdn,
+		    LOCKEDINT_MTX(arena->stats.mtx),
 		    &arena->stats.lstats[i].nflushes);
-		arena_stats_accum_u64(&lstats[i].nflushes, nflush);
-		arena_stats_accum_u64(&astats->nflushes_large, nflush);
+		locked_inc_u64_unsynchronized(&lstats[i].nflushes, nflush);
+		astats->nflushes_large += nflush;

 		assert(nmalloc >= ndalloc);
 		assert(nmalloc - ndalloc <= SIZE_T_MAX);
 		size_t curlextents = (size_t)(nmalloc - ndalloc);
 		lstats[i].curlextents += curlextents;
-		arena_stats_accum_zu(&astats->allocated_large,
-		    curlextents * sz_index2size(SC_NBINS + i));
-	}
-
-	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
-		size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
-		    retained_bytes;
-		dirty = extents_nextents_get(&arena->extents_dirty, i);
-		muzzy = extents_nextents_get(&arena->extents_muzzy, i);
-		retained = extents_nextents_get(&arena->extents_retained, i);
-		dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i);
-		muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i);
-		retained_bytes = extents_nbytes_get(&arena->extents_retained,
-		    i);
-
-		atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
-		atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
-		atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED);
-		atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes,
-		    ATOMIC_RELAXED);
-		atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes,
-		    ATOMIC_RELAXED);
-		atomic_store_zu(&estats[i].retained_bytes, retained_bytes,
-		    ATOMIC_RELAXED);
+		astats->allocated_large += curlextents *
+		    sz_index2size(SC_NBINS + i);
 	}

-	arena_stats_unlock(tsdn, &arena->stats);
+	pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats,
+	    estats, hpastats, secstats, &astats->resident);
+	LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);

-	/* tcache_bytes counts currently cached bytes. */
-	atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
+	/* Currently cached bytes and sanitizer-stashed bytes in tcache. */
+	astats->tcache_bytes = 0;
+	astats->tcache_stashed_bytes = 0;
 	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
 	cache_bin_array_descriptor_t *descriptor;
 	ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
-		szind_t i = 0;
-		for (; i < SC_NBINS; i++) {
-			cache_bin_t *tbin = &descriptor->bins_small[i];
-			arena_stats_accum_zu(&astats->tcache_bytes,
-			    tbin->ncached * sz_index2size(i));
-		}
-		for (; i < nhbins; i++) {
-			cache_bin_t *tbin = &descriptor->bins_large[i];
-			arena_stats_accum_zu(&astats->tcache_bytes,
-			    tbin->ncached * sz_index2size(i));
+		for (szind_t i = 0; i < nhbins; i++) {
+			cache_bin_t *cache_bin = &descriptor->bins[i];
+			cache_bin_sz_t ncached, nstashed;
+			cache_bin_nitems_get_remote(cache_bin,
+			    &tcache_bin_info[i], &ncached, &nstashed);
+			astats->tcache_bytes += ncached * sz_index2size(i);
+			astats->tcache_stashed_bytes += nstashed *
+			    sz_index2size(i);
 		}
 	}
 	malloc_mutex_prof_read(tsdn,
...
...
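The merge loop above switches from arena_stats_read_u64()/arena_stats_accum_u64() to locked_read_u64() under the stats mutex plus plain, unsynchronized accumulation into the caller's snapshot. A standalone sketch of that locked-counter-plus-snapshot pattern, using pthreads rather than jemalloc's locked/LOCKEDINT machinery:

/* Standalone sketch (not the jemalloc LOCKEDINT API): a counter incremented
 * under a stats mutex, read under the same mutex, and folded into a local
 * snapshot that needs no synchronization of its own. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t mtx;
	uint64_t nmalloc;             /* protected by mtx */
} locked_stats_t;

static void
locked_inc(locked_stats_t *s, uint64_t n) {
	pthread_mutex_lock(&s->mtx);
	s->nmalloc += n;
	pthread_mutex_unlock(&s->mtx);
}

static uint64_t
locked_read(locked_stats_t *s) {
	pthread_mutex_lock(&s->mtx);
	uint64_t v = s->nmalloc;
	pthread_mutex_unlock(&s->mtx);
	return v;
}

int
main(void) {
	locked_stats_t s = {PTHREAD_MUTEX_INITIALIZER, 0};
	uint64_t snapshot = 0;        /* merge target; owned by this thread */
	locked_inc(&s, 3);
	snapshot += locked_read(&s);  /* "unsynchronized" accumulate */
	printf("%llu\n", (unsigned long long)snapshot);
	return 0;
}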
@@ -224,21 +178,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	/* Gather per arena mutex profiling data. */
 	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
-	READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
-	    arena_prof_mutex_extent_avail)
-	READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
-	    arena_prof_mutex_extents_dirty)
-	READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
-	    arena_prof_mutex_extents_muzzy)
-	READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
-	    arena_prof_mutex_extents_retained)
-	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
-	    arena_prof_mutex_decay_dirty)
-	READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
-	    arena_prof_mutex_decay_muzzy)
-	READ_ARENA_MUTEX_PROF_DATA(base->mtx, arena_prof_mutex_base)
+	READ_ARENA_MUTEX_PROF_DATA(base->mtx, arena_prof_mutex_base);
 #undef READ_ARENA_MUTEX_PROF_DATA
+	pa_shard_mtx_stats_read(tsdn, &arena->pa_shard,
+	    astats->mutex_prof_data);

 	nstime_copy(&astats->uptime, &arena->create_time);
 	nstime_update(&astats->uptime);
...
...
@@ -247,55 +191,67 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	for (szind_t i = 0; i < SC_NBINS; i++) {
 		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
 			bin_stats_merge(tsdn, &bstats[i],
-			    &arena->bins[i].bin_shards[j]);
+			    arena_get_bin(arena, i, j));
 		}
 	}
 }

-void
-arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_t *extent) {
+static void
+arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
+    bool is_background_thread) {
+	if (!background_thread_enabled() || is_background_thread) {
+		return;
+	}
+	background_thread_info_t *info =
+	    arena_background_thread_info_get(arena);
+	if (background_thread_indefinite_sleep(info)) {
+		arena_maybe_do_deferred_work(tsdn, arena,
+		    &arena->pa_shard.pac.decay_dirty, 0);
+	}
+}
+
+/*
+ * React to deferred work generated by a PAI function.
+ */
+void
+arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena) {
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);

-	extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
-	    extent);
-	if (arena_dirty_decay_ms_get(arena) == 0) {
+	if (decay_immediately(&arena->pa_shard.pac.decay_dirty)) {
 		arena_decay_dirty(tsdn, arena, false, true);
-	} else {
-		arena_background_thread_inactivity_check(tsdn, arena, false);
 	}
+	arena_background_thread_inactivity_check(tsdn, arena, false);
 }

 static void *
-arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
+arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) {
 	void *ret;
-	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+	slab_data_t *slab_data = edata_slab_data_get(slab);
 	size_t regind;

-	assert(extent_nfree_get(slab) > 0);
+	assert(edata_nfree_get(slab) > 0);
 	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

 	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
-	ret = (void *)((uintptr_t)extent_addr_get(slab) +
+	ret = (void *)((uintptr_t)edata_addr_get(slab) +
 	    (uintptr_t)(bin_info->reg_size * regind));
-	extent_nfree_dec(slab);
+	edata_nfree_dec(slab);
 	return ret;
 }

 static void
-arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
+arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info,
     unsigned cnt, void **ptrs) {
-	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+	slab_data_t *slab_data = edata_slab_data_get(slab);

-	assert(extent_nfree_get(slab) >= cnt);
+	assert(edata_nfree_get(slab) >= cnt);
 	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

 #if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
 	for (unsigned i = 0; i < cnt; i++) {
 		size_t regind = bitmap_sfu(slab_data->bitmap,
 		    &bin_info->bitmap_info);
-		*(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) +
+		*(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) +
 		    (uintptr_t)(bin_info->reg_size * regind));
 	}
 #else
...
...
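arena_slab_reg_alloc() above pops the first free region index out of the slab bitmap and turns it into a pointer as base + regind * reg_size. A standalone sketch of the same idea, with a single 64-bit word standing in for jemalloc's bitmap module (REG_SIZE and NREGS are arbitrary illustrative values):

/* Standalone sketch, not jemalloc code. Uses the GCC/Clang __builtin_ctzll
 * builtin to find the first free region. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REG_SIZE 64u
#define NREGS    64u

typedef struct {
	unsigned char mem[REG_SIZE * NREGS];
	uint64_t free_mask;           /* bit i set => region i is free */
} toy_slab_t;

static void *
toy_reg_alloc(toy_slab_t *slab) {
	assert(slab->free_mask != 0);
	unsigned regind = (unsigned)__builtin_ctzll(slab->free_mask);
	slab->free_mask &= slab->free_mask - 1;   /* clear lowest set bit */
	return (void *)((uintptr_t)slab->mem + (uintptr_t)(REG_SIZE * regind));
}

int
main(void) {
	toy_slab_t slab;
	memset(&slab, 0, sizeof(slab));
	slab.free_mask = ~0ULL;
	void *a = toy_reg_alloc(&slab);
	void *b = toy_reg_alloc(&slab);
	printf("%td\n", (char *)b - (char *)a);   /* prints 64 */
	return 0;
}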
@@ -316,7 +272,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
 	 * Load from memory locations only once, outside the
 	 * hot loop below.
 	 */
-	uintptr_t base = (uintptr_t)extent_addr_get(slab);
+	uintptr_t base = (uintptr_t)edata_addr_get(slab);
 	uintptr_t regsize = (uintptr_t)bin_info->reg_size;
 	while (pop--) {
 		size_t bit = cfs_lu(&g);
...
...
@@ -328,56 +284,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
 		slab_data->bitmap[group] = g;
 	}
 #endif
-	extent_nfree_sub(slab, cnt);
-}
-
-#ifndef JEMALLOC_JET
-static
-#endif
-size_t
-arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
-	size_t diff, regind;
-
-	/* Freeing a pointer outside the slab can cause assertion failure. */
-	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
-	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
-	/* Freeing an interior pointer can cause assertion failure. */
-	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
-	    (uintptr_t)bin_infos[binind].reg_size == 0);
-
-	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
-
-	/* Avoid doing division with a variable divisor. */
-	regind = div_compute(&arena_binind_div_info[binind], diff);
-
-	assert(regind < bin_infos[binind].nregs);
-
-	return regind;
-}
-
-static void
-arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data,
-    void *ptr) {
-	szind_t binind = extent_szind_get(slab);
-	const bin_info_t *bin_info = &bin_infos[binind];
-	size_t regind = arena_slab_regind(slab, binind, ptr);
-
-	assert(extent_nfree_get(slab) < bin_info->nregs);
-	/* Freeing an unallocated pointer can cause assertion failure. */
-	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
-
-	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
-	extent_nfree_inc(slab);
-}
-
-static void
-arena_nactive_add(arena_t *arena, size_t add_pages) {
-	atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
-}
-
-static void
-arena_nactive_sub(arena_t *arena, size_t sub_pages) {
-	assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
-	atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
+	edata_nfree_sub(slab, cnt);
 }

 static void
...
...
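The removed arena_slab_regind() relied on div_compute() to "avoid doing division with a variable divisor". A standalone sketch of that precomputed-reciprocal trick; the magic-number scheme and the toy_div_t type below are illustrative, not jemalloc's div_info_t, and the parameters shown stay within the range where the shortcut is exact:

/* Standalone sketch, not jemalloc code: precompute ceil(2^32 / d) once, then
 * each regind computation becomes a multiply and a shift. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t magic;               /* ceil(2^32 / d) */
	uint32_t d;
} toy_div_t;

static toy_div_t
toy_div_init(uint32_t d) {
	toy_div_t di;
	di.d = d;
	di.magic = (((uint64_t)1 << 32) + d - 1) / d;
	return di;
}

static uint32_t
toy_div_compute(const toy_div_t *di, uint32_t n) {
	return (uint32_t)(((uint64_t)n * di->magic) >> 32);
}

int
main(void) {
	toy_div_t di = toy_div_init(48);          /* e.g. reg_size == 48 */
	for (uint32_t diff = 0; diff < 48 * 100; diff += 48) {
		assert(toy_div_compute(&di, diff) == diff / 48);
	}
	printf("ok\n");
	return 0;
}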
@@ -392,7 +299,7 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
 	index = sz_size2index(usize);
 	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;

-	arena_stats_add_u64(tsdn, &arena->stats,
+	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
 	    &arena->stats.lstats[hindex].nmalloc, 1);
 }
...
...
@@ -408,627 +315,284 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
 	index = sz_size2index(usize);
 	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;

-	arena_stats_add_u64(tsdn, &arena->stats,
+	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
 	    &arena->stats.lstats[hindex].ndalloc, 1);
 }

 static void
 arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
     size_t usize) {
-	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
 	arena_large_malloc_stats_update(tsdn, arena, usize);
+	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
 }

-static bool
-arena_may_have_muzzy(arena_t *arena) {
-	return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0));
-}
-
-extent_t *
+edata_t *
 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool *zero) {
-	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-
-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
-	    WITNESS_RANK_CORE, 0);
+    size_t alignment, bool zero) {
+	bool deferred_work_generated = false;
 	szind_t szind = sz_size2index(usize);
-	size_t mapped_add;
-	bool commit = true;
-	extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
-	    &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
-	    szind, zero, &commit);
-	if (extent == NULL && arena_may_have_muzzy(arena)) {
-		extent = extents_alloc(tsdn, arena, &extent_hooks,
-		    &arena->extents_muzzy, NULL, usize, sz_large_pad,
-		    alignment, false, szind, zero, &commit);
-	}
-	size_t size = usize + sz_large_pad;
-	if (extent == NULL) {
-		extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
-		    usize, sz_large_pad, alignment, false, szind, zero,
-		    &commit);
-		if (config_stats) {
-			/*
-			 * extent may be NULL on OOM, but in that case
-			 * mapped_add isn't used below, so there's no need to
-			 * conditionlly set it to 0 here.
-			 */
-			mapped_add = size;
-		}
-	} else if (config_stats) {
-		mapped_add = 0;
-	}
+	size_t esize = usize + sz_large_pad;
+	bool guarded = san_large_extent_decide_guard(tsdn,
+	    arena_get_ehooks(arena), esize, alignment);
+	edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
+	    /* slab */ false, szind, zero, guarded, &deferred_work_generated);
+	assert(deferred_work_generated == false);

-	if (extent != NULL) {
+	if (edata != NULL) {
 		if (config_stats) {
-			arena_stats_lock(tsdn, &arena->stats);
+			LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
 			arena_large_malloc_stats_update(tsdn, arena, usize);
-			if (mapped_add != 0) {
-				arena_stats_add_zu(tsdn, &arena->stats,
-				    &arena->stats.mapped, mapped_add);
-			}
-			arena_stats_unlock(tsdn, &arena->stats);
+			LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
 		}
-		arena_nactive_add(arena, size >> LG_PAGE);
 	}

-	return extent;
+	if (edata != NULL && sz_large_pad != 0) {
+		arena_cache_oblivious_randomize(tsdn, arena, edata, alignment);
+	}
+
+	return edata;
 }

 void
-arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent) {
+arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
 	if (config_stats) {
-		arena_stats_lock(tsdn, &arena->stats);
+		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
 		arena_large_dalloc_stats_update(tsdn, arena,
-		    extent_usize_get(extent));
-		arena_stats_unlock(tsdn, &arena->stats);
+		    edata_usize_get(edata));
+		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
 	}
-	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
 }

 void
-arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent, size_t oldusize) {
-	size_t usize = extent_usize_get(extent);
-	size_t udiff = oldusize - usize;
+arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
+    size_t oldusize) {
+	size_t usize = edata_usize_get(edata);

 	if (config_stats) {
-		arena_stats_lock(tsdn, &arena->stats);
+		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
 		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
-		arena_stats_unlock(tsdn, &arena->stats);
+		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
 	}
-	arena_nactive_sub(arena, udiff >> LG_PAGE);
 }

 void
-arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent, size_t oldusize) {
-	size_t usize = extent_usize_get(extent);
-	size_t udiff = usize - oldusize;
+arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
+    size_t oldusize) {
+	size_t usize = edata_usize_get(edata);

 	if (config_stats) {
-		arena_stats_lock(tsdn, &arena->stats);
+		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
 		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
-		arena_stats_unlock(tsdn, &arena->stats);
+		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
 	}
-	arena_nactive_add(arena, udiff >> LG_PAGE);
 }

-static ssize_t
-arena_decay_ms_read(arena_decay_t *decay) {
-	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
-}
-
-static void
-arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
-	atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
-}
-
-static void
-arena_decay_deadline_init(arena_decay_t *decay) {
-	/*
-	 * Generate a new deadline that is uniformly random within the next
-	 * epoch after the current one.
-	 */
...
-static bool
-arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
-	return (nstime_compare(&decay->deadline, time) <= 0);
-}
-
-static size_t
-arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
-	uint64_t sum;
-	size_t npages_limit_backlog;
-	unsigned i;
-
-	/*
-	 * For each element of decay_backlog, multiply by the corresponding
-	 * fixed-point smoothstep decay factor.  Sum the products, then divide
-	 * to round down to the nearest whole number of pages.
-	 */
-	sum = 0;
-	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
-		sum += decay->backlog[i] * h_steps[i];
-	}
-	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
-
-	return npages_limit_backlog;
-}
-
-static void
-arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
...
-static void
-arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
-    size_t current_npages) {
...
-static void
-arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, size_t current_npages, size_t npages_limit,
-    bool is_background_thread) {
...
-static void
-arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
-    size_t current_npages) {
...
-static void
-arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, const nstime_t *time, bool is_background_thread) {
...
-static void
-arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
...
-static bool
-arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
-    arena_stats_decay_t *stats) {
...
-static bool
-arena_decay_ms_valid(ssize_t decay_ms) {
-	if (decay_ms < -1) {
-		return false;
-	}
-	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
-	    KQU(1000)) {
-		return true;
-	}
-	return false;
-}
-
-static bool
-arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, bool is_background_thread) {
...
-static ssize_t
-arena_decay_ms_get(arena_decay_t *decay) {
-	return arena_decay_ms_read(decay);
-}
-
-ssize_t
-arena_dirty_decay_ms_get(arena_t *arena) {
-	return arena_decay_ms_get(&arena->decay_dirty);
-}
-
-ssize_t
-arena_muzzy_decay_ms_get(arena_t *arena) {
-	return arena_decay_ms_get(&arena->decay_muzzy);
-}
-
-static bool
-arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, ssize_t decay_ms) {
...
-bool
-arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms) {
-	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
-	    &arena->extents_dirty, decay_ms);
-}
-
-bool
-arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms) {
-	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
-	    &arena->extents_muzzy, decay_ms);
-}
-
-static size_t
-arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
-    size_t npages_decay_max, extent_list_t *decay_extents) {
...
-static size_t
-arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
-    bool all, extent_list_t *decay_extents, bool is_background_thread) {
...
-/*
- * npages_limit: Decay at most npages_decay_max pages without violating the
- * invariant: (extents_npages_get(extents) >= npages_limit).  We need an upper
- * bound on number of pages in order to prevent unbounded growth (namely in
- * stashed), otherwise unbounded new pages could be added to extents during the
- * current decay run, so that the purging thread never finishes.
- */
-static void
-arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, bool all, size_t npages_limit,
-    size_t npages_decay_max, bool is_background_thread) {
...
-static bool
-arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    extents_t *extents, bool is_background_thread, bool all) {
...
-static bool
-arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
-    bool all) {
-	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
-	    &arena->extents_dirty, is_background_thread, all);
-}
-
-static bool
-arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
-    bool all) {
-	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
-	    &arena->extents_muzzy, is_background_thread, all);
-}
-
-void
-arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
-	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
-		return;
-	}
-	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
-}
-
-static void
-arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
-	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
-
-	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
-}
+/*
+ * In situations where we're not forcing a decay (i.e. because the user
+ * specifically requested it), should we purge ourselves, or wait for the
+ * background thread to get to it.
+ */
+static pac_purge_eagerness_t
+arena_decide_unforced_purge_eagerness(bool is_background_thread) {
+	if (is_background_thread) {
+		return PAC_PURGE_ALWAYS;
+	} else if (!is_background_thread && background_thread_enabled()) {
+		return PAC_PURGE_NEVER;
+	} else {
+		return PAC_PURGE_ON_EPOCH_ADVANCE;
+	}
+}
+
+bool
+arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
+    ssize_t decay_ms) {
+	pac_purge_eagerness_t eagerness = arena_decide_unforced_purge_eagerness(
+	    /* is_background_thread */ false);
+	return pa_decay_ms_set(tsdn, &arena->pa_shard, state, decay_ms,
+	    eagerness);
+}
+
+ssize_t
+arena_decay_ms_get(arena_t *arena, extent_state_t state) {
+	return pa_decay_ms_get(&arena->pa_shard, state);
+}
+
+static bool
+arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
+    pac_decay_stats_t *decay_stats, ecache_t *ecache,
+    bool is_background_thread, bool all) {
+	if (all) {
+		malloc_mutex_lock(tsdn, &decay->mtx);
+		pac_decay_all(tsdn, &arena->pa_shard.pac, decay, decay_stats,
+		    ecache, /* fully_decay */ all);
+		malloc_mutex_unlock(tsdn, &decay->mtx);
+		return false;
+	}
+
+	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+		/* No need to wait if another thread is in progress. */
+		return true;
+	}
+	pac_purge_eagerness_t eagerness =
+	    arena_decide_unforced_purge_eagerness(is_background_thread);
+	bool epoch_advanced = pac_maybe_decay_purge(tsdn,
+	    &arena->pa_shard.pac, decay, decay_stats, ecache, eagerness);
+	size_t npages_new;
+	if (epoch_advanced) {
+		/* Backlog is updated on epoch advance. */
+		npages_new = decay_epoch_npages_delta(decay);
+	}
+	malloc_mutex_unlock(tsdn, &decay->mtx);
+
+	if (have_background_thread && background_thread_enabled() &&
+	    epoch_advanced && !is_background_thread) {
+		arena_maybe_do_deferred_work(tsdn, arena, decay, npages_new);
+	}
+
+	return false;
+}
+
+static bool
+arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+    bool all) {
+	return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
+	    &arena->pa_shard.pac.stats->decay_dirty,
+	    &arena->pa_shard.pac.ecache_dirty, is_background_thread, all);
+}
+
+static bool
+arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+    bool all) {
+	if (pa_shard_dont_decay_muzzy(&arena->pa_shard)) {
+		return false;
+	}
+	return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
+	    &arena->pa_shard.pac.stats->decay_muzzy,
+	    &arena->pa_shard.pac.ecache_muzzy, is_background_thread, all);
+}
+
+void
+arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
+	if (all) {
+		/*
+		 * We should take a purge of "all" to mean "save as much memory
+		 * as possible", including flushing any caches (for situations
+		 * like thread death, or manual purge calls).
+		 */
+		sec_flush(tsdn, &arena->pa_shard.hpa_sec);
+	}
+	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
+		return;
+	}
+	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
+}
+
+static bool
+arena_should_decay_early(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
+    background_thread_info_t *info, nstime_t *remaining_sleep,
+    size_t npages_new) {
+	malloc_mutex_assert_owner(tsdn, &info->mtx);
+
+	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+		return false;
+	}
+
+	if (!decay_gradually(decay)) {
+		malloc_mutex_unlock(tsdn, &decay->mtx);
+		return false;
+	}
+
+	nstime_init(remaining_sleep, background_thread_wakeup_time_get(info));
+	if (nstime_compare(remaining_sleep, &decay->epoch) <= 0) {
+		malloc_mutex_unlock(tsdn, &decay->mtx);
+		return false;
+	}
+	nstime_subtract(remaining_sleep, &decay->epoch);
+	if (npages_new > 0) {
+		uint64_t npurge_new = decay_npages_purge_in(decay,
+		    remaining_sleep, npages_new);
+		info->npages_to_purge_new += npurge_new;
+	}
+	malloc_mutex_unlock(tsdn, &decay->mtx);
+	return info->npages_to_purge_new > ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD;
+}
+
+/*
+ * Check if deferred work needs to be done sooner than planned.
+ * For decay we might want to wake up earlier because of an influx of dirty
+ * pages.  Rather than waiting for previously estimated time, we proactively
+ * purge those pages.
+ * If background thread sleeps indefinitely, always wake up because some
+ * deferred work has been generated.
+ */
+static void
+arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
+    size_t npages_new) {
+	background_thread_info_t *info = arena_background_thread_info_get(
+	    arena);
+	if (malloc_mutex_trylock(tsdn, &info->mtx)) {
+		/*
+		 * Background thread may hold the mutex for a long period of
+		 * time.  We'd like to avoid the variance on application
+		 * threads.  So keep this non-blocking, and leave the work to a
+		 * future epoch.
+		 */
+		return;
+	}
+	if (!background_thread_is_started(info)) {
+		goto label_done;
+	}
+
+	nstime_t remaining_sleep;
+	if (background_thread_indefinite_sleep(info)) {
+		background_thread_wakeup_early(info, NULL);
+	} else if (arena_should_decay_early(tsdn, arena, decay, info,
+	    &remaining_sleep, npages_new)) {
+		info->npages_to_purge_new = 0;
+		background_thread_wakeup_early(info, &remaining_sleep);
+	}
+label_done:
+	malloc_mutex_unlock(tsdn, &info->mtx);
+}
+
+/* Called from background threads. */
+void
+arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena) {
+	arena_decay(tsdn, arena, true, false);
+	pa_shard_do_deferred_work(tsdn, &arena->pa_shard);
+}
+
+void
+arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
+	bool deferred_work_generated = false;
+	pa_dalloc(tsdn, &arena->pa_shard, slab, &deferred_work_generated);
+	if (deferred_work_generated) {
+		arena_handle_deferred_work(tsdn, arena);
+	}
+}

 static void
-arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
-	assert(extent_nfree_get(slab) > 0);
-	extent_heap_insert(&bin->slabs_nonfull, slab);
+arena_bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab) {
+	assert(edata_nfree_get(slab) > 0);
+	edata_heap_insert(&bin->slabs_nonfull, slab);
 	if (config_stats) {
 		bin->stats.nonfull_slabs++;
 	}
 }

 static void
-arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
-	extent_heap_remove(&bin->slabs_nonfull, slab);
+arena_bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab) {
+	edata_heap_remove(&bin->slabs_nonfull, slab);
 	if (config_stats) {
 		bin->stats.nonfull_slabs--;
 	}
 }

-static extent_t *
+static edata_t *
 arena_bin_slabs_nonfull_tryget(bin_t *bin) {
-	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
+	edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull);
 	if (slab == NULL) {
 		return NULL;
 	}
...
...
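The removed arena_decay_backlog_npages_limit() weighted each backlog slot by a fixed-point smoothstep factor and shifted the sum back down to get the number of unused pages still allowed to stay unpurged. A standalone sketch of that computation; the linear ramp below stands in for jemalloc's h_steps[] table, and NSTEPS/BFP are illustrative values:

/* Standalone sketch, not jemalloc code. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NSTEPS 8
#define BFP    24           /* fixed point: weights are in [0, 2^BFP] */

static size_t
backlog_npages_limit(const size_t backlog[NSTEPS]) {
	uint64_t sum = 0;
	for (unsigned i = 0; i < NSTEPS; i++) {
		/* Older slots (small i) get smaller weights, so their pages
		 * are mostly "expired"; the newest slot keeps full weight. */
		uint64_t h = ((uint64_t)(i + 1) << BFP) / NSTEPS;
		sum += (uint64_t)backlog[i] * h;
	}
	return (size_t)(sum >> BFP);
}

int
main(void) {
	size_t backlog[NSTEPS] = {0};
	backlog[NSTEPS - 1] = 800;   /* pages freed in the newest epoch */
	backlog[0] = 800;            /* pages freed NSTEPS epochs ago */
	printf("limit = %zu pages\n", backlog_npages_limit(backlog));
	return 0;
}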
@@ -1040,30 +604,30 @@ arena_bin_slabs_nonfull_tryget(bin_t *bin) {
 }

 static void
-arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
-	assert(extent_nfree_get(slab) == 0);
+arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) {
+	assert(edata_nfree_get(slab) == 0);
 	/*
 	 * Tracking extents is required by arena_reset, which is not allowed
-	 * for auto arenas.  Bypass this step to avoid touching the extent
+	 * for auto arenas.  Bypass this step to avoid touching the edata
 	 * linkage (often results in cache misses) for auto arenas.
 	 */
 	if (arena_is_auto(arena)) {
 		return;
 	}
-	extent_list_append(&bin->slabs_full, slab);
+	edata_list_active_append(&bin->slabs_full, slab);
 }

 static void
-arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
+arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) {
 	if (arena_is_auto(arena)) {
 		return;
 	}
-	extent_list_remove(&bin->slabs_full, slab);
+	edata_list_active_remove(&bin->slabs_full, slab);
 }

 static void
 arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
-	extent_t *slab;
+	edata_t *slab;

 	malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
 	if (bin->slabcur != NULL) {
...
...
@@ -1073,13 +637,13 @@ arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
 		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
 	}
-	while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
-	    NULL) {
+	while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
 		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
 	}
-	for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
-	    slab = extent_list_first(&bin->slabs_full)) {
+	for (slab = edata_list_active_first(&bin->slabs_full); slab != NULL;
+	    slab = edata_list_active_first(&bin->slabs_full)) {
 		arena_bin_slabs_full_remove(arena, bin, slab);
 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
 		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
...
...
@@ -1111,16 +675,15 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 	/* Large allocations. */
 	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);

-	for (extent_t *extent = extent_list_first(&arena->large); extent !=
-	    NULL; extent = extent_list_first(&arena->large)) {
-		void *ptr = extent_base_get(extent);
+	for (edata_t *edata = edata_list_active_first(&arena->large);
+	    edata != NULL; edata = edata_list_active_first(&arena->large)) {
+		void *ptr = edata_base_get(edata);
 		size_t usize;

 		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
-		alloc_ctx_t alloc_ctx;
-		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
-		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
-		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+		emap_alloc_ctx_t alloc_ctx;
+		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+		    &alloc_ctx);
 		assert(alloc_ctx.szind != SC_NSIZES);

 		if (config_stats || (config_prof && opt_prof)) {
...
...
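arena_reset() above drains arena->large by repeatedly taking the first element of an intrusive list and dropping the list lock around the expensive free. A simplified standalone sketch of that drain pattern (the list and node types are made up; unlike the real code, the sketch unlinks the node itself before freeing it):

/* Standalone sketch, not jemalloc code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct node_s { struct node_s *next; } node_t;

typedef struct {
	pthread_mutex_t mtx;
	node_t *head;
} list_t;

static void
drain(list_t *l) {
	pthread_mutex_lock(&l->mtx);
	for (node_t *n = l->head; n != NULL; n = l->head) {
		l->head = n->next;                /* unlink first element */
		pthread_mutex_unlock(&l->mtx);    /* free without the lock */
		free(n);
		pthread_mutex_lock(&l->mtx);
	}
	pthread_mutex_unlock(&l->mtx);
}

int
main(void) {
	list_t l = {PTHREAD_MUTEX_INITIALIZER, NULL};
	for (int i = 0; i < 3; i++) {
		node_t *n = malloc(sizeof(*n));
		n->next = l.head;
		l.head = n;
	}
	drain(&l);
	printf("drained\n");
	return 0;
}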
@@ -1131,7 +694,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 		if (config_prof && opt_prof) {
 			prof_free(tsd, ptr, usize, &alloc_ctx);
 		}
-		large_dalloc(tsd_tsdn(tsd), extent);
+		large_dalloc(tsd_tsdn(tsd), edata);
 		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
 	}
 	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
...
...
@@ -1139,32 +702,95 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 	/* Bins. */
 	for (unsigned i = 0; i < SC_NBINS; i++) {
 		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
-			arena_bin_reset(tsd, arena,
-			    &arena->bins[i].bin_shards[j]);
+			arena_bin_reset(tsd, arena, arena_get_bin(arena, i, j));
 		}
 	}
-
-	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
+	pa_shard_reset(tsd_tsdn(tsd), &arena->pa_shard);
 }

-static void
-arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
-	/*
-	 * Iterate over the retained extents and destroy them.  This gives the
-	 * extent allocator underlying the extent hooks an opportunity to unmap
-	 * all retained memory without having to keep its own metadata
-	 * structures.  In practice, virtual memory for dss-allocated extents
-	 * is leaked here, so best practice is to avoid dss for arenas to be
-	 * destroyed, or provide custom extent hooks that track retained
-	 * dss-based extents for later reuse.
-	 */
-	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
-	extent_t *extent;
-	while ((extent = extents_evict(tsdn, arena, &extent_hooks,
-	    &arena->extents_retained, 0)) != NULL) {
-		extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
-	}
-}
+static void
+arena_prepare_base_deletion_sync_finish(tsd_t *tsd, malloc_mutex_t **mutexes,
+    unsigned n_mtx) {
+	for (unsigned i = 0; i < n_mtx; i++) {
+		malloc_mutex_lock(tsd_tsdn(tsd), mutexes[i]);
+		malloc_mutex_unlock(tsd_tsdn(tsd), mutexes[i]);
+	}
+}
+
+#define ARENA_DESTROY_MAX_DELAYED_MTX 32
+static void
+arena_prepare_base_deletion_sync(tsd_t *tsd, malloc_mutex_t *mtx,
+    malloc_mutex_t **delayed_mtx, unsigned *n_delayed) {
+	if (!malloc_mutex_trylock(tsd_tsdn(tsd), mtx)) {
+		/* No contention. */
+		malloc_mutex_unlock(tsd_tsdn(tsd), mtx);
+		return;
+	}
+	unsigned n = *n_delayed;
+	assert(n < ARENA_DESTROY_MAX_DELAYED_MTX);
+	/* Add another to the batch. */
+	delayed_mtx[n++] = mtx;
+	if (n == ARENA_DESTROY_MAX_DELAYED_MTX) {
+		arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n);
+		n = 0;
+	}
+	*n_delayed = n;
+}
+
+static void
+arena_prepare_base_deletion(tsd_t *tsd, base_t *base_to_destroy) {
+	/*
+	 * In order to coalesce, emap_try_acquire_edata_neighbor will attempt to
+	 * check neighbor edata's state to determine eligibility.  This means
+	 * under certain conditions, the metadata from an arena can be accessed
+	 * w/o holding any locks from that arena.  In order to guarantee safe
+	 * memory access, the metadata and the underlying base allocator needs
+	 * to be kept alive, until all pending accesses are done.
+	 *
+	 * 1) with opt_retain, the arena boundary implies the is_head state
+	 * (tracked in the rtree leaf), and the coalesce flow will stop at the
+	 * head state branch.  Therefore no cross arena metadata access
+	 * possible.
+	 *
+	 * 2) w/o opt_retain, the arena id needs to be read from the edata_t,
+	 * meaning read only cross-arena metadata access is possible.  The
+	 * coalesce attempt will stop at the arena_id mismatch, and is always
+	 * under one of the ecache locks.  To allow safe passthrough of such
+	 * metadata accesses, the loop below will iterate through all manual
+	 * arenas' ecache locks.  As all the metadata from this base allocator
+	 * have been unlinked from the rtree, after going through all the
+	 * relevant ecache locks, it's safe to say that a) pending accesses are
+	 * all finished, and b) no new access will be generated.
+	 */
+	if (opt_retain) {
+		return;
+	}
+	unsigned destroy_ind = base_ind_get(base_to_destroy);
+	assert(destroy_ind >= manual_arena_base);
+
+	tsdn_t *tsdn = tsd_tsdn(tsd);
+	malloc_mutex_t *delayed_mtx[ARENA_DESTROY_MAX_DELAYED_MTX];
+	unsigned n_delayed = 0, total = narenas_total_get();
+	for (unsigned i = 0; i < total; i++) {
+		if (i == destroy_ind) {
+			continue;
+		}
+		arena_t *arena = arena_get(tsdn, i, false);
+		if (arena == NULL) {
+			continue;
+		}
+		pac_t *pac = &arena->pa_shard.pac;
+		arena_prepare_base_deletion_sync(tsd, &pac->ecache_dirty.mtx,
+		    delayed_mtx, &n_delayed);
+		arena_prepare_base_deletion_sync(tsd, &pac->ecache_muzzy.mtx,
+		    delayed_mtx, &n_delayed);
+		arena_prepare_base_deletion_sync(tsd, &pac->ecache_retained.mtx,
+		    delayed_mtx, &n_delayed);
+	}
+	arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n_delayed);
+}
+#undef ARENA_DESTROY_MAX_DELAYED_MTX

 void
 arena_destroy(tsd_t *tsd, arena_t *arena) {
...
...
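arena_prepare_base_deletion_sync() above defers contended mutexes into a batch and later does a lock/unlock pass over them, which guarantees that every critical section that was in flight when it looked has finished. A standalone sketch of that idea with pthreads; MAX_DELAYED and the function names here are illustrative:

/* Standalone sketch, not jemalloc code. */
#include <pthread.h>
#include <stdio.h>

#define MAX_DELAYED 32

static void
sync_finish(pthread_mutex_t **delayed, unsigned n) {
	for (unsigned i = 0; i < n; i++) {
		pthread_mutex_lock(delayed[i]);   /* waits out current owner */
		pthread_mutex_unlock(delayed[i]);
	}
}

static void
sync_one(pthread_mutex_t *mtx, pthread_mutex_t **delayed, unsigned *n) {
	if (pthread_mutex_trylock(mtx) == 0) {
		/* Uncontended: nobody is inside, nothing to wait for. */
		pthread_mutex_unlock(mtx);
		return;
	}
	delayed[(*n)++] = mtx;                    /* defer the blocking wait */
	if (*n == MAX_DELAYED) {
		sync_finish(delayed, *n);
		*n = 0;
	}
}

int
main(void) {
	pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t *delayed[MAX_DELAYED];
	unsigned n = 0;
	sync_one(&a, delayed, &n);
	sync_finish(delayed, n);
	printf("synced\n");
	return 0;
}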
@@ -1175,13 +801,10 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
 	/*
 	 * No allocations have occurred since arena_reset() was called.
 	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
-	 * extents, so only retained extents may remain.
+	 * extents, so only retained extents may remain and it's safe to call
+	 * pa_shard_destroy_retained.
 	 */
-	assert(extents_npages_get(&arena->extents_dirty) == 0);
-	assert(extents_npages_get(&arena->extents_muzzy) == 0);

 	/* Deallocate retained memory. */
-	arena_destroy_retained(tsd_tsdn(tsd), arena);
+	pa_shard_destroy(tsd_tsdn(tsd), &arena->pa_shard);

 	/*
 	 * Remove the arena pointer from the arenas array.  We rely on the fact
...
...
@@ -1197,316 +820,370 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
/*
* Destroy the base allocator, which manages all metadata ever mapped by
* this arena.
* this arena. The prepare function will make sure no pending access to
* the metadata in this base anymore.
*/
arena_prepare_base_deletion
(
tsd
,
arena
->
base
);
base_delete
(
tsd_tsdn
(
tsd
),
arena
->
base
);
}
static
extent_t
*
arena_slab_alloc_hard
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
extent_hooks_t
**
r_extent_hooks
,
const
bin_info_t
*
bin_info
,
szind_t
szind
)
{
extent_t
*
slab
;
bool
zero
,
commit
;
witness_assert_depth_to_rank
(
tsdn_witness_tsdp_get
(
tsdn
),
WITNESS_RANK_CORE
,
0
);
zero
=
false
;
commit
=
true
;
slab
=
extent_alloc_wrapper
(
tsdn
,
arena
,
r_extent_hooks
,
NULL
,
bin_info
->
slab_size
,
0
,
PAGE
,
true
,
szind
,
&
zero
,
&
commit
);
if
(
config_stats
&&
slab
!=
NULL
)
{
arena_stats_mapped_add
(
tsdn
,
&
arena
->
stats
,
bin_info
->
slab_size
);
}
return
slab
;
}
static
extent_t
*
static
edata_t
*
arena_slab_alloc
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
szind_t
binind
,
unsigned
binshard
,
const
bin_info_t
*
bin_info
)
{
bool
deferred_work_generated
=
false
;
witness_assert_depth_to_rank
(
tsdn_witness_tsdp_get
(
tsdn
),
WITNESS_RANK_CORE
,
0
);
extent_hooks_t
*
extent_hooks
=
EXTENT_HOOKS_INITIALIZER
;
szind_t
szind
=
sz_size2index
(
bin_info
->
reg_size
);
bool
zero
=
false
;
bool
commit
=
true
;
extent_t
*
slab
=
extents_alloc
(
tsdn
,
arena
,
&
extent_hooks
,
&
arena
->
extents_dirty
,
NULL
,
bin_info
->
slab_size
,
0
,
PAGE
,
true
,
binind
,
&
zero
,
&
commit
);
if
(
slab
==
NULL
&&
arena_may_have_muzzy
(
arena
))
{
slab
=
extents_alloc
(
tsdn
,
arena
,
&
extent_hooks
,
&
arena
->
extents_muzzy
,
NULL
,
bin_info
->
slab_size
,
0
,
PAGE
,
true
,
binind
,
&
zero
,
&
commit
);
}
if
(
slab
==
NULL
)
{
slab
=
arena_slab_alloc_hard
(
tsdn
,
arena
,
&
extent_hooks
,
bin_info
,
szind
);
if
(
slab
==
NULL
)
{
return
NULL
;
}
}
assert
(
extent_slab_get
(
slab
));
/* Initialize slab internals. */
arena_slab_data_t
*
slab_data
=
extent_slab_data_get
(
slab
);
extent_nfree_binshard_set
(
slab
,
bin_info
->
nregs
,
binshard
);
bitmap_init
(
slab_data
->
bitmap
,
&
bin_info
->
bitmap_info
,
false
);
bool
guarded
=
san_slab_extent_decide_guard
(
tsdn
,
arena_get_ehooks
(
arena
));
edata_t
*
slab
=
pa_alloc
(
tsdn
,
&
arena
->
pa_shard
,
bin_info
->
slab_size
,
/* alignment */
PAGE
,
/* slab */
true
,
/* szind */
binind
,
/* zero */
false
,
guarded
,
&
deferred_work_generated
);
arena_nactive_add
(
arena
,
extent_size_get
(
slab
)
>>
LG_PAGE
);
return
slab
;
}
static
extent_t
*
arena_bin_nonfull_slab_get
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
bin_t
*
bin
,
szind_t
binind
,
unsigned
binshard
)
{
extent_t
*
slab
;
const
bin_info_t
*
bin_info
;
if
(
deferred_work_generated
)
{
arena_handle_deferred_work
(
tsdn
,
arena
);
}
/* Look for a usable slab. */
slab
=
arena_bin_slabs_nonfull_tryget
(
bin
);
if
(
slab
!=
NULL
)
{
return
slab
;
if
(
slab
==
NULL
)
{
return
NULL
;
}
/* No existing slabs have any space available. */
assert
(
edata_slab_get
(
slab
));
bin_info
=
&
bin_infos
[
binind
];
/* Initialize slab internals. */
slab_data_t
*
slab_data
=
edata_slab_data_get
(
slab
);
edata_nfree_binshard_set
(
slab
,
bin_info
->
nregs
,
binshard
);
bitmap_init
(
slab_data
->
bitmap
,
&
bin_info
->
bitmap_info
,
false
);
/* Allocate a new slab. */
malloc_mutex_unlock
(
tsdn
,
&
bin
->
lock
);
/******************************/
slab
=
arena_slab_alloc
(
tsdn
,
arena
,
binind
,
binshard
,
bin_info
);
/********************************/
malloc_mutex_lock
(
tsdn
,
&
bin
->
lock
);
if
(
slab
!=
NULL
)
{
return
slab
;
}
/*
* Before attempting the _with_fresh_slab approaches below, the _no_fresh_slab
* variants (i.e. through slabcur and nonfull) must be tried first.
*/
static
void
arena_bin_refill_slabcur_with_fresh_slab
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
bin_t
*
bin
,
szind_t
binind
,
edata_t
*
fresh_slab
)
{
malloc_mutex_assert_owner
(
tsdn
,
&
bin
->
lock
);
/* Only called after slabcur and nonfull both failed. */
assert
(
bin
->
slabcur
==
NULL
);
assert
(
edata_heap_first
(
&
bin
->
slabs_nonfull
)
==
NULL
);
assert
(
fresh_slab
!=
NULL
);
/* A new slab from arena_slab_alloc() */
assert
(
edata_nfree_get
(
fresh_slab
)
==
bin_infos
[
binind
].
nregs
);
if
(
config_stats
)
{
bin
->
stats
.
nslabs
++
;
bin
->
stats
.
curslabs
++
;
}
return
slab
;
}
bin
->
slabcur
=
fresh_
slab
;
}
/*
* arena_slab_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped bin->lock above,
* so search one more time.
*/
slab
=
arena_bin_slabs_nonfull_tryget
(
bin
);
if
(
slab
!=
NULL
)
{
return
slab
;
}
/* Refill slabcur and then alloc using the fresh slab */
static
void
*
arena_bin_malloc_with_fresh_slab
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
bin_t
*
bin
,
szind_t
binind
,
edata_t
*
fresh_slab
)
{
malloc_mutex_assert_owner
(
tsdn
,
&
bin
->
lock
);
arena_bin_refill_slabcur_with_fresh_slab
(
tsdn
,
arena
,
bin
,
binind
,
fresh_slab
);
return
NULL
;
return
arena_slab_reg_alloc
(
bin
->
slabcur
,
&
bin_infos
[
binind
])
;
}
/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static
void
*
arena_bin_malloc_hard
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
bin_t
*
bin
,
szind_t
binind
,
unsigned
binshard
)
{
const
bin_info_t
*
bin_info
;
extent_t
*
slab
;
static
bool
arena_bin_refill_slabcur_no_fresh_slab
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
bin_t
*
bin
)
{
malloc_mutex_assert_owner
(
tsdn
,
&
bin
->
lock
);
/* Only called after arena_slab_reg_alloc[_batch] failed. */
assert
(
bin
->
slabcur
==
NULL
||
edata_nfree_get
(
bin
->
slabcur
)
==
0
)
;
bin_info
=
&
bin_infos
[
binind
];
if
(
!
arena_is_auto
(
arena
)
&&
bin
->
slabcur
!=
NULL
)
{
arena_bin_slabs_full_insert
(
arena
,
bin
,
bin
->
slabcur
);
bin
->
slabcur
=
NULL
;
}
slab
=
arena_bin_nonfull_slab_get
(
tsdn
,
arena
,
bin
,
binind
,
binshard
);
if
(
bin
->
slabcur
!=
NULL
)
{
/*
* Another thread updated slabcur while this one ran without the
* bin lock in arena_bin_nonfull_slab_get().
*/
if
(
extent_nfree_get
(
bin
->
slabcur
)
>
0
)
{
void
*
ret
=
arena_slab_reg_alloc
(
bin
->
slabcur
,
bin_info
);
if
(
slab
!=
NULL
)
{
/*
* arena_slab_alloc() may have allocated slab,
* or it may have been pulled from
* slabs_nonfull. Therefore it is unsafe to
* make any assumptions about how slab has
* previously been used, and
* arena_bin_lower_slab() must be called, as if
* a region were just deallocated from the slab.
*/
if
(
extent_nfree_get
(
slab
)
==
bin_info
->
nregs
)
{
arena_dalloc_bin_slab
(
tsdn
,
arena
,
slab
,
bin
);
}
else
{
arena_bin_lower_slab
(
tsdn
,
arena
,
slab
,
bin
);
}
}
return
ret
;
}
arena_bin_slabs_full_insert
(
arena
,
bin
,
bin
->
slabcur
);
bin
->
slabcur
=
NULL
;
}
if
(
slab
==
NULL
)
{
return
NULL
;
}
bin
->
slabcur
=
slab
;
assert
(
extent_nfree_get
(
bin
->
slabcur
)
>
0
);
/* Look for a usable slab. */
bin
->
slabcur
=
arena_bin_slabs_nonfull_tryget
(
bin
);
assert
(
bin
->
slabcur
==
NULL
||
edata_nfree_get
(
bin
->
slabcur
)
>
0
);
return
arena_slab_reg_alloc
(
slab
,
bin_info
);
return
(
bin
->
slabcur
==
NULL
);
}
/* Choose a bin shard and return the locked bin. */
bin_t
*
arena_bin_choose
_lock
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
szind_t
binind
,
unsigned
*
binshard
)
{
bin_t
*
bin
;
arena_bin_choose
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
szind_t
binind
,
unsigned
*
binshard
_p
)
{
unsigned
binshard
;
if
(
tsdn_null
(
tsdn
)
||
tsd_arena_get
(
tsdn_tsd
(
tsdn
))
==
NULL
)
{
*
binshard
=
0
;
binshard
=
0
;
}
else
{
*
binshard
=
tsd_binshardsp_get
(
tsdn_tsd
(
tsdn
))
->
binshard
[
binind
];
binshard
=
tsd_binshardsp_get
(
tsdn_tsd
(
tsdn
))
->
binshard
[
binind
];
}
assert
(
*
binshard
<
bin_infos
[
binind
].
n_shards
);
bin
=
&
arena
->
bins
[
binind
].
bin_shards
[
*
binshard
];
malloc_mutex_lock
(
tsdn
,
&
bin
->
lock
)
;
return
bin
;
assert
(
binshard
<
bin_infos
[
binind
].
n_shards
);
if
(
binshard_p
!=
NULL
)
{
*
binshard_p
=
binshard
;
}
return
arena_get_bin
(
arena
,
binind
,
binshard
)
;
}
 void
-arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
-    cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
-   unsigned i, nfill, cnt;
-
-   assert(tbin->ncached == 0);
-
-   if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
-       prof_idump(tsdn);
-   }
-
-   unsigned binshard;
-   bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
-
-   for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
-       tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
-       extent_t *slab;
-       if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
-           unsigned tofill = nfill - i;
-           cnt = tofill < extent_nfree_get(slab) ?
-               tofill : extent_nfree_get(slab);
-           arena_slab_reg_alloc_batch(slab, &bin_infos[binind], cnt,
-               tbin->avail - nfill + i);
-       } else {
-           cnt = 1;
-           void *ptr = arena_bin_malloc_hard(tsdn, arena, bin,
-               binind, binshard);
-           /*
-            * OOM.  tbin->avail isn't yet filled down to its first
-            * element, so the successful allocations (if any) must
-            * be moved just before tbin->avail before bailing out.
-            */
-           if (ptr == NULL) {
-               if (i > 0) {
-                   memmove(tbin->avail - i, tbin->avail - nfill,
-                       i * sizeof(void *));
-               }
-               break;
-           }
-           /* Insert such that low regions get used first. */
-           *(tbin->avail - nfill + i) = ptr;
-       }
+arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
+    cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
+    const unsigned nfill) {
+   assert(cache_bin_ncached_get_local(cache_bin, cache_bin_info) == 0);
+
+   const bin_info_t *bin_info = &bin_infos[binind];
+
+   CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill);
+   cache_bin_init_ptr_array_for_fill(cache_bin, cache_bin_info, &ptrs,
+       nfill);
+   /*
+    * Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull
+    * slabs.  After both are exhausted, new slabs will be allocated through
+    * arena_slab_alloc().
+    *
+    * Bin lock is only taken / released right before / after the while(...)
+    * refill loop, with new slab allocation (which has its own locking)
+    * kept outside of the loop.  This setup facilitates flat combining, at
+    * the cost of the nested loop (through goto label_refill).
+    *
+    * To optimize for cases with contention and limited resources
+    * (e.g. hugepage-backed or non-overcommit arenas), each fill-iteration
+    * gets one chance of slab_alloc, and a retry of bin local resources
+    * after the slab allocation (regardless if slab_alloc failed, because
+    * the bin lock is dropped during the slab allocation).
+    *
+    * In other words, new slab allocation is allowed, as long as there was
+    * progress since the previous slab_alloc.  This is tracked with
+    * made_progress below, initialized to true to jump start the first
+    * iteration.
+    *
+    * In other words (again), the loop will only terminate early (i.e. stop
+    * with filled < nfill) after going through the three steps: a) bin
+    * local exhausted, b) unlock and slab_alloc returns null, c) re-lock
+    * and bin local fails again.
+    */
+   bool made_progress = true;
+   edata_t *fresh_slab = NULL;
+   bool alloc_and_retry = false;
+   unsigned filled = 0;
+   unsigned binshard;
+   bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
+
+label_refill:
+   malloc_mutex_lock(tsdn, &bin->lock);
+
+   while (filled < nfill) {
+       /* Try batch-fill from slabcur first. */
+       edata_t *slabcur = bin->slabcur;
+       if (slabcur != NULL && edata_nfree_get(slabcur) > 0) {
+           unsigned tofill = nfill - filled;
+           unsigned nfree = edata_nfree_get(slabcur);
+           unsigned cnt = tofill < nfree ? tofill : nfree;
+
+           arena_slab_reg_alloc_batch(slabcur, bin_info, cnt,
+               &ptrs.ptr[filled]);
+           made_progress = true;
+           filled += cnt;
+           continue;
+       }
+       /* Next try refilling slabcur from nonfull slabs. */
+       if (!arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
+           assert(bin->slabcur != NULL);
+           continue;
+       }
+       /* Then see if a new slab was reserved already. */
+       if (fresh_slab != NULL) {
+           arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena,
+               bin, binind, fresh_slab);
+           assert(bin->slabcur != NULL);
+           fresh_slab = NULL;
+           continue;
+       }
+       /* Try slab_alloc if made progress (or never did slab_alloc). */
+       if (made_progress) {
+           assert(bin->slabcur == NULL);
+           assert(fresh_slab == NULL);
+           alloc_and_retry = true;
+           /* Alloc a new slab then come back. */
+           break;
+       }
+       /* OOM. */
+       assert(fresh_slab == NULL);
+       assert(!alloc_and_retry);
+       break;
+   } /* while (filled < nfill) loop. */
+
+   if (config_stats && !alloc_and_retry) {
+       bin->stats.nmalloc += filled;
+       bin->stats.nrequests += cache_bin->tstats.nrequests;
+       bin->stats.curregs += filled;
+       bin->stats.nfills++;
+       cache_bin->tstats.nrequests = 0;
+   }
+   malloc_mutex_unlock(tsdn, &bin->lock);
+
+   if (alloc_and_retry) {
+       assert(fresh_slab == NULL);
+       assert(filled < nfill);
+       assert(made_progress);
+
+       fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
+           bin_info);
+       /* fresh_slab NULL case handled in the for loop. */
+
+       alloc_and_retry = false;
+       made_progress = false;
+       goto label_refill;
+   }
+   assert(filled == nfill || (fresh_slab == NULL && !made_progress));
+
+   /* Release if allocated but not used. */
+   if (fresh_slab != NULL) {
+       assert(edata_nfree_get(fresh_slab) == bin_info->nregs);
+       arena_slab_dalloc(tsdn, arena, fresh_slab);
+       fresh_slab = NULL;
+   }
+
+   cache_bin_finish_fill(cache_bin, cache_bin_info, &ptrs, filled);
+   arena_decay_tick(tsdn, arena);
}
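The comment inside the function above describes a refill loop that only permits another fresh-slab allocation while progress is still being made since the previous one. The standalone sketch below models that control flow with a plain counter-based pool; pool_t, pool_take, and pool_grow are illustrative assumptions, not jemalloc APIs, and the locking/goto structure is intentionally simplified away.

/* Sketch of the "retry while progress" refill pattern. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    int avail;      /* items currently available locally */
    int grows_left; /* how many times the pool may still grow */
} pool_t;

static int
pool_take(pool_t *p, int want) {
    int n = want < p->avail ? want : p->avail;
    p->avail -= n;
    return n;
}

/* May fail; returns the number of items a fresh "slab" adds. */
static int
pool_grow(pool_t *p) {
    if (p->grows_left == 0) {
        return 0;
    }
    p->grows_left--;
    return 8;
}

static int
fill(pool_t *p, int nfill) {
    int filled = 0;
    bool made_progress = true; /* jump-starts the first grow attempt */

    while (filled < nfill) {
        int got = pool_take(p, nfill - filled);
        if (got > 0) {
            filled += got;
            made_progress = true;
            continue;
        }
        if (!made_progress) {
            break; /* no progress since the last grow: give up (OOM) */
        }
        made_progress = false;
        p->avail += pool_grow(p); /* one grow attempt per stall */
    }
    return filled;
}

int
main(void) {
    pool_t p = {5, 2};
    printf("filled %d of 20\n", fill(&p, 20)); /* 5 + 8 + 7 = 20 */
    return 0;
}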
-       if (config_fill && unlikely(opt_junk_alloc)) {
-           for (unsigned j = 0; j < cnt; j++) {
-               void *ptr = *(tbin->avail - nfill + i + j);
-               arena_alloc_junk_small(ptr, &bin_infos[binind], true);
-           }
-       }
-   }
-   if (config_stats) {
-       bin->stats.nmalloc += i;
-       bin->stats.nrequests += tbin->tstats.nrequests;
-       bin->stats.curregs += i;
-       bin->stats.nfills++;
-       tbin->tstats.nrequests = 0;
-   }
-   malloc_mutex_unlock(tsdn, &bin->lock);
-   tbin->ncached = i;
-   arena_decay_tick(tsdn, arena);
-}
+size_t
+arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+    void **ptrs, size_t nfill, bool zero) {
+   assert(binind < SC_NBINS);
+   const bin_info_t *bin_info = &bin_infos[binind];
+   const size_t nregs = bin_info->nregs;
+   assert(nregs > 0);
+   const size_t usize = bin_info->reg_size;
+
+   const bool manual_arena = !arena_is_auto(arena);
+   unsigned binshard;
+   bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
+
+   size_t nslab = 0;
+   size_t filled = 0;
+   edata_t *slab = NULL;
+   edata_list_active_t fulls;
+   edata_list_active_init(&fulls);
+
+   while (filled < nfill && (slab = arena_slab_alloc(tsdn, arena, binind,
+       binshard, bin_info)) != NULL) {
+       assert((size_t)edata_nfree_get(slab) == nregs);
+       ++nslab;
+       size_t batch = nfill - filled;
+       if (batch > nregs) {
+           batch = nregs;
+       }
+       assert(batch > 0);
+       arena_slab_reg_alloc_batch(slab, bin_info, (unsigned)batch,
+           &ptrs[filled]);
+       assert(edata_addr_get(slab) == ptrs[filled]);
+       if (zero) {
+           memset(ptrs[filled], 0, batch * usize);
+       }
+       filled += batch;
+       if (batch == nregs) {
+           if (manual_arena) {
+               edata_list_active_append(&fulls, slab);
+           }
+           slab = NULL;
+       }
+   }
+
+   malloc_mutex_lock(tsdn, &bin->lock);
+   /*
+    * Only the last slab can be non-empty, and the last slab is non-empty
+    * iff slab != NULL.
+    */
+   if (slab != NULL) {
+       arena_bin_lower_slab(tsdn, arena, slab, bin);
+   }
+   if (manual_arena) {
+       edata_list_active_concat(&bin->slabs_full, &fulls);
+   }
+   assert(edata_list_active_empty(&fulls));
+   if (config_stats) {
+       bin->stats.nslabs += nslab;
+       bin->stats.curslabs += nslab;
+       bin->stats.nmalloc += filled;
+       bin->stats.nrequests += filled;
+       bin->stats.curregs += filled;
+   }
+   malloc_mutex_unlock(tsdn, &bin->lock);
+
+   arena_decay_tick(tsdn, arena);
+   return filled;
+}
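The fresh-slab fill above carves at most nregs regions out of each newly allocated slab, so only the final slab can end up partially used. A tiny standalone sketch of that batching arithmetic follows; the region count and request size are arbitrary assumptions for illustration.

/* Sketch: splitting an nfill request across fixed-capacity slabs. */
#include <stdio.h>

int
main(void) {
    const unsigned nregs = 64;  /* regions per slab (assumed) */
    const unsigned nfill = 150; /* regions requested */

    unsigned filled = 0, nslab = 0;
    while (filled < nfill) {
        unsigned batch = nfill - filled;
        if (batch > nregs) {
            batch = nregs;
        }
        filled += batch;
        nslab++;
        printf("slab %u: %u regions (%s)\n", nslab, batch,
            batch == nregs ? "full" : "partial");
    }
    /* 150 = 64 + 64 + 22: two full slabs and one partial slab. */
    return 0;
}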
-void
-arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
-   if (!zero) {
-       memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
-   }
-}
-
-static void
-arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
-   memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
-}
-arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
-    arena_dalloc_junk_small_impl;
+/*
+ * Without allocating a new slab, try arena_slab_reg_alloc() and re-fill
+ * bin->slabcur if necessary.
+ */
+static void *
+arena_bin_malloc_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+    szind_t binind) {
+   malloc_mutex_assert_owner(tsdn, &bin->lock);
+   if (bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0) {
+       if (arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
+           return NULL;
+       }
+   }
+
+   assert(bin->slabcur != NULL && edata_nfree_get(bin->slabcur) > 0);
+   return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
+}
 static void *
 arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
-   void *ret;
-   bin_t *bin;
-   size_t usize;
-   extent_t *slab;
-
    assert(binind < SC_NBINS);
-   usize = sz_index2size(binind);
+   const bin_info_t *bin_info = &bin_infos[binind];
+   size_t usize = sz_index2size(binind);
    unsigned binshard;
-   bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
-
-   if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
-       ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
-   } else {
-       ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
-   }
+   bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
+   malloc_mutex_lock(tsdn, &bin->lock);
+
+   edata_t *fresh_slab = NULL;
+   void *ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
    if (ret == NULL) {
        malloc_mutex_unlock(tsdn, &bin->lock);
-       return NULL;
+       /******************************/
+       fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
+           bin_info);
+       /********************************/
+       malloc_mutex_lock(tsdn, &bin->lock);
+       /* Retry since the lock was dropped. */
+       ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
+       if (ret == NULL) {
+           if (fresh_slab == NULL) {
+               /* OOM */
+               malloc_mutex_unlock(tsdn, &bin->lock);
+               return NULL;
+           }
+           ret = arena_bin_malloc_with_fresh_slab(tsdn, arena, bin,
+               binind, fresh_slab);
+           fresh_slab = NULL;
+       }
    }
 
    if (config_stats) {
        bin->stats.nmalloc++;
        bin->stats.nrequests++;
        bin->stats.curregs++;
    }
    malloc_mutex_unlock(tsdn, &bin->lock);
-   if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
-       prof_idump(tsdn);
-   }
 
-   if (!zero) {
-       if (config_fill) {
-           if (unlikely(opt_junk_alloc)) {
-               arena_alloc_junk_small(ret, &bin_infos[binind],
-                   false);
-           } else if (unlikely(opt_zero)) {
-               memset(ret, 0, usize);
-           }
-       }
-   } else {
-       if (config_fill && unlikely(opt_junk_alloc)) {
-           arena_alloc_junk_small(ret, &bin_infos[binind], true);
-       }
+   if (fresh_slab != NULL) {
+       arena_slab_dalloc(tsdn, arena, fresh_slab);
+   }
+   if (zero) {
        memset(ret, 0, usize);
    }
-
    arena_decay_tick(tsdn, arena);
    return ret;
}
...
...
@@ -1533,10 +1210,17 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     bool zero, tcache_t *tcache) {
    void *ret;
 
-   if (usize <= SC_SMALL_MAXCLASS && (alignment < PAGE
-       || (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
+   if (usize <= SC_SMALL_MAXCLASS) {
        /* Small; alignment doesn't require special slab placement. */
+
+       /* usize should be a result of sz_sa2u() */
+       assert((usize & (alignment - 1)) == 0);
+
+       /*
+        * Small usize can't come from an alignment larger than a page.
+        */
+       assert(alignment <= PAGE);
+
        ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
            zero, tcache, true);
    } else {
...
...
@@ -1560,33 +1244,22 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
        safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
    }
 
-   rtree_ctx_t rtree_ctx_fallback;
-   rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-   extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
-       (uintptr_t)ptr, true);
-   arena_t *arena = extent_arena_get(extent);
+   edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
 
    szind_t szind = sz_size2index(usize);
-   extent_szind_set(extent, szind);
-   rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
-       szind, false);
-
-   prof_accum_cancel(tsdn, &arena->prof_accum, usize);
+   edata_szind_set(edata, szind);
+   emap_remap(tsdn, &arena_emap_global, edata, szind, /* slab */ false);
 
    assert(isalloc(tsdn, ptr) == usize);
}
 
 static size_t
-arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
+arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
    cassert(config_prof);
    assert(ptr != NULL);
 
-   extent_szind_set(extent, SC_NBINS);
-   rtree_ctx_t rtree_ctx_fallback;
-   rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-   rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
-       SC_NBINS, false);
+   edata_szind_set(edata, SC_NBINS);
+   emap_remap(tsdn, &arena_emap_global, edata, SC_NBINS, /* slab */ false);
 
    assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
...
...
@@ -1599,9 +1272,9 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    cassert(config_prof);
    assert(opt_prof);
 
-   extent_t *extent = iealloc(tsdn, ptr);
-   size_t usize = extent_usize_get(extent);
-   size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr);
+   edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+   size_t usize = edata_usize_get(edata);
+   size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr);
    if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
        /*
         * Currently, we only do redzoning for small sampled
...
...
@@ -1614,17 +1287,17 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
        tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
            sz_size2index(bumped_usize), slow_path);
    } else {
-       large_dalloc(tsdn, extent);
+       large_dalloc(tsdn, edata);
    }
}
 
 static void
-arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
+arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) {
    /* Dissociate slab from bin. */
    if (slab == bin->slabcur) {
        bin->slabcur = NULL;
    } else {
-       szind_t binind = extent_szind_get(slab);
+       szind_t binind = edata_szind_get(slab);
        const bin_info_t *bin_info = &bin_infos[binind];
 
        /*
...
...
@@ -1641,24 +1314,9 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
}
 
 static void
-arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-    bin_t *bin) {
-   assert(slab != bin->slabcur);
-
-   malloc_mutex_unlock(tsdn, &bin->lock);
-   /******************************/
-   arena_slab_dalloc(tsdn, arena, slab);
-   /****************************/
-   malloc_mutex_lock(tsdn, &bin->lock);
-   if (config_stats) {
-       bin->stats.curslabs--;
-   }
-}
-
-static void
-arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
     bin_t *bin) {
-   assert(extent_nfree_get(slab) > 0);
+   assert(edata_nfree_get(slab) > 0);
 
    /*
     * Make sure that if bin->slabcur is non-NULL, it refers to the
...
...
@@ -1666,9 +1324,9 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
     * than proactively keeping it pointing at the oldest/lowest non-full
     * slab.
     */
-   if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
+   if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) {
        /* Switch slabcur. */
-       if (extent_nfree_get(bin->slabcur) > 0) {
+       if (edata_nfree_get(bin->slabcur) > 0) {
            arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
        } else {
            arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
...
...
@@ -1683,56 +1341,54 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
}
 
 static void
-arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-    szind_t binind, extent_t *slab, void *ptr, bool junked) {
-   arena_slab_data_t *slab_data = extent_slab_data_get(slab);
-   const bin_info_t *bin_info = &bin_infos[binind];
-
-   if (!junked && config_fill && unlikely(opt_junk_free)) {
-       arena_dalloc_junk_small(ptr, bin_info);
-   }
-
-   arena_slab_reg_dalloc(slab, slab_data, ptr);
-   unsigned nfree = extent_nfree_get(slab);
-   if (nfree == bin_info->nregs) {
-       arena_dissociate_bin_slab(arena, slab, bin);
-       arena_dalloc_bin_slab(tsdn, arena, slab, bin);
-   } else if (nfree == 1 && slab != bin->slabcur) {
-       arena_bin_slabs_full_remove(arena, bin, slab);
-       arena_bin_lower_slab(tsdn, arena, slab, bin);
-   }
-
-   if (config_stats) {
-       bin->stats.ndalloc++;
-       bin->stats.curregs--;
-   }
-}
-
-void
-arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-    szind_t binind, extent_t *extent, void *ptr) {
-   arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
-       true);
+arena_dalloc_bin_slab_prepare(tsdn_t *tsdn, edata_t *slab, bin_t *bin) {
+   malloc_mutex_assert_owner(tsdn, &bin->lock);
+
+   assert(slab != bin->slabcur);
+   if (config_stats) {
+       bin->stats.curslabs--;
+   }
+}
+
+void
+arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
+    edata_t *slab, bin_t *bin) {
+   arena_dissociate_bin_slab(arena, slab, bin);
+   arena_dalloc_bin_slab_prepare(tsdn, slab, bin);
+}
+
+void
+arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
+    edata_t *slab, bin_t *bin) {
+   arena_bin_slabs_full_remove(arena, bin, slab);
+   arena_bin_lower_slab(tsdn, arena, slab, bin);
}
 
 static void
-arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
-   szind_t binind = extent_szind_get(extent);
-   unsigned binshard = extent_binshard_get(extent);
-   bin_t *bin = &arena->bins[binind].bin_shards[binshard];
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
+   szind_t binind = edata_szind_get(edata);
+   unsigned binshard = edata_binshard_get(edata);
+   bin_t *bin = arena_get_bin(arena, binind, binshard);
 
    malloc_mutex_lock(tsdn, &bin->lock);
-   arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
-       false);
+   arena_dalloc_bin_locked_info_t info;
+   arena_dalloc_bin_locked_begin(&info, binind);
+   bool ret = arena_dalloc_bin_locked_step(tsdn, arena, bin, &info,
+       binind, edata, ptr);
+   arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
    malloc_mutex_unlock(tsdn, &bin->lock);
+
+   if (ret) {
+       arena_slab_dalloc(tsdn, arena, edata);
+   }
}
 
 void
 arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
-   extent_t *extent = iealloc(tsdn, ptr);
-   arena_t *arena = extent_arena_get(extent);
+   edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+   arena_t *arena = arena_get_from_edata(edata);
 
-   arena_dalloc_bin(tsdn, arena, extent, ptr);
+   arena_dalloc_bin(tsdn, arena, edata, ptr);
    arena_decay_tick(tsdn, arena);
}
...
...
@@ -1743,7 +1399,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    /* Calls with non-zero extra had to clamp extra. */
    assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
 
-   extent_t *extent = iealloc(tsdn, ptr);
+   edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
    if (unlikely(size > SC_LARGE_MAXCLASS)) {
        ret = true;
        goto done;
...
...
@@ -1766,18 +1422,19 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
            goto done;
        }
 
-       arena_decay_tick(tsdn, extent_arena_get(extent));
+       arena_t *arena = arena_get_from_edata(edata);
+       arena_decay_tick(tsdn, arena);
        ret = false;
    } else if (oldsize >= SC_LARGE_MINCLASS
        && usize_max >= SC_LARGE_MINCLASS) {
-       ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
+       ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max,
            zero);
    } else {
        ret = true;
    }
 
done:
-   assert(extent == iealloc(tsdn, ptr));
-   *newsize = extent_usize_get(extent);
+   assert(edata == emap_edata_lookup(tsdn, &arena_emap_global, ptr));
+   *newsize = edata_usize_get(edata);
 
    return ret;
}
...
...
@@ -1800,7 +1457,7 @@ void *
 arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
     size_t size, size_t alignment, bool zero, tcache_t *tcache,
     hook_ralloc_args_t *hook_args) {
-   size_t usize = sz_s2u(size);
+   size_t usize = alignment == 0 ? sz_s2u(size) : sz_sa2u(size, alignment);
    if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
        return NULL;
    }
...
...
@@ -1850,6 +1507,29 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    return ret;
}
 
+ehooks_t *
+arena_get_ehooks(arena_t *arena) {
+   return base_ehooks_get(arena->base);
+}
+
+extent_hooks_t *
+arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
+    extent_hooks_t *extent_hooks) {
+   background_thread_info_t *info;
+   if (have_background_thread) {
+       info = arena_background_thread_info_get(arena);
+       malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+   }
+   /* No using the HPA now that we have the custom hooks. */
+   pa_shard_disable_hpa(tsd_tsdn(tsd), &arena->pa_shard);
+   extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
+   if (have_background_thread) {
+       malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+   }
+
+   return ret;
+}
+
 dss_prec_t
 arena_dss_prec_get(arena_t *arena) {
    return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
...
...
@@ -1871,7 +1551,7 @@ arena_dirty_decay_ms_default_get(void) {
 
 bool
 arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
-   if (!arena_decay_ms_valid(decay_ms)) {
+   if (!decay_ms_valid(decay_ms)) {
        return true;
    }
    atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
...
...
@@ -1885,7 +1565,7 @@ arena_muzzy_decay_ms_default_get(void) {
 
 bool
 arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
-   if (!arena_decay_ms_valid(decay_ms)) {
+   if (!decay_ms_valid(decay_ms)) {
        return true;
    }
    atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
...
...
@@ -1896,26 +1576,8 @@ bool
 arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
     size_t *new_limit) {
    assert(opt_retain);
-
-   pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
-   if (new_limit != NULL) {
-       size_t limit = *new_limit;
-       /* Grow no more than the new limit. */
-       if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
-           return true;
-       }
-   }
-
-   malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
-   if (old_limit != NULL) {
-       *old_limit = sz_pind2sz(arena->retain_grow_limit);
-   }
-   if (new_limit != NULL) {
-       arena->retain_grow_limit = new_ind;
-   }
-   malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
-
-   return false;
+   return pac_retain_grow_limit_get_set(tsd_tsdn(tsd),
+       &arena->pa_shard.pac, old_limit, new_limit);
}
unsigned
...
...
@@ -1933,13 +1595,8 @@ arena_nthreads_dec(arena_t *arena, bool internal) {
    atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}
 
-size_t
-arena_extent_sn_next(arena_t *arena) {
-   return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
-}
-
 arena_t *
-arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
    arena_t *arena;
    base_t *base;
    unsigned i;
...
...
@@ -1947,16 +1604,13 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
    if (ind == 0) {
        base = b0get();
    } else {
-       base = base_new(tsdn, ind, extent_hooks);
+       base = base_new(tsdn, ind, config->extent_hooks,
+           config->metadata_use_hooks);
        if (base == NULL) {
            return NULL;
        }
    }
 
-   unsigned nbins_total = 0;
-   for (i = 0; i < SC_NBINS; i++) {
-       nbins_total += bin_infos[i].n_shards;
-   }
    size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
    arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
    if (arena == NULL) {
...
...
@@ -1980,110 +1634,56 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
}
}
if
(
config_prof
)
{
if
(
prof_accum_init
(
tsdn
,
&
arena
->
prof_accum
))
{
goto
label_error
;
}
}
if
(
config_cache_oblivious
)
{
/*
* A nondeterministic seed based on the address of arena reduces
* the likelihood of lockstep non-uniform cache index
* utilization among identical concurrent processes, but at the
* cost of test repeatability. For debug builds, instead use a
* deterministic seed.
*/
atomic_store_zu
(
&
arena
->
offset_state
,
config_debug
?
ind
:
(
size_t
)(
uintptr_t
)
arena
,
ATOMIC_RELAXED
);
}
atomic_store_zu
(
&
arena
->
extent_sn_next
,
0
,
ATOMIC_RELAXED
);
atomic_store_u
(
&
arena
->
dss_prec
,
(
unsigned
)
extent_dss_prec_get
(),
ATOMIC_RELAXED
);
atomic_store_zu
(
&
arena
->
nactive
,
0
,
ATOMIC_RELAXED
);
extent_list_init
(
&
arena
->
large
);
edata_list_active_init
(
&
arena
->
large
);
if
(
malloc_mutex_init
(
&
arena
->
large_mtx
,
"arena_large"
,
WITNESS_RANK_ARENA_LARGE
,
malloc_mutex_rank_exclusive
))
{
goto
label_error
;
}
/*
* Delay coalescing for dirty extents despite the disruptive effect on
* memory layout for best-fit extent allocation, since cached extents
* are likely to be reused soon after deallocation, and the cost of
* merging/splitting extents is non-trivial.
*/
if
(
extents_init
(
tsdn
,
&
arena
->
extents_dirty
,
extent_state_dirty
,
true
))
{
goto
label_error
;
}
/*
* Coalesce muzzy extents immediately, because operations on them are in
* the critical path much less often than for dirty extents.
*/
if
(
extents_init
(
tsdn
,
&
arena
->
extents_muzzy
,
extent_state_muzzy
,
false
))
{
goto
label_error
;
}
/*
* Coalesce retained extents immediately, in part because they will
* never be evicted (and therefore there's no opportunity for delayed
* coalescing), but also because operations on retained extents are not
* in the critical path.
*/
if
(
extents_init
(
tsdn
,
&
arena
->
extents_retained
,
extent_state_retained
,
false
))
{
goto
label_error
;
}
if
(
arena_decay_init
(
&
arena
->
decay_dirty
,
arena_dirty_decay_ms_default_get
(),
&
arena
->
stats
.
decay_dirty
))
{
goto
label_error
;
}
if
(
arena_decay_init
(
&
arena
->
decay_muzzy
,
arena_muzzy_decay_ms_default_get
(),
&
arena
->
stats
.
decay_muzzy
))
{
goto
label_error
;
}
arena
->
extent_grow_next
=
sz_psz2ind
(
HUGEPAGE
);
arena
->
retain_grow_limit
=
sz_psz2ind
(
SC_LARGE_MAXCLASS
);
if
(
malloc_mutex_init
(
&
arena
->
extent_grow_mtx
,
"extent_grow"
,
WITNESS_RANK_EXTENT_GROW
,
malloc_mutex_rank_exclusive
))
{
goto
label_error
;
}
extent_avail_new
(
&
arena
->
extent_avail
);
if
(
malloc_mutex_init
(
&
arena
->
extent_avail_mtx
,
"extent_avail"
,
WITNESS_RANK_EXTENT_AVAIL
,
malloc_mutex_rank_exclusive
))
{
nstime_t
cur_time
;
nstime_init_update
(
&
cur_time
);
if
(
pa_shard_init
(
tsdn
,
&
arena
->
pa_shard
,
&
arena_pa_central_global
,
&
arena_emap_global
,
base
,
ind
,
&
arena
->
stats
.
pa_shard_stats
,
LOCKEDINT_MTX
(
arena
->
stats
.
mtx
),
&
cur_time
,
oversize_threshold
,
arena_dirty_decay_ms_default_get
(),
arena_muzzy_decay_ms_default_get
()))
{
goto
label_error
;
}
/* Initialize bins. */
uintptr_t
bin_addr
=
(
uintptr_t
)
arena
+
sizeof
(
arena_t
);
atomic_store_u
(
&
arena
->
binshard_next
,
0
,
ATOMIC_RELEASE
);
for
(
i
=
0
;
i
<
SC_NBINS
;
i
++
)
{
unsigned
nshards
=
bin_infos
[
i
].
n_shards
;
arena
->
bins
[
i
].
bin_shards
=
(
bin_t
*
)
bin_addr
;
bin_addr
+=
nshards
*
sizeof
(
bin_t
);
for
(
unsigned
j
=
0
;
j
<
nshards
;
j
++
)
{
bool
err
=
bin_init
(
&
arena
->
bins
[
i
].
bin_shards
[
j
]);
for
(
i
=
0
;
i
<
nbins_total
;
i
++
)
{
bool
err
=
bin_init
(
&
arena
->
bins
[
i
]);
if
(
err
)
{
goto
label_error
;
}
}
}
assert
(
bin_addr
==
(
uintptr_t
)
arena
+
arena_size
);
arena
->
base
=
base
;
/* Set arena before creating background threads. */
arena_set
(
ind
,
arena
);
arena
->
ind
=
ind
;
nstime_init
(
&
arena
->
create_time
,
0
);
nstime_update
(
&
arena
->
create_time
);
nstime_init_update
(
&
arena
->
create_time
);
/*
* We turn on the HPA if set to. There are two exceptions:
* - Custom extent hooks (we should only return memory allocated from
* them in that case).
* - Arena 0 initialization. In this case, we're mid-bootstrapping, and
* so arena_hpa_global is not yet initialized.
*/
if
(
opt_hpa
&&
ehooks_are_default
(
base_ehooks_get
(
base
))
&&
ind
!=
0
)
{
hpa_shard_opts_t
hpa_shard_opts
=
opt_hpa_opts
;
hpa_shard_opts
.
deferral_allowed
=
background_thread_enabled
();
if
(
pa_shard_enable_hpa
(
tsdn
,
&
arena
->
pa_shard
,
&
hpa_shard_opts
,
&
opt_hpa_sec_opts
))
{
goto
label_error
;
}
}
/* We don't support reentrancy for arena 0 bootstrapping. */
if
(
ind
!=
0
)
{
...
...
@@ -2129,10 +1729,12 @@ arena_choose_huge(tsd_t *tsd) {
     * expected for huge allocations.
     */
    if (arena_dirty_decay_ms_default_get() > 0) {
-       arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
+       arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
+           extent_state_dirty, 0);
    }
    if (arena_muzzy_decay_ms_default_get() > 0) {
-       arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
+       arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
+           extent_state_muzzy, 0);
    }
}
...
...
@@ -2167,8 +1769,8 @@ arena_is_huge(unsigned arena_ind) {
    return (arena_ind == huge_arena_ind);
}
 
-void
-arena_boot(sc_data_t *sc_data) {
+bool
+arena_boot(sc_data_t *sc_data, base_t *base, bool hpa) {
    arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
    arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
    for (unsigned i = 0; i < SC_NBINS; i++) {
...
...
@@ -2176,12 +1778,20 @@ arena_boot(sc_data_t *sc_data) {
        div_init(&arena_binind_div_info[i],
            (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
    }
+
+   uint32_t cur_offset = (uint32_t)offsetof(arena_t, bins);
+   for (szind_t i = 0; i < SC_NBINS; i++) {
+       arena_bin_offsets[i] = cur_offset;
+       nbins_total += bin_infos[i].n_shards;
+       cur_offset += (uint32_t)(bin_infos[i].n_shards * sizeof(bin_t));
+   }
+   return pa_central_init(&arena_pa_central_global, base, hpa,
+       &hpa_hooks_default);
}
 
 void
 arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
-   malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
-   malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
+   pa_shard_prefork0(tsdn, &arena->pa_shard);
}
 
 void
...
...
@@ -2193,59 +1803,50 @@ arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
void
arena_prefork2
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
malloc_mutex
_prefork
(
tsdn
,
&
arena
->
extent_grow_mtx
);
pa_shard
_prefork
2
(
tsdn
,
&
arena
->
pa_shard
);
}
void
arena_prefork3
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
extents_prefork
(
tsdn
,
&
arena
->
extents_dirty
);
extents_prefork
(
tsdn
,
&
arena
->
extents_muzzy
);
extents_prefork
(
tsdn
,
&
arena
->
extents_retained
);
pa_shard_prefork3
(
tsdn
,
&
arena
->
pa_shard
);
}
void
arena_prefork4
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
malloc_mutex
_prefork
(
tsdn
,
&
arena
->
extent_avail_mtx
);
pa_shard
_prefork
4
(
tsdn
,
&
arena
->
pa_shard
);
}
void
arena_prefork5
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
base
_prefork
(
tsdn
,
arena
->
base
);
pa_shard
_prefork
5
(
tsdn
,
&
arena
->
pa_shard
);
}
void
arena_prefork6
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
malloc_mutex
_prefork
(
tsdn
,
&
arena
->
large_mtx
);
base
_prefork
(
tsdn
,
arena
->
base
);
}
void
arena_prefork7
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
for
(
unsigned
i
=
0
;
i
<
SC_NBINS
;
i
++
)
{
for
(
unsigned
j
=
0
;
j
<
bin_infos
[
i
].
n_shards
;
j
++
)
{
bin_prefork
(
tsdn
,
&
arena
->
bins
[
i
].
bin_shards
[
j
]);
}
malloc_mutex_prefork
(
tsdn
,
&
arena
->
large_mtx
);
}
void
arena_prefork8
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
for
(
unsigned
i
=
0
;
i
<
nbins_total
;
i
++
)
{
bin_prefork
(
tsdn
,
&
arena
->
bins
[
i
]);
}
}
void
arena_postfork_parent
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
unsigned
i
;
for
(
i
=
0
;
i
<
SC_NBINS
;
i
++
)
{
for
(
unsigned
j
=
0
;
j
<
bin_infos
[
i
].
n_shards
;
j
++
)
{
bin_postfork_parent
(
tsdn
,
&
arena
->
bins
[
i
].
bin_shards
[
j
]);
}
for
(
unsigned
i
=
0
;
i
<
nbins_total
;
i
++
)
{
bin_postfork_parent
(
tsdn
,
&
arena
->
bins
[
i
]);
}
malloc_mutex_postfork_parent
(
tsdn
,
&
arena
->
large_mtx
);
base_postfork_parent
(
tsdn
,
arena
->
base
);
malloc_mutex_postfork_parent
(
tsdn
,
&
arena
->
extent_avail_mtx
);
extents_postfork_parent
(
tsdn
,
&
arena
->
extents_dirty
);
extents_postfork_parent
(
tsdn
,
&
arena
->
extents_muzzy
);
extents_postfork_parent
(
tsdn
,
&
arena
->
extents_retained
);
malloc_mutex_postfork_parent
(
tsdn
,
&
arena
->
extent_grow_mtx
);
malloc_mutex_postfork_parent
(
tsdn
,
&
arena
->
decay_dirty
.
mtx
);
malloc_mutex_postfork_parent
(
tsdn
,
&
arena
->
decay_muzzy
.
mtx
);
pa_shard_postfork_parent
(
tsdn
,
&
arena
->
pa_shard
);
if
(
config_stats
)
{
malloc_mutex_postfork_parent
(
tsdn
,
&
arena
->
tcache_ql_mtx
);
}
...
...
@@ -2253,8 +1854,6 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
void
arena_postfork_child
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
unsigned
i
;
atomic_store_u
(
&
arena
->
nthreads
[
0
],
0
,
ATOMIC_RELAXED
);
atomic_store_u
(
&
arena
->
nthreads
[
1
],
0
,
ATOMIC_RELAXED
);
if
(
tsd_arena_get
(
tsdn_tsd
(
tsdn
))
==
arena
)
{
...
...
@@ -2266,32 +1865,26 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
if
(
config_stats
)
{
ql_new
(
&
arena
->
tcache_ql
);
ql_new
(
&
arena
->
cache_bin_array_descriptor_ql
);
tcache_t
*
tcache
=
tcache_get
(
tsdn_tsd
(
tsdn
));
if
(
tcache
!=
NULL
&&
tcache
->
arena
==
arena
)
{
ql_elm_new
(
tcache
,
link
);
ql_tail_insert
(
&
arena
->
tcache_ql
,
tcache
,
link
);
tcache_slow_t
*
tcache_slow
=
tcache_slow_get
(
tsdn_tsd
(
tsdn
));
if
(
tcache_slow
!=
NULL
&&
tcache_slow
->
arena
==
arena
)
{
tcache_t
*
tcache
=
tcache_slow
->
tcache
;
ql_elm_new
(
tcache_slow
,
link
);
ql_tail_insert
(
&
arena
->
tcache_ql
,
tcache_slow
,
link
);
cache_bin_array_descriptor_init
(
&
tcache
->
cache_bin_array_descriptor
,
tcache
->
bins
_small
,
tcache
->
bins_large
);
&
tcache
_slow
->
cache_bin_array_descriptor
,
tcache
->
bins
);
ql_tail_insert
(
&
arena
->
cache_bin_array_descriptor_ql
,
&
tcache
->
cache_bin_array_descriptor
,
link
);
&
tcache
_slow
->
cache_bin_array_descriptor
,
link
);
}
}
for
(
i
=
0
;
i
<
SC_NBINS
;
i
++
)
{
for
(
unsigned
j
=
0
;
j
<
bin_infos
[
i
].
n_shards
;
j
++
)
{
bin_postfork_child
(
tsdn
,
&
arena
->
bins
[
i
].
bin_shards
[
j
]);
}
for
(
unsigned
i
=
0
;
i
<
nbins_total
;
i
++
)
{
bin_postfork_child
(
tsdn
,
&
arena
->
bins
[
i
]);
}
malloc_mutex_postfork_child
(
tsdn
,
&
arena
->
large_mtx
);
base_postfork_child
(
tsdn
,
arena
->
base
);
malloc_mutex_postfork_child
(
tsdn
,
&
arena
->
extent_avail_mtx
);
extents_postfork_child
(
tsdn
,
&
arena
->
extents_dirty
);
extents_postfork_child
(
tsdn
,
&
arena
->
extents_muzzy
);
extents_postfork_child
(
tsdn
,
&
arena
->
extents_retained
);
malloc_mutex_postfork_child
(
tsdn
,
&
arena
->
extent_grow_mtx
);
malloc_mutex_postfork_child
(
tsdn
,
&
arena
->
decay_dirty
.
mtx
);
malloc_mutex_postfork_child
(
tsdn
,
&
arena
->
decay_muzzy
.
mtx
);
pa_shard_postfork_child
(
tsdn
,
&
arena
->
pa_shard
);
if
(
config_stats
)
{
malloc_mutex_postfork_child
(
tsdn
,
&
arena
->
tcache_ql_mtx
);
}
...
...
deps/jemalloc/src/background_thread.c
#define JEMALLOC_BACKGROUND_THREAD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
...
...
@@ -54,8 +53,9 @@ pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
 bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
 bool background_threads_enable(tsd_t *tsd) NOT_REACHED
 bool background_threads_disable(tsd_t *tsd) NOT_REACHED
-void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
-    arena_decay_t *decay, size_t npages_new) NOT_REACHED
+bool background_thread_is_started(background_thread_info_t *info) NOT_REACHED
+void background_thread_wakeup_early(background_thread_info_t *info,
+    nstime_t *remaining_sleep) NOT_REACHED
 void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
 void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
 void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
...
...
@@ -74,7 +74,7 @@ background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
    info->npages_to_purge_new = 0;
    if (config_stats) {
        info->tot_n_runs = 0;
-       nstime_init(&info->tot_sleep_time, 0);
+       nstime_init_zero(&info->tot_sleep_time);
    }
}
...
...
@@ -82,136 +82,40 @@ static inline bool
 set_current_thread_affinity(int cpu) {
 #if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
    cpu_set_t cpuset;
+#else
+#  ifndef __NetBSD__
+   cpuset_t cpuset;
+#  else
+   cpuset_t *cpuset;
+#  endif
+#endif
+
+#ifndef __NetBSD__
    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
-   int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
-   return (ret != 0);
+#else
+   cpuset = cpuset_create();
+#endif
+
+#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
+   return (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0);
 #else
-   return false;
+#  ifndef __NetBSD__
+   int ret = pthread_setaffinity_np(pthread_self(), sizeof(cpuset_t),
+       &cpuset);
+#  else
+   int ret = pthread_setaffinity_np(pthread_self(), cpuset_size(cpuset),
+       cpuset);
+   cpuset_destroy(cpuset);
+#  endif
+   return ret != 0;
 #endif
}
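The helper above pins the calling thread to one CPU, via sched_setaffinity() on Linux or pthread_setaffinity_np() on the BSDs. A minimal Linux-only usage sketch follows; the choice of CPU 0 is an arbitrary assumption for illustration.

/* Sketch: pin the current thread to CPU 0 with sched_setaffinity(). */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int
main(void) {
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(0, &cpuset); /* pin to CPU 0 */

    /* pid 0 means "the calling thread". */
    if (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0) {
        perror("sched_setaffinity");
        return 1;
    }
    printf("pinned to CPU 0\n");
    return 0;
}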
/* Threshold for determining when to wake up the background thread. */
#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
#define BILLION UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
static
inline
size_t
decay_npurge_after_interval
(
arena_decay_t
*
decay
,
size_t
interval
)
{
size_t
i
;
uint64_t
sum
=
0
;
for
(
i
=
0
;
i
<
interval
;
i
++
)
{
sum
+=
decay
->
backlog
[
i
]
*
h_steps
[
i
];
}
for
(;
i
<
SMOOTHSTEP_NSTEPS
;
i
++
)
{
sum
+=
decay
->
backlog
[
i
]
*
(
h_steps
[
i
]
-
h_steps
[
i
-
interval
]);
}
return
(
size_t
)(
sum
>>
SMOOTHSTEP_BFP
);
}
static
uint64_t
arena_decay_compute_purge_interval_impl
(
tsdn_t
*
tsdn
,
arena_decay_t
*
decay
,
extents_t
*
extents
)
{
if
(
malloc_mutex_trylock
(
tsdn
,
&
decay
->
mtx
))
{
/* Use minimal interval if decay is contended. */
return
BACKGROUND_THREAD_MIN_INTERVAL_NS
;
}
uint64_t
interval
;
ssize_t
decay_time
=
atomic_load_zd
(
&
decay
->
time_ms
,
ATOMIC_RELAXED
);
if
(
decay_time
<=
0
)
{
/* Purging is eagerly done or disabled currently. */
interval
=
BACKGROUND_THREAD_INDEFINITE_SLEEP
;
goto
label_done
;
}
uint64_t
decay_interval_ns
=
nstime_ns
(
&
decay
->
interval
);
assert
(
decay_interval_ns
>
0
);
size_t
npages
=
extents_npages_get
(
extents
);
if
(
npages
==
0
)
{
unsigned
i
;
for
(
i
=
0
;
i
<
SMOOTHSTEP_NSTEPS
;
i
++
)
{
if
(
decay
->
backlog
[
i
]
>
0
)
{
break
;
}
}
if
(
i
==
SMOOTHSTEP_NSTEPS
)
{
/* No dirty pages recorded. Sleep indefinitely. */
interval
=
BACKGROUND_THREAD_INDEFINITE_SLEEP
;
goto
label_done
;
}
}
if
(
npages
<=
BACKGROUND_THREAD_NPAGES_THRESHOLD
)
{
/* Use max interval. */
interval
=
decay_interval_ns
*
SMOOTHSTEP_NSTEPS
;
goto
label_done
;
}
size_t
lb
=
BACKGROUND_THREAD_MIN_INTERVAL_NS
/
decay_interval_ns
;
size_t
ub
=
SMOOTHSTEP_NSTEPS
;
/* Minimal 2 intervals to ensure reaching next epoch deadline. */
lb
=
(
lb
<
2
)
?
2
:
lb
;
if
((
decay_interval_ns
*
ub
<=
BACKGROUND_THREAD_MIN_INTERVAL_NS
)
||
(
lb
+
2
>
ub
))
{
interval
=
BACKGROUND_THREAD_MIN_INTERVAL_NS
;
goto
label_done
;
}
assert
(
lb
+
2
<=
ub
);
size_t
npurge_lb
,
npurge_ub
;
npurge_lb
=
decay_npurge_after_interval
(
decay
,
lb
);
if
(
npurge_lb
>
BACKGROUND_THREAD_NPAGES_THRESHOLD
)
{
interval
=
decay_interval_ns
*
lb
;
goto
label_done
;
}
npurge_ub
=
decay_npurge_after_interval
(
decay
,
ub
);
if
(
npurge_ub
<
BACKGROUND_THREAD_NPAGES_THRESHOLD
)
{
interval
=
decay_interval_ns
*
ub
;
goto
label_done
;
}
unsigned
n_search
=
0
;
size_t
target
,
npurge
;
while
((
npurge_lb
+
BACKGROUND_THREAD_NPAGES_THRESHOLD
<
npurge_ub
)
&&
(
lb
+
2
<
ub
))
{
target
=
(
lb
+
ub
)
/
2
;
npurge
=
decay_npurge_after_interval
(
decay
,
target
);
if
(
npurge
>
BACKGROUND_THREAD_NPAGES_THRESHOLD
)
{
ub
=
target
;
npurge_ub
=
npurge
;
}
else
{
lb
=
target
;
npurge_lb
=
npurge
;
}
assert
(
n_search
++
<
lg_floor
(
SMOOTHSTEP_NSTEPS
)
+
1
);
}
interval
=
decay_interval_ns
*
(
ub
+
lb
)
/
2
;
label_done:
interval
=
(
interval
<
BACKGROUND_THREAD_MIN_INTERVAL_NS
)
?
BACKGROUND_THREAD_MIN_INTERVAL_NS
:
interval
;
malloc_mutex_unlock
(
tsdn
,
&
decay
->
mtx
);
return
interval
;
}
/* Compute purge interval for background threads. */
static
uint64_t
arena_decay_compute_purge_interval
(
tsdn_t
*
tsdn
,
arena_t
*
arena
)
{
uint64_t
i1
,
i2
;
i1
=
arena_decay_compute_purge_interval_impl
(
tsdn
,
&
arena
->
decay_dirty
,
&
arena
->
extents_dirty
);
if
(
i1
==
BACKGROUND_THREAD_MIN_INTERVAL_NS
)
{
return
i1
;
}
i2
=
arena_decay_compute_purge_interval_impl
(
tsdn
,
&
arena
->
decay_muzzy
,
&
arena
->
extents_muzzy
);
return
i1
<
i2
?
i1
:
i2
;
}
static
void
background_thread_sleep
(
tsdn_t
*
tsdn
,
background_thread_info_t
*
info
,
uint64_t
interval
)
{
...
...
@@ -228,7 +132,8 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
int
ret
;
if
(
interval
==
BACKGROUND_THREAD_INDEFINITE_SLEEP
)
{
assert
(
background_thread_indefinite_sleep
(
info
));
background_thread_wakeup_time_set
(
tsdn
,
info
,
BACKGROUND_THREAD_INDEFINITE_SLEEP
);
ret
=
pthread_cond_wait
(
&
info
->
cond
,
&
info
->
mtx
.
lock
);
assert
(
ret
==
0
);
}
else
{
...
...
@@ -236,8 +141,7 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
interval
<=
BACKGROUND_THREAD_INDEFINITE_SLEEP
);
/* We need malloc clock (can be different from tv). */
nstime_t
next_wakeup
;
nstime_init
(
&
next_wakeup
,
0
);
nstime_update
(
&
next_wakeup
);
nstime_init_update
(
&
next_wakeup
);
nstime_iadd
(
&
next_wakeup
,
interval
);
assert
(
nstime_ns
(
&
next_wakeup
)
<
BACKGROUND_THREAD_INDEFINITE_SLEEP
);
...
...
@@ -254,8 +158,6 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
assert
(
!
background_thread_indefinite_sleep
(
info
));
ret
=
pthread_cond_timedwait
(
&
info
->
cond
,
&
info
->
mtx
.
lock
,
&
ts
);
assert
(
ret
==
ETIMEDOUT
||
ret
==
0
);
background_thread_wakeup_time_set
(
tsdn
,
info
,
BACKGROUND_THREAD_INDEFINITE_SLEEP
);
}
if
(
config_stats
)
{
gettimeofday
(
&
tv
,
NULL
);
...
...
@@ -283,28 +185,48 @@ background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
}
static
inline
void
background_work_sleep_once
(
tsdn_t
*
tsdn
,
background_thread_info_t
*
info
,
unsigned
ind
)
{
uint64_t
min_interval
=
BACKGROUND_THREAD_INDEFINITE_SLEEP
;
background_work_sleep_once
(
tsdn_t
*
tsdn
,
background_thread_info_t
*
info
,
unsigned
ind
)
{
uint64_t
ns_until_deferred
=
BACKGROUND_THREAD_DEFERRED_MAX
;
unsigned
narenas
=
narenas_total_get
();
bool
slept_indefinitely
=
background_thread_indefinite_sleep
(
info
);
for
(
unsigned
i
=
ind
;
i
<
narenas
;
i
+=
max_background_threads
)
{
arena_t
*
arena
=
arena_get
(
tsdn
,
i
,
false
);
if
(
!
arena
)
{
continue
;
}
arena_decay
(
tsdn
,
arena
,
true
,
false
);
if
(
min_interval
==
BACKGROUND_THREAD_MIN_INTERVAL_NS
)
{
/*
* If thread was woken up from the indefinite sleep, don't
* do the work instantly, but rather check when the deferred
* work that caused this thread to wake up is scheduled for.
*/
if
(
!
slept_indefinitely
)
{
arena_do_deferred_work
(
tsdn
,
arena
);
}
if
(
ns_until_deferred
<=
BACKGROUND_THREAD_MIN_INTERVAL_NS
)
{
/* Min interval will be used. */
continue
;
}
uint64_t
interval
=
arena_decay_compute_purge_interval
(
tsdn
,
arena
);
assert
(
interval
>=
BACKGROUND_THREAD_MIN_INTERVAL_NS
);
if
(
min_interval
>
interval
)
{
min_interval
=
interval
;
uint64_t
ns_arena_deferred
=
pa_shard_time_until_deferred_work
(
tsdn
,
&
arena
->
pa_shard
);
if
(
ns_arena_deferred
<
ns_until_deferred
)
{
ns_until_deferred
=
ns_arena_deferred
;
}
}
background_thread_sleep
(
tsdn
,
info
,
min_interval
);
uint64_t
sleep_ns
;
if
(
ns_until_deferred
==
BACKGROUND_THREAD_DEFERRED_MAX
)
{
sleep_ns
=
BACKGROUND_THREAD_INDEFINITE_SLEEP
;
}
else
{
sleep_ns
=
(
ns_until_deferred
<
BACKGROUND_THREAD_MIN_INTERVAL_NS
)
?
BACKGROUND_THREAD_MIN_INTERVAL_NS
:
ns_until_deferred
;
}
background_thread_sleep
(
tsdn
,
info
,
sleep_ns
);
}
static
bool
...
...
@@ -508,7 +430,7 @@ background_thread_entry(void *ind_arg) {
assert
(
thread_ind
<
max_background_threads
);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
pthread_setname_np
(
pthread_self
(),
"jemalloc_bg_thd"
);
#elif defined(__FreeBSD__)
#elif defined(__FreeBSD__)
|| defined(__DragonFly__)
pthread_set_name_np
(
pthread_self
(),
"jemalloc_bg_thd"
);
#endif
if
(
opt_percpu_arena
!=
percpu_arena_disabled
)
{
...
...
@@ -608,16 +530,16 @@ background_threads_enable(tsd_t *tsd) {
malloc_mutex_assert_owner
(
tsd_tsdn
(
tsd
),
&
background_thread_lock
);
VARIABLE_ARRAY
(
bool
,
marked
,
max_background_threads
);
unsigned
i
,
nmarked
;
for
(
i
=
0
;
i
<
max_background_threads
;
i
++
)
{
unsigned
nmarked
;
for
(
unsigned
i
=
0
;
i
<
max_background_threads
;
i
++
)
{
marked
[
i
]
=
false
;
}
nmarked
=
0
;
/* Thread 0 is required and created at the end. */
marked
[
0
]
=
true
;
/* Mark the threads we need to create for thread 0. */
unsigned
n
=
narenas_total_get
();
for
(
i
=
1
;
i
<
n
;
i
++
)
{
unsigned
n
arenas
=
narenas_total_get
();
for
(
unsigned
i
=
1
;
i
<
n
arenas
;
i
++
)
{
if
(
marked
[
i
%
max_background_threads
]
||
arena_get
(
tsd_tsdn
(
tsd
),
i
,
false
)
==
NULL
)
{
continue
;
...
...
@@ -634,7 +556,18 @@ background_threads_enable(tsd_t *tsd) {
}
}
return
background_thread_create_locked
(
tsd
,
0
);
bool
err
=
background_thread_create_locked
(
tsd
,
0
);
if
(
err
)
{
return
true
;
}
for
(
unsigned
i
=
0
;
i
<
narenas
;
i
++
)
{
arena_t
*
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
i
,
false
);
if
(
arena
!=
NULL
)
{
pa_shard_set_deferral_allowed
(
tsd_tsdn
(
tsd
),
&
arena
->
pa_shard
,
true
);
}
}
return
false
;
}
bool
...
...
@@ -648,92 +581,36 @@ background_threads_disable(tsd_t *tsd) {
return
true
;
}
assert
(
n_background_threads
==
0
);
unsigned
narenas
=
narenas_total_get
();
for
(
unsigned
i
=
0
;
i
<
narenas
;
i
++
)
{
arena_t
*
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
i
,
false
);
if
(
arena
!=
NULL
)
{
pa_shard_set_deferral_allowed
(
tsd_tsdn
(
tsd
),
&
arena
->
pa_shard
,
false
);
}
}
return
false
;
}
/* Check if we need to signal the background thread early. */
bool
background_thread_is_started
(
background_thread_info_t
*
info
)
{
return
info
->
state
==
background_thread_started
;
}
void
background_thread_interval_check
(
tsdn_t
*
tsdn
,
arena_t
*
arena
,
arena_decay_t
*
decay
,
size_t
npages_new
)
{
background_thread_info_t
*
info
=
arena_background_thread_info_get
(
arena
);
if
(
malloc_mutex_trylock
(
tsdn
,
&
info
->
mtx
))
{
background_thread_wakeup_early
(
background_thread_info_t
*
info
,
nstime_t
*
remaining_sleep
)
{
/*
* Background thread may hold the mutex for a long period of
* time. We'd like to avoid the variance on application
* threads. So keep this non-blocking, and leave the work to a
* future epoch.
* This is an optimization to increase batching. At this point
* we know that background thread wakes up soon, so the time to cache
* the just freed memory is bounded and low.
*/
if
(
remaining_sleep
!=
NULL
&&
nstime_ns
(
remaining_sleep
)
<
BACKGROUND_THREAD_MIN_INTERVAL_NS
)
{
return
;
}
if
(
info
->
state
!=
background_thread_started
)
{
goto
label_done
;
}
if
(
malloc_mutex_trylock
(
tsdn
,
&
decay
->
mtx
))
{
goto
label_done
;
}
ssize_t
decay_time
=
atomic_load_zd
(
&
decay
->
time_ms
,
ATOMIC_RELAXED
);
if
(
decay_time
<=
0
)
{
/* Purging is eagerly done or disabled currently. */
goto
label_done_unlock2
;
}
uint64_t
decay_interval_ns
=
nstime_ns
(
&
decay
->
interval
);
assert
(
decay_interval_ns
>
0
);
nstime_t
diff
;
nstime_init
(
&
diff
,
background_thread_wakeup_time_get
(
info
));
if
(
nstime_compare
(
&
diff
,
&
decay
->
epoch
)
<=
0
)
{
goto
label_done_unlock2
;
}
nstime_subtract
(
&
diff
,
&
decay
->
epoch
);
if
(
nstime_ns
(
&
diff
)
<
BACKGROUND_THREAD_MIN_INTERVAL_NS
)
{
goto
label_done_unlock2
;
}
if
(
npages_new
>
0
)
{
size_t
n_epoch
=
(
size_t
)(
nstime_ns
(
&
diff
)
/
decay_interval_ns
);
/*
* Compute how many new pages we would need to purge by the next
* wakeup, which is used to determine if we should signal the
* background thread.
*/
uint64_t
npurge_new
;
if
(
n_epoch
>=
SMOOTHSTEP_NSTEPS
)
{
npurge_new
=
npages_new
;
}
else
{
uint64_t
h_steps_max
=
h_steps
[
SMOOTHSTEP_NSTEPS
-
1
];
assert
(
h_steps_max
>=
h_steps
[
SMOOTHSTEP_NSTEPS
-
1
-
n_epoch
]);
npurge_new
=
npages_new
*
(
h_steps_max
-
h_steps
[
SMOOTHSTEP_NSTEPS
-
1
-
n_epoch
]);
npurge_new
>>=
SMOOTHSTEP_BFP
;
}
info
->
npages_to_purge_new
+=
npurge_new
;
}
bool
should_signal
;
if
(
info
->
npages_to_purge_new
>
BACKGROUND_THREAD_NPAGES_THRESHOLD
)
{
should_signal
=
true
;
}
else
if
(
unlikely
(
background_thread_indefinite_sleep
(
info
))
&&
(
extents_npages_get
(
&
arena
->
extents_dirty
)
>
0
||
extents_npages_get
(
&
arena
->
extents_muzzy
)
>
0
||
info
->
npages_to_purge_new
>
0
))
{
should_signal
=
true
;
}
else
{
should_signal
=
false
;
}
if
(
should_signal
)
{
info
->
npages_to_purge_new
=
0
;
pthread_cond_signal
(
&
info
->
cond
);
}
label_done_unlock2:
malloc_mutex_unlock
(
tsdn
,
&
decay
->
mtx
);
label_done:
malloc_mutex_unlock
(
tsdn
,
&
info
->
mtx
);
}
void
...
...
@@ -794,9 +671,11 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
return
true
;
}
stats
->
num_threads
=
n_background_threads
;
nstime_init_zero
(
&
stats
->
run_interval
);
memset
(
&
stats
->
max_counter_per_bg_thd
,
0
,
sizeof
(
mutex_prof_data_t
));
uint64_t
num_runs
=
0
;
n
st
ime_init
(
&
stats
->
run_interval
,
0
)
;
st
ats
->
num_threads
=
n_background_threads
;
for
(
unsigned
i
=
0
;
i
<
max_background_threads
;
i
++
)
{
background_thread_info_t
*
info
=
&
background_thread_info
[
i
];
if
(
malloc_mutex_trylock
(
tsdn
,
&
info
->
mtx
))
{
...
...
@@ -809,6 +688,8 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
if
(
info
->
state
!=
background_thread_stopped
)
{
num_runs
+=
info
->
tot_n_runs
;
nstime_add
(
&
stats
->
run_interval
,
&
info
->
tot_sleep_time
);
malloc_mutex_prof_max_update
(
tsdn
,
&
stats
->
max_counter_per_bg_thd
,
&
info
->
mtx
);
}
malloc_mutex_unlock
(
tsdn
,
&
info
->
mtx
);
}
...
...
@@ -892,7 +773,7 @@ background_thread_boot0(void) {
}
bool
background_thread_boot1
(
tsdn_t
*
tsdn
)
{
background_thread_boot1
(
tsdn_t
*
tsdn
,
base_t
*
base
)
{
#ifdef JEMALLOC_BACKGROUND_THREAD
assert
(
have_background_thread
);
assert
(
narenas_total_get
()
>
0
);
...
...
@@ -911,7 +792,7 @@ background_thread_boot1(tsdn_t *tsdn) {
}
background_thread_info
=
(
background_thread_info_t
*
)
base_alloc
(
tsdn
,
b
0get
()
,
opt_max_background_threads
*
b
ase
,
opt_max_background_threads
*
sizeof
(
background_thread_info_t
),
CACHELINE
);
if
(
background_thread_info
==
NULL
)
{
return
true
;
...
...
deps/jemalloc/src/base.c
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
...
...
@@ -7,6 +6,15 @@
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"
/*
* In auto mode, arenas switch to huge pages for the base allocator on the
* second base block. a0 switches to thp on the 5th block (after 20 megabytes
* of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
*/
#define BASE_AUTO_THP_THRESHOLD 2
#define BASE_AUTO_THP_THRESHOLD_A0 5
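The comment above says auto arenas move base metadata onto transparent huge pages once they have allocated enough base blocks, with a higher threshold for arena 0 because it also backs global metadata. A standalone sketch of that decision follows; the thresholds mirror the defines above, while the function name and block counter are illustrative assumptions.

/* Sketch: per-arena block-count threshold for switching metadata to THP. */
#include <stdbool.h>
#include <stdio.h>

#define AUTO_THP_THRESHOLD    2
#define AUTO_THP_THRESHOLD_A0 5

static bool
base_should_use_thp(unsigned arena_ind, unsigned n_blocks) {
    unsigned threshold = (arena_ind == 0) ?
        AUTO_THP_THRESHOLD_A0 : AUTO_THP_THRESHOLD;
    return n_blocks >= threshold;
}

int
main(void) {
    for (unsigned n = 1; n <= 6; n++) {
        printf("block %u: arena 0 thp=%d, arena 1 thp=%d\n",
            n, base_should_use_thp(0, n), base_should_use_thp(1, n));
    }
    return 0;
}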
/******************************************************************************/
/* Data. */
...
...
@@ -29,7 +37,7 @@ metadata_thp_madvise(void) {
}
static
void
*
base_map
(
tsdn_t
*
tsdn
,
e
xtent_
hooks_t
*
e
xtent_
hooks
,
unsigned
ind
,
size_t
size
)
{
base_map
(
tsdn_t
*
tsdn
,
ehooks_t
*
ehooks
,
unsigned
ind
,
size_t
size
)
{
void
*
addr
;
bool
zero
=
true
;
bool
commit
=
true
;
...
...
@@ -37,22 +45,21 @@ base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size)
/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
assert
(
size
==
HUGEPAGE_CEILING
(
size
));
size_t
alignment
=
HUGEPAGE
;
if
(
e
xtent_hooks
==
&
extent_hooks_default
)
{
if
(
e
hooks_are_default
(
ehooks
)
)
{
addr
=
extent_alloc_mmap
(
NULL
,
size
,
alignment
,
&
zero
,
&
commit
);
if
(
have_madvise_huge
&&
addr
)
{
pages_set_thp_state
(
addr
,
size
);
}
}
else
{
/* No arena context as we are creating new arenas. */
tsd_t
*
tsd
=
tsdn_null
(
tsdn
)
?
tsd_fetch
()
:
tsdn_tsd
(
tsdn
);
pre_reentrancy
(
tsd
,
NULL
);
addr
=
extent_hooks
->
alloc
(
extent_hooks
,
NULL
,
size
,
alignment
,
&
zero
,
&
commit
,
ind
);
post_reentrancy
(
tsd
);
addr
=
ehooks_alloc
(
tsdn
,
ehooks
,
NULL
,
size
,
alignment
,
&
zero
,
&
commit
);
}
return
addr
;
}
static
void
base_unmap
(
tsdn_t
*
tsdn
,
e
xtent_
hooks_t
*
e
xtent_
hooks
,
unsigned
ind
,
void
*
addr
,
base_unmap
(
tsdn_t
*
tsdn
,
ehooks_t
*
ehooks
,
unsigned
ind
,
void
*
addr
,
size_t
size
)
{
/*
* Cascade through dalloc, decommit, purge_forced, and purge_lazy,
...
...
@@ -64,7 +71,7 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
* may in fact want the end state of all associated virtual memory to be
* in some consistent-but-allocated state.
*/
if
(
e
xtent_hooks
==
&
extent_hooks_default
)
{
if
(
e
hooks_are_default
(
ehooks
)
)
{
if
(
!
extent_dalloc_mmap
(
addr
,
size
))
{
goto
label_done
;
}
...
...
@@ -80,31 +87,19 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
/* Nothing worked. This should never happen. */
not_reached
();
}
else
{
tsd_t
*
tsd
=
tsdn_null
(
tsdn
)
?
tsd_fetch
()
:
tsdn_tsd
(
tsdn
);
pre_reentrancy
(
tsd
,
NULL
);
if
(
extent_hooks
->
dalloc
!=
NULL
&&
!
extent_hooks
->
dalloc
(
extent_hooks
,
addr
,
size
,
true
,
ind
))
{
goto
label_post_reentrancy
;
}
if
(
extent_hooks
->
decommit
!=
NULL
&&
!
extent_hooks
->
decommit
(
extent_hooks
,
addr
,
size
,
0
,
size
,
ind
))
{
goto
label_post_reentrancy
;
}
if
(
extent_hooks
->
purge_forced
!=
NULL
&&
!
extent_hooks
->
purge_forced
(
extent_hooks
,
addr
,
size
,
0
,
size
,
ind
))
{
goto
label_post_reentrancy
;
}
if
(
extent_hooks
->
purge_lazy
!=
NULL
&&
!
extent_hooks
->
purge_lazy
(
extent_hooks
,
addr
,
size
,
0
,
size
,
ind
))
{
goto
label_post_reentrancy
;
if
(
!
ehooks_dalloc
(
tsdn
,
ehooks
,
addr
,
size
,
true
))
{
goto
label_done
;
}
if
(
!
ehooks_decommit
(
tsdn
,
ehooks
,
addr
,
size
,
0
,
size
))
{
goto
label_done
;
}
if
(
!
ehooks_purge_forced
(
tsdn
,
ehooks
,
addr
,
size
,
0
,
size
))
{
goto
label_done
;
}
if
(
!
ehooks_purge_lazy
(
tsdn
,
ehooks
,
addr
,
size
,
0
,
size
))
{
goto
label_done
;
}
/* Nothing worked. That's the application's problem. */
label_post_reentrancy:
post_reentrancy
(
tsd
);
}
label_done:
if
(
metadata_thp_madvise
())
{
...
...
@@ -116,14 +111,14 @@ label_done:
}
static
void
base_e
xtent
_init
(
size_t
*
extent_sn_next
,
e
xtent_t
*
extent
,
void
*
addr
,
base_e
data
_init
(
size_t
*
extent_sn_next
,
e
data_t
*
edata
,
void
*
addr
,
size_t
size
)
{
size_t
sn
;
sn
=
*
extent_sn_next
;
(
*
extent_sn_next
)
++
;
e
xtent
_binit
(
e
xtent
,
addr
,
size
,
sn
);
e
data
_binit
(
e
data
,
addr
,
size
,
sn
);
}
static
size_t
...
...
@@ -169,7 +164,7 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
pages_huge
(
block
,
block
->
size
);
if
(
config_stats
)
{
base
->
n_thp
+=
HUGEPAGE_CEILING
(
block
->
size
-
e
xtent
_bsize_get
(
&
block
->
e
xtent
))
>>
LG_HUGEPAGE
;
e
data
_bsize_get
(
&
block
->
e
data
))
>>
LG_HUGEPAGE
;
}
block
=
block
->
next
;
assert
(
block
==
NULL
||
(
base_ind_get
(
base
)
==
0
));
...
...
@@ -177,34 +172,34 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
}
 
 static void *
-base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
+base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size,
     size_t alignment) {
    void *ret;
 
    assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
    assert(size == ALIGNMENT_CEILING(size, alignment));
 
-   *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
-       alignment) - (uintptr_t)extent_addr_get(extent);
-   ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
-   assert(extent_bsize_get(extent) >= *gap_size + size);
-   extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
-       *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
-       extent_sn_get(extent));
+   *gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata),
+       alignment) - (uintptr_t)edata_addr_get(edata);
+   ret = (void *)((uintptr_t)edata_addr_get(edata) + *gap_size);
+   assert(edata_bsize_get(edata) >= *gap_size + size);
+   edata_binit(edata, (void *)((uintptr_t)edata_addr_get(edata) +
+       *gap_size + size), edata_bsize_get(edata) - *gap_size - size,
+       edata_sn_get(edata));
    return ret;
}
 
 static void
-base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
+base_extent_bump_alloc_post(base_t *base, edata_t *edata, size_t gap_size,
     void *addr, size_t size) {
-   if (extent_bsize_get(extent) > 0) {
+   if (edata_bsize_get(edata) > 0) {
        /*
         * Compute the index for the largest size class that does not
         * exceed extent's size.
         */
-       szind_t index_floor = sz_size2index(extent_bsize_get(extent) + 1) - 1;
-       extent_heap_insert(&base->avail[index_floor], extent);
+       szind_t index_floor = sz_size2index(edata_bsize_get(edata) + 1) - 1;
+       edata_heap_insert(&base->avail[index_floor], edata);
    }
 
    if (config_stats) {
...
...
@@ -229,13 +224,13 @@ base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
}
 
 static void *
-base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
+base_extent_bump_alloc(base_t *base, edata_t *edata, size_t size,
     size_t alignment) {
    void *ret;
    size_t gap_size;
 
-   ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
-   base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
+   ret = base_extent_bump_alloc_helper(edata, &gap_size, size, alignment);
+   base_extent_bump_alloc_post(base, edata, gap_size, ret, size);
    return ret;
}
...
...
@@ -245,8 +240,8 @@ base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
 * On success a pointer to the initialized base_block_t header is returned.
 */
static base_block_t *
base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
    unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
    pszind_t *pind_last, size_t *extent_sn_next, size_t size,
    size_t alignment) {
	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
...
...
@@ -267,7 +262,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
	size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
	size_t block_size = (min_block_size > next_block_size) ? min_block_size : next_block_size;
	base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
	base_block_t *block = (base_block_t *)base_map(tsdn, ehooks, ind,
	    block_size);
	if (block == NULL) {
		return NULL;
...
...
@@ -295,7 +290,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
	block->size = block_size;
	block->next = NULL;
	assert(block_size >= header_size);
	base_extent_init(extent_sn_next, &block->extent,
	base_edata_init(extent_sn_next, &block->edata,
	    (void *)((uintptr_t)block + header_size), block_size - header_size);
	return block;
}
...
...
@@ -304,17 +299,17 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
 * Allocate an extent that is at least as large as specified size, with
 * specified alignment.
 */
static extent_t *
static edata_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &base->mtx);

	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
	/*
	 * Drop mutex during base_block_alloc(), because an extent hook will be
	 * called.
	 */
	malloc_mutex_unlock(tsdn, &base->mtx);
	base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
	base_block_t *block = base_block_alloc(tsdn, base, ehooks,
	    base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
	    alignment);
	malloc_mutex_lock(tsdn, &base->mtx);
...
...
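The unlock/relock around base_block_alloc() in the hunk above follows the comment's reasoning: an extent hook is user-supplied code, so the internal mutex is presumably dropped to avoid holding it across arbitrary callbacks. A sketch of that general shape, with pthreads standing in for the allocator's own mutex type:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical user callback: may take locks of its own. */
static void user_hook(void) { printf("hook runs without mtx held\n"); }

static void
do_work_with_callback(void) {
	pthread_mutex_lock(&mtx);
	/* ... read state protected by mtx ... */

	/* Drop the lock across the callback, as the code above does. */
	pthread_mutex_unlock(&mtx);
	user_hook();
	pthread_mutex_lock(&mtx);

	/* ... re-validate and update protected state now that mtx is held again ... */
	pthread_mutex_unlock(&mtx);
}

int
main(void) {
	do_work_with_callback();
	return 0;
}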
@@ -338,7 +333,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	return &block->extent;
	return &block->edata;
}

base_t *
...
...
@@ -347,10 +342,22 @@ b0get(void) {
}

base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
    bool metadata_use_hooks) {
	pszind_t pind_last = 0;
	size_t extent_sn_next = 0;
	base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,

	/*
	 * The base will contain the ehooks eventually, but it itself is
	 * allocated using them. So we use some stack ehooks to bootstrap its
	 * memory, and then initialize the ehooks within the base_t.
	 */
	ehooks_t fake_ehooks;
	ehooks_init(&fake_ehooks, metadata_use_hooks ?
	    (extent_hooks_t *)extent_hooks :
	    (extent_hooks_t *)&ehooks_default_extent_hooks, ind);

	base_block_t *block = base_block_alloc(tsdn, NULL, &fake_ehooks, ind,
	    &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
	if (block == NULL) {
		return NULL;
...
...
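The fake_ehooks comment above describes a classic bootstrap problem: the object that will own the hooks must itself be allocated through those hooks, so a stack-local copy is used first and then installed into the object. A toy sketch of the same pattern, with a hypothetical allocator_t/hooks_t pair standing in for base_t and ehooks_t:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical hook table, analogous to an ehooks table. */
typedef struct {
	void *(*alloc)(size_t size);
} hooks_t;

/* An allocator that stores its own hooks, analogous to base_t. */
typedef struct {
	hooks_t hooks;
} allocator_t;

static void *default_alloc(size_t size) { return malloc(size); }

static allocator_t *
allocator_new(const hooks_t *user_hooks) {
	/*
	 * Bootstrap: use a stack-local copy of the hooks to allocate the
	 * allocator itself, then install the hooks inside it.
	 */
	hooks_t boot_hooks = *user_hooks;
	allocator_t *a = boot_hooks.alloc(sizeof(allocator_t));
	if (a == NULL) {
		return NULL;
	}
	a->hooks = boot_hooks;
	return a;
}

int
main(void) {
	hooks_t h = { default_alloc };
	allocator_t *a = allocator_new(&h);
	printf("allocator bootstrapped: %p\n", (void *)a);
	free(a);
	return 0;
}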
@@ -359,13 +366,15 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	size_t gap_size;
	size_t base_alignment = CACHELINE;
	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata,
	    &gap_size, base_size, base_alignment);
	base->ind = ind;
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
	ehooks_init(&base->ehooks, (extent_hooks_t *)extent_hooks, ind);
	ehooks_init(&base->ehooks_base, metadata_use_hooks ?
	    (extent_hooks_t *)extent_hooks :
	    (extent_hooks_t *)&ehooks_default_extent_hooks, ind);
	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
	    malloc_mutex_rank_exclusive)) {
		base_unmap(tsdn, extent_hooks, ind, block, block->size);
		base_unmap(tsdn, &fake_ehooks, ind, block, block->size);
		return NULL;
	}
	base->pind_last = pind_last;
...
...
@@ -373,7 +382,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	base->blocks = block;
	base->auto_thp_switched = false;
	for (szind_t i = 0; i < SC_NSIZES; i++) {
		extent_heap_new(&base->avail[i]);
		edata_heap_new(&base->avail[i]);
	}
	if (config_stats) {
		base->allocated = sizeof(base_block_t);
...
...
@@ -386,7 +395,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
	base_extent_bump_alloc_post(base, &block->edata, gap_size, base,
	    base_size);

	return base;
...
...
@@ -394,26 +403,31 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
void
base_delete(tsdn_t *tsdn, base_t *base) {
	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
	base_block_t *next = base->blocks;
	do {
		base_block_t *block = next;
		next = block->next;
		base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
		base_unmap(tsdn, ehooks, base_ind_get(base), block,
		    block->size);
	} while (next != NULL);
}

extent_hooks_t *
base_extent_hooks_get(base_t *base) {
	return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
	    ATOMIC_ACQUIRE);
ehooks_t *
base_ehooks_get(base_t *base) {
	return &base->ehooks;
}

ehooks_t *
base_ehooks_get_for_metadata(base_t *base) {
	return &base->ehooks_base;
}

extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
	extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
	extent_hooks_t *old_extent_hooks =
	    ehooks_get_extent_hooks_ptr(&base->ehooks);
	ehooks_init(&base->ehooks, extent_hooks, ehooks_ind_get(&base->ehooks));
	return old_extent_hooks;
}
...
...
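After the change above, a base carries two hook views: ehooks, which base_extent_hooks_set() swaps and returns the previous table from, and ehooks_base, which the base's own metadata paths use and which stays on the default hooks unless metadata_use_hooks was requested. A toy sketch of that swap-and-keep-metadata split, with hypothetical types (not the jemalloc ones):

#include <stdio.h>

/* Hypothetical hook table and holder, mirroring the get/set pair above. */
typedef struct { const char *name; } hooks_t;
typedef struct {
	const hooks_t *user_hooks;     /* what hooks_set() swaps out */
	const hooks_t *metadata_hooks; /* used for the holder's own bookkeeping; never swapped */
} holder_t;

static const hooks_t *
holder_hooks_set(holder_t *h, const hooks_t *new_hooks) {
	const hooks_t *old = h->user_hooks;
	h->user_hooks = new_hooks;
	return old;                    /* hand the previous table back to the caller */
}

int
main(void) {
	static const hooks_t defaults = { "default" }, custom = { "custom" };
	holder_t h = { &defaults, &defaults };
	const hooks_t *old = holder_hooks_set(&h, &custom);
	printf("old=%s user=%s metadata=%s\n",
	    old->name, h.user_hooks->name, h.metadata_hooks->name);
	return 0;
}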
@@ -424,28 +438,28 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t asize = usize + alignment - QUANTUM;

	extent_t *extent = NULL;
	edata_t *edata = NULL;
	malloc_mutex_lock(tsdn, &base->mtx);
	for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
		extent = extent_heap_remove_first(&base->avail[i]);
		if (extent != NULL) {
		edata = edata_heap_remove_first(&base->avail[i]);
		if (edata != NULL) {
			/* Use existing space. */
			break;
		}
	}
	if (extent == NULL) {
	if (edata == NULL) {
		/* Try to allocate more space. */
		extent = base_extent_alloc(tsdn, base, usize, alignment);
		edata = base_extent_alloc(tsdn, base, usize, alignment);
	}
	void *ret;
	if (extent == NULL) {
	if (edata == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = base_extent_bump_alloc(base, extent, usize, alignment);
	ret = base_extent_bump_alloc(base, edata, usize, alignment);
	if (esn != NULL) {
		*esn = extent_sn_get(extent);
		*esn = (size_t)edata_sn_get(edata);
	}
label_return:
	malloc_mutex_unlock(tsdn, &base->mtx);
...
...
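The allocation path above searches the per-size-class heaps of leftover space, starting at the smallest class that can satisfy the padded request, falls back to mapping a new block when nothing fits, and then bump-allocates from whichever region it found. A simplified sketch of that search order using plain arrays instead of heaps (all names here are illustrative, not the jemalloc ones):

#include <stddef.h>
#include <stdio.h>

#define NCLASSES 8

/* One leftover region per size class in this toy model (0 = empty slot). */
static size_t avail[NCLASSES] = { 0, 0, 64, 0, 0, 1024, 0, 0 };

/* Map a request size to the first class worth searching (capacity doubles per class). */
static size_t
size2class(size_t size) {
	size_t cls = 0, cap = 16;
	while (cap < size && cls < NCLASSES - 1) {
		cap *= 2;
		cls++;
	}
	return cls;
}

/* Return the class index of a region that fits, or NCLASSES if none does. */
static size_t
find_fit(size_t size) {
	for (size_t i = size2class(size); i < NCLASSES; i++) {
		if (avail[i] >= size) {
			return i;      /* use existing space */
		}
	}
	return NCLASSES;               /* caller would map a fresh block instead */
}

int
main(void) {
	size_t i = find_fit(200);
	if (i < NCLASSES) {
		printf("request 200 served from class %zu (region of %zu bytes)\n",
		    i, avail[i]);
	} else {
		printf("no leftover region fits; would map a new block\n");
	}
	return 0;
}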
@@ -465,16 +479,16 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	return base_alloc_impl(tsdn, base, size, alignment, NULL);
}

extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
edata_t *
base_alloc_edata(tsdn_t *tsdn, base_t *base) {
	size_t esn;
	extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
	    CACHELINE, &esn);
	if (extent == NULL) {
	edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t),
	    EDATA_ALIGNMENT, &esn);
	if (edata == NULL) {
		return NULL;
	}
	extent_esn_set(extent, esn);
	return extent;
	edata_esn_set(edata, esn);
	return edata;
}
void
...
...
@@ -509,6 +523,7 @@ base_postfork_child(tsdn_t *tsdn, base_t *base) {
bool
base_boot(tsdn_t *tsdn) {
	b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
	b0 = base_new(tsdn, 0, (extent_hooks_t *)&ehooks_default_extent_hooks,
	    /* metadata_use_hooks */ true);
	return (b0 == NULL);
}