Commit 153f2f00 authored by antirez

Jemalloc updated to 4.4.0.

The original jemalloc source tree was modified to:

1. Remove the configure error that prevents nested builds.
2. Insert the Redis private Jemalloc API in order to allow the
Redis fragmentation function to work.
parent ca532c94
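Point 2 of the commit message refers to extra entry points that let Redis ask the allocator about fragmentation. The private API itself is not part of this header, so the following is only an illustrative C sketch of how an application can read jemalloc's documented global statistics ("stats.allocated", "stats.active", "stats.resident") through the standard je_mallctl interface to get a rough fragmentation ratio; the je_ prefix is assumed, matching how Redis builds jemalloc.

/*
 * Illustration only: reading jemalloc's global statistics via mallctl.
 * This is NOT the Redis private API referenced by this commit.
 */
#include <stdio.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

static void print_fragmentation(void)
{
	uint64_t epoch = 1;
	size_t usz = sizeof(epoch);
	/* Statistics are cached; writing "epoch" refreshes them. */
	je_mallctl("epoch", &epoch, &usz, &epoch, usz);

	size_t allocated = 0, active = 0, resident = 0, sz = sizeof(size_t);
	je_mallctl("stats.allocated", &allocated, &sz, NULL, 0); /* bytes handed to the application */
	je_mallctl("stats.active", &active, &sz, NULL, 0);       /* bytes in active pages */
	je_mallctl("stats.resident", &resident, &sz, NULL, 0);   /* bytes mapped and resident */

	printf("allocated=%zu active=%zu resident=%zu ratio=%.2f\n",
	    allocated, active, resident,
	    allocated ? (double)resident / (double)allocated : 0.0);
}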
 #ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
 #define MSVC_COMPAT_WINDOWS_EXTRA_H

-#ifndef ENOENT
-# define ENOENT ERROR_PATH_NOT_FOUND
-#endif
-#ifndef EINVAL
-# define EINVAL ERROR_BAD_ARGUMENTS
-#endif
-#ifndef EAGAIN
-# define EAGAIN ERROR_OUTOFMEMORY
-#endif
-#ifndef EPERM
-# define EPERM ERROR_WRITE_FAULT
-#endif
-#ifndef EFAULT
-# define EFAULT ERROR_INVALID_ADDRESS
-#endif
-#ifndef ENOMEM
-# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
-#endif
-#ifndef ERANGE
-# define ERANGE ERROR_INVALID_DATA
-#endif
+#include <errno.h>

 #endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
@@ -6,7 +6,7 @@ install_suffix=@install_suffix@
 Name: jemalloc
 Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support.
-URL: http://www.canonware.com/jemalloc
+URL: http://jemalloc.net/
 Version: @jemalloc_version@
 Cflags: -I${includedir}
 Libs: -L${libdir} -ljemalloc${install_suffix}
How to build jemalloc for Windows
=================================
1. Install Cygwin with at least the following packages:
* autoconf
* autogen
* gawk
* grep
* sed
2. Install Visual Studio 2015 with Visual C++
3. Add Cygwin\bin to the PATH environment variable
4. Open "VS2015 x86 Native Tools Command Prompt"
(note: x86/x64 doesn't matter at this point)
5. Generate header files:
sh -c "CC=cl ./autogen.sh"
6. Now the project can be opened and built in Visual Studio:
msvc\jemalloc_vc2015.sln
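After step 6, the resulting import library (jemalloc.lib, or jemallocd.lib for the Debug configurations) can be linked into a small console program as a smoke test. A minimal sketch, assuming the je_-prefixed public API that the bundled test_threads project also uses:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void)
{
	void *p = je_malloc(1024);               /* allocate through jemalloc */
	if (p == NULL) return 1;
	je_free(p);                              /* release the block */
	je_malloc_stats_print(NULL, NULL, NULL); /* dump allocator statistics */
	return 0;
}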

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 14
VisualStudioVersion = 14.0.24720.0
MinimumVisualStudioVersion = 10.0.40219.1
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}"
ProjectSection(SolutionItems) = preProject
ReadMe.txt = ReadMe.txt
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2015\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2015\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|x64 = Debug|x64
Debug|x86 = Debug|x86
Debug-static|x64 = Debug-static|x64
Debug-static|x86 = Debug-static|x86
Release|x64 = Release|x64
Release|x86 = Release|x86
Release-static|x64 = Release-static|x64
Release-static|x86 = Release-static|x86
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
EndGlobal
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug-static|Win32">
<Configuration>Debug-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug-static|x64">
<Configuration>Debug-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|Win32">
<Configuration>Release-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|x64">
<Configuration>Release-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\arena.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\assert.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_dss.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_mmap.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\hash.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\huge.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ph.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\quarantine.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\smoothstep.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\spin.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\witness.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h" />
<ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdbool.h" />
<ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdint.h" />
<ClInclude Include="..\..\..\..\include\msvc_compat\strings.h" />
<ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\..\src\arena.c" />
<ClCompile Include="..\..\..\..\src\atomic.c" />
<ClCompile Include="..\..\..\..\src\base.c" />
<ClCompile Include="..\..\..\..\src\bitmap.c" />
<ClCompile Include="..\..\..\..\src\chunk.c" />
<ClCompile Include="..\..\..\..\src\chunk_dss.c" />
<ClCompile Include="..\..\..\..\src\chunk_mmap.c" />
<ClCompile Include="..\..\..\..\src\ckh.c" />
<ClCompile Include="..\..\..\..\src\ctl.c" />
<ClCompile Include="..\..\..\..\src\extent.c" />
<ClCompile Include="..\..\..\..\src\hash.c" />
<ClCompile Include="..\..\..\..\src\huge.c" />
<ClCompile Include="..\..\..\..\src\jemalloc.c" />
<ClCompile Include="..\..\..\..\src\mb.c" />
<ClCompile Include="..\..\..\..\src\mutex.c" />
<ClCompile Include="..\..\..\..\src\nstime.c" />
<ClCompile Include="..\..\..\..\src\pages.c" />
<ClCompile Include="..\..\..\..\src\prng.c" />
<ClCompile Include="..\..\..\..\src\prof.c" />
<ClCompile Include="..\..\..\..\src\quarantine.c" />
<ClCompile Include="..\..\..\..\src\rtree.c" />
<ClCompile Include="..\..\..\..\src\spin.c" />
<ClCompile Include="..\..\..\..\src\stats.c" />
<ClCompile Include="..\..\..\..\src\tcache.c" />
<ClCompile Include="..\..\..\..\src\ticker.c" />
<ClCompile Include="..\..\..\..\src\tsd.c" />
<ClCompile Include="..\..\..\..\src\util.c" />
<ClCompile Include="..\..\..\..\src\witness.c" />
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>jemalloc</RootNamespace>
<WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)d</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)d</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<DebugInformationFormat>OldStyle</DebugInformationFormat>
<MinimalRebuild>false</MinimalRebuild>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<DebugInformationFormat>OldStyle</DebugInformationFormat>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
<Filter Include="Header Files\internal">
<UniqueIdentifier>{5697dfa3-16cf-4932-b428-6e0ec6e9f98e}</UniqueIdentifier>
</Filter>
<Filter Include="Header Files\msvc_compat">
<UniqueIdentifier>{0cbd2ca6-42a7-4f82-8517-d7e7a14fd986}</UniqueIdentifier>
</Filter>
<Filter Include="Header Files\msvc_compat\C99">
<UniqueIdentifier>{0abe6f30-49b5-46dd-8aca-6e33363fa52c}</UniqueIdentifier>
</Filter>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\arena.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\assert.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_dss.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_mmap.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\hash.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\huge.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ph.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\quarantine.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\smoothstep.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\spin.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\witness.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\msvc_compat\strings.h">
<Filter>Header Files\msvc_compat</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h">
<Filter>Header Files\msvc_compat</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdbool.h">
<Filter>Header Files\msvc_compat\C99</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdint.h">
<Filter>Header Files\msvc_compat\C99</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\..\src\arena.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\atomic.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\base.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\bitmap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\chunk.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\chunk_dss.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\chunk_mmap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ckh.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ctl.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\extent.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hash.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\huge.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\jemalloc.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\mb.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\mutex.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\nstime.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\pages.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prng.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prof.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\quarantine.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\rtree.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\spin.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\stats.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\tcache.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ticker.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\tsd.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\util.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\witness.c">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
</Project>
// jemalloc C++ threaded test
// Author: Rustam Abdullaev
// Public Domain
#include <atomic>
#include <functional>
#include <future>
#include <random>
#include <thread>
#include <vector>
#include <stdio.h>
#include <jemalloc/jemalloc.h>
using std::vector;
using std::thread;
using std::uniform_int_distribution;
using std::minstd_rand;
int test_threads()
{
  je_malloc_conf = "narenas:3";
  int narenas = 0;
  size_t sz = sizeof(narenas);
  je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0);
  if (narenas != 3) {
    printf("Error: unexpected number of arenas: %d\n", narenas);
    return 1;
  }
  static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 };
  static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0]));
  vector<thread> workers;
  static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50;
  je_malloc_stats_print(NULL, NULL, NULL);
  size_t allocated1;
  size_t sz1 = sizeof(allocated1);
  je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0);
  printf("\nPress Enter to start threads...\n");
  getchar();
  printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2);
  for (int i = 0; i < numThreads; i++) {
    workers.emplace_back([tid=i]() {
      uniform_int_distribution<int> sizeDist(0, numSizes - 1);
      minstd_rand rnd(tid * 17);
      uint8_t* ptrs[numAllocsMax];
      int ptrsz[numAllocsMax];
      for (int i = 0; i < numIter1; ++i) {
        thread t([&]() {
          for (int i = 0; i < numIter2; ++i) {
            const int numAllocs = numAllocsMax - sizeDist(rnd);
            for (int j = 0; j < numAllocs; j += 64) {
              const int x = sizeDist(rnd);
              const int sz = sizes[x];
              ptrsz[j] = sz;
              ptrs[j] = (uint8_t*)je_malloc(sz);
              if (!ptrs[j]) {
                printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. %d\n", sz, tid, i, j, x);
                exit(1);
              }
              for (int k = 0; k < sz; k++)
                ptrs[j][k] = tid + k;
            }
            for (int j = 0; j < numAllocs; j += 64) {
              for (int k = 0, sz = ptrsz[j]; k < sz; k++)
                if (ptrs[j][k] != (uint8_t)(tid + k)) {
                  printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k));
                  exit(1);
                }
              je_free(ptrs[j]);
            }
          }
        });
        t.join();
      }
    });
  }
  for (thread& t : workers) {
    t.join();
  }
  je_malloc_stats_print(NULL, NULL, NULL);
  size_t allocated2;
  je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0);
  size_t leaked = allocated2 - allocated1;
  printf("\nDone. Leaked: %zd bytes\n", leaked);
  bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet)
  printf("\nTest %s!\n", (failed ? "FAILED" : "successful"));
  printf("\nPress Enter to continue...\n");
  getchar();
  return failed ? 1 : 0;
}
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug-static|Win32">
<Configuration>Debug-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug-static|x64">
<Configuration>Debug-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|Win32">
<Configuration>Release-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|x64">
<Configuration>Release-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{09028CFD-4EB7-491D-869C-0708DB97ED44}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>test_threads</RootNamespace>
<WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="test_threads.cpp" />
<ClCompile Include="test_threads_main.cpp" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\jemalloc\jemalloc.vcxproj">
<Project>{8d6bb292-9e1c-413d-9f98-4864bdc1514a}</Project>
</ProjectReference>
</ItemGroup>
<ItemGroup>
<ClInclude Include="test_threads.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="test_threads.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="test_threads_main.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="test_threads.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
</Project>
\ No newline at end of file
#include "test_threads.h"
#include <future>
#include <functional>
#include <chrono>
using namespace std::chrono_literals;
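// Entry point for the msvc test_threads harness; the return value of
// test_threads() (declared in test_threads.h) becomes the process exit code.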
int main(int argc, char** argv)
{
int rc = test_threads();
return rc;
}
...@@ -4,16 +4,23 @@ ...@@ -4,16 +4,23 @@
/******************************************************************************/ /******************************************************************************/
/* Data. */ /* Data. */
purge_mode_t opt_purge = PURGE_DEFAULT;
const char *purge_mode_names[] = {
"ratio",
"decay",
"N/A"
};
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t lg_dirty_mult_default; static ssize_t lg_dirty_mult_default;
ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t decay_time_default;
arena_bin_info_t arena_bin_info[NBINS]; arena_bin_info_t arena_bin_info[NBINS];
size_t map_bias; size_t map_bias;
size_t map_misc_offset; size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */ size_t arena_maxrun; /* Max run size for arenas. */
size_t large_maxclass; /* Max large size class. */ size_t large_maxclass; /* Max large size class. */
static size_t small_maxrun; /* Max run size used for small size classes. */
static bool *small_run_tab; /* Valid small run page multiples. */
unsigned nlclasses; /* Number of large size classes. */ unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */ unsigned nhclasses; /* Number of huge size classes. */
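These new globals back jemalloc's decay-based purging: opt_purge selects between the classic active/dirty page ratio mode (driven by lg_dirty_mult) and time-based decay, and opt_decay_time holds the decay target in seconds. As a hedged usage note (option names taken from the jemalloc 4.x manual), a process would normally pick the mode at startup, e.g. MALLOC_CONF="purge:decay,decay_time:10".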
...@@ -23,60 +30,57 @@ unsigned nhclasses; /* Number of huge size classes. */ ...@@ -23,60 +30,57 @@ unsigned nhclasses; /* Number of huge size classes. */
* definition. * definition.
*/ */
static void arena_purge(arena_t *arena, bool all); static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, arena_chunk_t *chunk);
bool cleaned, bool decommitted); static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, size_t ndirty_limit);
arena_run_t *run, arena_bin_t *bin); static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, bool dirty, bool cleaned, bool decommitted);
arena_run_t *run, arena_bin_t *bin); static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_run_t *run,
arena_bin_t *bin);
/******************************************************************************/ /******************************************************************************/
#define CHUNK_MAP_KEY ((uintptr_t)0x1U) JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
JEMALLOC_INLINE_C arena_chunk_map_misc_t *
arena_miscelm_key_create(size_t size)
{ {
arena_chunk_t *chunk;
size_t pageind, mapbits;
return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) | chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
CHUNK_MAP_KEY)); pageind = arena_miscelm_to_pageind(miscelm);
mapbits = arena_mapbits_get(chunk, pageind);
return (arena_mapbits_size_decode(mapbits));
} }
JEMALLOC_INLINE_C bool JEMALLOC_INLINE_C const extent_node_t *
arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm) arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
{ {
arena_chunk_t *chunk;
return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
return (&chunk->node);
} }
#undef CHUNK_MAP_KEY JEMALLOC_INLINE_C int
arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
JEMALLOC_INLINE_C size_t
arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
{ {
size_t a_sn, b_sn;
assert(arena_miscelm_is_key(miscelm)); assert(a != NULL);
assert(b != NULL);
return (arena_mapbits_size_decode((uintptr_t)miscelm));
}
JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk;
size_t pageind, mapbits;
assert(!arena_miscelm_is_key(miscelm)); a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); return ((a_sn > b_sn) - (a_sn < b_sn));
pageind = arena_miscelm_to_pageind(miscelm);
mapbits = arena_mapbits_get(chunk, pageind);
return (arena_mapbits_size_decode(mapbits));
} }
JEMALLOC_INLINE_C int JEMALLOC_INLINE_C int
arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) arena_ad_comp(const arena_chunk_map_misc_t *a,
const arena_chunk_map_misc_t *b)
{ {
uintptr_t a_miscelm = (uintptr_t)a; uintptr_t a_miscelm = (uintptr_t)a;
uintptr_t b_miscelm = (uintptr_t)b; uintptr_t b_miscelm = (uintptr_t)b;
...@@ -87,74 +91,79 @@ arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) ...@@ -87,74 +91,79 @@ arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
} }
/* Generate red-black tree functions. */ JEMALLOC_INLINE_C int
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t, arena_snad_comp(const arena_chunk_map_misc_t *a,
rb_link, arena_run_comp) const arena_chunk_map_misc_t *b)
static size_t
run_quantize(size_t size)
{ {
size_t qsize; int ret;
assert(size != 0); assert(a != NULL);
assert(size == PAGE_CEILING(size)); assert(b != NULL);
/* Don't change sizes that are valid small run sizes. */ ret = arena_sn_comp(a, b);
if (size <= small_maxrun && small_run_tab[size >> LG_PAGE]) if (ret != 0)
return (size); return (ret);
/* ret = arena_ad_comp(a, b);
* Round down to the nearest run size that can actually be requested return (ret);
* during normal large allocation. Add large_pad so that cache index
* randomization can offset the allocation from the page boundary.
*/
qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
if (qsize <= SMALL_MAXCLASS + large_pad)
return (run_quantize(size - large_pad));
assert(qsize <= size);
return (qsize);
} }
/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
ph_link, arena_snad_comp)
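The comparators above order runs for the new runs_avail pairing heaps: first by the serial number of the backing chunk (lower, i.e. older, first) and then by address, using the branch-free (a > b) - (a < b) idiom to produce -1/0/+1. A self-contained sketch of the same ordering (all names here are illustrative, not jemalloc's):

#include <stddef.h>
#include <stdint.h>

/* Branch-free three-way compare: returns -1, 0 or +1. */
static int
cmp_u(uintptr_t a, uintptr_t b)
{
	return ((a > b) - (a < b));
}

/* Order by (serial number, address), mirroring arena_snad_comp(). */
static int
snad_cmp(size_t a_sn, const void *a_addr, size_t b_sn, const void *b_addr)
{
	int ret = cmp_u((uintptr_t)a_sn, (uintptr_t)b_sn);

	if (ret != 0)
		return (ret);
	return (cmp_u((uintptr_t)a_addr, (uintptr_t)b_addr));
}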
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t static size_t
run_quantize_next(size_t size) run_quantize_floor(size_t size)
{ {
size_t large_run_size_next; size_t ret;
pszind_t pind;
assert(size > 0);
assert(size <= HUGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
assert(size != 0); assert(size != 0);
assert(size == PAGE_CEILING(size)); assert(size == PAGE_CEILING(size));
/* pind = psz2ind(size - large_pad + 1);
* Return the next quantized size greater than the input size. if (pind == 0) {
* Quantized sizes comprise the union of run sizes that back small /*
* region runs, and run sizes that back large regions with no explicit * Avoid underflow. This short-circuit would also do the right
* alignment constraints. * thing for all sizes in the range for which there are
*/ * PAGE-spaced size classes, but it's simplest to just handle
* the one case that would cause erroneous results.
if (size > SMALL_MAXCLASS) { */
large_run_size_next = PAGE_CEILING(index2size(size2index(size - return (size);
large_pad) + 1) + large_pad);
} else
large_run_size_next = SIZE_T_MAX;
if (size >= small_maxrun)
return (large_run_size_next);
while (true) {
size += PAGE;
assert(size <= small_maxrun);
if (small_run_tab[size >> LG_PAGE]) {
if (large_run_size_next < size)
return (large_run_size_next);
return (size);
}
} }
ret = pind2sz(pind - 1) + large_pad;
assert(ret <= size);
return (ret);
} }
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t static size_t
run_quantize_first(size_t size) run_quantize_ceil(size_t size)
{ {
size_t qsize = run_quantize(size); size_t ret;
assert(size > 0);
assert(size <= HUGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
if (qsize < size) { ret = run_quantize_floor(size);
if (ret < size) {
/* /*
* Skip a quantization that may have an adequately large run, * Skip a quantization that may have an adequately large run,
* because under-sized runs may be mixed in. This only happens * because under-sized runs may be mixed in. This only happens
...@@ -163,72 +172,50 @@ run_quantize_first(size_t size) ...@@ -163,72 +172,50 @@ run_quantize_first(size_t size)
* search would potentially find sufficiently aligned available * search would potentially find sufficiently aligned available
* memory somewhere lower. * memory somewhere lower.
*/ */
qsize = run_quantize_next(size); ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
} }
return (qsize);
}
JEMALLOC_INLINE_C int
arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
int ret;
uintptr_t a_miscelm = (uintptr_t)a;
size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ?
arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a));
size_t b_qsize = run_quantize(arena_miscelm_size_get(b));
/*
* Compare based on quantized size rather than size, in order to sort
* equally useful runs only by address.
*/
ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
if (ret == 0) {
if (!arena_miscelm_is_key(a)) {
uintptr_t b_miscelm = (uintptr_t)b;
ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
} else {
/*
* Treat keys as if they are lower than anything else.
*/
ret = -1;
}
}
return (ret); return (ret);
} }
#ifdef JEMALLOC_JET
/* Generate red-black tree functions. */ #undef run_quantize_ceil
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, #define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
arena_chunk_map_misc_t, rb_link, arena_avail_comp) run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif
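run_quantize_floor() maps a run size down to the largest size that can back a large allocation (a page-size class plus large_pad), and run_quantize_ceil() maps it up to the smallest such size, bumping past the floor when the input is not itself a quantized size. As an illustrative check of the intended relationship, assuming size is a nonzero page multiple no larger than HUGE_MAXCLASS (these asserts are a sketch, not part of the jemalloc tree):

	assert(run_quantize_floor(size) <= size);
	assert(run_quantize_ceil(size) >= size);
	assert((run_quantize_floor(size) & PAGE_MASK) == 0);
	assert((run_quantize_ceil(size) & PAGE_MASK) == 0);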
static void static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages) size_t npages)
{ {
pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
arena_miscelm_get_const(chunk, pageind))));
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE)); LG_PAGE));
arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk, assert((npages << LG_PAGE) < chunksize);
pageind)); assert(pind2sz(pind) <= chunksize);
arena_run_heap_insert(&arena->runs_avail[pind],
arena_miscelm_get_mutable(chunk, pageind));
} }
static void static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages) size_t npages)
{ {
pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
arena_miscelm_get_const(chunk, pageind))));
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE)); LG_PAGE));
arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk, assert((npages << LG_PAGE) < chunksize);
pageind)); assert(pind2sz(pind) <= chunksize);
arena_run_heap_remove(&arena->runs_avail[pind],
arena_miscelm_get_mutable(chunk, pageind));
} }
static void static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages) size_t npages)
{ {
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE)); LG_PAGE));
...@@ -245,7 +232,8 @@ static void ...@@ -245,7 +232,8 @@ static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages) size_t npages)
{ {
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE)); LG_PAGE));
...@@ -292,14 +280,14 @@ JEMALLOC_INLINE_C void * ...@@ -292,14 +280,14 @@ JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{ {
void *ret; void *ret;
unsigned regind; size_t regind;
arena_chunk_map_misc_t *miscelm; arena_chunk_map_misc_t *miscelm;
void *rpages; void *rpages;
assert(run->nfree > 0); assert(run->nfree > 0);
assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info); regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
miscelm = arena_run_to_miscelm(run); miscelm = arena_run_to_miscelm(run);
rpages = arena_miscelm_to_rpages(miscelm); rpages = arena_miscelm_to_rpages(miscelm);
ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
...@@ -316,7 +304,7 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr) ...@@ -316,7 +304,7 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
size_t mapbits = arena_mapbits_get(chunk, pageind); size_t mapbits = arena_mapbits_get(chunk, pageind);
szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind]; arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind = arena_run_regind(run, bin_info, ptr); size_t regind = arena_run_regind(run, bin_info, ptr);
assert(run->nfree < bin_info->nregs); assert(run->nfree < bin_info->nregs);
/* Freeing an interior pointer can cause assertion failure. */ /* Freeing an interior pointer can cause assertion failure. */
...@@ -364,16 +352,30 @@ arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) ...@@ -364,16 +352,30 @@ arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
} }
static void static void
arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) arena_nactive_add(arena_t *arena, size_t add_pages)
{ {
if (config_stats) { if (config_stats) {
ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages size_t cactive_add = CHUNK_CEILING((arena->nactive +
- sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
LG_PAGE); LG_PAGE);
if (cactive_diff != 0) if (cactive_add != 0)
stats_cactive_add(cactive_diff); stats_cactive_add(cactive_add);
} }
arena->nactive += add_pages;
}
static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{
if (config_stats) {
size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
if (cactive_sub != 0)
stats_cactive_sub(cactive_sub);
}
arena->nactive -= sub_pages;
} }
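arena_nactive_add() and arena_nactive_sub() replace the old arena_cactive_update(): each keeps arena->nactive in sync and records in the cactive statistic only the change visible at chunk granularity. For instance, assuming the usual 4 KiB pages and 2 MiB chunks, growing nactive from 100 to 700 pages moves the active size from 400 KiB to 2800 KiB, so CHUNK_CEILING() goes from 2 MiB to 4 MiB and stats_cactive_add() records 2 MiB, whereas a change that stays within the same 2 MiB multiple records nothing.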
static void static void
...@@ -394,8 +396,7 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, ...@@ -394,8 +396,7 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
arena_avail_remove(arena, chunk, run_ind, total_pages); arena_avail_remove(arena, chunk, run_ind, total_pages);
if (flag_dirty != 0) if (flag_dirty != 0)
arena_run_dirty_remove(arena, chunk, run_ind, total_pages); arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
arena_cactive_update(arena, need_pages, 0); arena_nactive_add(arena, need_pages);
arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */ /* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) { if (rem_pages > 0) {
...@@ -567,7 +568,8 @@ arena_chunk_init_spare(arena_t *arena) ...@@ -567,7 +568,8 @@ arena_chunk_init_spare(arena_t *arena)
} }
static bool static bool
arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero) arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
size_t sn, bool zero)
{ {
/* /*
...@@ -576,64 +578,67 @@ arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero) ...@@ -576,64 +578,67 @@ arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
* of runs is tracked individually, and upon chunk deallocation the * of runs is tracked individually, and upon chunk deallocation the
* entire chunk is in a consistent commit state. * entire chunk is in a consistent commit state.
*/ */
extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true); extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
extent_node_achunk_set(&chunk->node, true); extent_node_achunk_set(&chunk->node, true);
return (chunk_register(chunk, &chunk->node)); return (chunk_register(tsdn, chunk, &chunk->node));
} }
static arena_chunk_t * static arena_chunk_t *
arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
bool *zero, bool *commit) chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
size_t sn;
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL, chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
chunksize, chunksize, zero, commit); NULL, chunksize, chunksize, &sn, zero, commit);
if (chunk != NULL && !*commit) { if (chunk != NULL && !*commit) {
/* Commit header. */ /* Commit header. */
if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind)) { LG_PAGE, arena->ind)) {
chunk_dalloc_wrapper(arena, chunk_hooks, chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
(void *)chunk, chunksize, *commit); (void *)chunk, chunksize, sn, *zero, *commit);
chunk = NULL; chunk = NULL;
} }
} }
if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) { if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
*zero)) {
if (!*commit) { if (!*commit) {
/* Undo commit of header. */ /* Undo commit of header. */
chunk_hooks->decommit(chunk, chunksize, 0, map_bias << chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind); LG_PAGE, arena->ind);
} }
chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk, chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
chunksize, *commit); chunksize, sn, *zero, *commit);
chunk = NULL; chunk = NULL;
} }
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
return (chunk); return (chunk);
} }
static arena_chunk_t * static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit) arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
bool *commit)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t sn;
chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize, chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
chunksize, zero, true); chunksize, &sn, zero, commit, true);
if (chunk != NULL) { if (chunk != NULL) {
if (arena_chunk_register(arena, chunk, *zero)) { if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
chunk_dalloc_cache(arena, &chunk_hooks, chunk, chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
chunksize, true); chunksize, sn, true);
return (NULL); return (NULL);
} }
*commit = true;
} }
if (chunk == NULL) { if (chunk == NULL) {
chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks, chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
zero, commit); &chunk_hooks, zero, commit);
} }
if (config_stats && chunk != NULL) { if (config_stats && chunk != NULL) {
...@@ -645,7 +650,7 @@ arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit) ...@@ -645,7 +650,7 @@ arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
} }
static arena_chunk_t * static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena) arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
bool zero, commit; bool zero, commit;
...@@ -655,14 +660,16 @@ arena_chunk_init_hard(arena_t *arena) ...@@ -655,14 +660,16 @@ arena_chunk_init_hard(arena_t *arena)
zero = false; zero = false;
commit = false; commit = false;
chunk = arena_chunk_alloc_internal(arena, &zero, &commit); chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
if (chunk == NULL) if (chunk == NULL)
return (NULL); return (NULL);
chunk->hugepage = true;
/* /*
* Initialize the map to contain one maximal free untouched run. Mark * Initialize the map to contain one maximal free untouched run. Mark
* the pages as zeroed if chunk_alloc() returned a zeroed or decommitted * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
* chunk. * or decommitted chunk.
*/ */
flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED; flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED; flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
...@@ -674,17 +681,18 @@ arena_chunk_init_hard(arena_t *arena) ...@@ -674,17 +681,18 @@ arena_chunk_init_hard(arena_t *arena)
*/ */
if (!zero) { if (!zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
(void *)arena_bitselm_get(chunk, map_bias+1), (void *)arena_bitselm_get_const(chunk, map_bias+1),
(size_t)((uintptr_t) arena_bitselm_get(chunk, (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk, chunk_npages-1) -
map_bias+1))); (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++) for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_internal_set(chunk, i, flag_unzeroed); arena_mapbits_internal_set(chunk, i, flag_unzeroed);
} else { } else {
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
*)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t) *)arena_bitselm_get_const(chunk, map_bias+1),
arena_bitselm_get(chunk, chunk_npages-1) - (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
(uintptr_t)arena_bitselm_get(chunk, map_bias+1))); chunk_npages-1) -
(uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
if (config_debug) { if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) { for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) == assert(arena_mapbits_unzeroed_get(chunk, i) ==
...@@ -699,28 +707,85 @@ arena_chunk_init_hard(arena_t *arena) ...@@ -699,28 +707,85 @@ arena_chunk_init_hard(arena_t *arena)
} }
static arena_chunk_t * static arena_chunk_t *
arena_chunk_alloc(arena_t *arena) arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
if (arena->spare != NULL) if (arena->spare != NULL)
chunk = arena_chunk_init_spare(arena); chunk = arena_chunk_init_spare(arena);
else { else {
chunk = arena_chunk_init_hard(arena); chunk = arena_chunk_init_hard(tsdn, arena);
if (chunk == NULL) if (chunk == NULL)
return (NULL); return (NULL);
} }
/* Insert the run into the runs_avail tree. */ ql_elm_new(&chunk->node, ql_link);
ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
return (chunk); return (chunk);
} }
static void static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
size_t sn, hugepage;
bool committed;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
chunk_deregister(chunk, &chunk->node);
sn = extent_node_sn_get(&chunk->node);
hugepage = chunk->hugepage;
committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
if (!committed) {
/*
* Decommit the header. Mark the chunk as decommitted even if
* header decommit fails, since treating a partially committed
* chunk as committed has a high potential for causing later
* access of decommitted memory.
*/
chunk_hooks = chunk_hooks_get(tsdn, arena);
chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
arena->ind);
}
if (!hugepage) {
/*
* Convert chunk back to the default state, so that all
* subsequent chunk allocations start out with chunks that can
* be backed by transparent huge pages.
*/
pages_huge(chunk, chunksize);
}
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
sn, committed);
if (config_stats) {
arena->stats.mapped -= chunksize;
arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
}
}
static void
arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
{ {
assert(arena->spare != spare);
if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
arena_run_dirty_remove(arena, spare, map_bias,
chunk_npages-map_bias);
}
arena_chunk_discard(tsdn, arena, spare);
}
static void
arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
arena_chunk_t *spare;
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
...@@ -732,49 +797,14 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) ...@@ -732,49 +797,14 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
assert(arena_mapbits_decommitted_get(chunk, map_bias) == assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
arena_mapbits_decommitted_get(chunk, chunk_npages-1)); arena_mapbits_decommitted_get(chunk, chunk_npages-1));
/* /* Remove run from runs_avail, so that the arena does not use it. */
* Remove run from the runs_avail tree, so that the arena does not use
* it.
*/
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
if (arena->spare != NULL) { ql_remove(&arena->achunks, &chunk->node, ql_link);
arena_chunk_t *spare = arena->spare; spare = arena->spare;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; arena->spare = chunk;
bool committed; if (spare != NULL)
arena_spare_discard(tsdn, arena, spare);
arena->spare = chunk;
if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
arena_run_dirty_remove(arena, spare, map_bias,
chunk_npages-map_bias);
}
chunk_deregister(spare, &spare->node);
committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
0);
if (!committed) {
/*
* Decommit the header. Mark the chunk as decommitted
* even if header decommit fails, since treating a
* partially committed chunk as committed has a high
* potential for causing later access of decommitted
* memory.
*/
chunk_hooks = chunk_hooks_get(arena);
chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
LG_PAGE, arena->ind);
}
chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
chunksize, committed);
if (config_stats) {
arena->stats.mapped -= chunksize;
arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
}
} else
arena->spare = chunk;
} }
static void static void
...@@ -816,6 +846,17 @@ arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) ...@@ -816,6 +846,17 @@ arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
arena->stats.hstats[index].curhchunks--; arena->stats.hstats[index].curhchunks--;
} }
static void
arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
{
szind_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
arena->stats.ndalloc_huge++;
arena->stats.hstats[index].ndalloc--;
}
static void static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{ {
...@@ -847,243 +888,240 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, ...@@ -847,243 +888,240 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
} }
extent_node_t * extent_node_t *
arena_node_alloc(arena_t *arena) arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
{ {
extent_node_t *node; extent_node_t *node;
malloc_mutex_lock(&arena->node_cache_mtx); malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
node = ql_last(&arena->node_cache, ql_link); node = ql_last(&arena->node_cache, ql_link);
if (node == NULL) { if (node == NULL) {
malloc_mutex_unlock(&arena->node_cache_mtx); malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
return (base_alloc(sizeof(extent_node_t))); return (base_alloc(tsdn, sizeof(extent_node_t)));
} }
ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
malloc_mutex_unlock(&arena->node_cache_mtx); malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
return (node); return (node);
} }
void void
arena_node_dalloc(arena_t *arena, extent_node_t *node) arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
{ {
malloc_mutex_lock(&arena->node_cache_mtx); malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
ql_elm_new(node, ql_link); ql_elm_new(node, ql_link);
ql_tail_insert(&arena->node_cache, node, ql_link); ql_tail_insert(&arena->node_cache, node, ql_link);
malloc_mutex_unlock(&arena->node_cache_mtx); malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
} }
static void * static void *
arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool *zero, size_t csize) chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
bool *zero, size_t csize)
{ {
void *ret; void *ret;
bool commit = true; bool commit = true;
ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment, ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
zero, &commit); alignment, sn, zero, &commit);
if (ret == NULL) { if (ret == NULL) {
/* Revert optimistic stats updates. */ /* Revert optimistic stats updates. */
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) { if (config_stats) {
arena_huge_malloc_stats_update_undo(arena, usize); arena_huge_malloc_stats_update_undo(arena, usize);
arena->stats.mapped -= usize; arena->stats.mapped -= usize;
} }
arena->nactive -= (usize >> LG_PAGE); arena_nactive_sub(arena, usize >> LG_PAGE);
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
} }
return (ret); return (ret);
} }
void * void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
bool *zero) size_t alignment, size_t *sn, bool *zero)
{ {
void *ret; void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize = CHUNK_CEILING(usize); size_t csize = CHUNK_CEILING(usize);
bool commit = true;
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
/* Optimistically update stats. */ /* Optimistically update stats. */
if (config_stats) { if (config_stats) {
arena_huge_malloc_stats_update(arena, usize); arena_huge_malloc_stats_update(arena, usize);
arena->stats.mapped += usize; arena->stats.mapped += usize;
} }
arena->nactive += (usize >> LG_PAGE); arena_nactive_add(arena, usize >> LG_PAGE);
ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment, ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
zero, true); alignment, sn, zero, &commit, true);
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
if (ret == NULL) { if (ret == NULL) {
ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize, ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
alignment, zero, csize); usize, alignment, sn, zero, csize);
} }
if (config_stats && ret != NULL)
stats_cactive_add(usize);
return (ret); return (ret);
} }
void void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize) arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
size_t sn)
{ {
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize; size_t csize;
csize = CHUNK_CEILING(usize); csize = CHUNK_CEILING(usize);
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) { if (config_stats) {
arena_huge_dalloc_stats_update(arena, usize); arena_huge_dalloc_stats_update(arena, usize);
arena->stats.mapped -= usize; arena->stats.mapped -= usize;
stats_cactive_sub(usize);
} }
arena->nactive -= (usize >> LG_PAGE); arena_nactive_sub(arena, usize >> LG_PAGE);
chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true); chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
} }
void void
arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
size_t usize) size_t oldsize, size_t usize)
{ {
assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
assert(oldsize != usize); assert(oldsize != usize);
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) if (config_stats)
arena_huge_ralloc_stats_update(arena, oldsize, usize); arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (oldsize < usize) { if (oldsize < usize)
size_t udiff = usize - oldsize; arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
arena->nactive += udiff >> LG_PAGE; else
if (config_stats) arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
stats_cactive_add(udiff); malloc_mutex_unlock(tsdn, &arena->lock);
} else {
size_t udiff = oldsize - usize;
arena->nactive -= udiff >> LG_PAGE;
if (config_stats)
stats_cactive_sub(udiff);
}
malloc_mutex_unlock(&arena->lock);
} }
void void
arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
size_t usize) size_t oldsize, size_t usize, size_t sn)
{ {
size_t udiff = oldsize - usize; size_t udiff = oldsize - usize;
size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) { if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize); arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (cdiff != 0) { if (cdiff != 0)
arena->stats.mapped -= cdiff; arena->stats.mapped -= cdiff;
stats_cactive_sub(udiff);
}
} }
arena->nactive -= udiff >> LG_PAGE; arena_nactive_sub(arena, udiff >> LG_PAGE);
if (cdiff != 0) { if (cdiff != 0) {
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
void *nchunk = (void *)((uintptr_t)chunk + void *nchunk = (void *)((uintptr_t)chunk +
CHUNK_CEILING(usize)); CHUNK_CEILING(usize));
chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true); chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
sn, true);
} }
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
} }
static bool static bool
arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk, chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
size_t udiff, size_t cdiff) size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{ {
bool err; bool err;
bool commit = true; bool commit = true;
err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize, err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
zero, &commit) == NULL); chunksize, sn, zero, &commit) == NULL);
if (err) { if (err) {
/* Revert optimistic stats updates. */ /* Revert optimistic stats updates. */
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) { if (config_stats) {
arena_huge_ralloc_stats_update_undo(arena, oldsize, arena_huge_ralloc_stats_update_undo(arena, oldsize,
usize); usize);
arena->stats.mapped -= cdiff; arena->stats.mapped -= cdiff;
} }
arena->nactive -= (udiff >> LG_PAGE); arena_nactive_sub(arena, udiff >> LG_PAGE);
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) { cdiff, true, arena->ind)) {
chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero, chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
true); *sn, *zero, true);
err = true; err = true;
} }
return (err); return (err);
} }
bool bool
arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
size_t usize, bool *zero) size_t oldsize, size_t usize, bool *zero)
{ {
bool err; bool err;
chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
size_t udiff = usize - oldsize; size_t udiff = usize - oldsize;
size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
size_t sn;
bool commit = true;
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
/* Optimistically update stats. */ /* Optimistically update stats. */
if (config_stats) { if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize); arena_huge_ralloc_stats_update(arena, oldsize, usize);
arena->stats.mapped += cdiff; arena->stats.mapped += cdiff;
} }
arena->nactive += (udiff >> LG_PAGE); arena_nactive_add(arena, udiff >> LG_PAGE);
err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff, err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
chunksize, zero, true) == NULL); chunksize, &sn, zero, &commit, true) == NULL);
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
if (err) { if (err) {
err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks, err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
chunk, oldsize, usize, zero, nchunk, udiff, &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
cdiff); udiff, cdiff);
} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) { cdiff, true, arena->ind)) {
chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero, chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
true); sn, *zero, true);
err = true; err = true;
} }
if (config_stats && !err)
stats_cactive_add(udiff);
return (err); return (err);
} }
/* /*
* Do first-best-fit run selection, i.e. select the lowest run that best fits. * Do first-best-fit run selection, i.e. select the lowest run that best fits.
* Run sizes are quantized, so not all candidate runs are necessarily exactly * Run sizes are indexed, so not all candidate runs are necessarily exactly the
* the same size. * same size.
*/ */
static arena_run_t * static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size) arena_run_first_best_fit(arena_t *arena, size_t size)
{ {
size_t search_size = run_quantize_first(size); pszind_t pind, i;
arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size);
arena_chunk_map_misc_t *miscelm = pind = psz2ind(run_quantize_ceil(size));
arena_avail_tree_nsearch(&arena->runs_avail, key);
if (miscelm == NULL) for (i = pind; pind2sz(i) <= chunksize; i++) {
return (NULL); arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
return (&miscelm->run); &arena->runs_avail[i]);
if (miscelm != NULL)
return (&miscelm->run);
}
return (NULL);
} }
static arena_run_t * static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{ {
arena_run_t *run = arena_run_first_best_fit(arena, s2u(size)); arena_run_t *run = arena_run_first_best_fit(arena, size);
if (run != NULL) { if (run != NULL) {
if (arena_run_split_large(arena, run, size, zero)) if (arena_run_split_large(arena, run, size, zero))
run = NULL; run = NULL;
...@@ -1092,7 +1130,7 @@ arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) ...@@ -1092,7 +1130,7 @@ arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
} }
static arena_run_t * static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero) arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
arena_run_t *run; arena_run_t *run;
...@@ -1108,9 +1146,9 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero) ...@@ -1108,9 +1146,9 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
/* /*
* No usable runs. Create a new chunk from which to allocate the run. * No usable runs. Create a new chunk from which to allocate the run.
*/ */
chunk = arena_chunk_alloc(arena); chunk = arena_chunk_alloc(tsdn, arena);
if (chunk != NULL) { if (chunk != NULL) {
run = &arena_miscelm_get(chunk, map_bias)->run; run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
if (arena_run_split_large(arena, run, size, zero)) if (arena_run_split_large(arena, run, size, zero))
run = NULL; run = NULL;
return (run); return (run);
...@@ -1136,7 +1174,7 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind) ...@@ -1136,7 +1174,7 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
} }
static arena_run_t * static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind) arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
arena_run_t *run; arena_run_t *run;
...@@ -1153,9 +1191,9 @@ arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind) ...@@ -1153,9 +1191,9 @@ arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
/* /*
* No usable runs. Create a new chunk from which to allocate the run. * No usable runs. Create a new chunk from which to allocate the run.
*/ */
chunk = arena_chunk_alloc(arena); chunk = arena_chunk_alloc(tsdn, arena);
if (chunk != NULL) { if (chunk != NULL) {
run = &arena_miscelm_get(chunk, map_bias)->run; run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
if (arena_run_split_small(arena, run, size, binind)) if (arena_run_split_small(arena, run, size, binind))
run = NULL; run = NULL;
return (run); return (run);
...@@ -1178,42 +1216,239 @@ arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult) ...@@ -1178,42 +1216,239 @@ arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
} }
ssize_t ssize_t
arena_lg_dirty_mult_get(arena_t *arena) arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
{ {
ssize_t lg_dirty_mult; ssize_t lg_dirty_mult;
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
lg_dirty_mult = arena->lg_dirty_mult; lg_dirty_mult = arena->lg_dirty_mult;
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
return (lg_dirty_mult); return (lg_dirty_mult);
} }
bool bool
arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult) arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
{ {
if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
return (true); return (true);
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
arena->lg_dirty_mult = lg_dirty_mult; arena->lg_dirty_mult = lg_dirty_mult;
arena_maybe_purge(arena); arena_maybe_purge(tsdn, arena);
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
return (false); return (false);
} }
void static void
arena_maybe_purge(arena_t *arena) arena_decay_deadline_init(arena_t *arena)
{
assert(opt_purge == purge_mode_decay);
/*
* Generate a new deadline that is uniformly random within the next
* epoch after the current one.
*/
nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
nstime_add(&arena->decay.deadline, &arena->decay.interval);
if (arena->decay.time > 0) {
nstime_t jitter;
nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
nstime_ns(&arena->decay.interval)));
nstime_add(&arena->decay.deadline, &jitter);
}
}
static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{
assert(opt_purge == purge_mode_decay);
return (nstime_compare(&arena->decay.deadline, time) <= 0);
}
static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
static const uint64_t h_steps[] = {
#define STEP(step, h, x, y) \
h,
SMOOTHSTEP
#undef STEP
};
uint64_t sum;
size_t npages_limit_backlog;
unsigned i;
assert(opt_purge == purge_mode_decay);
/*
* For each element of decay_backlog, multiply by the corresponding
* fixed-point smoothstep decay factor. Sum the products, then divide
* to round down to the nearest whole number of pages.
*/
sum = 0;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
sum += arena->decay.backlog[i] * h_steps[i];
npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
return (npages_limit_backlog);
}
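arena_decay_backlog_npages_limit() is a fixed-point dot product of the per-epoch dirty-page backlog with the SMOOTHSTEP coefficients: the newest epochs carry weights near one and the oldest near zero, so the permitted number of dirty pages falls off smoothly as their epochs age. A self-contained sketch of the computation (the fixed-point shift and the weight table are placeholders; jemalloc generates the real ones from SMOOTHSTEP):

#include <stddef.h>
#include <stdint.h>

#define BFP 24	/* placeholder for the role of SMOOTHSTEP_BFP */

/* Weighted backlog limit: floor(sum(backlog[i] * w[i]) / 2^BFP). */
static size_t
backlog_limit(const size_t *backlog, const uint64_t *w, size_t nsteps)
{
	uint64_t sum = 0;
	size_t i;

	for (i = 0; i < nsteps; i++)
		sum += (uint64_t)backlog[i] * w[i];
	return ((size_t)(sum >> BFP));
}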
static void
arena_decay_backlog_update_last(arena_t *arena)
{
size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
arena->ndirty - arena->decay.ndirty : 0;
arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
}
static void
arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
{
if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
sizeof(size_t));
} else {
size_t nadvance_z = (size_t)nadvance_u64;
assert((uint64_t)nadvance_z == nadvance_u64);
memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
(SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
if (nadvance_z > 1) {
memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
}
}
arena_decay_backlog_update_last(arena);
}
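Advancing the epoch by nadvance slides the fixed-length backlog window left by that many slots and zero-fills the vacated ones; arena_decay_backlog_update_last() then records the dirty pages produced during the just-finished epoch in the final slot. A small sketch with an illustrative window length:

#include <stddef.h>
#include <string.h>

#define NSTEPS 8	/* illustrative; jemalloc uses SMOOTHSTEP_NSTEPS */

static void
backlog_advance(size_t *backlog, size_t nadvance, size_t new_dirty)
{
	if (nadvance >= NSTEPS) {
		memset(backlog, 0, (NSTEPS - 1) * sizeof(size_t));
	} else {
		memmove(backlog, &backlog[nadvance],
		    (NSTEPS - nadvance) * sizeof(size_t));
		if (nadvance > 1) {
			memset(&backlog[NSTEPS - nadvance], 0,
			    (nadvance - 1) * sizeof(size_t));
		}
	}
	backlog[NSTEPS - 1] = new_dirty;	/* the just-finished epoch */
}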
static void
arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
{
uint64_t nadvance_u64;
nstime_t delta;
assert(opt_purge == purge_mode_decay);
assert(arena_decay_deadline_reached(arena, time));
nstime_copy(&delta, time);
nstime_subtract(&delta, &arena->decay.epoch);
nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
assert(nadvance_u64 > 0);
/* Add nadvance_u64 decay intervals to epoch. */
nstime_copy(&delta, &arena->decay.interval);
nstime_imultiply(&delta, nadvance_u64);
nstime_add(&arena->decay.epoch, &delta);
/* Set a new deadline. */
arena_decay_deadline_init(arena);
/* Update the backlog. */
arena_decay_backlog_update(arena, nadvance_u64);
}
static void
arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
{
size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
if (arena->ndirty > ndirty_limit)
arena_purge_to_limit(tsdn, arena, ndirty_limit);
arena->decay.ndirty = arena->ndirty;
}
static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
{
arena_decay_epoch_advance_helper(arena, time);
arena_decay_epoch_advance_purge(tsdn, arena);
}
static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{
arena->decay.time = decay_time;
if (decay_time > 0) {
nstime_init2(&arena->decay.interval, decay_time, 0);
nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
}
nstime_init(&arena->decay.epoch, 0);
nstime_update(&arena->decay.epoch);
arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
arena_decay_deadline_init(arena);
arena->decay.ndirty = arena->ndirty;
memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}
static bool
arena_decay_time_valid(ssize_t decay_time)
{
if (decay_time < -1)
return (false);
if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
return (true);
return (false);
}
ssize_t
arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
{
ssize_t decay_time;
malloc_mutex_lock(tsdn, &arena->lock);
decay_time = arena->decay.time;
malloc_mutex_unlock(tsdn, &arena->lock);
return (decay_time);
}
bool
arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
{
if (!arena_decay_time_valid(decay_time))
return (true);
malloc_mutex_lock(tsdn, &arena->lock);
/*
* Restart decay backlog from scratch, which may cause many dirty pages
* to be immediately purged. It would conceptually be possible to map
* the old backlog onto the new backlog, but there is no justification
* for such complexity since decay_time changes are intended to be
* infrequent, either between the {-1, 0, >0} states, or a one-time
* arbitrary change during initial arena configuration.
*/
arena_decay_init(arena, decay_time);
arena_maybe_purge(tsdn, arena);
malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
}
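arena_decay_time_set() is what the decay-time controls funnel into, so changing the setting at run time deliberately restarts the backlog rather than remapping it. A hedged usage sketch, assuming jemalloc 4.x's arena.<i>.decay_time mallctl (in Redis's vendored build the symbol carries the je_ prefix):

#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

/* Ask arena 0 to decay dirty pages over ~10 s (requires opt.purge=decay). */
static void
set_decay_time(void)
{
	ssize_t decay_time = 10;

	if (mallctl("arena.0.decay_time", NULL, NULL, &decay_time,
	    sizeof(decay_time)) != 0)
		fprintf(stderr, "arena.0.decay_time not available\n");
}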
static void
arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
{ {
assert(opt_purge == purge_mode_ratio);
/* Don't purge if the option is disabled. */ /* Don't purge if the option is disabled. */
if (arena->lg_dirty_mult < 0) if (arena->lg_dirty_mult < 0)
return; return;
/* Don't recursively purge. */
if (arena->purging)
return;
/* /*
* Iterate, since preventing recursive purging could otherwise leave too * Iterate, since preventing recursive purging could otherwise leave too
* many dirty pages. * many dirty pages.
...@@ -1228,8 +1463,66 @@ arena_maybe_purge(arena_t *arena) ...@@ -1228,8 +1463,66 @@ arena_maybe_purge(arena_t *arena)
*/ */
if (arena->ndirty <= threshold) if (arena->ndirty <= threshold)
return; return;
arena_purge(arena, false); arena_purge_to_limit(tsdn, arena, threshold);
}
}
static void
arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
{
nstime_t time;
assert(opt_purge == purge_mode_decay);
/* Purge all or nothing if the option is disabled. */
if (arena->decay.time <= 0) {
if (arena->decay.time == 0)
arena_purge_to_limit(tsdn, arena, 0);
return;
}
nstime_init(&time, 0);
nstime_update(&time);
if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
&time) > 0)) {
/*
* Time went backwards. Move the epoch back in time and
* generate a new deadline, with the expectation that time
* typically flows forward for long enough periods of time that
* epochs complete. Unfortunately, this strategy is susceptible
* to clock jitter triggering premature epoch advances, but
* clock jitter estimation and compensation isn't feasible here
* because calls into this code are event-driven.
*/
nstime_copy(&arena->decay.epoch, &time);
arena_decay_deadline_init(arena);
} else {
/* Verify that time does not go backwards. */
assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
} }
/*
* If the deadline has been reached, advance to the current epoch and
* purge to the new limit if necessary. Note that dirty pages created
* during the current epoch are not subject to purge until a future
* epoch, so as a result purging only happens during epoch advances.
*/
if (arena_decay_deadline_reached(arena, &time))
arena_decay_epoch_advance(tsdn, arena, &time);
}
void
arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
{
/* Don't recursively purge. */
if (arena->purging)
return;
if (opt_purge == purge_mode_ratio)
arena_maybe_purge_ratio(tsdn, arena);
else
arena_maybe_purge_decay(tsdn, arena);
} }
static size_t static size_t
...@@ -1253,49 +1546,29 @@ arena_dirty_count(arena_t *arena) ...@@ -1253,49 +1546,29 @@ arena_dirty_count(arena_t *arena)
arena_chunk_map_misc_t *miscelm = arena_chunk_map_misc_t *miscelm =
arena_rd_to_miscelm(rdelm); arena_rd_to_miscelm(rdelm);
size_t pageind = arena_miscelm_to_pageind(miscelm); size_t pageind = arena_miscelm_to_pageind(miscelm);
assert(arena_mapbits_allocated_get(chunk, pageind) == assert(arena_mapbits_allocated_get(chunk, pageind) ==
0); 0);
assert(arena_mapbits_large_get(chunk, pageind) == 0); assert(arena_mapbits_large_get(chunk, pageind) == 0);
assert(arena_mapbits_dirty_get(chunk, pageind) != 0); assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
npages = arena_mapbits_unallocated_size_get(chunk, npages = arena_mapbits_unallocated_size_get(chunk,
pageind) >> LG_PAGE; pageind) >> LG_PAGE;
} }
ndirty += npages; ndirty += npages;
} }
return (ndirty);
}
static size_t
arena_compute_npurge(arena_t *arena, bool all)
{
size_t npurge;
/*
* Compute the minimum number of pages that this thread should try to
* purge.
*/
if (!all) {
size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
threshold = threshold < chunk_npages ? chunk_npages : threshold;
npurge = arena->ndirty - threshold;
} else
npurge = arena->ndirty;
return (npurge); return (ndirty);
} }
static size_t static size_t
arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all, arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel, size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel) extent_node_t *purge_chunks_sentinel)
{ {
arena_runs_dirty_link_t *rdelm, *rdelm_next; arena_runs_dirty_link_t *rdelm, *rdelm_next;
extent_node_t *chunkselm; extent_node_t *chunkselm;
size_t nstashed = 0; size_t nstashed = 0;
/* Stash at least npurge pages. */ /* Stash runs/chunks according to ndirty_limit. */
for (rdelm = qr_next(&arena->runs_dirty, rd_link), for (rdelm = qr_next(&arena->runs_dirty, rd_link),
chunkselm = qr_next(&arena->chunks_cache, cc_link); chunkselm = qr_next(&arena->chunks_cache, cc_link);
rdelm != &arena->runs_dirty; rdelm = rdelm_next) { rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
...@@ -1304,24 +1577,32 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all, ...@@ -1304,24 +1577,32 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
if (rdelm == &chunkselm->rd) { if (rdelm == &chunkselm->rd) {
extent_node_t *chunkselm_next; extent_node_t *chunkselm_next;
bool zero; size_t sn;
bool zero, commit;
UNUSED void *chunk; UNUSED void *chunk;
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
if (opt_purge == purge_mode_decay && arena->ndirty -
(nstashed + npages) < ndirty_limit)
break;
chunkselm_next = qr_next(chunkselm, cc_link); chunkselm_next = qr_next(chunkselm, cc_link);
/* /*
* Allocate. chunkselm remains valid due to the * Allocate. chunkselm remains valid due to the
* dalloc_node=false argument to chunk_alloc_cache(). * dalloc_node=false argument to chunk_alloc_cache().
*/ */
zero = false; zero = false;
chunk = chunk_alloc_cache(arena, chunk_hooks, commit = false;
chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
extent_node_addr_get(chunkselm), extent_node_addr_get(chunkselm),
extent_node_size_get(chunkselm), chunksize, &zero, extent_node_size_get(chunkselm), chunksize, &sn,
false); &zero, &commit, false);
assert(chunk == extent_node_addr_get(chunkselm)); assert(chunk == extent_node_addr_get(chunkselm));
assert(zero == extent_node_zeroed_get(chunkselm)); assert(zero == extent_node_zeroed_get(chunkselm));
extent_node_dirty_insert(chunkselm, purge_runs_sentinel, extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
purge_chunks_sentinel); purge_chunks_sentinel);
npages = extent_node_size_get(chunkselm) >> LG_PAGE; assert(npages == (extent_node_size_get(chunkselm) >>
LG_PAGE));
chunkselm = chunkselm_next; chunkselm = chunkselm_next;
} else { } else {
arena_chunk_t *chunk = arena_chunk_t *chunk =
@@ -1334,6 +1615,9 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
arena_mapbits_unallocated_size_get(chunk, pageind); arena_mapbits_unallocated_size_get(chunk, pageind);
npages = run_size >> LG_PAGE; npages = run_size >> LG_PAGE;
if (opt_purge == purge_mode_decay && arena->ndirty -
(nstashed + npages) < ndirty_limit)
break;
assert(pageind + npages <= chunk_npages); assert(pageind + npages <= chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) == assert(arena_mapbits_dirty_get(chunk, pageind) ==
@@ -1344,7 +1628,7 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
* prior to allocation. * prior to allocation.
*/ */
if (chunk == arena->spare) if (chunk == arena->spare)
arena_chunk_alloc(arena); arena_chunk_alloc(tsdn, arena);
/* Temporarily allocate the free dirty run. */ /* Temporarily allocate the free dirty run. */
arena_run_split_large(arena, run, run_size, false); arena_run_split_large(arena, run, run_size, false);
@@ -1359,7 +1643,8 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
} }
nstashed += npages; nstashed += npages;
if (!all && nstashed >= npurge) if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
ndirty_limit)
break; break;
} }
@@ -1367,7 +1652,7 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
} }
static size_t static size_t
arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel, arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel) extent_node_t *purge_chunks_sentinel)
{ {
@@ -1379,7 +1664,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
nmadvise = 0; nmadvise = 0;
npurged = 0; npurged = 0;
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
for (rdelm = qr_next(purge_runs_sentinel, rd_link), for (rdelm = qr_next(purge_runs_sentinel, rd_link),
chunkselm = qr_next(purge_chunks_sentinel, cc_link); chunkselm = qr_next(purge_chunks_sentinel, cc_link);
rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) { rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
@@ -1408,6 +1693,17 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
run_size = arena_mapbits_large_size_get(chunk, pageind); run_size = arena_mapbits_large_size_get(chunk, pageind);
npages = run_size >> LG_PAGE; npages = run_size >> LG_PAGE;
/*
* If this is the first run purged within chunk, mark
* the chunk as non-huge. This will prevent all use of
* transparent huge pages for this chunk until the chunk
* as a whole is deallocated.
*/
if (chunk->hugepage) {
pages_nohuge(chunk, chunksize);
chunk->hugepage = false;
}
assert(pageind + npages <= chunk_npages); assert(pageind + npages <= chunk_npages);
assert(!arena_mapbits_decommitted_get(chunk, pageind)); assert(!arena_mapbits_decommitted_get(chunk, pageind));
assert(!arena_mapbits_decommitted_get(chunk, assert(!arena_mapbits_decommitted_get(chunk,
@@ -1418,7 +1714,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
flag_unzeroed = 0; flag_unzeroed = 0;
flags = CHUNK_MAP_DECOMMITTED; flags = CHUNK_MAP_DECOMMITTED;
} else { } else {
flag_unzeroed = chunk_purge_wrapper(arena, flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
chunk_hooks, chunk, chunksize, pageind << chunk_hooks, chunk, chunksize, pageind <<
LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0; LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
flags = flag_unzeroed; flags = flag_unzeroed;
@@ -1449,7 +1745,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (config_stats) if (config_stats)
nmadvise++; nmadvise++;
} }
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) { if (config_stats) {
arena->stats.nmadvise += nmadvise; arena->stats.nmadvise += nmadvise;
@@ -1460,7 +1756,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
} }
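
The new hugepage handling above demotes a chunk to regular pages the first time any of its runs is purged. A Linux-only standalone sketch of what that looks like at the madvise() level; jemalloc itself goes through its chunk hooks and pages_* wrappers rather than calling madvise() directly like this:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stddef.h>

static int
purge_range(void *addr, size_t len, int first_purge_in_chunk,
    void *chunk_addr, size_t chunk_len)
{
	/*
	 * Once any part of the chunk is purged, transparent huge pages stop
	 * making sense for it, so disable THP for the whole chunk where the
	 * advice flag is available.
	 */
	if (first_purge_in_chunk) {
#ifdef MADV_NOHUGEPAGE
		(void)madvise(chunk_addr, chunk_len, MADV_NOHUGEPAGE);
#endif
	}
	/* Release the physical pages backing the purged run. */
	return (madvise(addr, len, MADV_DONTNEED));
}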
static void static void
arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks, arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel, arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel) extent_node_t *purge_chunks_sentinel)
{ {
@@ -1477,13 +1773,14 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
cc_link); cc_link);
void *addr = extent_node_addr_get(chunkselm); void *addr = extent_node_addr_get(chunkselm);
size_t size = extent_node_size_get(chunkselm); size_t size = extent_node_size_get(chunkselm);
size_t sn = extent_node_sn_get(chunkselm);
bool zeroed = extent_node_zeroed_get(chunkselm); bool zeroed = extent_node_zeroed_get(chunkselm);
bool committed = extent_node_committed_get(chunkselm); bool committed = extent_node_committed_get(chunkselm);
extent_node_dirty_remove(chunkselm); extent_node_dirty_remove(chunkselm);
arena_node_dalloc(arena, chunkselm); arena_node_dalloc(tsdn, arena, chunkselm);
chunkselm = chunkselm_next; chunkselm = chunkselm_next;
chunk_dalloc_arena(arena, chunk_hooks, addr, size, chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
zeroed, committed); size, sn, zeroed, committed);
} else { } else {
arena_chunk_t *chunk = arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
@@ -1494,16 +1791,26 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
pageind) != 0); pageind) != 0);
arena_run_t *run = &miscelm->run; arena_run_t *run = &miscelm->run;
qr_remove(rdelm, rd_link); qr_remove(rdelm, rd_link);
arena_run_dalloc(arena, run, false, true, decommitted); arena_run_dalloc(tsdn, arena, run, false, true,
decommitted);
} }
} }
} }
/*
* NB: ndirty_limit is interpreted differently depending on opt_purge:
* - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
* desired state:
* (arena->ndirty <= ndirty_limit)
* - purge_mode_decay: Purge as many dirty runs/chunks as possible without
* violating the invariant:
* (arena->ndirty >= ndirty_limit)
*/
static void static void
arena_purge(arena_t *arena, bool all) arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
{ {
chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
size_t npurge, npurgeable, npurged; size_t npurge, npurged;
arena_runs_dirty_link_t purge_runs_sentinel; arena_runs_dirty_link_t purge_runs_sentinel;
extent_node_t purge_chunks_sentinel; extent_node_t purge_chunks_sentinel;
@@ -1517,34 +1824,183 @@ arena_purge(arena_t *arena, bool all)
size_t ndirty = arena_dirty_count(arena); size_t ndirty = arena_dirty_count(arena);
assert(ndirty == arena->ndirty); assert(ndirty == arena->ndirty);
} }
assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all); assert(opt_purge != purge_mode_ratio || (arena->nactive >>
arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
if (config_stats)
arena->stats.npurge++;
npurge = arena_compute_npurge(arena, all);
qr_new(&purge_runs_sentinel, rd_link); qr_new(&purge_runs_sentinel, rd_link);
extent_node_dirty_linkage_init(&purge_chunks_sentinel); extent_node_dirty_linkage_init(&purge_chunks_sentinel);
npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge, npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
&purge_runs_sentinel, &purge_chunks_sentinel); &purge_runs_sentinel, &purge_chunks_sentinel);
assert(npurgeable >= npurge); if (npurge == 0)
npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel, goto label_return;
&purge_chunks_sentinel); npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
assert(npurged == npurgeable); &purge_runs_sentinel, &purge_chunks_sentinel);
arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel, assert(npurged == npurge);
arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
&purge_chunks_sentinel); &purge_chunks_sentinel);
if (config_stats)
arena->stats.npurge++;
label_return:
arena->purging = false; arena->purging = false;
} }
void void
arena_purge_all(arena_t *arena) arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
{
malloc_mutex_lock(tsdn, &arena->lock);
if (all)
arena_purge_to_limit(tsdn, arena, 0);
else
arena_maybe_purge(tsdn, arena);
malloc_mutex_unlock(tsdn, &arena->lock);
}
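
The ndirty_limit comment above is easy to misread, so here is the stopping rule both purge modes reduce to, written out as a standalone predicate (names are illustrative; arena_purge() with all=true simply passes ndirty_limit == 0, which makes either mode purge everything it can):

#include <stdbool.h>
#include <stddef.h>

typedef enum { PURGE_RATIO, PURGE_DECAY } purge_mode_sketch_t;

/* May the next run/chunk holding npages dirty pages still be purged? */
static bool
may_purge_next(purge_mode_sketch_t mode, size_t ndirty, size_t npages,
    size_t ndirty_limit)
{
	if (mode == PURGE_RATIO) {
		/* Do as little work as needed to get back under the limit. */
		return (ndirty > ndirty_limit);
	}
	/* Decay mode must never drop ndirty below the floor it was given. */
	return (ndirty >= npages && ndirty - npages >= ndirty_limit);
}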
static void
arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
{
size_t pageind, npages;
cassert(config_prof);
assert(opt_prof);
/*
* Iterate over the allocated runs and remove profiled allocations from
* the sample set.
*/
for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
if (arena_mapbits_large_get(chunk, pageind) != 0) {
void *ptr = (void *)((uintptr_t)chunk + (pageind
<< LG_PAGE));
size_t usize = isalloc(tsd_tsdn(tsd), ptr,
config_prof);
prof_free(tsd, ptr, usize);
npages = arena_mapbits_large_size_get(chunk,
pageind) >> LG_PAGE;
} else {
/* Skip small run. */
size_t binind = arena_mapbits_binind_get(chunk,
pageind);
arena_bin_info_t *bin_info =
&arena_bin_info[binind];
npages = bin_info->run_size >> LG_PAGE;
}
} else {
/* Skip unallocated run. */
npages = arena_mapbits_unallocated_size_get(chunk,
pageind) >> LG_PAGE;
}
assert(pageind + npages <= chunk_npages);
}
}
void
arena_reset(tsd_t *tsd, arena_t *arena)
{ {
unsigned i;
extent_node_t *node;
/*
* Locking in this function is unintuitive. The caller guarantees that
* no concurrent operations are happening in this arena, but there are
* still reasons that some locking is necessary:
*
* - Some of the functions in the transitive closure of calls assume
* appropriate locks are held, and in some cases these locks are
* temporarily dropped to avoid lock order reversal or deadlock due to
* reentry.
* - mallctl("epoch", ...) may concurrently refresh stats. While
* strictly speaking this is a "concurrent operation", disallowing
* stats refreshes would impose an inconvenient burden.
*/
/* Remove large allocations from prof sample set. */
if (config_prof && opt_prof) {
ql_foreach(node, &arena->achunks, ql_link) {
arena_achunk_prof_reset(tsd, arena,
extent_node_addr_get(node));
}
}
/* Reset curruns for large size classes. */
if (config_stats) {
for (i = 0; i < nlclasses; i++)
arena->stats.lstats[i].curruns = 0;
}
/* Huge allocations. */
malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
ql_last(&arena->huge, ql_link)) {
void *ptr = extent_node_addr_get(node);
size_t usize;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
if (config_stats || (config_prof && opt_prof))
usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
/* Remove huge allocation from prof sample set. */
if (config_prof && opt_prof)
prof_free(tsd, ptr, usize);
huge_dalloc(tsd_tsdn(tsd), ptr);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
/* Cancel out unwanted effects on stats. */
if (config_stats)
arena_huge_reset_stats_cancel(arena, usize);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
/* Bins. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
bin->runcur = NULL;
arena_run_heap_new(&bin->runs);
if (config_stats) {
bin->stats.curregs = 0;
bin->stats.curruns = 0;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
/*
* Re-initialize runs_dirty such that the chunks_cache and runs_dirty
* chains directly correspond.
*/
qr_new(&arena->runs_dirty, rd_link);
for (node = qr_next(&arena->chunks_cache, cc_link);
node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
qr_new(&node->rd, rd_link);
qr_meld(&arena->runs_dirty, &node->rd, rd_link);
}
/* Arena chunks. */
for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
ql_last(&arena->achunks, ql_link)) {
ql_remove(&arena->achunks, node, ql_link);
arena_chunk_discard(tsd_tsdn(tsd), arena,
extent_node_addr_get(node));
}
/* Spare. */
if (arena->spare != NULL) {
arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
arena->spare = NULL;
}
malloc_mutex_lock(&arena->lock); assert(!arena->purging);
arena_purge(arena, true); arena->nactive = 0;
malloc_mutex_unlock(&arena->lock);
for (i = 0; i < NPSIZES; i++)
arena_run_heap_new(&arena->runs_avail[i]);
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
} }
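
arena_reset() is new in this import and is what the arena.<i>.reset mallctl calls into. A hedged usage sketch, assuming the standard (unprefixed) jemalloc API and the arenas.extend / arena.<i>.reset mallctl names shipped by this version; the caller is responsible for ensuring nothing else touches the arena while it is being reset:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	char name[64];

	/* Create a private arena so resetting it cannot affect other code. */
	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
		return (1);
	/*
	 * ... allocate from it with mallocx(size, MALLOCX_ARENA(arena_ind)),
	 * stop using those pointers, then discard everything at once:
	 */
	snprintf(name, sizeof(name), "arena.%u.reset", arena_ind);
	if (mallctl(name, NULL, NULL, NULL, 0) != 0)
		return (1);
	printf("arena %u reset\n", arena_ind);
	return (0);
}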
static void static void
@@ -1660,21 +2116,9 @@ arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
return (size); return (size);
} }
static bool
arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t run_ind = arena_miscelm_to_pageind(miscelm);
size_t offset = run_ind << LG_PAGE;
size_t length = arena_run_size_get(arena, chunk, run, run_ind);
return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length,
arena->ind));
}
static void static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
bool decommitted) bool cleaned, bool decommitted)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
arena_chunk_map_misc_t *miscelm; arena_chunk_map_misc_t *miscelm;
@@ -1687,8 +2131,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
assert(run_ind < chunk_npages); assert(run_ind < chunk_npages);
size = arena_run_size_get(arena, chunk, run, run_ind); size = arena_run_size_get(arena, chunk, run, run_ind);
run_pages = (size >> LG_PAGE); run_pages = (size >> LG_PAGE);
arena_cactive_update(arena, 0, run_pages); arena_nactive_sub(arena, run_pages);
arena->nactive -= run_pages;
/* /*
* The run is dirty if the caller claims to have dirtied it, as well as * The run is dirty if the caller claims to have dirtied it, as well as
@@ -1735,7 +2178,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
if (size == arena_maxrun) { if (size == arena_maxrun) {
assert(run_ind == map_bias); assert(run_ind == map_bias);
assert(run_pages == (arena_maxrun >> LG_PAGE)); assert(run_pages == (arena_maxrun >> LG_PAGE));
arena_chunk_dalloc(arena, chunk); arena_chunk_dalloc(tsdn, arena, chunk);
} }
/* /*
@@ -1746,21 +2189,12 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
* chances of spuriously crossing the dirty page purging threshold. * chances of spuriously crossing the dirty page purging threshold.
*/ */
if (dirty) if (dirty)
arena_maybe_purge(arena); arena_maybe_purge(tsdn, arena);
}
static void
arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run)
{
bool committed = arena_run_decommit(arena, chunk, run);
arena_run_dalloc(arena, run, committed, false, !committed);
} }
static void static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
size_t oldsize, size_t newsize) arena_run_t *run, size_t oldsize, size_t newsize)
{ {
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t pageind = arena_miscelm_to_pageind(miscelm); size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -1795,12 +2229,13 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
pageind+head_npages))); pageind+head_npages)));
arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0)); arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
0));
} }
static void static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
size_t oldsize, size_t newsize, bool dirty) arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
{ {
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t pageind = arena_miscelm_to_pageind(miscelm); size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -1837,20 +2272,10 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
pageind+head_npages))); pageind+head_npages)));
tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages); tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
tail_run = &tail_miscelm->run; tail_run = &tail_miscelm->run;
arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted != arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
0)); != 0));
}
static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
if (miscelm != NULL)
return (&miscelm->run);
return (NULL);
} }
static void static void
@@ -1858,35 +2283,25 @@ arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{ {
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
assert(arena_run_tree_search(&bin->runs, miscelm) == NULL); arena_run_heap_insert(&bin->runs, miscelm);
arena_run_tree_insert(&bin->runs, miscelm);
}
static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
arena_run_tree_remove(&bin->runs, miscelm);
} }
static arena_run_t * static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin) arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{ {
arena_run_t *run = arena_bin_runs_first(bin); arena_chunk_map_misc_t *miscelm;
if (run != NULL) {
arena_bin_runs_remove(bin, run); miscelm = arena_run_heap_remove_first(&bin->runs);
if (config_stats) if (miscelm == NULL)
bin->stats.reruns++; return (NULL);
} if (config_stats)
return (run); bin->stats.reruns++;
return (&miscelm->run);
} }
static arena_run_t * static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
{ {
arena_run_t *run; arena_run_t *run;
szind_t binind; szind_t binind;
@@ -1902,19 +2317,19 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
bin_info = &arena_bin_info[binind]; bin_info = &arena_bin_info[binind];
/* Allocate a new run. */ /* Allocate a new run. */
malloc_mutex_unlock(&bin->lock); malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/ /******************************/
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
run = arena_run_alloc_small(arena, bin_info->run_size, binind); run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
if (run != NULL) { if (run != NULL) {
/* Initialize run internals. */ /* Initialize run internals. */
run->binind = binind; run->binind = binind;
run->nfree = bin_info->nregs; run->nfree = bin_info->nregs;
bitmap_init(run->bitmap, &bin_info->bitmap_info); bitmap_init(run->bitmap, &bin_info->bitmap_info);
} }
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
/********************************/ /********************************/
malloc_mutex_lock(&bin->lock); malloc_mutex_lock(tsdn, &bin->lock);
if (run != NULL) { if (run != NULL) {
if (config_stats) { if (config_stats) {
bin->stats.nruns++; bin->stats.nruns++;
@@ -1937,7 +2352,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void * static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
{ {
szind_t binind; szind_t binind;
arena_bin_info_t *bin_info; arena_bin_info_t *bin_info;
@@ -1946,7 +2361,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
binind = arena_bin_index(arena, bin); binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind]; bin_info = &arena_bin_info[binind];
bin->runcur = NULL; bin->runcur = NULL;
run = arena_bin_nonfull_run_get(arena, bin); run = arena_bin_nonfull_run_get(tsdn, arena, bin);
if (bin->runcur != NULL && bin->runcur->nfree > 0) { if (bin->runcur != NULL && bin->runcur->nfree > 0) {
/* /*
* Another thread updated runcur while this one ran without the * Another thread updated runcur while this one ran without the
@@ -1967,10 +2382,11 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
* were just deallocated from the run. * were just deallocated from the run.
*/ */
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
if (run->nfree == bin_info->nregs) if (run->nfree == bin_info->nregs) {
arena_dalloc_bin_run(arena, chunk, run, bin); arena_dalloc_bin_run(tsdn, arena, chunk, run,
else bin);
arena_bin_lower_run(arena, chunk, run, bin); } else
arena_bin_lower_run(arena, run, bin);
} }
return (ret); return (ret);
} }
@@ -1986,18 +2402,18 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
} }
void void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind, arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
uint64_t prof_accumbytes) szind_t binind, uint64_t prof_accumbytes)
{ {
unsigned i, nfill; unsigned i, nfill;
arena_bin_t *bin; arena_bin_t *bin;
assert(tbin->ncached == 0); assert(tbin->ncached == 0);
if (config_prof && arena_prof_accum(arena, prof_accumbytes)) if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
prof_idump(); prof_idump(tsdn);
bin = &arena->bins[binind]; bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock); malloc_mutex_lock(tsdn, &bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
tbin->lg_fill_div); i < nfill; i++) { tbin->lg_fill_div); i < nfill; i++) {
arena_run_t *run; arena_run_t *run;
@@ -2005,16 +2421,15 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
if ((run = bin->runcur) != NULL && run->nfree > 0) if ((run = bin->runcur) != NULL && run->nfree > 0)
ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else else
ptr = arena_bin_malloc_hard(arena, bin); ptr = arena_bin_malloc_hard(tsdn, arena, bin);
if (ptr == NULL) { if (ptr == NULL) {
/* /*
* OOM. tbin->avail isn't yet filled down to its first * OOM. tbin->avail isn't yet filled down to its first
* element, so the successful allocations (if any) must * element, so the successful allocations (if any) must
* be moved to the base of tbin->avail before bailing * be moved just before tbin->avail before bailing out.
* out.
*/ */
if (i > 0) { if (i > 0) {
memmove(tbin->avail, &tbin->avail[nfill - i], memmove(tbin->avail - i, tbin->avail - nfill,
i * sizeof(void *)); i * sizeof(void *));
} }
break; break;
@@ -2024,7 +2439,7 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
true); true);
} }
/* Insert such that low regions get used first. */ /* Insert such that low regions get used first. */
tbin->avail[nfill - 1 - i] = ptr; *(tbin->avail - nfill + i) = ptr;
} }
if (config_stats) { if (config_stats) {
bin->stats.nmalloc += i; bin->stats.nmalloc += i;
@@ -2033,29 +2448,31 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
bin->stats.nfills++; bin->stats.nfills++;
tbin->tstats.nrequests = 0; tbin->tstats.nrequests = 0;
} }
malloc_mutex_unlock(&bin->lock); malloc_mutex_unlock(tsdn, &bin->lock);
tbin->ncached = i; tbin->ncached = i;
arena_decay_tick(tsdn, arena);
} }
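
The memmove and fill changes above come from the new tcache layout in which tbin->avail points one element past the cache's slot array and a fill writes objects at avail - nfill upward. A simplified standalone sketch of that layout (hypothetical struct, not the real tcache_bin_t):

#include <stddef.h>

typedef struct {
	void		**avail;	/* points just past the slot array */
	unsigned	ncached;
} tbin_sketch_t;

/* Fill nfill objects; objs[] is assumed ordered lowest address first. */
static void
tbin_fill(tbin_sketch_t *tbin, void **objs, unsigned nfill)
{
	unsigned i;

	for (i = 0; i < nfill; i++)
		*(tbin->avail - nfill + i) = objs[i];
	tbin->ncached = nfill;
}

/* Pop from avail - ncached, so low-addressed regions are handed out first. */
static void *
tbin_pop(tbin_sketch_t *tbin)
{
	if (tbin->ncached == 0)
		return (NULL);
	return (*(tbin->avail - tbin->ncached--));
}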
void void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{ {
size_t redzone_size = bin_info->redzone_size;
if (zero) { if (zero) {
size_t redzone_size = bin_info->redzone_size; memset((void *)((uintptr_t)ptr - redzone_size),
memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, JEMALLOC_ALLOC_JUNK, redzone_size);
redzone_size); memset((void *)((uintptr_t)ptr + bin_info->reg_size),
memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, JEMALLOC_ALLOC_JUNK, redzone_size);
redzone_size);
} else { } else {
memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, memset((void *)((uintptr_t)ptr - redzone_size),
bin_info->reg_interval); JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
} }
} }
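
arena_alloc_junk_small() above fills the redzones (and, for non-zeroed allocations, the whole region interval) with JEMALLOC_ALLOC_JUNK. A standalone sketch of the region layout it assumes, with a local constant standing in for JEMALLOC_ALLOC_JUNK:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define ALLOC_JUNK	0xa5	/* stand-in for JEMALLOC_ALLOC_JUNK */

/*
 * ptr points at the reg_size usable bytes; a redzone precedes and follows
 * them within the region's reg_interval footprint.
 */
static void
junk_small_alloc(void *ptr, size_t reg_size, size_t reg_interval,
    size_t redzone_size, bool zero)
{
	if (zero) {
		/* Payload must stay zeroed; junk only the redzones. */
		memset((uint8_t *)ptr - redzone_size, ALLOC_JUNK, redzone_size);
		memset((uint8_t *)ptr + reg_size, ALLOC_JUNK, redzone_size);
	} else {
		/* Junk the whole interval, redzones included. */
		memset((uint8_t *)ptr - redzone_size, ALLOC_JUNK, reg_interval);
	}
}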
#ifdef JEMALLOC_JET #ifdef JEMALLOC_JET
#undef arena_redzone_corruption #undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) #define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
#endif #endif
static void static void
arena_redzone_corruption(void *ptr, size_t usize, bool after, arena_redzone_corruption(void *ptr, size_t usize, bool after,
@@ -2070,7 +2487,7 @@ arena_redzone_corruption(void *ptr, size_t usize, bool after,
#undef arena_redzone_corruption #undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption = arena_redzone_corruption_t *arena_redzone_corruption =
JEMALLOC_N(arena_redzone_corruption_impl); JEMALLOC_N(n_arena_redzone_corruption);
#endif #endif
static void static void
@@ -2085,22 +2502,22 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
for (i = 1; i <= redzone_size; i++) { for (i = 1; i <= redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
if (*byte != 0xa5) { if (*byte != JEMALLOC_ALLOC_JUNK) {
error = true; error = true;
arena_redzone_corruption(ptr, size, false, i, arena_redzone_corruption(ptr, size, false, i,
*byte); *byte);
if (reset) if (reset)
*byte = 0xa5; *byte = JEMALLOC_ALLOC_JUNK;
} }
} }
for (i = 0; i < redzone_size; i++) { for (i = 0; i < redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
if (*byte != 0xa5) { if (*byte != JEMALLOC_ALLOC_JUNK) {
error = true; error = true;
arena_redzone_corruption(ptr, size, true, i, arena_redzone_corruption(ptr, size, true, i,
*byte); *byte);
if (reset) if (reset)
*byte = 0xa5; *byte = JEMALLOC_ALLOC_JUNK;
} }
} }
} }
@@ -2111,7 +2528,7 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
#ifdef JEMALLOC_JET #ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small #undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) #define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
#endif #endif
void void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
@@ -2119,14 +2536,14 @@ arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
size_t redzone_size = bin_info->redzone_size; size_t redzone_size = bin_info->redzone_size;
arena_redzones_validate(ptr, bin_info, false); arena_redzones_validate(ptr, bin_info, false);
memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
bin_info->reg_interval); bin_info->reg_interval);
} }
#ifdef JEMALLOC_JET #ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small #undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small = arena_dalloc_junk_small_t *arena_dalloc_junk_small =
JEMALLOC_N(arena_dalloc_junk_small_impl); JEMALLOC_N(n_arena_dalloc_junk_small);
#endif #endif
void void
@@ -2144,27 +2561,26 @@ arena_quarantine_junk_small(void *ptr, size_t usize)
arena_redzones_validate(ptr, bin_info, true); arena_redzones_validate(ptr, bin_info, true);
} }
void * static void *
arena_malloc_small(arena_t *arena, size_t size, bool zero) arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
{ {
void *ret; void *ret;
arena_bin_t *bin; arena_bin_t *bin;
size_t usize;
arena_run_t *run; arena_run_t *run;
szind_t binind;
binind = size2index(size);
assert(binind < NBINS); assert(binind < NBINS);
bin = &arena->bins[binind]; bin = &arena->bins[binind];
size = index2size(binind); usize = index2size(binind);
malloc_mutex_lock(&bin->lock); malloc_mutex_lock(tsdn, &bin->lock);
if ((run = bin->runcur) != NULL && run->nfree > 0) if ((run = bin->runcur) != NULL && run->nfree > 0)
ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else else
ret = arena_bin_malloc_hard(arena, bin); ret = arena_bin_malloc_hard(tsdn, arena, bin);
if (ret == NULL) { if (ret == NULL) {
malloc_mutex_unlock(&bin->lock); malloc_mutex_unlock(tsdn, &bin->lock);
return (NULL); return (NULL);
} }
@@ -2173,9 +2589,9 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
bin->stats.nrequests++; bin->stats.nrequests++;
bin->stats.curregs++; bin->stats.curregs++;
} }
malloc_mutex_unlock(&bin->lock); malloc_mutex_unlock(tsdn, &bin->lock);
if (config_prof && !isthreaded && arena_prof_accum(arena, size)) if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
prof_idump(); prof_idump(tsdn);
if (!zero) { if (!zero) {
if (config_fill) { if (config_fill) {
@@ -2183,34 +2599,35 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
arena_alloc_junk_small(ret, arena_alloc_junk_small(ret,
&arena_bin_info[binind], false); &arena_bin_info[binind], false);
} else if (unlikely(opt_zero)) } else if (unlikely(opt_zero))
memset(ret, 0, size); memset(ret, 0, usize);
} }
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
} else { } else {
if (config_fill && unlikely(opt_junk_alloc)) { if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind], arena_alloc_junk_small(ret, &arena_bin_info[binind],
true); true);
} }
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
memset(ret, 0, size); memset(ret, 0, usize);
} }
arena_decay_tick(tsdn, arena);
return (ret); return (ret);
} }
void * void *
arena_malloc_large(arena_t *arena, size_t size, bool zero) arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
{ {
void *ret; void *ret;
size_t usize; size_t usize;
uintptr_t random_offset; uintptr_t random_offset;
arena_run_t *run; arena_run_t *run;
arena_chunk_map_misc_t *miscelm; arena_chunk_map_misc_t *miscelm;
UNUSED bool idump; UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
/* Large allocation. */ /* Large allocation. */
usize = s2u(size); usize = index2size(binind);
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
if (config_cache_oblivious) { if (config_cache_oblivious) {
uint64_t r; uint64_t r;
@@ -2219,22 +2636,21 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
* that is a multiple of the cacheline size, e.g. [0 .. 63) * 64 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
* for 4 KiB pages and 64-byte cachelines. * for 4 KiB pages and 64-byte cachelines.
*/ */
prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state, r = prng_lg_range_zu(&arena->offset_state, LG_PAGE -
UINT64_C(6364136223846793009), LG_CACHELINE, false);
UINT64_C(1442695040888963409));
random_offset = ((uintptr_t)r) << LG_CACHELINE; random_offset = ((uintptr_t)r) << LG_CACHELINE;
} else } else
random_offset = 0; random_offset = 0;
run = arena_run_alloc_large(arena, usize + large_pad, zero); run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
if (run == NULL) { if (run == NULL) {
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
return (NULL); return (NULL);
} }
miscelm = arena_run_to_miscelm(run); miscelm = arena_run_to_miscelm(run);
ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
random_offset); random_offset);
if (config_stats) { if (config_stats) {
szind_t index = size2index(usize) - NBINS; szind_t index = binind - NBINS;
arena->stats.nmalloc_large++; arena->stats.nmalloc_large++;
arena->stats.nrequests_large++; arena->stats.nrequests_large++;
@@ -2245,25 +2661,45 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
} }
if (config_prof) if (config_prof)
idump = arena_prof_accum_locked(arena, usize); idump = arena_prof_accum_locked(arena, usize);
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
if (config_prof && idump) if (config_prof && idump)
prof_idump(); prof_idump(tsdn);
if (!zero) { if (!zero) {
if (config_fill) { if (config_fill) {
if (unlikely(opt_junk_alloc)) if (unlikely(opt_junk_alloc))
memset(ret, 0xa5, usize); memset(ret, JEMALLOC_ALLOC_JUNK, usize);
else if (unlikely(opt_zero)) else if (unlikely(opt_zero))
memset(ret, 0, usize); memset(ret, 0, usize);
} }
} }
arena_decay_tick(tsdn, arena);
return (ret); return (ret);
} }
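
The cache-oblivious branch above offsets each large allocation by a random multiple of the cacheline size within the first page. A standalone sketch of that offset computation (rand() stands in for jemalloc's per-arena PRNG state, and the LG_* values assume 4 KiB pages with 64-byte cachelines):

#include <stdint.h>
#include <stdlib.h>

#define LG_PAGE		12
#define LG_CACHELINE	6

static uintptr_t
random_cacheline_offset(void)
{
	/* Pick r in [0 .. 64), then scale to a multiple of 64 below 4096. */
	uintptr_t r = (uintptr_t)rand() &
	    ((1u << (LG_PAGE - LG_CACHELINE)) - 1);
	return (r << LG_CACHELINE);
}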
void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
bool zero)
{
assert(!tsdn_null(tsdn) || arena != NULL);
if (likely(!tsdn_null(tsdn)))
arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL))
return (NULL);
if (likely(size <= SMALL_MAXCLASS))
return (arena_malloc_small(tsdn, arena, ind, zero));
if (likely(size <= large_maxclass))
return (arena_malloc_large(tsdn, arena, ind, zero));
return (huge_malloc(tsdn, arena, index2size(ind), zero));
}
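
arena_malloc_hard() routes a request to the small, large, or huge path purely by size class. The same decision restated as a standalone classifier (thresholds are passed in rather than taken from SMALL_MAXCLASS and large_maxclass):

#include <stddef.h>

typedef enum { ALLOC_SMALL, ALLOC_LARGE, ALLOC_HUGE } alloc_path_t;

static alloc_path_t
classify_request(size_t size, size_t small_maxclass, size_t large_maxclass)
{
	if (size <= small_maxclass)
		return (ALLOC_SMALL);	/* per-bin runs, tcache-able */
	if (size <= large_maxclass)
		return (ALLOC_LARGE);	/* whole runs within an arena chunk */
	return (ALLOC_HUGE);		/* dedicated chunk-aligned mapping */
}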
/* Only handles large allocations that require more than page alignment. */ /* Only handles large allocations that require more than page alignment. */
static void * static void *
arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero) bool zero)
{ {
void *ret; void *ret;
@@ -2273,19 +2709,21 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena_chunk_map_misc_t *miscelm; arena_chunk_map_misc_t *miscelm;
void *rpages; void *rpages;
assert(!tsdn_null(tsdn) || arena != NULL);
assert(usize == PAGE_CEILING(usize)); assert(usize == PAGE_CEILING(usize));
arena = arena_choose(tsd, arena); if (likely(!tsdn_null(tsdn)))
arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL)) if (unlikely(arena == NULL))
return (NULL); return (NULL);
alignment = PAGE_CEILING(alignment); alignment = PAGE_CEILING(alignment);
alloc_size = usize + large_pad + alignment - PAGE; alloc_size = usize + large_pad + alignment - PAGE;
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
run = arena_run_alloc_large(arena, alloc_size, false); run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
if (run == NULL) { if (run == NULL) {
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
return (NULL); return (NULL);
} }
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
@@ -2300,16 +2738,16 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena_chunk_map_misc_t *head_miscelm = miscelm; arena_chunk_map_misc_t *head_miscelm = miscelm;
arena_run_t *head_run = run; arena_run_t *head_run = run;
miscelm = arena_miscelm_get(chunk, miscelm = arena_miscelm_get_mutable(chunk,
arena_miscelm_to_pageind(head_miscelm) + (leadsize >> arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
LG_PAGE)); LG_PAGE));
run = &miscelm->run; run = &miscelm->run;
arena_run_trim_head(arena, chunk, head_run, alloc_size, arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
alloc_size - leadsize); alloc_size - leadsize);
} }
if (trailsize != 0) { if (trailsize != 0) {
arena_run_trim_tail(arena, chunk, run, usize + large_pad + arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
trailsize, usize + large_pad, false); trailsize, usize + large_pad, false);
} }
if (arena_run_init_large(arena, run, usize + large_pad, zero)) { if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
@@ -2320,8 +2758,8 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
run_ind) != 0); run_ind) != 0);
assert(decommitted); /* Cause of OOM. */ assert(decommitted); /* Cause of OOM. */
arena_run_dalloc(arena, run, dirty, false, decommitted); arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
return (NULL); return (NULL);
} }
ret = arena_miscelm_to_rpages(miscelm); ret = arena_miscelm_to_rpages(miscelm);
@@ -2336,19 +2774,20 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++; arena->stats.lstats[index].curruns++;
} }
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
if (config_fill && !zero) { if (config_fill && !zero) {
if (unlikely(opt_junk_alloc)) if (unlikely(opt_junk_alloc))
memset(ret, 0xa5, usize); memset(ret, JEMALLOC_ALLOC_JUNK, usize);
else if (unlikely(opt_zero)) else if (unlikely(opt_zero))
memset(ret, 0, usize); memset(ret, 0, usize);
} }
arena_decay_tick(tsdn, arena);
return (ret); return (ret);
} }
void * void *
arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero, tcache_t *tcache) bool zero, tcache_t *tcache)
{ {
void *ret; void *ret;
@@ -2356,7 +2795,8 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
&& (usize & PAGE_MASK) == 0))) { && (usize & PAGE_MASK) == 0))) {
/* Small; alignment doesn't require special run placement. */ /* Small; alignment doesn't require special run placement. */
ret = arena_malloc(tsd, arena, usize, zero, tcache); ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
tcache, true);
} else if (usize <= large_maxclass && alignment <= PAGE) { } else if (usize <= large_maxclass && alignment <= PAGE) {
/* /*
* Large; alignment doesn't require special run placement. * Large; alignment doesn't require special run placement.
@@ -2364,25 +2804,25 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
* the base of the run, so do some bit manipulation to retrieve * the base of the run, so do some bit manipulation to retrieve
* the base. * the base.
*/ */
ret = arena_malloc(tsd, arena, usize, zero, tcache); ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
tcache, true);
if (config_cache_oblivious) if (config_cache_oblivious)
ret = (void *)((uintptr_t)ret & ~PAGE_MASK); ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
} else { } else {
if (likely(usize <= large_maxclass)) { if (likely(usize <= large_maxclass)) {
ret = arena_palloc_large(tsd, arena, usize, alignment, ret = arena_palloc_large(tsdn, arena, usize, alignment,
zero); zero);
} else if (likely(alignment <= chunksize)) } else if (likely(alignment <= chunksize))
ret = huge_malloc(tsd, arena, usize, zero, tcache); ret = huge_malloc(tsdn, arena, usize, zero);
else { else {
ret = huge_palloc(tsd, arena, usize, alignment, zero, ret = huge_palloc(tsdn, arena, usize, alignment, zero);
tcache);
} }
} }
return (ret); return (ret);
} }
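
arena_palloc_large() obtains alignment by over-allocating usize + alignment - PAGE (plus large_pad) and trimming the misaligned head and unused tail of the run. A standalone sketch of that arithmetic (helper names are illustrative):

#include <stddef.h>
#include <stdint.h>

#define ALIGNMENT_CEILING(s, a)	(((s) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

static void
split_for_alignment(uintptr_t run_base, size_t alloc_size, size_t usize,
    size_t alignment, uintptr_t *ret, size_t *leadsize, size_t *trailsize)
{
	/* First address at or above run_base satisfying the alignment. */
	*ret = ALIGNMENT_CEILING(run_base, alignment);
	*leadsize = *ret - run_base;			/* trimmed off the front */
	*trailsize = alloc_size - *leadsize - usize;	/* returned at the back */
}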
void void
arena_prof_promoted(const void *ptr, size_t size) arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
size_t pageind; size_t pageind;
@@ -2391,8 +2831,8 @@ arena_prof_promoted(const void *ptr, size_t size)
cassert(config_prof); cassert(config_prof);
assert(ptr != NULL); assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr); assert(CHUNK_ADDR2BASE(ptr) != ptr);
assert(isalloc(ptr, false) == LARGE_MINCLASS); assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
assert(isalloc(ptr, true) == LARGE_MINCLASS); assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
assert(size <= SMALL_MAXCLASS); assert(size <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@@ -2401,8 +2841,8 @@ arena_prof_promoted(const void *ptr, size_t size)
assert(binind < NBINS); assert(binind < NBINS);
arena_mapbits_large_binind_set(chunk, pageind, binind); arena_mapbits_large_binind_set(chunk, pageind, binind);
assert(isalloc(ptr, false) == LARGE_MINCLASS); assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
assert(isalloc(ptr, true) == size); assert(isalloc(tsdn, ptr, true) == size);
} }
static void static void
@@ -2418,48 +2858,51 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
&chunk->node), bin); &chunk->node), bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind]; arena_bin_info_t *bin_info = &arena_bin_info[binind];
/*
* The following block's conditional is necessary because if the
* run only contains one region, then it never gets inserted
* into the non-full runs tree.
*/
if (bin_info->nregs != 1) { if (bin_info->nregs != 1) {
/* arena_chunk_map_misc_t *miscelm =
* This block's conditional is necessary because if the arena_run_to_miscelm(run);
* run only contains one region, then it never gets
* inserted into the non-full runs tree. arena_run_heap_remove(&bin->runs, miscelm);
*/
arena_bin_runs_remove(bin, run);
} }
} }
} }
static void static void
arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena_bin_t *bin) arena_run_t *run, arena_bin_t *bin)
{ {
assert(run != bin->runcur); assert(run != bin->runcur);
assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
NULL);
malloc_mutex_unlock(&bin->lock); malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/ /******************************/
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
arena_run_dalloc_decommit(arena, chunk, run); arena_run_dalloc(tsdn, arena, run, true, false, false);
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
/****************************/ /****************************/
malloc_mutex_lock(&bin->lock); malloc_mutex_lock(tsdn, &bin->lock);
if (config_stats) if (config_stats)
bin->stats.curruns--; bin->stats.curruns--;
} }
static void static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
arena_bin_t *bin)
{ {
/* /*
* Make sure that if bin->runcur is non-NULL, it refers to the lowest * Make sure that if bin->runcur is non-NULL, it refers to the
* non-full run. It is okay to NULL runcur out rather than proactively * oldest/lowest non-full run. It is okay to NULL runcur out rather
* keeping it pointing at the lowest non-full run. * than proactively keeping it pointing at the oldest/lowest non-full
* run.
*/ */
if ((uintptr_t)run < (uintptr_t)bin->runcur) { if (bin->runcur != NULL &&
arena_snad_comp(arena_run_to_miscelm(bin->runcur),
arena_run_to_miscelm(run)) > 0) {
/* Switch runcur. */ /* Switch runcur. */
if (bin->runcur->nfree > 0) if (bin->runcur->nfree > 0)
arena_bin_runs_insert(bin, bin->runcur); arena_bin_runs_insert(bin, bin->runcur);
@@ -2471,8 +2914,8 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
} }
static void static void
arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena_chunk_map_bits_t *bitselm, bool junked) void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
{ {
size_t pageind, rpages_ind; size_t pageind, rpages_ind;
arena_run_t *run; arena_run_t *run;
@@ -2482,7 +2925,7 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
run = &arena_miscelm_get(chunk, rpages_ind)->run; run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
binind = run->binind; binind = run->binind;
bin = &arena->bins[binind]; bin = &arena->bins[binind];
bin_info = &arena_bin_info[binind]; bin_info = &arena_bin_info[binind];
@@ -2493,9 +2936,9 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_run_reg_dalloc(run, ptr); arena_run_reg_dalloc(run, ptr);
if (run->nfree == bin_info->nregs) { if (run->nfree == bin_info->nregs) {
arena_dissociate_bin_run(chunk, run, bin); arena_dissociate_bin_run(chunk, run, bin);
arena_dalloc_bin_run(arena, chunk, run, bin); arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
} else if (run->nfree == 1 && run != bin->runcur) } else if (run->nfree == 1 && run != bin->runcur)
arena_bin_lower_run(arena, chunk, run, bin); arena_bin_lower_run(arena, run, bin);
if (config_stats) { if (config_stats) {
bin->stats.ndalloc++; bin->stats.ndalloc++;
...@@ -2504,15 +2947,15 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, ...@@ -2504,15 +2947,15 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
} }
void void
arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
arena_chunk_map_bits_t *bitselm) arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
{ {
arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true); arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
} }
void void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_bits_t *bitselm) size_t pageind, arena_chunk_map_bits_t *bitselm)
{ {
arena_run_t *run; arena_run_t *run;
@@ -2520,16 +2963,16 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t rpages_ind; size_t rpages_ind;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
run = &arena_miscelm_get(chunk, rpages_ind)->run; run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
bin = &arena->bins[run->binind]; bin = &arena->bins[run->binind];
malloc_mutex_lock(&bin->lock); malloc_mutex_lock(tsdn, &bin->lock);
arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false); arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
malloc_mutex_unlock(&bin->lock); malloc_mutex_unlock(tsdn, &bin->lock);
} }
void void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
size_t pageind) void *ptr, size_t pageind)
{ {
arena_chunk_map_bits_t *bitselm; arena_chunk_map_bits_t *bitselm;
@@ -2538,34 +2981,36 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind)) != BININD_INVALID); pageind)) != BININD_INVALID);
} }
bitselm = arena_bitselm_get(chunk, pageind); bitselm = arena_bitselm_get_mutable(chunk, pageind);
arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm); arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
arena_decay_tick(tsdn, arena);
} }
#ifdef JEMALLOC_JET #ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large #undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) #define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
#endif #endif
void void
arena_dalloc_junk_large(void *ptr, size_t usize) arena_dalloc_junk_large(void *ptr, size_t usize)
{ {
if (config_fill && unlikely(opt_junk_free)) if (config_fill && unlikely(opt_junk_free))
memset(ptr, 0x5a, usize); memset(ptr, JEMALLOC_FREE_JUNK, usize);
} }
#ifdef JEMALLOC_JET #ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large #undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large = arena_dalloc_junk_large_t *arena_dalloc_junk_large =
JEMALLOC_N(arena_dalloc_junk_large_impl); JEMALLOC_N(n_arena_dalloc_junk_large);
#endif #endif
static void static void
arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
void *ptr, bool junked) arena_chunk_t *chunk, void *ptr, bool junked)
{ {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
pageind);
arena_run_t *run = &miscelm->run; arena_run_t *run = &miscelm->run;
if (config_fill || config_stats) { if (config_fill || config_stats) {
@@ -2584,32 +3029,35 @@ arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
} }
} }
arena_run_dalloc_decommit(arena, chunk, run); arena_run_dalloc(tsdn, arena, run, true, false, false);
} }
void void
arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
void *ptr) arena_chunk_t *chunk, void *ptr)
{ {
arena_dalloc_large_locked_impl(arena, chunk, ptr, true); arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
} }
void void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
void *ptr)
{ {
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
arena_dalloc_large_locked_impl(arena, chunk, ptr, false); arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
arena_decay_tick(tsdn, arena);
} }
static void static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
size_t oldsize, size_t size) void *ptr, size_t oldsize, size_t size)
{ {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
pageind);
arena_run_t *run = &miscelm->run; arena_run_t *run = &miscelm->run;
assert(size < oldsize); assert(size < oldsize);
...@@ -2618,8 +3066,8 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, ...@@ -2618,8 +3066,8 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
* Shrink the run, and make trailing pages available for other * Shrink the run, and make trailing pages available for other
* allocations. * allocations.
*/ */
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size + arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
large_pad, true); large_pad, true);
if (config_stats) { if (config_stats) {
szind_t oldindex = size2index(oldsize) - NBINS; szind_t oldindex = size2index(oldsize) - NBINS;
...@@ -2637,12 +3085,12 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, ...@@ -2637,12 +3085,12 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++; arena->stats.lstats[index].curruns++;
} }
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
} }
static bool static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
size_t oldsize, size_t usize_min, size_t usize_max, bool zero) void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
{ {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t npages = (oldsize + large_pad) >> LG_PAGE; size_t npages = (oldsize + large_pad) >> LG_PAGE;
...@@ -2652,7 +3100,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, ...@@ -2652,7 +3100,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
large_pad); large_pad);
/* Try to extend the run. */ /* Try to extend the run. */
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
pageind+npages) != 0) pageind+npages) != 0)
goto label_fail; goto label_fail;
...@@ -2675,7 +3123,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, ...@@ -2675,7 +3123,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
if (splitsize == 0) if (splitsize == 0)
goto label_fail; goto label_fail;
run = &arena_miscelm_get(chunk, pageind+npages)->run; run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
if (arena_run_split_large(arena, run, splitsize, zero)) if (arena_run_split_large(arena, run, splitsize, zero))
goto label_fail; goto label_fail;
...@@ -2683,10 +3131,16 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, ...@@ -2683,10 +3131,16 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
/* /*
* Zero the trailing bytes of the original allocation's * Zero the trailing bytes of the original allocation's
* last page, since they are in an indeterminate state. * last page, since they are in an indeterminate state.
* There will always be trailing bytes, because ptr's
* offset from the beginning of the run is a multiple of
* CACHELINE in [0 .. PAGE).
*/ */
assert(PAGE_CEILING(oldsize) == oldsize); void *zbase = (void *)((uintptr_t)ptr + oldsize);
memset((void *)((uintptr_t)ptr + oldsize), 0, void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
PAGE_CEILING((uintptr_t)ptr) - (uintptr_t)ptr); PAGE));
size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
assert(nzero > 0);
memset(zbase, 0, nzero);
} }
size = oldsize + splitsize; size = oldsize + splitsize;
...@@ -2726,24 +3180,24 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, ...@@ -2726,24 +3180,24 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++; arena->stats.lstats[index].curruns++;
} }
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
return (false); return (false);
} }
label_fail: label_fail:
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
return (true); return (true);
} }
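
The new zeroing logic in arena_ralloc_large_grow() computes the span from the end of the old allocation up to the next page boundary. A minimal sketch of that address arithmetic, assuming a 4 KiB page and a simple mask for the page-base macro:

/* Sketch: zero from the end of an allocation to the next page boundary. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE ((uintptr_t)4096) /* assumed page size */
#define PAGE_ADDR2BASE(a) ((void *)((uintptr_t)(a) & ~(PAGE - 1)))

static void
zero_tail(void *ptr, size_t oldsize)
{
	void *zbase = (void *)((uintptr_t)ptr + oldsize);
	void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + PAGE));
	size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;

	assert(nzero > 0 && nzero <= PAGE);
	memset(zbase, 0, nzero);
}

int
main(void)
{
	char *buf = aligned_alloc(4096, 2 * 4096);

	memset(buf, 0xab, 2 * 4096);
	zero_tail(buf, 100); /* clears buf[100 .. 4095] */
	printf("%d %d\n", buf[99] != 0, buf[100] == 0); /* 1 1 */
	free(buf);
	return (0);
}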
#ifdef JEMALLOC_JET #ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large #undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) #define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
#endif #endif
static void static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{ {
if (config_fill && unlikely(opt_junk_free)) { if (config_fill && unlikely(opt_junk_free)) {
memset((void *)((uintptr_t)ptr + usize), 0x5a, memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
old_usize - usize); old_usize - usize);
} }
} }
...@@ -2751,7 +3205,7 @@ arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) ...@@ -2751,7 +3205,7 @@ arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
#undef arena_ralloc_junk_large #undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large = arena_ralloc_junk_large_t *arena_ralloc_junk_large =
JEMALLOC_N(arena_ralloc_junk_large_impl); JEMALLOC_N(n_arena_ralloc_junk_large);
#endif #endif
/* /*
...@@ -2759,7 +3213,7 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large = ...@@ -2759,7 +3213,7 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large =
* always fail if growing an object, and the following run is already in use. * always fail if growing an object, and the following run is already in use.
*/ */
static bool static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min, arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero) size_t usize_max, bool zero)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
...@@ -2774,15 +3228,16 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min, ...@@ -2774,15 +3228,16 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
arena = extent_node_arena_get(&chunk->node); arena = extent_node_arena_get(&chunk->node);
if (oldsize < usize_max) { if (oldsize < usize_max) {
bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize, bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
usize_min, usize_max, zero); oldsize, usize_min, usize_max, zero);
if (config_fill && !ret && !zero) { if (config_fill && !ret && !zero) {
if (unlikely(opt_junk_alloc)) { if (unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr + oldsize), 0xa5, memset((void *)((uintptr_t)ptr + oldsize),
isalloc(ptr, config_prof) - oldsize); JEMALLOC_ALLOC_JUNK,
isalloc(tsdn, ptr, config_prof) - oldsize);
} else if (unlikely(opt_zero)) { } else if (unlikely(opt_zero)) {
memset((void *)((uintptr_t)ptr + oldsize), 0, memset((void *)((uintptr_t)ptr + oldsize), 0,
isalloc(ptr, config_prof) - oldsize); isalloc(tsdn, ptr, config_prof) - oldsize);
} }
} }
return (ret); return (ret);
...@@ -2791,19 +3246,27 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min, ...@@ -2791,19 +3246,27 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
assert(oldsize > usize_max); assert(oldsize > usize_max);
/* Fill before shrinking in order to avoid a race. */ /* Fill before shrinking in order to avoid a race. */
arena_ralloc_junk_large(ptr, oldsize, usize_max); arena_ralloc_junk_large(ptr, oldsize, usize_max);
arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max); arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
return (false); return (false);
} }
bool bool
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
bool zero) size_t extra, bool zero)
{ {
size_t usize_min, usize_max; size_t usize_min, usize_max;
/* Calls with non-zero extra had to clamp extra. */
assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
if (unlikely(size > HUGE_MAXCLASS))
return (true);
usize_min = s2u(size); usize_min = s2u(size);
usize_max = s2u(size + extra); usize_max = s2u(size + extra);
if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
arena_chunk_t *chunk;
/* /*
* Avoid moving the allocation if the size class can be left the * Avoid moving the allocation if the size class can be left the
* same. * same.
...@@ -2811,37 +3274,39 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, ...@@ -2811,37 +3274,39 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
if (oldsize <= SMALL_MAXCLASS) { if (oldsize <= SMALL_MAXCLASS) {
assert(arena_bin_info[size2index(oldsize)].reg_size == assert(arena_bin_info[size2index(oldsize)].reg_size ==
oldsize); oldsize);
if ((usize_max <= SMALL_MAXCLASS && if ((usize_max > SMALL_MAXCLASS ||
size2index(usize_max) == size2index(oldsize)) || size2index(usize_max) != size2index(oldsize)) &&
(size <= oldsize && usize_max >= oldsize)) (size > oldsize || usize_max < oldsize))
return (false); return (true);
} else { } else {
if (usize_max > SMALL_MAXCLASS) { if (usize_max <= SMALL_MAXCLASS)
if (!arena_ralloc_large(ptr, oldsize, usize_min, return (true);
usize_max, zero)) if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
return (false); usize_max, zero))
} return (true);
} }
/* Reallocation would require a move. */ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
return (true); arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
return (false);
} else { } else {
return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max, return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
zero)); usize_max, zero));
} }
} }
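
The reworked small-class branch of arena_ralloc_no_move() inverts the old condition: it now reports "must move" unless the requested range maps to the same size class as the existing allocation. A rough standalone sketch of that "can we stay in place?" test, using a toy size-class table rather than jemalloc's size2index():

/* Toy in-place-resize check over a tiny size-class table. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static const size_t classes[] = {8, 16, 32, 48, 64, 80, 96, 128};
#define NCLASSES (sizeof(classes) / sizeof(classes[0]))

static size_t
class_index(size_t size)
{
	size_t i;

	for (i = 0; i < NCLASSES; i++) {
		if (size <= classes[i])
			return (i);
	}
	return (NCLASSES); /* too large for the table */
}

/* True iff a resize from oldsize to newsize can reuse the same region. */
static bool
can_resize_in_place(size_t oldsize, size_t newsize)
{
	return (class_index(oldsize) == class_index(newsize));
}

int
main(void)
{
	printf("%d\n", can_resize_in_place(20, 30)); /* both map to 32: 1 */
	printf("%d\n", can_resize_in_place(20, 40)); /* 32 vs 48: 0 */
	return (0);
}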
static void * static void *
arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache) size_t alignment, bool zero, tcache_t *tcache)
{ {
if (alignment == 0) if (alignment == 0)
return (arena_malloc(tsd, arena, usize, zero, tcache)); return (arena_malloc(tsdn, arena, usize, size2index(usize),
zero, tcache, true));
usize = sa2u(usize, alignment); usize = sa2u(usize, alignment);
if (usize == 0) if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return (NULL); return (NULL);
return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
} }
void * void *
...@@ -2852,14 +3317,15 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, ...@@ -2852,14 +3317,15 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t usize; size_t usize;
usize = s2u(size); usize = s2u(size);
if (usize == 0) if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
return (NULL); return (NULL);
if (likely(usize <= large_maxclass)) { if (likely(usize <= large_maxclass)) {
size_t copysize; size_t copysize;
/* Try to avoid moving the allocation. */ /* Try to avoid moving the allocation. */
if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero)) if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
zero))
return (ptr); return (ptr);
/* /*
...@@ -2867,8 +3333,8 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, ...@@ -2867,8 +3333,8 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
* the object. In that case, fall back to allocating new space * the object. In that case, fall back to allocating new space
* and copying. * and copying.
*/ */
ret = arena_ralloc_move_helper(tsd, arena, usize, alignment, ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
zero, tcache); alignment, zero, tcache);
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);
...@@ -2880,7 +3346,7 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, ...@@ -2880,7 +3346,7 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
copysize = (usize < oldsize) ? usize : oldsize; copysize = (usize < oldsize) ? usize : oldsize;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize); memcpy(ret, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache); isqalloc(tsd, ptr, oldsize, tcache, true);
} else { } else {
ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
zero, tcache); zero, tcache);
...@@ -2889,25 +3355,25 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, ...@@ -2889,25 +3355,25 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
} }
dss_prec_t dss_prec_t
arena_dss_prec_get(arena_t *arena) arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
{ {
dss_prec_t ret; dss_prec_t ret;
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
ret = arena->dss_prec; ret = arena->dss_prec;
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
return (ret); return (ret);
} }
bool bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
{ {
if (!have_dss) if (!have_dss)
return (dss_prec != dss_prec_disabled); return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(tsdn, &arena->lock);
arena->dss_prec = dss_prec; arena->dss_prec = dss_prec;
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
return (false); return (false);
} }
...@@ -2922,27 +3388,76 @@ bool ...@@ -2922,27 +3388,76 @@ bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
{ {
if (opt_purge != purge_mode_ratio)
return (true);
if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
return (true); return (true);
atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
return (false); return (false);
} }
ssize_t
arena_decay_time_default_get(void)
{
return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
}
bool
arena_decay_time_default_set(ssize_t decay_time)
{
if (opt_purge != purge_mode_decay)
return (true);
if (!arena_decay_time_valid(decay_time))
return (true);
atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
return (false);
}
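
arena_decay_time_default_{get,set} above round-trip an ssize_t through a size_t atomic. A small sketch of that pattern with C11 atomics (names and the validity check are illustrative):

/* Sketch: storing a signed default in an unsigned atomic via casts (C11). */
#include <stdatomic.h>
#include <stdio.h>
#include <sys/types.h>

static _Atomic size_t decay_default; /* stand-in for decay_time_default */

static ssize_t
decay_default_get(void)
{
	return ((ssize_t)atomic_load(&decay_default));
}

static int
decay_default_set(ssize_t decay_time)
{
	if (decay_time < -1)
		return (1); /* reject invalid values, mirroring the validity check */
	atomic_store(&decay_default, (size_t)decay_time);
	return (0);
}

int
main(void)
{
	decay_default_set(10);
	printf("%zd\n", decay_default_get()); /* 10 */
	return (0);
}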
static void
arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
size_t *nactive, size_t *ndirty)
{
*nthreads += arena_nthreads_get(arena, false);
*dss = dss_prec_names[arena->dss_prec];
*lg_dirty_mult = arena->lg_dirty_mult;
*decay_time = arena->decay.time;
*nactive += arena->nactive;
*ndirty += arena->ndirty;
}
void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
size_t *nactive, size_t *ndirty)
{
malloc_mutex_lock(tsdn, &arena->lock);
arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
decay_time, nactive, ndirty);
malloc_mutex_unlock(tsdn, &arena->lock);
}
void void
arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult, arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
size_t *nactive, size_t *ndirty, arena_stats_t *astats, size_t *nactive, size_t *ndirty, arena_stats_t *astats,
malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
malloc_huge_stats_t *hstats) malloc_huge_stats_t *hstats)
{ {
unsigned i; unsigned i;
malloc_mutex_lock(&arena->lock); cassert(config_stats);
*dss = dss_prec_names[arena->dss_prec];
*lg_dirty_mult = arena->lg_dirty_mult; malloc_mutex_lock(tsdn, &arena->lock);
*nactive += arena->nactive; arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
*ndirty += arena->ndirty; decay_time, nactive, ndirty);
astats->mapped += arena->stats.mapped; astats->mapped += arena->stats.mapped;
astats->retained += arena->stats.retained;
astats->npurge += arena->stats.npurge; astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise; astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged; astats->purged += arena->stats.purged;
...@@ -2968,12 +3483,12 @@ arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult, ...@@ -2968,12 +3483,12 @@ arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
} }
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(tsdn, &arena->lock);
for (i = 0; i < NBINS; i++) { for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i]; arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock); malloc_mutex_lock(tsdn, &bin->lock);
bstats[i].nmalloc += bin->stats.nmalloc; bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc; bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests; bstats[i].nrequests += bin->stats.nrequests;
...@@ -2985,33 +3500,61 @@ arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult, ...@@ -2985,33 +3500,61 @@ arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
bstats[i].nruns += bin->stats.nruns; bstats[i].nruns += bin->stats.nruns;
bstats[i].reruns += bin->stats.reruns; bstats[i].reruns += bin->stats.reruns;
bstats[i].curruns += bin->stats.curruns; bstats[i].curruns += bin->stats.curruns;
malloc_mutex_unlock(&bin->lock); malloc_mutex_unlock(tsdn, &bin->lock);
} }
} }
unsigned
arena_nthreads_get(arena_t *arena, bool internal)
{
return (atomic_read_u(&arena->nthreads[internal]));
}
void
arena_nthreads_inc(arena_t *arena, bool internal)
{
atomic_add_u(&arena->nthreads[internal], 1);
}
void
arena_nthreads_dec(arena_t *arena, bool internal)
{
atomic_sub_u(&arena->nthreads[internal], 1);
}
size_t
arena_extent_sn_next(arena_t *arena)
{
return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
}
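
arena_extent_sn_next() hands out monotonically increasing serial numbers: the atomic add returns the post-increment value, and subtracting one recovers the number being issued. The same idea with C11 atomic_fetch_add, which returns the previous value directly:

/* Sketch: a monotonically increasing serial-number source (C11 atomics). */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static _Atomic size_t sn_next;

static size_t
extent_sn_next(void)
{
	/* fetch_add returns the prior value, i.e. the number being handed out. */
	return (atomic_fetch_add(&sn_next, 1));
}

int
main(void)
{
	size_t a = extent_sn_next();
	size_t b = extent_sn_next();

	printf("%zu %zu\n", a, b); /* 0 1 */
	return (0);
}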
arena_t * arena_t *
arena_new(unsigned ind) arena_new(tsdn_t *tsdn, unsigned ind)
{ {
arena_t *arena; arena_t *arena;
unsigned i; unsigned i;
arena_bin_t *bin;
/* /*
* Allocate arena, arena->lstats, and arena->hstats contiguously, mainly * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
* because there is no way to clean up if base_alloc() OOMs. * because there is no way to clean up if base_alloc() OOMs.
*/ */
if (config_stats) { if (config_stats) {
arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t)) arena = (arena_t *)base_alloc(tsdn,
+ QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + CACHELINE_CEILING(sizeof(arena_t)) +
nhclasses) * sizeof(malloc_huge_stats_t)); QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)))
+ (nhclasses * sizeof(malloc_huge_stats_t)));
} else } else
arena = (arena_t *)base_alloc(sizeof(arena_t)); arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
if (arena == NULL) if (arena == NULL)
return (NULL); return (NULL);
arena->ind = ind; arena->ind = ind;
arena->nthreads = 0; arena->nthreads[0] = arena->nthreads[1] = 0;
if (malloc_mutex_init(&arena->lock)) if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
return (NULL); return (NULL);
if (config_stats) { if (config_stats) {
...@@ -3041,11 +3584,15 @@ arena_new(unsigned ind) ...@@ -3041,11 +3584,15 @@ arena_new(unsigned ind)
* deterministic seed. * deterministic seed.
*/ */
arena->offset_state = config_debug ? ind : arena->offset_state = config_debug ? ind :
(uint64_t)(uintptr_t)arena; (size_t)(uintptr_t)arena;
} }
arena->dss_prec = chunk_dss_prec_get(); arena->dss_prec = chunk_dss_prec_get();
ql_new(&arena->achunks);
arena->extent_sn_next = 0;
arena->spare = NULL; arena->spare = NULL;
arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
...@@ -3053,33 +3600,42 @@ arena_new(unsigned ind) ...@@ -3053,33 +3600,42 @@ arena_new(unsigned ind)
arena->nactive = 0; arena->nactive = 0;
arena->ndirty = 0; arena->ndirty = 0;
arena_avail_tree_new(&arena->runs_avail); for (i = 0; i < NPSIZES; i++)
arena_run_heap_new(&arena->runs_avail[i]);
qr_new(&arena->runs_dirty, rd_link); qr_new(&arena->runs_dirty, rd_link);
qr_new(&arena->chunks_cache, cc_link); qr_new(&arena->chunks_cache, cc_link);
if (opt_purge == purge_mode_decay)
arena_decay_init(arena, arena_decay_time_default_get());
ql_new(&arena->huge); ql_new(&arena->huge);
if (malloc_mutex_init(&arena->huge_mtx)) if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
WITNESS_RANK_ARENA_HUGE))
return (NULL); return (NULL);
extent_tree_szad_new(&arena->chunks_szad_cached); extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
extent_tree_ad_new(&arena->chunks_ad_cached); extent_tree_ad_new(&arena->chunks_ad_cached);
extent_tree_szad_new(&arena->chunks_szad_retained); extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
extent_tree_ad_new(&arena->chunks_ad_retained); extent_tree_ad_new(&arena->chunks_ad_retained);
if (malloc_mutex_init(&arena->chunks_mtx)) if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
WITNESS_RANK_ARENA_CHUNKS))
return (NULL); return (NULL);
ql_new(&arena->node_cache); ql_new(&arena->node_cache);
if (malloc_mutex_init(&arena->node_cache_mtx)) if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
WITNESS_RANK_ARENA_NODE_CACHE))
return (NULL); return (NULL);
arena->chunk_hooks = chunk_hooks_default; arena->chunk_hooks = chunk_hooks_default;
/* Initialize bins. */ /* Initialize bins. */
for (i = 0; i < NBINS; i++) { for (i = 0; i < NBINS; i++) {
bin = &arena->bins[i]; arena_bin_t *bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock)) if (malloc_mutex_init(&bin->lock, "arena_bin",
WITNESS_RANK_ARENA_BIN))
return (NULL); return (NULL);
bin->runcur = NULL; bin->runcur = NULL;
arena_run_tree_new(&bin->runs); arena_run_heap_new(&bin->runs);
if (config_stats) if (config_stats)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
} }
...@@ -3111,8 +3667,7 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info) ...@@ -3111,8 +3667,7 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
* be twice as large in order to maintain alignment. * be twice as large in order to maintain alignment.
*/ */
if (config_fill && unlikely(opt_redzone)) { if (config_fill && unlikely(opt_redzone)) {
size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
1);
if (align_min <= REDZONE_MINSIZE) { if (align_min <= REDZONE_MINSIZE) {
bin_info->redzone_size = REDZONE_MINSIZE; bin_info->redzone_size = REDZONE_MINSIZE;
pad_size = 0; pad_size = 0;
...@@ -3132,18 +3687,19 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info) ...@@ -3132,18 +3687,19 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
* size). * size).
*/ */
try_run_size = PAGE; try_run_size = PAGE;
try_nregs = try_run_size / bin_info->reg_size; try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
do { do {
perfect_run_size = try_run_size; perfect_run_size = try_run_size;
perfect_nregs = try_nregs; perfect_nregs = try_nregs;
try_run_size += PAGE; try_run_size += PAGE;
try_nregs = try_run_size / bin_info->reg_size; try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
} while (perfect_run_size != perfect_nregs * bin_info->reg_size); } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
assert(perfect_nregs <= RUN_MAXREGS); assert(perfect_nregs <= RUN_MAXREGS);
actual_run_size = perfect_run_size; actual_run_size = perfect_run_size;
actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval; actual_nregs = (uint32_t)((actual_run_size - pad_size) /
bin_info->reg_interval);
/* /*
* Redzones can require enough padding that not even a single region can * Redzones can require enough padding that not even a single region can
...@@ -3155,8 +3711,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info) ...@@ -3155,8 +3711,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
assert(config_fill && unlikely(opt_redzone)); assert(config_fill && unlikely(opt_redzone));
actual_run_size += PAGE; actual_run_size += PAGE;
actual_nregs = (actual_run_size - pad_size) / actual_nregs = (uint32_t)((actual_run_size - pad_size) /
bin_info->reg_interval; bin_info->reg_interval);
} }
/* /*
...@@ -3164,8 +3720,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info) ...@@ -3164,8 +3720,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
*/ */
while (actual_run_size > arena_maxrun) { while (actual_run_size > arena_maxrun) {
actual_run_size -= PAGE; actual_run_size -= PAGE;
actual_nregs = (actual_run_size - pad_size) / actual_nregs = (uint32_t)((actual_run_size - pad_size) /
bin_info->reg_interval; bin_info->reg_interval);
} }
assert(actual_nregs > 0); assert(actual_nregs > 0);
assert(actual_run_size == s2u(actual_run_size)); assert(actual_run_size == s2u(actual_run_size));
...@@ -3173,11 +3729,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info) ...@@ -3173,11 +3729,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
/* Copy final settings. */ /* Copy final settings. */
bin_info->run_size = actual_run_size; bin_info->run_size = actual_run_size;
bin_info->nregs = actual_nregs; bin_info->nregs = actual_nregs;
bin_info->reg0_offset = actual_run_size - (actual_nregs * bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
bin_info->reg_interval) - pad_size + bin_info->redzone_size; bin_info->reg_interval) - pad_size + bin_info->redzone_size);
if (actual_run_size > small_maxrun)
small_maxrun = actual_run_size;
assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
* bin_info->reg_interval) + pad_size == bin_info->run_size); * bin_info->reg_interval) + pad_size == bin_info->run_size);
...@@ -3194,7 +3747,7 @@ bin_info_init(void) ...@@ -3194,7 +3747,7 @@ bin_info_init(void)
bin_info_run_size_calc(bin_info); \ bin_info_run_size_calc(bin_info); \
bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define BIN_INFO_INIT_bin_no(index, size) #define BIN_INFO_INIT_bin_no(index, size)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
SIZE_CLASSES SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes #undef BIN_INFO_INIT_bin_yes
...@@ -3202,38 +3755,13 @@ bin_info_init(void) ...@@ -3202,38 +3755,13 @@ bin_info_init(void)
#undef SC #undef SC
} }
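
bin_info_run_size_calc() above searches for the smallest run size that is simultaneously a multiple of the page size and an exact multiple of the region size, so no space is wasted before redzone padding is considered. A standalone version of that search loop, assuming a 4 KiB page:

/* Sketch: smallest run size that is a multiple of both PAGE and reg_size. */
#include <stddef.h>
#include <stdio.h>

#define PAGE ((size_t)4096) /* assumed page size */

static size_t
perfect_run_size(size_t reg_size)
{
	size_t try_run_size, try_nregs, run_size, nregs;

	try_run_size = PAGE;
	try_nregs = try_run_size / reg_size;
	do {
		run_size = try_run_size;
		nregs = try_nregs;
		try_run_size += PAGE;
		try_nregs = try_run_size / reg_size;
	} while (run_size != nregs * reg_size);
	return (run_size);
}

int
main(void)
{
	printf("%zu\n", perfect_run_size(48));   /* 12288 = 3 pages */
	printf("%zu\n", perfect_run_size(4096)); /* 4096 */
	return (0);
}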
static bool void
small_run_size_init(void)
{
assert(small_maxrun != 0);
small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
LG_PAGE));
if (small_run_tab == NULL)
return (true);
#define TAB_INIT_bin_yes(index, size) { \
arena_bin_info_t *bin_info = &arena_bin_info[index]; \
small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
}
#define TAB_INIT_bin_no(index, size)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
SIZE_CLASSES
#undef TAB_INIT_bin_yes
#undef TAB_INIT_bin_no
#undef SC
return (false);
}
bool
arena_boot(void) arena_boot(void)
{ {
unsigned i; unsigned i;
arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
arena_decay_time_default_set(opt_decay_time);
/* /*
* Compute the header size such that it is large enough to contain the * Compute the header size such that it is large enough to contain the
...@@ -3275,44 +3803,61 @@ arena_boot(void) ...@@ -3275,44 +3803,61 @@ arena_boot(void)
nhclasses = NSIZES - nlclasses - NBINS; nhclasses = NSIZES - nlclasses - NBINS;
bin_info_init(); bin_info_init();
return (small_run_size_init());
} }
void void
arena_prefork(arena_t *arena) arena_prefork0(tsdn_t *tsdn, arena_t *arena)
{
malloc_mutex_prefork(tsdn, &arena->lock);
}
void
arena_prefork1(tsdn_t *tsdn, arena_t *arena)
{
malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
}
void
arena_prefork2(tsdn_t *tsdn, arena_t *arena)
{
malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
}
void
arena_prefork3(tsdn_t *tsdn, arena_t *arena)
{ {
unsigned i; unsigned i;
malloc_mutex_prefork(&arena->lock);
malloc_mutex_prefork(&arena->huge_mtx);
malloc_mutex_prefork(&arena->chunks_mtx);
malloc_mutex_prefork(&arena->node_cache_mtx);
for (i = 0; i < NBINS; i++) for (i = 0; i < NBINS; i++)
malloc_mutex_prefork(&arena->bins[i].lock); malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
malloc_mutex_prefork(tsdn, &arena->huge_mtx);
} }
void void
arena_postfork_parent(arena_t *arena) arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
{ {
unsigned i; unsigned i;
malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++) for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_parent(&arena->bins[i].lock); malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
malloc_mutex_postfork_parent(&arena->node_cache_mtx); malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
malloc_mutex_postfork_parent(&arena->chunks_mtx); malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
malloc_mutex_postfork_parent(&arena->huge_mtx); malloc_mutex_postfork_parent(tsdn, &arena->lock);
malloc_mutex_postfork_parent(&arena->lock);
} }
void void
arena_postfork_child(arena_t *arena) arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
{ {
unsigned i; unsigned i;
malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++) for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_child(&arena->bins[i].lock); malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
malloc_mutex_postfork_child(&arena->node_cache_mtx); malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
malloc_mutex_postfork_child(&arena->chunks_mtx); malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
malloc_mutex_postfork_child(&arena->huge_mtx); malloc_mutex_postfork_child(tsdn, &arena->lock);
malloc_mutex_postfork_child(&arena->lock);
} }
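
The prefork hooks above were split into numbered stages so the arena's mutexes are acquired in a fixed order before fork() and released afterwards in both parent and child. A minimal illustration of the general pattern for a single lock with pthread_atfork; this is only the classic textbook form, not jemalloc's witness/rank machinery, and production allocators may instead reinitialize locks in the child:

/* Sketch: lock around fork() so the child never inherits a held mutex. */
#include <pthread.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void prefork(void)         { pthread_mutex_lock(&lock); }
static void postfork_parent(void) { pthread_mutex_unlock(&lock); }
static void postfork_child(void)  { pthread_mutex_unlock(&lock); }

int
main(void)
{
	pthread_atfork(prefork, postfork_parent, postfork_child);

	if (fork() == 0) {
		/* Child: the lock is usable again thanks to postfork_child. */
		pthread_mutex_lock(&lock);
		pthread_mutex_unlock(&lock);
		_exit(0);
	}
	wait(NULL);
	return (0);
}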
...@@ -5,7 +5,8 @@ ...@@ -5,7 +5,8 @@
/* Data. */ /* Data. */
static malloc_mutex_t base_mtx; static malloc_mutex_t base_mtx;
static extent_tree_t base_avail_szad; static size_t base_extent_sn_next;
static extent_tree_t base_avail_szsnad;
static extent_node_t *base_nodes; static extent_node_t *base_nodes;
static size_t base_allocated; static size_t base_allocated;
static size_t base_resident; static size_t base_resident;
...@@ -13,12 +14,13 @@ static size_t base_mapped; ...@@ -13,12 +14,13 @@ static size_t base_mapped;
/******************************************************************************/ /******************************************************************************/
/* base_mtx must be held. */
static extent_node_t * static extent_node_t *
base_node_try_alloc(void) base_node_try_alloc(tsdn_t *tsdn)
{ {
extent_node_t *node; extent_node_t *node;
malloc_mutex_assert_owner(tsdn, &base_mtx);
if (base_nodes == NULL) if (base_nodes == NULL)
return (NULL); return (NULL);
node = base_nodes; node = base_nodes;
...@@ -27,33 +29,42 @@ base_node_try_alloc(void) ...@@ -27,33 +29,42 @@ base_node_try_alloc(void)
return (node); return (node);
} }
/* base_mtx must be held. */
static void static void
base_node_dalloc(extent_node_t *node) base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
{ {
malloc_mutex_assert_owner(tsdn, &base_mtx);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
*(extent_node_t **)node = base_nodes; *(extent_node_t **)node = base_nodes;
base_nodes = node; base_nodes = node;
} }
/* base_mtx must be held. */ static void
base_extent_node_init(extent_node_t *node, void *addr, size_t size)
{
size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;
extent_node_init(node, NULL, addr, size, sn, true, true);
}
static extent_node_t * static extent_node_t *
base_chunk_alloc(size_t minsize) base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{ {
extent_node_t *node; extent_node_t *node;
size_t csize, nsize; size_t csize, nsize;
void *addr; void *addr;
malloc_mutex_assert_owner(tsdn, &base_mtx);
assert(minsize != 0); assert(minsize != 0);
node = base_node_try_alloc(); node = base_node_try_alloc(tsdn);
/* Allocate enough space to also carve a node out if necessary. */ /* Allocate enough space to also carve a node out if necessary. */
nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0; nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
csize = CHUNK_CEILING(minsize + nsize); csize = CHUNK_CEILING(minsize + nsize);
addr = chunk_alloc_base(csize); addr = chunk_alloc_base(csize);
if (addr == NULL) { if (addr == NULL) {
if (node != NULL) if (node != NULL)
base_node_dalloc(node); base_node_dalloc(tsdn, node);
return (NULL); return (NULL);
} }
base_mapped += csize; base_mapped += csize;
...@@ -66,7 +77,7 @@ base_chunk_alloc(size_t minsize) ...@@ -66,7 +77,7 @@ base_chunk_alloc(size_t minsize)
base_resident += PAGE_CEILING(nsize); base_resident += PAGE_CEILING(nsize);
} }
} }
extent_node_init(node, NULL, addr, csize, true, true); base_extent_node_init(node, addr, csize);
return (node); return (node);
} }
...@@ -76,7 +87,7 @@ base_chunk_alloc(size_t minsize) ...@@ -76,7 +87,7 @@ base_chunk_alloc(size_t minsize)
* physical memory usage. * physical memory usage.
*/ */
void * void *
base_alloc(size_t size) base_alloc(tsdn_t *tsdn, size_t size)
{ {
void *ret; void *ret;
size_t csize, usize; size_t csize, usize;
...@@ -90,15 +101,15 @@ base_alloc(size_t size) ...@@ -90,15 +101,15 @@ base_alloc(size_t size)
csize = CACHELINE_CEILING(size); csize = CACHELINE_CEILING(size);
usize = s2u(csize); usize = s2u(csize);
extent_node_init(&key, NULL, NULL, usize, false, false); extent_node_init(&key, NULL, NULL, usize, 0, false, false);
malloc_mutex_lock(&base_mtx); malloc_mutex_lock(tsdn, &base_mtx);
node = extent_tree_szad_nsearch(&base_avail_szad, &key); node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
if (node != NULL) { if (node != NULL) {
/* Use existing space. */ /* Use existing space. */
extent_tree_szad_remove(&base_avail_szad, node); extent_tree_szsnad_remove(&base_avail_szsnad, node);
} else { } else {
/* Try to allocate more space. */ /* Try to allocate more space. */
node = base_chunk_alloc(csize); node = base_chunk_alloc(tsdn, csize);
} }
if (node == NULL) { if (node == NULL) {
ret = NULL; ret = NULL;
...@@ -109,9 +120,9 @@ base_alloc(size_t size) ...@@ -109,9 +120,9 @@ base_alloc(size_t size)
if (extent_node_size_get(node) > csize) { if (extent_node_size_get(node) > csize) {
extent_node_addr_set(node, (void *)((uintptr_t)ret + csize)); extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
extent_node_size_set(node, extent_node_size_get(node) - csize); extent_node_size_set(node, extent_node_size_get(node) - csize);
extent_tree_szad_insert(&base_avail_szad, node); extent_tree_szsnad_insert(&base_avail_szsnad, node);
} else } else
base_node_dalloc(node); base_node_dalloc(tsdn, node);
if (config_stats) { if (config_stats) {
base_allocated += csize; base_allocated += csize;
/* /*
...@@ -123,52 +134,54 @@ base_alloc(size_t size) ...@@ -123,52 +134,54 @@ base_alloc(size_t size)
} }
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize); JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return: label_return:
malloc_mutex_unlock(&base_mtx); malloc_mutex_unlock(tsdn, &base_mtx);
return (ret); return (ret);
} }
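
base_alloc() above rounds the request up to a cacheline multiple, takes a fitting free extent out of the size/serial/address tree, and reinserts whatever is left over. The same carve-and-reinsert idea over a plain first-fit free list, as a rough sketch rather than jemalloc's tree-based code:

/* Sketch: carve a cacheline-rounded request from a first-fit free list. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHELINE 64
#define CACHELINE_CEILING(s) (((s) + CACHELINE - 1) & ~((size_t)CACHELINE - 1))

typedef struct extent {
	void *addr;
	size_t size;
	struct extent *next;
} extent_t;

static extent_t *avail;

static void *
base_alloc_sketch(size_t size)
{
	size_t csize = CACHELINE_CEILING(size);
	extent_t **pp, *node;

	for (pp = &avail; (node = *pp) != NULL; pp = &node->next) {
		if (node->size >= csize)
			break;
	}
	if (node == NULL)
		return (NULL); /* a real allocator would map a new chunk here */

	void *ret = node->addr;
	if (node->size > csize) {
		/* Shrink the extent in place and keep it on the list. */
		node->addr = (void *)((uintptr_t)node->addr + csize);
		node->size -= csize;
	} else {
		*pp = node->next; /* exact fit: retire the extent */
		free(node);
	}
	return (ret);
}

int
main(void)
{
	static char backing[4096];
	extent_t *n = malloc(sizeof(*n));

	n->addr = backing;
	n->size = sizeof(backing);
	n->next = NULL;
	avail = n;
	printf("%p %p\n", base_alloc_sketch(10), base_alloc_sketch(100));
	return (0);
}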
void void
base_stats_get(size_t *allocated, size_t *resident, size_t *mapped) base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
size_t *mapped)
{ {
malloc_mutex_lock(&base_mtx); malloc_mutex_lock(tsdn, &base_mtx);
assert(base_allocated <= base_resident); assert(base_allocated <= base_resident);
assert(base_resident <= base_mapped); assert(base_resident <= base_mapped);
*allocated = base_allocated; *allocated = base_allocated;
*resident = base_resident; *resident = base_resident;
*mapped = base_mapped; *mapped = base_mapped;
malloc_mutex_unlock(&base_mtx); malloc_mutex_unlock(tsdn, &base_mtx);
} }
bool bool
base_boot(void) base_boot(void)
{ {
if (malloc_mutex_init(&base_mtx)) if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
return (true); return (true);
extent_tree_szad_new(&base_avail_szad); base_extent_sn_next = 0;
extent_tree_szsnad_new(&base_avail_szsnad);
base_nodes = NULL; base_nodes = NULL;
return (false); return (false);
} }
void void
base_prefork(void) base_prefork(tsdn_t *tsdn)
{ {
malloc_mutex_prefork(&base_mtx); malloc_mutex_prefork(tsdn, &base_mtx);
} }
void void
base_postfork_parent(void) base_postfork_parent(tsdn_t *tsdn)
{ {
malloc_mutex_postfork_parent(&base_mtx); malloc_mutex_postfork_parent(tsdn, &base_mtx);
} }
void void
base_postfork_child(void) base_postfork_child(tsdn_t *tsdn)
{ {
malloc_mutex_postfork_child(&base_mtx); malloc_mutex_postfork_child(tsdn, &base_mtx);
} }
...@@ -3,6 +3,8 @@ ...@@ -3,6 +3,8 @@
/******************************************************************************/ /******************************************************************************/
#ifdef USE_TREE
void void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{ {
...@@ -32,20 +34,11 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits) ...@@ -32,20 +34,11 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
binfo->nbits = nbits; binfo->nbits = nbits;
} }
size_t static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) bitmap_info_ngroups(const bitmap_info_t *binfo)
{ {
return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP); return (binfo->levels[binfo->nlevels].group_offset);
}
size_t
bitmap_size(size_t nbits)
{
bitmap_info_t binfo;
bitmap_info_init(&binfo, nbits);
return (bitmap_info_ngroups(&binfo));
} }
void void
...@@ -61,8 +54,7 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) ...@@ -61,8 +54,7 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
* correspond to the first logical bit in the group, so extra bits * correspond to the first logical bit in the group, so extra bits
* are the most significant bits of the last group. * are the most significant bits of the last group.
*/ */
memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset << memset(bitmap, 0xffU, bitmap_size(binfo));
LG_SIZEOF_BITMAP);
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK; & BITMAP_GROUP_NBITS_MASK;
if (extra != 0) if (extra != 0)
...@@ -76,3 +68,44 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) ...@@ -76,3 +68,44 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
} }
} }
#else /* USE_TREE */
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
binfo->nbits = nbits;
}
static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo)
{
return (binfo->ngroups);
}
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
size_t extra;
memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
bitmap[binfo->ngroups - 1] >>= extra;
}
#endif /* USE_TREE */
size_t
bitmap_size(const bitmap_info_t *binfo)
{
return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
}
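
The non-tree bitmap path added above computes the number of machine-word groups for nbits and, at init, sets every bit and shifts the unused high bits out of the last group. A standalone sketch of that flat layout with 64-bit groups:

/* Sketch: flat bitmap with all bits set at init, minus the unused tail bits. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GROUP_NBITS 64

static size_t
bitmap_ngroups(size_t nbits)
{
	return ((nbits + GROUP_NBITS - 1) / GROUP_NBITS);
}

static void
bitmap_init_flat(uint64_t *bitmap, size_t nbits)
{
	size_t ngroups = bitmap_ngroups(nbits);
	size_t extra = (GROUP_NBITS - (nbits % GROUP_NBITS)) % GROUP_NBITS;

	memset(bitmap, 0xff, ngroups * sizeof(uint64_t));
	if (extra != 0)
		bitmap[ngroups - 1] >>= extra; /* clear bits past nbits */
}

int
main(void)
{
	uint64_t groups[2];

	bitmap_init_flat(groups, 70); /* 70 bits -> 2 groups, 58 tail bits cleared */
	printf("%016llx %016llx\n", (unsigned long long)groups[0],
	    (unsigned long long)groups[1]); /* ffffffffffffffff 000000000000003f */
	return (0);
}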
...@@ -49,9 +49,10 @@ const chunk_hooks_t chunk_hooks_default = { ...@@ -49,9 +49,10 @@ const chunk_hooks_t chunk_hooks_default = {
* definition. * definition.
*/ */
static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, static void chunk_record(tsdn_t *tsdn, arena_t *arena,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
void *chunk, size_t size, bool zeroed, bool committed); extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
bool zeroed, bool committed);
/******************************************************************************/ /******************************************************************************/
...@@ -63,23 +64,23 @@ chunk_hooks_get_locked(arena_t *arena) ...@@ -63,23 +64,23 @@ chunk_hooks_get_locked(arena_t *arena)
} }
chunk_hooks_t chunk_hooks_t
chunk_hooks_get(arena_t *arena) chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
{ {
chunk_hooks_t chunk_hooks; chunk_hooks_t chunk_hooks;
malloc_mutex_lock(&arena->chunks_mtx); malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks = chunk_hooks_get_locked(arena); chunk_hooks = chunk_hooks_get_locked(arena);
malloc_mutex_unlock(&arena->chunks_mtx); malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (chunk_hooks); return (chunk_hooks);
} }
chunk_hooks_t chunk_hooks_t
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks) chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
{ {
chunk_hooks_t old_chunk_hooks; chunk_hooks_t old_chunk_hooks;
malloc_mutex_lock(&arena->chunks_mtx); malloc_mutex_lock(tsdn, &arena->chunks_mtx);
old_chunk_hooks = arena->chunk_hooks; old_chunk_hooks = arena->chunk_hooks;
/* /*
* Copy each field atomically so that it is impossible for readers to * Copy each field atomically so that it is impossible for readers to
...@@ -104,14 +105,14 @@ chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks) ...@@ -104,14 +105,14 @@ chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
ATOMIC_COPY_HOOK(split); ATOMIC_COPY_HOOK(split);
ATOMIC_COPY_HOOK(merge); ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK #undef ATOMIC_COPY_HOOK
malloc_mutex_unlock(&arena->chunks_mtx); malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (old_chunk_hooks); return (old_chunk_hooks);
} }
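
chunk_hooks_set() copies the hook table one pointer at a time so concurrent readers never observe a torn struct; at worst they see a mix of old and new function pointers, each of which is individually valid. A sketch of that field-by-field publication with C11 atomics (the hook table here is a made-up two-entry example):

/* Sketch: swap a table of function pointers one field at a time (C11). */
#include <stdatomic.h>
#include <stdio.h>

typedef void (*hook_t)(void);

typedef struct {
	_Atomic hook_t alloc;
	_Atomic hook_t dalloc;
} hooks_t;

static void old_alloc(void)  { puts("old alloc"); }
static void old_dalloc(void) { puts("old dalloc"); }
static void new_alloc(void)  { puts("new alloc"); }
static void new_dalloc(void) { puts("new dalloc"); }

static hooks_t hooks = {old_alloc, old_dalloc};

static void
hooks_set(hook_t a, hook_t d)
{
	/* Each store is atomic; readers see old or new per field, never garbage. */
	atomic_store(&hooks.alloc, a);
	atomic_store(&hooks.dalloc, d);
}

int
main(void)
{
	hooks_set(new_alloc, new_dalloc);
	atomic_load(&hooks.alloc)(); /* prints "new alloc" */
	return (0);
}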
static void static void
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks, chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
bool locked) chunk_hooks_t *chunk_hooks, bool locked)
{ {
static const chunk_hooks_t uninitialized_hooks = static const chunk_hooks_t uninitialized_hooks =
CHUNK_HOOKS_INITIALIZER; CHUNK_HOOKS_INITIALIZER;
...@@ -119,27 +120,28 @@ chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -119,27 +120,28 @@ chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) == if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
0) { 0) {
*chunk_hooks = locked ? chunk_hooks_get_locked(arena) : *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
chunk_hooks_get(arena); chunk_hooks_get(tsdn, arena);
} }
} }
static void static void
chunk_hooks_assure_initialized_locked(arena_t *arena, chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks) chunk_hooks_t *chunk_hooks)
{ {
chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true); chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
} }
static void static void
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks) chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks)
{ {
chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false); chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
} }
bool bool
chunk_register(const void *chunk, const extent_node_t *node) chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
{ {
assert(extent_node_addr_get(node) == chunk); assert(extent_node_addr_get(node) == chunk);
...@@ -159,7 +161,7 @@ chunk_register(const void *chunk, const extent_node_t *node) ...@@ -159,7 +161,7 @@ chunk_register(const void *chunk, const extent_node_t *node)
high = atomic_read_z(&highchunks); high = atomic_read_z(&highchunks);
} }
if (cur > high && prof_gdump_get_unlocked()) if (cur > high && prof_gdump_get_unlocked())
prof_gdump(); prof_gdump(tsdn);
} }
return (false); return (false);
...@@ -181,33 +183,35 @@ chunk_deregister(const void *chunk, const extent_node_t *node) ...@@ -181,33 +183,35 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
} }
/* /*
* Do first-best-fit chunk selection, i.e. select the lowest chunk that best * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
* fits. * best fits.
*/ */
static extent_node_t * static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad, chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
extent_tree_t *chunks_ad, size_t size)
{ {
extent_node_t key; extent_node_t key;
assert(size == CHUNK_CEILING(size)); assert(size == CHUNK_CEILING(size));
extent_node_init(&key, arena, NULL, size, false, false); extent_node_init(&key, arena, NULL, size, 0, false, false);
return (extent_tree_szad_nsearch(chunks_szad, &key)); return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
} }
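
With the szsnad tree, chunk_first_best_fit() now prefers the smallest chunk that fits and, among equally sized candidates, the one with the oldest serial number and then the lowest address. A linear-scan sketch of that ordering; the real code performs the equivalent search with a red-black tree nsearch:

/* Sketch: best fit by (size, serial number, address) over a plain array. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	void *addr;
	size_t size;
	size_t sn; /* allocation-order serial number: lower means older */
} extent_t;

static extent_t *
first_best_fit(extent_t *ext, size_t n, size_t size)
{
	extent_t *best = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		extent_t *e = &ext[i];

		if (e->size < size)
			continue;
		if (best == NULL || e->size < best->size ||
		    (e->size == best->size && (e->sn < best->sn ||
		    (e->sn == best->sn && (uintptr_t)e->addr <
		    (uintptr_t)best->addr))))
			best = e;
	}
	return (best);
}

int
main(void)
{
	extent_t ext[] = {
		{(void *)0x3000, 8192, 5},
		{(void *)0x1000, 4096, 7},
		{(void *)0x2000, 4096, 2},
	};
	extent_t *e = first_best_fit(ext, 3, 4096);

	printf("%p sn=%zu\n", e->addr, e->sn); /* 0x2000, sn=2 */
	return (0);
}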
static void * static void *
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
bool dalloc_node) bool *commit, bool dalloc_node)
{ {
void *ret; void *ret;
extent_node_t *node; extent_node_t *node;
size_t alloc_size, leadsize, trailsize; size_t alloc_size, leadsize, trailsize;
bool zeroed, committed; bool zeroed, committed;
assert(CHUNK_CEILING(size) == size);
assert(alignment > 0);
assert(new_addr == NULL || alignment == chunksize); assert(new_addr == NULL || alignment == chunksize);
assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
/* /*
* Cached chunks use the node linkage embedded in their headers, in * Cached chunks use the node linkage embedded in their headers, in
* which case dalloc_node is true, and new_addr is non-NULL because * which case dalloc_node is true, and new_addr is non-NULL because
...@@ -215,24 +219,23 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -215,24 +219,23 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
*/ */
assert(dalloc_node || new_addr != NULL); assert(dalloc_node || new_addr != NULL);
alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize)); alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
/* Beware size_t wrap-around. */ /* Beware size_t wrap-around. */
if (alloc_size < size) if (alloc_size < size)
return (NULL); return (NULL);
malloc_mutex_lock(&arena->chunks_mtx); malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(arena, chunk_hooks); chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
if (new_addr != NULL) { if (new_addr != NULL) {
extent_node_t key; extent_node_t key;
extent_node_init(&key, arena, new_addr, alloc_size, false, extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
false); false);
node = extent_tree_ad_search(chunks_ad, &key); node = extent_tree_ad_search(chunks_ad, &key);
} else { } else {
node = chunk_first_best_fit(arena, chunks_szad, chunks_ad, node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
alloc_size);
} }
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) < if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
size)) { size)) {
malloc_mutex_unlock(&arena->chunks_mtx); malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL); return (NULL);
} }
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node), leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
...@@ -241,6 +244,7 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -241,6 +244,7 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(extent_node_size_get(node) >= leadsize + size); assert(extent_node_size_get(node) >= leadsize + size);
trailsize = extent_node_size_get(node) - leadsize - size; trailsize = extent_node_size_get(node) - leadsize - size;
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize); ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
*sn = extent_node_sn_get(node);
zeroed = extent_node_zeroed_get(node); zeroed = extent_node_zeroed_get(node);
if (zeroed) if (zeroed)
*zero = true; *zero = true;
...@@ -251,17 +255,17 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -251,17 +255,17 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (leadsize != 0 && if (leadsize != 0 &&
chunk_hooks->split(extent_node_addr_get(node), chunk_hooks->split(extent_node_addr_get(node),
extent_node_size_get(node), leadsize, size, false, arena->ind)) { extent_node_size_get(node), leadsize, size, false, arena->ind)) {
malloc_mutex_unlock(&arena->chunks_mtx); malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL); return (NULL);
} }
/* Remove node from the tree. */ /* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node); extent_tree_szsnad_remove(chunks_szsnad, node);
extent_tree_ad_remove(chunks_ad, node); extent_tree_ad_remove(chunks_ad, node);
arena_chunk_cache_maybe_remove(arena, node, cache); arena_chunk_cache_maybe_remove(arena, node, cache);
if (leadsize != 0) { if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */ /* Insert the leading space as a smaller chunk. */
extent_node_size_set(node, leadsize); extent_node_size_set(node, leadsize);
extent_tree_szad_insert(chunks_szad, node); extent_tree_szsnad_insert(chunks_szsnad, node);
extent_tree_ad_insert(chunks_ad, node); extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache); arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL; node = NULL;
...@@ -271,41 +275,42 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -271,41 +275,42 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (chunk_hooks->split(ret, size + trailsize, size, if (chunk_hooks->split(ret, size + trailsize, size,
trailsize, false, arena->ind)) { trailsize, false, arena->ind)) {
if (dalloc_node && node != NULL) if (dalloc_node && node != NULL)
arena_node_dalloc(arena, node); arena_node_dalloc(tsdn, arena, node);
malloc_mutex_unlock(&arena->chunks_mtx); malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
cache, ret, size + trailsize, zeroed, committed); chunks_ad, cache, ret, size + trailsize, *sn,
zeroed, committed);
return (NULL); return (NULL);
} }
/* Insert the trailing space as a smaller chunk. */ /* Insert the trailing space as a smaller chunk. */
if (node == NULL) { if (node == NULL) {
node = arena_node_alloc(arena); node = arena_node_alloc(tsdn, arena);
if (node == NULL) { if (node == NULL) {
malloc_mutex_unlock(&arena->chunks_mtx); malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(arena, chunk_hooks, chunks_szad, chunk_record(tsdn, arena, chunk_hooks,
chunks_ad, cache, ret, size + trailsize, chunks_szsnad, chunks_ad, cache, ret, size
zeroed, committed); + trailsize, *sn, zeroed, committed);
return (NULL); return (NULL);
} }
} }
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size), extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
trailsize, zeroed, committed); trailsize, *sn, zeroed, committed);
extent_tree_szad_insert(chunks_szad, node); extent_tree_szsnad_insert(chunks_szsnad, node);
extent_tree_ad_insert(chunks_ad, node); extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache); arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL; node = NULL;
} }
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) { if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
malloc_mutex_unlock(&arena->chunks_mtx); malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache, chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
ret, size, zeroed, committed); cache, ret, size, *sn, zeroed, committed);
return (NULL); return (NULL);
} }
malloc_mutex_unlock(&arena->chunks_mtx); malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
assert(dalloc_node || node != NULL); assert(dalloc_node || node != NULL);
if (dalloc_node && node != NULL) if (dalloc_node && node != NULL)
arena_node_dalloc(arena, node); arena_node_dalloc(tsdn, arena, node);
if (*zero) { if (*zero) {
if (!zeroed) if (!zeroed)
memset(ret, 0, size); memset(ret, 0, size);
...@@ -313,10 +318,11 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -313,10 +318,11 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t i; size_t i;
size_t *p = (size_t *)(uintptr_t)ret; size_t *p = (size_t *)(uintptr_t)ret;
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++) for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0); assert(p[i] == 0);
} }
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
} }
return (ret); return (ret);
} }
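
Inside chunk_recycle(), the recycled extent is split into an aligned allocation plus optional leading and trailing remainders that go back into the trees. The address arithmetic for that split in isolation, with made-up example numbers:

/* Sketch: carve an aligned request out of a larger extent. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT_CEILING(a, align) \
	(((a) + ((align) - 1)) & ~((uintptr_t)(align) - 1))

int
main(void)
{
	uintptr_t ext_addr = 0x200400;  /* start of the free extent */
	size_t ext_size = 0x4000;       /* its size */
	size_t size = 0x1000;           /* requested size */
	size_t alignment = 0x1000;      /* requested alignment */

	size_t leadsize = ALIGNMENT_CEILING(ext_addr, alignment) - ext_addr;
	assert(ext_size >= leadsize + size);
	size_t trailsize = ext_size - leadsize - size;
	uintptr_t ret = ext_addr + leadsize;

	printf("lead=%#zx ret=%#lx trail=%#zx\n", leadsize,
	    (unsigned long)ret, trailsize);
	/* lead=0xc00 ret=0x201000 trail=0x2400 */
	return (0);
}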
...@@ -328,39 +334,29 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -328,39 +334,29 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
* them if they are returned. * them if they are returned.
*/ */
static void * static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment, chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
bool *zero, bool *commit, dss_prec_t dss_prec) size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
{ {
void *ret; void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
assert(size != 0); assert(size != 0);
assert((size & chunksize_mask) == 0); assert((size & chunksize_mask) == 0);
assert(alignment != 0); assert(alignment != 0);
assert((alignment & chunksize_mask) == 0); assert((alignment & chunksize_mask) == 0);
/* Retained. */
if ((ret = chunk_recycle(arena, &chunk_hooks,
&arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
new_addr, size, alignment, zero, commit, true)) != NULL)
return (ret);
/* "primary" dss. */ /* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret = if (have_dss && dss_prec == dss_prec_primary && (ret =
chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
NULL)
return (ret);
/*
* mmap. Requesting an address is not implemented for
* chunk_alloc_mmap(), so only call it if (new_addr == NULL).
*/
if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
commit)) != NULL) commit)) != NULL)
return (ret); return (ret);
/* mmap. */
if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
NULL)
return (ret);
/* "secondary" dss. */ /* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret = if (have_dss && dss_prec == dss_prec_secondary && (ret =
chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
NULL) commit)) != NULL)
return (ret); return (ret);
/* All strategies for allocation failed. */ /* All strategies for allocation failed. */
...@@ -380,7 +376,7 @@ chunk_alloc_base(size_t size) ...@@ -380,7 +376,7 @@ chunk_alloc_base(size_t size)
*/ */
zero = true; zero = true;
commit = true; commit = true;
ret = chunk_alloc_mmap(size, chunksize, &zero, &commit); ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);
if (config_valgrind) if (config_valgrind)
@@ -390,37 +386,33 @@ chunk_alloc_base(size_t size)
} }
void * void *
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t size, size_t alignment, bool *zero, bool dalloc_node) void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
bool *commit, bool dalloc_node)
{ {
void *ret; void *ret;
bool commit;
assert(size != 0); assert(size != 0);
assert((size & chunksize_mask) == 0); assert((size & chunksize_mask) == 0);
assert(alignment != 0); assert(alignment != 0);
assert((alignment & chunksize_mask) == 0); assert((alignment & chunksize_mask) == 0);
commit = true; ret = chunk_recycle(tsdn, arena, chunk_hooks,
ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached, &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
&arena->chunks_ad_cached, true, new_addr, size, alignment, zero, new_addr, size, alignment, sn, zero, commit, dalloc_node);
&commit, dalloc_node);
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);
assert(commit);
if (config_valgrind) if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret); return (ret);
} }
static arena_t * static arena_t *
chunk_arena_get(unsigned arena_ind) chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
{ {
arena_t *arena; arena_t *arena;
/* Dodge tsd for a0 in order to avoid bootstrapping issues. */ arena = arena_get(tsdn, arena_ind, false);
arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
false, true);
/* /*
* The arena we're allocating on behalf of must have been initialized * The arena we're allocating on behalf of must have been initialized
* already. * already.
@@ -430,14 +422,12 @@ chunk_arena_get(unsigned arena_ind)
} }
static void * static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
bool *commit, unsigned arena_ind) size_t size, size_t alignment, bool *zero, bool *commit)
{ {
void *ret; void *ret;
arena_t *arena;
arena = chunk_arena_get(arena_ind); ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
commit, arena->dss_prec); commit, arena->dss_prec);
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);
@@ -447,26 +437,80 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
return (ret); return (ret);
} }
static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit, unsigned arena_ind)
{
tsdn_t *tsdn;
arena_t *arena;
tsdn = tsdn_fetch();
arena = chunk_arena_get(tsdn, arena_ind);
return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
zero, commit));
}
static void *
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
bool *commit)
{
void *ret;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
ret = chunk_recycle(tsdn, arena, chunk_hooks,
&arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
new_addr, size, alignment, sn, zero, commit, true);
if (config_stats && ret != NULL)
arena->stats.retained -= size;
return (ret);
}
void * void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t size, size_t alignment, bool *zero, bool *commit) void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
bool *commit)
{ {
void *ret; void *ret;
chunk_hooks_assure_initialized(arena, chunk_hooks); chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
arena->ind); ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
if (ret == NULL) alignment, sn, zero, commit);
return (NULL); if (ret == NULL) {
if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default) if (chunk_hooks->alloc == chunk_alloc_default) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize); /* Call directly to propagate tsdn. */
ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
size, alignment, zero, commit);
} else {
ret = chunk_hooks->alloc(new_addr, size, alignment,
zero, commit, arena->ind);
}
if (ret == NULL)
return (NULL);
*sn = arena_extent_sn_next(arena);
if (config_valgrind && chunk_hooks->alloc !=
chunk_alloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
}
return (ret); return (ret);
} }
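The wrapper above tries to recycle a retained chunk before falling back to the default allocator (or a user-installed hook), and only chunks obtained from the fallback get a freshly assigned serial number. A minimal, self-contained sketch of that ordering (all names below are illustrative stand-ins, not jemalloc's):

#include <stdbool.h>
#include <stddef.h>

/* Stand-in for a chunk source (retained recycler, hook, dss, mmap, ...). */
typedef void *(chunk_source_t)(size_t size, size_t alignment, bool *zero,
    bool *commit, void *ctx);

/*
 * Try the retained source first, then the fresh source; only chunks coming
 * from the fresh source are stamped with a brand new serial number.
 */
static void *
alloc_in_order(chunk_source_t *retained, chunk_source_t *fresh, void *ctx,
    size_t size, size_t alignment, size_t *sn, size_t *sn_counter,
    bool *zero, bool *commit)
{
	void *ret = retained(size, alignment, zero, commit, ctx);

	if (ret != NULL)
		return (ret);	/* Serial number was set by the recycler. */
	ret = fresh(size, alignment, zero, commit, ctx);
	if (ret == NULL)
		return (NULL);
	*sn = (*sn_counter)++;	/* Fresh chunk: assign the next serial. */
	return (ret);
}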
static void static void
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
void *chunk, size_t size, bool zeroed, bool committed) void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{ {
bool unzeroed; bool unzeroed;
extent_node_t *node, *prev; extent_node_t *node, *prev;
@@ -476,9 +520,9 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
unzeroed = cache || !zeroed; unzeroed = cache || !zeroed;
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
malloc_mutex_lock(&arena->chunks_mtx); malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(arena, chunk_hooks); chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
false, false); false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key); node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */ /* Try to coalesce forward. */
@@ -490,19 +534,21 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
/* /*
* Coalesce chunk with the following address range. This does * Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only * not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad. * remove/insert from/into chunks_szsnad.
*/ */
extent_tree_szad_remove(chunks_szad, node); extent_tree_szsnad_remove(chunks_szsnad, node);
arena_chunk_cache_maybe_remove(arena, node, cache); arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, chunk); extent_node_addr_set(node, chunk);
extent_node_size_set(node, size + extent_node_size_get(node)); extent_node_size_set(node, size + extent_node_size_get(node));
if (sn < extent_node_sn_get(node))
extent_node_sn_set(node, sn);
extent_node_zeroed_set(node, extent_node_zeroed_get(node) && extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
!unzeroed); !unzeroed);
extent_tree_szad_insert(chunks_szad, node); extent_tree_szsnad_insert(chunks_szsnad, node);
arena_chunk_cache_maybe_insert(arena, node, cache); arena_chunk_cache_maybe_insert(arena, node, cache);
} else { } else {
/* Coalescing forward failed, so insert a new node. */ /* Coalescing forward failed, so insert a new node. */
node = arena_node_alloc(arena); node = arena_node_alloc(tsdn, arena);
if (node == NULL) { if (node == NULL) {
/* /*
* Node allocation failed, which is an exceedingly * Node allocation failed, which is an exceedingly
@@ -511,15 +557,15 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
* a virtual memory leak. * a virtual memory leak.
*/ */
if (cache) { if (cache) {
chunk_purge_wrapper(arena, chunk_hooks, chunk, chunk_purge_wrapper(tsdn, arena, chunk_hooks,
size, 0, size); chunk, size, 0, size);
} }
goto label_return; goto label_return;
} }
extent_node_init(node, arena, chunk, size, !unzeroed, extent_node_init(node, arena, chunk, size, sn, !unzeroed,
committed); committed);
extent_tree_ad_insert(chunks_ad, node); extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node); extent_tree_szsnad_insert(chunks_szsnad, node);
arena_chunk_cache_maybe_insert(arena, node, cache); arena_chunk_cache_maybe_insert(arena, node, cache);
} }
@@ -533,31 +579,33 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
/* /*
* Coalesce chunk with the previous address range. This does * Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only * not change the position within chunks_ad, so only
* remove/insert node from/into chunks_szad. * remove/insert node from/into chunks_szsnad.
*/ */
extent_tree_szad_remove(chunks_szad, prev); extent_tree_szsnad_remove(chunks_szsnad, prev);
extent_tree_ad_remove(chunks_ad, prev); extent_tree_ad_remove(chunks_ad, prev);
arena_chunk_cache_maybe_remove(arena, prev, cache); arena_chunk_cache_maybe_remove(arena, prev, cache);
extent_tree_szad_remove(chunks_szad, node); extent_tree_szsnad_remove(chunks_szsnad, node);
arena_chunk_cache_maybe_remove(arena, node, cache); arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, extent_node_addr_get(prev)); extent_node_addr_set(node, extent_node_addr_get(prev));
extent_node_size_set(node, extent_node_size_get(prev) + extent_node_size_set(node, extent_node_size_get(prev) +
extent_node_size_get(node)); extent_node_size_get(node));
if (extent_node_sn_get(prev) < extent_node_sn_get(node))
extent_node_sn_set(node, extent_node_sn_get(prev));
extent_node_zeroed_set(node, extent_node_zeroed_get(prev) && extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
extent_node_zeroed_get(node)); extent_node_zeroed_get(node));
extent_tree_szad_insert(chunks_szad, node); extent_tree_szsnad_insert(chunks_szsnad, node);
arena_chunk_cache_maybe_insert(arena, node, cache); arena_chunk_cache_maybe_insert(arena, node, cache);
arena_node_dalloc(arena, prev); arena_node_dalloc(tsdn, arena, prev);
} }
label_return: label_return:
malloc_mutex_unlock(&arena->chunks_mtx); malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
} }
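When chunk_record() coalesces a chunk with an adjacent free extent, the merged extent keeps the smaller (older) serial number, so recycling keeps preferring long-lived address ranges. A compact sketch of that rule, using illustrative types rather than jemalloc's extent_node_t:

#include <stddef.h>

typedef struct {
	void	*addr;
	size_t	size;
	size_t	sn;	/* serial number: lower == created earlier */
} extent_sketch_t;

/* "prev" immediately precedes "into" in the address space. */
static void
extent_merge_sketch(extent_sketch_t *into, const extent_sketch_t *prev)
{
	into->addr = prev->addr;
	into->size += prev->size;
	if (prev->sn < into->sn)
		into->sn = prev->sn;	/* Merged extent inherits the older sn. */
}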
void void
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t size, bool committed) void *chunk, size_t size, size_t sn, bool committed)
{ {
assert(chunk != NULL); assert(chunk != NULL);
@@ -565,24 +613,49 @@ chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
assert(size != 0); assert(size != 0);
assert((size & chunksize_mask) == 0); assert((size & chunksize_mask) == 0);
chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached, chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
&arena->chunks_ad_cached, true, chunk, size, false, committed); &arena->chunks_ad_cached, true, chunk, size, sn, false,
arena_maybe_purge(arena); committed);
arena_maybe_purge(tsdn, arena);
}
static bool
chunk_dalloc_default_impl(void *chunk, size_t size)
{
if (!have_dss || !chunk_in_dss(chunk))
return (chunk_dalloc_mmap(chunk, size));
return (true);
}
static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{
return (chunk_dalloc_default_impl(chunk, size));
} }
void void
chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t size, bool zeroed, bool committed) void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{ {
bool err;
assert(chunk != NULL); assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk); assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0); assert(size != 0);
assert((size & chunksize_mask) == 0); assert((size & chunksize_mask) == 0);
chunk_hooks_assure_initialized(arena, chunk_hooks); chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
/* Try to deallocate. */ /* Try to deallocate. */
if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind)) if (chunk_hooks->dalloc == chunk_dalloc_default) {
/* Call directly to propagate tsdn. */
err = chunk_dalloc_default_impl(chunk, size);
} else
err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
if (!err)
return; return;
/* Try to decommit; purge if that fails. */ /* Try to decommit; purge if that fails. */
if (committed) { if (committed) {
@@ -591,29 +664,12 @@ chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
} }
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size, zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind); arena->ind);
chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained, chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
&arena->chunks_ad_retained, false, chunk, size, zeroed, committed); &arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
} committed);
static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{
if (!have_dss || !chunk_in_dss(chunk)) if (config_stats)
return (chunk_dalloc_mmap(chunk, size)); arena->stats.retained += size;
return (true);
}
void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool committed)
{
chunk_hooks_assure_initialized(arena, chunk_hooks);
chunk_hooks->dalloc(chunk, size, committed, arena->ind);
if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
} }
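chunk_dalloc_wrapper() above first tries to return the chunk outright; if that fails it decommits, or failing that purges, and then parks the chunk on the retained tree while counting it in stats.retained. A simplified, self-contained sketch of that fallback order (the hook types and the true-on-success convention here are illustrative, not jemalloc's):

#include <stdbool.h>
#include <stddef.h>

typedef bool (try_unmap_t)(void *chunk, size_t size);
typedef bool (try_decommit_t)(void *chunk, size_t size);
typedef bool (do_purge_t)(void *chunk, size_t size);

/* Returns true when the chunk must be kept on the retained list. */
static bool
dalloc_or_retain(void *chunk, size_t size, try_unmap_t *unmap,
    try_decommit_t *decommit, do_purge_t *purge)
{
	if (unmap(chunk, size))
		return (false);		/* Returned to the OS outright. */
	/* Unmapping failed: keep it, but make it as cheap as possible. */
	if (!decommit(chunk, size))
		purge(chunk, size);	/* Fall back to purging its pages. */
	return (true);			/* Caller records it as retained. */
}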
static bool static bool
@@ -634,8 +690,9 @@ chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
length)); length));
} }
bool static bool
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length) chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{ {
assert(chunk != NULL); assert(chunk != NULL);
@@ -648,21 +705,12 @@ chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
length)); length));
} }
static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
length));
}
bool bool
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t size, size_t offset, size_t length) void *chunk, size_t size, size_t offset, size_t length)
{ {
chunk_hooks_assure_initialized(arena, chunk_hooks); chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
return (chunk_hooks->purge(chunk, size, offset, length, arena->ind)); return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
} }
@@ -677,23 +725,30 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
} }
static bool static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, chunk_merge_default_impl(void *chunk_a, void *chunk_b)
bool committed, unsigned arena_ind)
{ {
if (!maps_coalesce) if (!maps_coalesce)
return (true); return (true);
if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b)) if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b))
return (true); return (true);
return (false); return (false);
} }
static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
bool committed, unsigned arena_ind)
{
return (chunk_merge_default_impl(chunk_a, chunk_b));
}
static rtree_node_elm_t * static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms) chunks_rtree_node_alloc(size_t nelms)
{ {
return ((rtree_node_elm_t *)base_alloc(nelms * return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
sizeof(rtree_node_elm_t))); sizeof(rtree_node_elm_t)));
} }
@@ -716,7 +771,7 @@ chunk_boot(void)
* so pages_map will always take fast path. * so pages_map will always take fast path.
*/ */
if (!opt_lg_chunk) { if (!opt_lg_chunk) {
opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity) opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
- 1; - 1;
} }
#else #else
@@ -730,32 +785,11 @@ chunk_boot(void)
chunksize_mask = chunksize - 1; chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> LG_PAGE); chunk_npages = (chunksize >> LG_PAGE);
if (have_dss && chunk_dss_boot()) if (have_dss)
return (true); chunk_dss_boot();
if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) - if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk, chunks_rtree_node_alloc, NULL)) opt_lg_chunk), chunks_rtree_node_alloc, NULL))
return (true); return (true);
return (false); return (false);
} }
void
chunk_prefork(void)
{
chunk_dss_prefork();
}
void
chunk_postfork_parent(void)
{
chunk_dss_postfork_parent();
}
void
chunk_postfork_child(void)
{
chunk_dss_postfork_child();
}
@@ -10,20 +10,19 @@ const char *dss_prec_names[] = {
"N/A" "N/A"
}; };
/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
/* /*
* Protects sbrk() calls. This avoids malloc races among threads, though it * Current dss precedence default, used when creating new arenas. NB: This is
* does not protect against races with threads that call sbrk() directly. * stored as unsigned rather than dss_prec_t because in principle there's no
* guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
* atomic operations to synchronize the setting.
*/ */
static malloc_mutex_t dss_mtx; static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT;
/* Base address of the DSS. */ /* Base address of the DSS. */
static void *dss_base; static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */ /* Atomic boolean indicating whether the DSS is exhausted. */
static void *dss_prev; static unsigned dss_exhausted;
/* Current upper limit on DSS addresses. */ /* Atomic current upper limit on DSS addresses. */
static void *dss_max; static void *dss_max;
/******************************************************************************/ /******************************************************************************/
@@ -47,9 +46,7 @@ chunk_dss_prec_get(void)
if (!have_dss) if (!have_dss)
return (dss_prec_disabled); return (dss_prec_disabled);
malloc_mutex_lock(&dss_mtx); ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
ret = dss_prec_default;
malloc_mutex_unlock(&dss_mtx);
return (ret); return (ret);
} }
@@ -59,15 +56,46 @@ chunk_dss_prec_set(dss_prec_t dss_prec)
if (!have_dss) if (!have_dss)
return (dss_prec != dss_prec_disabled); return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&dss_mtx); atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
dss_prec_default = dss_prec;
malloc_mutex_unlock(&dss_mtx);
return (false); return (false);
} }
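The default precedence is now kept in an unsigned and accessed with atomic reads and writes instead of a mutex. Roughly the same idea, expressed with C11 <stdatomic.h> and illustrative names:

#include <stdatomic.h>

typedef enum { PREC_DISABLED, PREC_PRIMARY, PREC_SECONDARY } prec_t;

/* Stored as unsigned so plain word-sized atomics can be used portably. */
static _Atomic unsigned prec_default = (unsigned)PREC_PRIMARY;

static prec_t
prec_get(void)
{
	return ((prec_t)atomic_load(&prec_default));
}

static void
prec_set(prec_t prec)
{
	atomic_store(&prec_default, (unsigned)prec);
}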
static void *
chunk_dss_max_update(void *new_addr)
{
void *max_cur;
spin_t spinner;
/*
* Get the current end of the DSS as max_cur and assure that dss_max is
* up to date.
*/
spin_init(&spinner);
while (true) {
void *max_prev = atomic_read_p(&dss_max);
max_cur = chunk_dss_sbrk(0);
if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
/*
* Another thread optimistically updated dss_max. Wait
* for it to finish.
*/
spin_adaptive(&spinner);
continue;
}
if (!atomic_cas_p(&dss_max, max_prev, max_cur))
break;
}
/* Fixed new_addr can only be supported if it is at the edge of DSS. */
if (new_addr != NULL && max_cur != new_addr)
return (NULL);
return (max_cur);
}
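chunk_dss_max_update() refreshes the shared maximum without holding a lock: it re-reads the break, backs off while another thread's optimistic update is still in flight, and publishes the new value with a compare-and-swap. A self-contained sketch of the same pattern using C11 atomics (the names are stand-ins for jemalloc's dss_max and atomic_*_p wrappers):

#include <stdatomic.h>
#include <stdint.h>
#include <unistd.h>	/* sbrk(), as used by the dss code itself */

static _Atomic uintptr_t published_max;	/* stand-in for dss_max */

static uintptr_t
current_break(void)
{
	return ((uintptr_t)sbrk(0));
}

static uintptr_t
refresh_published_max(void)
{
	uintptr_t cur;

	for (;;) {
		uintptr_t prev = atomic_load(&published_max);

		cur = current_break();
		if (prev > cur)
			continue;	/* In-flight optimistic update; retry. */
		/* Publish the refreshed value unless another thread already did. */
		if (atomic_compare_exchange_weak(&published_max, &prev, cur))
			break;
	}
	return (cur);
}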
void * void *
chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment, chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
bool *zero, bool *commit) size_t alignment, bool *zero, bool *commit)
{ {
cassert(have_dss); cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0); assert(size > 0 && (size & chunksize_mask) == 0);
@@ -80,28 +108,20 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
if ((intptr_t)size < 0) if ((intptr_t)size < 0)
return (NULL); return (NULL);
malloc_mutex_lock(&dss_mtx); if (!atomic_read_u(&dss_exhausted)) {
if (dss_prev != (void *)-1) {
/* /*
* The loop is necessary to recover from races with other * The loop is necessary to recover from races with other
* threads that are using the DSS for something other than * threads that are using the DSS for something other than
* malloc. * malloc.
*/ */
do { while (true) {
void *ret, *cpad, *dss_next; void *ret, *cpad, *max_cur, *dss_next, *dss_prev;
size_t gap_size, cpad_size; size_t gap_size, cpad_size;
intptr_t incr; intptr_t incr;
/* Avoid an unnecessary system call. */
if (new_addr != NULL && dss_max != new_addr)
break;
/* Get the current end of the DSS. */
dss_max = chunk_dss_sbrk(0);
/* Make sure the earlier condition still holds. */ max_cur = chunk_dss_max_update(new_addr);
if (new_addr != NULL && dss_max != new_addr) if (max_cur == NULL)
break; goto label_oom;
/* /*
* Calculate how much padding is necessary to * Calculate how much padding is necessary to
@@ -120,22 +140,29 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
cpad_size = (uintptr_t)ret - (uintptr_t)cpad; cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
dss_next = (void *)((uintptr_t)ret + size); dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)dss_max || if ((uintptr_t)ret < (uintptr_t)dss_max ||
(uintptr_t)dss_next < (uintptr_t)dss_max) { (uintptr_t)dss_next < (uintptr_t)dss_max)
/* Wrap-around. */ goto label_oom; /* Wrap-around. */
malloc_mutex_unlock(&dss_mtx);
return (NULL);
}
incr = gap_size + cpad_size + size; incr = gap_size + cpad_size + size;
/*
* Optimistically update dss_max, and roll back below if
* sbrk() fails. No other thread will try to extend the
* DSS while dss_max is greater than the current DSS
* max reported by sbrk(0).
*/
if (atomic_cas_p(&dss_max, max_cur, dss_next))
continue;
/* Try to allocate. */
dss_prev = chunk_dss_sbrk(incr); dss_prev = chunk_dss_sbrk(incr);
if (dss_prev == dss_max) { if (dss_prev == max_cur) {
/* Success. */ /* Success. */
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0) { if (cpad_size != 0) {
chunk_hooks_t chunk_hooks = chunk_hooks_t chunk_hooks =
CHUNK_HOOKS_INITIALIZER; CHUNK_HOOKS_INITIALIZER;
chunk_dalloc_wrapper(arena, chunk_dalloc_wrapper(tsdn, arena,
&chunk_hooks, cpad, cpad_size, &chunk_hooks, cpad, cpad_size,
arena_extent_sn_next(arena), false,
true); true);
} }
if (*zero) { if (*zero) {
@@ -147,68 +174,65 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
*commit = pages_decommit(ret, size); *commit = pages_decommit(ret, size);
return (ret); return (ret);
} }
} while (dss_prev != (void *)-1);
}
malloc_mutex_unlock(&dss_mtx);
/*
* Failure, whether due to OOM or a race with a raw
* sbrk() call from outside the allocator. Try to roll
* back optimistic dss_max update; if rollback fails,
* it's due to another caller of this function having
* succeeded since this invocation started, in which
* case rollback is not necessary.
*/
atomic_cas_p(&dss_max, dss_next, max_cur);
if (dss_prev == (void *)-1) {
/* OOM. */
atomic_write_u(&dss_exhausted, (unsigned)true);
goto label_oom;
}
}
}
label_oom:
return (NULL); return (NULL);
} }
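Extending the DSS requires padding the current break so the returned chunk is suitably aligned; the gap plus the requested size is what sbrk() is asked to grow by. A small sketch of that arithmetic with plain power-of-two alignment (simplified relative to jemalloc's ALIGNMENT_CEILING/CHUNK_ADDR2OFFSET macros):

#include <stdint.h>
#include <stddef.h>

/* alignment must be a power of two. */
static uintptr_t
align_up(uintptr_t addr, size_t alignment)
{
	return ((addr + alignment - 1) & ~((uintptr_t)alignment - 1));
}

/* Returns the total growth needed; *pad is the unused span before the chunk. */
static size_t
dss_growth_for(uintptr_t brk, size_t size, size_t alignment, size_t *pad)
{
	uintptr_t ret = align_up(brk, alignment);

	*pad = (size_t)(ret - brk);
	return (*pad + size);
}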
bool static bool
chunk_in_dss(void *chunk) chunk_in_dss_helper(void *chunk, void *max)
{ {
bool ret;
cassert(have_dss); return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk <
(uintptr_t)max);
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max)
ret = true;
else
ret = false;
malloc_mutex_unlock(&dss_mtx);
return (ret);
} }
bool bool
chunk_dss_boot(void) chunk_in_dss(void *chunk)
{ {
cassert(have_dss); cassert(have_dss);
if (malloc_mutex_init(&dss_mtx)) return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max)));
return (true);
dss_base = chunk_dss_sbrk(0);
dss_prev = dss_base;
dss_max = dss_base;
return (false);
} }
void bool
chunk_dss_prefork(void) chunk_dss_mergeable(void *chunk_a, void *chunk_b)
{ {
void *max;
if (have_dss) cassert(have_dss);
malloc_mutex_prefork(&dss_mtx);
}
void
chunk_dss_postfork_parent(void)
{
if (have_dss) max = atomic_read_p(&dss_max);
malloc_mutex_postfork_parent(&dss_mtx); return (chunk_in_dss_helper(chunk_a, max) ==
chunk_in_dss_helper(chunk_b, max));
} }
void void
chunk_dss_postfork_child(void) chunk_dss_boot(void)
{ {
if (have_dss) cassert(have_dss);
malloc_mutex_postfork_child(&dss_mtx);
dss_base = chunk_dss_sbrk(0);
dss_exhausted = (unsigned)(dss_base == (void *)-1);
dss_max = dss_base;
} }
/******************************************************************************/ /******************************************************************************/
@@ -16,23 +16,22 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
do { do {
void *pages; void *pages;
size_t leadsize; size_t leadsize;
pages = pages_map(NULL, alloc_size); pages = pages_map(NULL, alloc_size, commit);
if (pages == NULL) if (pages == NULL)
return (NULL); return (NULL);
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
(uintptr_t)pages; (uintptr_t)pages;
ret = pages_trim(pages, alloc_size, leadsize, size); ret = pages_trim(pages, alloc_size, leadsize, size, commit);
} while (ret == NULL); } while (ret == NULL);
assert(ret != NULL); assert(ret != NULL);
*zero = true; *zero = true;
if (!*commit)
*commit = pages_decommit(ret, size);
return (ret); return (ret);
} }
void * void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit) chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit)
{ {
void *ret; void *ret;
size_t offset; size_t offset;
@@ -53,9 +52,10 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
assert(alignment != 0); assert(alignment != 0);
assert((alignment & chunksize_mask) == 0); assert((alignment & chunksize_mask) == 0);
ret = pages_map(NULL, size); ret = pages_map(new_addr, size, commit);
if (ret == NULL) if (ret == NULL || ret == new_addr)
return (NULL); return (ret);
assert(new_addr == NULL);
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) { if (offset != 0) {
pages_unmap(ret, size); pages_unmap(ret, size);
@@ -64,8 +64,6 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
assert(ret != NULL); assert(ret != NULL);
*zero = true; *zero = true;
if (!*commit)
*commit = pages_decommit(ret, size);
return (ret); return (ret);
} }
@@ -99,7 +99,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* Cycle through the cells in the bucket, starting at a random position. * Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up. * The randomness avoids worst-case search overhead as buckets fill up.
*/ */
prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
LG_CKH_BUCKET_CELLS);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@@ -141,7 +142,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
* were an item for which both hashes indicated the same * were an item for which both hashes indicated the same
* bucket. * bucket.
*/ */
prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
LG_CKH_BUCKET_CELLS);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL); assert(cell->key != NULL);
@@ -247,8 +249,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
{ {
bool ret; bool ret;
ckhc_t *tab, *ttab; ckhc_t *tab, *ttab;
size_t lg_curcells; unsigned lg_prevbuckets, lg_curcells;
unsigned lg_prevbuckets;
#ifdef CKH_COUNT #ifdef CKH_COUNT
ckh->ngrows++; ckh->ngrows++;
@@ -266,12 +267,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
lg_curcells++; lg_curcells++;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0) { if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
ret = true; ret = true;
goto label_return; goto label_return;
} }
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
true, NULL); true, NULL, true, arena_ichoose(tsd, NULL));
if (tab == NULL) { if (tab == NULL) {
ret = true; ret = true;
goto label_return; goto label_return;
@@ -283,12 +284,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) { if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd, tab, tcache_get(tsd, false), true); idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
break; break;
} }
/* Rebuilding failed, so back out partially rebuilt table. */ /* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
ckh->tab = tab; ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets; ckh->lg_curbuckets = lg_prevbuckets;
} }
@@ -302,8 +303,8 @@ static void
ckh_shrink(tsd_t *tsd, ckh_t *ckh) ckh_shrink(tsd_t *tsd, ckh_t *ckh)
{ {
ckhc_t *tab, *ttab; ckhc_t *tab, *ttab;
size_t lg_curcells, usize; size_t usize;
unsigned lg_prevbuckets; unsigned lg_prevbuckets, lg_curcells;
/* /*
* It is possible (though unlikely, given well behaved hashes) that the * It is possible (though unlikely, given well behaved hashes) that the
@@ -312,10 +313,10 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
lg_prevbuckets = ckh->lg_curbuckets; lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0) if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return; return;
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
NULL); true, arena_ichoose(tsd, NULL));
if (tab == NULL) { if (tab == NULL) {
/* /*
* An OOM error isn't worth propagating, since it doesn't * An OOM error isn't worth propagating, since it doesn't
@@ -330,7 +331,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) { if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd, tab, tcache_get(tsd, false), true); idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
#ifdef CKH_COUNT #ifdef CKH_COUNT
ckh->nshrinks++; ckh->nshrinks++;
#endif #endif
@@ -338,7 +339,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
} }
/* Rebuilding failed, so back out partially rebuilt table. */ /* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
ckh->tab = tab; ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets; ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT #ifdef CKH_COUNT
@@ -387,12 +388,12 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh->keycomp = keycomp; ckh->keycomp = keycomp;
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
if (usize == 0) { if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
ret = true; ret = true;
goto label_return; goto label_return;
} }
ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
NULL); NULL, true, arena_ichoose(tsd, NULL));
if (ckh->tab == NULL) { if (ckh->tab == NULL) {
ret = true; ret = true;
goto label_return; goto label_return;
@@ -421,9 +422,9 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
(unsigned long long)ckh->nrelocs); (unsigned long long)ckh->nrelocs);
#endif #endif
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
if (config_debug) if (config_debug)
memset(ckh, 0x5a, sizeof(ckh_t)); memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
} }
size_t size_t
@@ -24,7 +24,7 @@ ctl_named_node(const ctl_node_t *node)
} }
JEMALLOC_INLINE_C const ctl_named_node_t * JEMALLOC_INLINE_C const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, int index) ctl_named_children(const ctl_named_node_t *node, size_t index)
{ {
const ctl_named_node_t *children = ctl_named_node(node->children); const ctl_named_node_t *children = ctl_named_node(node->children);
@@ -42,25 +42,25 @@ ctl_indexed_node(const ctl_node_t *node)
/* Function prototypes for non-inline static functions. */ /* Function prototypes for non-inline static functions. */
#define CTL_PROTO(n) \ #define CTL_PROTO(n) \
static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
size_t *oldlenp, void *newp, size_t newlen); void *oldp, size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \ #define INDEX_PROTO(n) \
static const ctl_named_node_t *n##_index(const size_t *mib, \ static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
size_t miblen, size_t i); const size_t *mib, size_t miblen, size_t i);
static bool ctl_arena_init(ctl_arena_stats_t *astats); static bool ctl_arena_init(ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats); static void ctl_arena_clear(ctl_arena_stats_t *astats);
static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
arena_t *arena); arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats); ctl_arena_stats_t *astats);
static void ctl_arena_refresh(arena_t *arena, unsigned i); static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i);
static bool ctl_grow(void); static bool ctl_grow(tsdn_t *tsdn);
static void ctl_refresh(void); static void ctl_refresh(tsdn_t *tsdn);
static bool ctl_init(void); static bool ctl_init(tsdn_t *tsdn);
static int ctl_lookup(const char *name, ctl_node_t const **nodesp, static int ctl_lookup(tsdn_t *tsdn, const char *name,
size_t *mibp, size_t *depthp); ctl_node_t const **nodesp, size_t *mibp, size_t *depthp);
CTL_PROTO(version) CTL_PROTO(version)
CTL_PROTO(epoch) CTL_PROTO(epoch)
@@ -77,6 +77,7 @@ CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug) CTL_PROTO(config_debug)
CTL_PROTO(config_fill) CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock) CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_munmap) CTL_PROTO(config_munmap)
CTL_PROTO(config_prof) CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc) CTL_PROTO(config_prof_libgcc)
@@ -91,7 +92,9 @@ CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss) CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk) CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas) CTL_PROTO(opt_narenas)
CTL_PROTO(opt_purge)
CTL_PROTO(opt_lg_dirty_mult) CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_decay_time)
CTL_PROTO(opt_stats_print) CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk) CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero) CTL_PROTO(opt_zero)
@@ -114,10 +117,13 @@ CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create) CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush) CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy) CTL_PROTO(tcache_destroy)
static void arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all);
CTL_PROTO(arena_i_purge) CTL_PROTO(arena_i_purge)
static void arena_purge(unsigned arena_ind); CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_dss) CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_lg_dirty_mult) CTL_PROTO(arena_i_lg_dirty_mult)
CTL_PROTO(arena_i_decay_time)
CTL_PROTO(arena_i_chunk_hooks) CTL_PROTO(arena_i_chunk_hooks)
INDEX_PROTO(arena_i) INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size) CTL_PROTO(arenas_bin_i_size)
@@ -131,6 +137,7 @@ INDEX_PROTO(arenas_hchunk_i)
CTL_PROTO(arenas_narenas) CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized) CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_lg_dirty_mult) CTL_PROTO(arenas_lg_dirty_mult)
CTL_PROTO(arenas_decay_time)
CTL_PROTO(arenas_quantum) CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page) CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max) CTL_PROTO(arenas_tcache_max)
@@ -181,9 +188,11 @@ INDEX_PROTO(stats_arenas_i_hchunks_j)
CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss) CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_lg_dirty_mult) CTL_PROTO(stats_arenas_i_lg_dirty_mult)
CTL_PROTO(stats_arenas_i_decay_time)
CTL_PROTO(stats_arenas_i_pactive) CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty) CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped) CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_npurge) CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise) CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged) CTL_PROTO(stats_arenas_i_purged)
@@ -196,6 +205,7 @@ CTL_PROTO(stats_active)
CTL_PROTO(stats_metadata) CTL_PROTO(stats_metadata)
CTL_PROTO(stats_resident) CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped) CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
/******************************************************************************/ /******************************************************************************/
/* mallctl tree. */ /* mallctl tree. */
@@ -241,6 +251,7 @@ static const ctl_named_node_t config_node[] = {
{NAME("debug"), CTL(config_debug)}, {NAME("debug"), CTL(config_debug)},
{NAME("fill"), CTL(config_fill)}, {NAME("fill"), CTL(config_fill)},
{NAME("lazy_lock"), CTL(config_lazy_lock)}, {NAME("lazy_lock"), CTL(config_lazy_lock)},
{NAME("malloc_conf"), CTL(config_malloc_conf)},
{NAME("munmap"), CTL(config_munmap)}, {NAME("munmap"), CTL(config_munmap)},
{NAME("prof"), CTL(config_prof)}, {NAME("prof"), CTL(config_prof)},
{NAME("prof_libgcc"), CTL(config_prof_libgcc)}, {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
@@ -258,7 +269,9 @@ static const ctl_named_node_t opt_node[] = {
{NAME("dss"), CTL(opt_dss)}, {NAME("dss"), CTL(opt_dss)},
{NAME("lg_chunk"), CTL(opt_lg_chunk)}, {NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)}, {NAME("narenas"), CTL(opt_narenas)},
{NAME("purge"), CTL(opt_purge)},
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
{NAME("decay_time"), CTL(opt_decay_time)},
{NAME("stats_print"), CTL(opt_stats_print)}, {NAME("stats_print"), CTL(opt_stats_print)},
{NAME("junk"), CTL(opt_junk)}, {NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)}, {NAME("zero"), CTL(opt_zero)},
@@ -288,8 +301,11 @@ static const ctl_named_node_t tcache_node[] = {
static const ctl_named_node_t arena_i_node[] = { static const ctl_named_node_t arena_i_node[] = {
{NAME("purge"), CTL(arena_i_purge)}, {NAME("purge"), CTL(arena_i_purge)},
{NAME("decay"), CTL(arena_i_decay)},
{NAME("reset"), CTL(arena_i_reset)},
{NAME("dss"), CTL(arena_i_dss)}, {NAME("dss"), CTL(arena_i_dss)},
{NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)}, {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
{NAME("decay_time"), CTL(arena_i_decay_time)},
{NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)} {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)}
}; };
static const ctl_named_node_t super_arena_i_node[] = { static const ctl_named_node_t super_arena_i_node[] = {
@@ -339,6 +355,7 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("narenas"), CTL(arenas_narenas)}, {NAME("narenas"), CTL(arenas_narenas)},
{NAME("initialized"), CTL(arenas_initialized)}, {NAME("initialized"), CTL(arenas_initialized)},
{NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)}, {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)},
{NAME("decay_time"), CTL(arenas_decay_time)},
{NAME("quantum"), CTL(arenas_quantum)}, {NAME("quantum"), CTL(arenas_quantum)},
{NAME("page"), CTL(arenas_page)}, {NAME("page"), CTL(arenas_page)},
{NAME("tcache_max"), CTL(arenas_tcache_max)}, {NAME("tcache_max"), CTL(arenas_tcache_max)},
@@ -439,9 +456,11 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("dss"), CTL(stats_arenas_i_dss)}, {NAME("dss"), CTL(stats_arenas_i_dss)},
{NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)}, {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)},
{NAME("decay_time"), CTL(stats_arenas_i_decay_time)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)}, {NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)}, {NAME("mapped"), CTL(stats_arenas_i_mapped)},
{NAME("retained"), CTL(stats_arenas_i_retained)},
{NAME("npurge"), CTL(stats_arenas_i_npurge)}, {NAME("npurge"), CTL(stats_arenas_i_npurge)},
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
{NAME("purged"), CTL(stats_arenas_i_purged)}, {NAME("purged"), CTL(stats_arenas_i_purged)},
@@ -468,6 +487,7 @@ static const ctl_named_node_t stats_node[] = {
{NAME("metadata"), CTL(stats_metadata)}, {NAME("metadata"), CTL(stats_metadata)},
{NAME("resident"), CTL(stats_resident)}, {NAME("resident"), CTL(stats_resident)},
{NAME("mapped"), CTL(stats_mapped)}, {NAME("mapped"), CTL(stats_mapped)},
{NAME("retained"), CTL(stats_retained)},
{NAME("arenas"), CHILD(indexed, stats_arenas)} {NAME("arenas"), CHILD(indexed, stats_arenas)}
}; };
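With the new "retained" entries in the ctl tree, the counter becomes readable through the public mallctl() interface once the statistics snapshot is refreshed via "epoch". A usage sketch (if the library is built with a function prefix such as je_, the entry point is je_mallctl):

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	size_t mapped, retained;

	/* Refresh the snapshot that the stats.* counters are read from. */
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	sz = sizeof(size_t);
	if (mallctl("stats.mapped", &mapped, &sz, NULL, 0) == 0 &&
	    mallctl("stats.retained", &retained, &sz, NULL, 0) == 0) {
		printf("mapped: %zu bytes, retained: %zu bytes\n", mapped,
		    retained);
	}
	return (0);
}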
@@ -519,8 +539,10 @@ static void
ctl_arena_clear(ctl_arena_stats_t *astats) ctl_arena_clear(ctl_arena_stats_t *astats)
{ {
astats->nthreads = 0;
astats->dss = dss_prec_names[dss_prec_limit]; astats->dss = dss_prec_names[dss_prec_limit];
astats->lg_dirty_mult = -1; astats->lg_dirty_mult = -1;
astats->decay_time = -1;
astats->pactive = 0; astats->pactive = 0;
astats->pdirty = 0; astats->pdirty = 0;
if (config_stats) { if (config_stats) {
@@ -538,20 +560,27 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
} }
static void static void
ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
{ {
unsigned i; unsigned i;
arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult, if (config_stats) {
&cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats, arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
cstats->lstats, cstats->hstats); &cstats->lg_dirty_mult, &cstats->decay_time,
&cstats->pactive, &cstats->pdirty, &cstats->astats,
for (i = 0; i < NBINS; i++) { cstats->bstats, cstats->lstats, cstats->hstats);
cstats->allocated_small += cstats->bstats[i].curregs *
index2size(i); for (i = 0; i < NBINS; i++) {
cstats->nmalloc_small += cstats->bstats[i].nmalloc; cstats->allocated_small += cstats->bstats[i].curregs *
cstats->ndalloc_small += cstats->bstats[i].ndalloc; index2size(i);
cstats->nrequests_small += cstats->bstats[i].nrequests; cstats->nmalloc_small += cstats->bstats[i].nmalloc;
cstats->ndalloc_small += cstats->bstats[i].ndalloc;
cstats->nrequests_small += cstats->bstats[i].nrequests;
}
} else {
arena_basic_stats_merge(tsdn, arena, &cstats->nthreads,
&cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time,
&cstats->pactive, &cstats->pdirty);
} }
} }
@@ -560,89 +589,91 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{ {
unsigned i; unsigned i;
sstats->nthreads += astats->nthreads;
sstats->pactive += astats->pactive; sstats->pactive += astats->pactive;
sstats->pdirty += astats->pdirty; sstats->pdirty += astats->pdirty;
sstats->astats.mapped += astats->astats.mapped; if (config_stats) {
sstats->astats.npurge += astats->astats.npurge; sstats->astats.mapped += astats->astats.mapped;
sstats->astats.nmadvise += astats->astats.nmadvise; sstats->astats.retained += astats->astats.retained;
sstats->astats.purged += astats->astats.purged; sstats->astats.npurge += astats->astats.npurge;
sstats->astats.nmadvise += astats->astats.nmadvise;
sstats->astats.metadata_mapped += astats->astats.metadata_mapped; sstats->astats.purged += astats->astats.purged;
sstats->astats.metadata_allocated += astats->astats.metadata_allocated;
sstats->astats.metadata_mapped +=
sstats->allocated_small += astats->allocated_small; astats->astats.metadata_mapped;
sstats->nmalloc_small += astats->nmalloc_small; sstats->astats.metadata_allocated +=
sstats->ndalloc_small += astats->ndalloc_small; astats->astats.metadata_allocated;
sstats->nrequests_small += astats->nrequests_small;
sstats->allocated_small += astats->allocated_small;
sstats->astats.allocated_large += astats->astats.allocated_large; sstats->nmalloc_small += astats->nmalloc_small;
sstats->astats.nmalloc_large += astats->astats.nmalloc_large; sstats->ndalloc_small += astats->ndalloc_small;
sstats->astats.ndalloc_large += astats->astats.ndalloc_large; sstats->nrequests_small += astats->nrequests_small;
sstats->astats.nrequests_large += astats->astats.nrequests_large;
sstats->astats.allocated_large +=
sstats->astats.allocated_huge += astats->astats.allocated_huge; astats->astats.allocated_large;
sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge; sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge; sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
sstats->astats.nrequests_large +=
for (i = 0; i < NBINS; i++) { astats->astats.nrequests_large;
sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; sstats->astats.allocated_huge += astats->astats.allocated_huge;
sstats->bstats[i].nrequests += astats->bstats[i].nrequests; sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
sstats->bstats[i].curregs += astats->bstats[i].curregs; sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
if (config_tcache) {
sstats->bstats[i].nfills += astats->bstats[i].nfills; for (i = 0; i < NBINS; i++) {
sstats->bstats[i].nflushes += sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
astats->bstats[i].nflushes; sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sstats->bstats[i].nrequests +=
astats->bstats[i].nrequests;
sstats->bstats[i].curregs += astats->bstats[i].curregs;
if (config_tcache) {
sstats->bstats[i].nfills +=
astats->bstats[i].nfills;
sstats->bstats[i].nflushes +=
astats->bstats[i].nflushes;
}
sstats->bstats[i].nruns += astats->bstats[i].nruns;
sstats->bstats[i].reruns += astats->bstats[i].reruns;
sstats->bstats[i].curruns += astats->bstats[i].curruns;
} }
sstats->bstats[i].nruns += astats->bstats[i].nruns;
sstats->bstats[i].reruns += astats->bstats[i].reruns;
sstats->bstats[i].curruns += astats->bstats[i].curruns;
}
for (i = 0; i < nlclasses; i++) { for (i = 0; i < nlclasses; i++) {
sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
sstats->lstats[i].nrequests += astats->lstats[i].nrequests; sstats->lstats[i].nrequests +=
sstats->lstats[i].curruns += astats->lstats[i].curruns; astats->lstats[i].nrequests;
} sstats->lstats[i].curruns += astats->lstats[i].curruns;
}
for (i = 0; i < nhclasses; i++) { for (i = 0; i < nhclasses; i++) {
sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc; sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc; sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks; sstats->hstats[i].curhchunks +=
astats->hstats[i].curhchunks;
}
} }
} }
static void static void
ctl_arena_refresh(arena_t *arena, unsigned i) ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i)
{ {
ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas]; ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
ctl_arena_clear(astats); ctl_arena_clear(astats);
ctl_arena_stats_amerge(tsdn, astats, arena);
sstats->nthreads += astats->nthreads; /* Merge into sum stats as well. */
if (config_stats) { ctl_arena_stats_smerge(sstats, astats);
ctl_arena_stats_amerge(astats, arena);
/* Merge into sum stats as well. */
ctl_arena_stats_smerge(sstats, astats);
} else {
astats->pactive += arena->nactive;
astats->pdirty += arena->ndirty;
/* Merge into sum stats as well. */
sstats->pactive += arena->nactive;
sstats->pdirty += arena->ndirty;
}
} }
static bool static bool
ctl_grow(void) ctl_grow(tsdn_t *tsdn)
{ {
ctl_arena_stats_t *astats; ctl_arena_stats_t *astats;
/* Initialize new arena. */ /* Initialize new arena. */
if (arena_init(ctl_stats.narenas) == NULL) if (arena_init(tsdn, ctl_stats.narenas) == NULL)
return (true); return (true);
/* Allocate extended arena stats. */ /* Allocate extended arena stats. */
@@ -677,47 +708,32 @@ ctl_grow(void)
} }
static void static void
ctl_refresh(void) ctl_refresh(tsdn_t *tsdn)
{ {
tsd_t *tsd;
unsigned i; unsigned i;
bool refreshed;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
/* /*
* Clear sum stats, since they will be merged into by * Clear sum stats, since they will be merged into by
* ctl_arena_refresh(). * ctl_arena_refresh().
*/ */
ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]); ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
tsd = tsd_fetch(); for (i = 0; i < ctl_stats.narenas; i++)
for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) { tarenas[i] = arena_get(tsdn, i, false);
tarenas[i] = arena_get(tsd, i, false, false);
if (tarenas[i] == NULL && !refreshed) {
tarenas[i] = arena_get(tsd, i, false, true);
refreshed = true;
}
}
for (i = 0; i < ctl_stats.narenas; i++) {
if (tarenas[i] != NULL)
ctl_stats.arenas[i].nthreads = arena_nbound(i);
else
ctl_stats.arenas[i].nthreads = 0;
}
for (i = 0; i < ctl_stats.narenas; i++) { for (i = 0; i < ctl_stats.narenas; i++) {
bool initialized = (tarenas[i] != NULL); bool initialized = (tarenas[i] != NULL);
ctl_stats.arenas[i].initialized = initialized; ctl_stats.arenas[i].initialized = initialized;
if (initialized) if (initialized)
ctl_arena_refresh(tarenas[i], i); ctl_arena_refresh(tsdn, tarenas[i], i);
} }
if (config_stats) { if (config_stats) {
size_t base_allocated, base_resident, base_mapped; size_t base_allocated, base_resident, base_mapped;
base_stats_get(&base_allocated, &base_resident, &base_mapped); base_stats_get(tsdn, &base_allocated, &base_resident,
&base_mapped);
ctl_stats.allocated = ctl_stats.allocated =
ctl_stats.arenas[ctl_stats.narenas].allocated_small + ctl_stats.arenas[ctl_stats.narenas].allocated_small +
ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
@@ -734,17 +750,19 @@ ctl_refresh(void)
ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE); ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
ctl_stats.mapped = base_mapped + ctl_stats.mapped = base_mapped +
ctl_stats.arenas[ctl_stats.narenas].astats.mapped; ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
ctl_stats.retained =
ctl_stats.arenas[ctl_stats.narenas].astats.retained;
} }
ctl_epoch++; ctl_epoch++;
} }
static bool static bool
ctl_init(void) ctl_init(tsdn_t *tsdn)
{ {
bool ret; bool ret;
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(tsdn, &ctl_mtx);
if (!ctl_initialized) { if (!ctl_initialized) {
/* /*
* Allocate space for one extra arena stats element, which * Allocate space for one extra arena stats element, which
@@ -786,19 +804,19 @@ ctl_init(void)
ctl_stats.arenas[ctl_stats.narenas].initialized = true; ctl_stats.arenas[ctl_stats.narenas].initialized = true;
ctl_epoch = 0; ctl_epoch = 0;
ctl_refresh(); ctl_refresh(tsdn);
ctl_initialized = true; ctl_initialized = true;
} }
ret = false; ret = false;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret); return (ret);
} }
static int static int
ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
size_t *depthp) size_t *mibp, size_t *depthp)
{ {
int ret; int ret;
const char *elm, *tdot, *dot; const char *elm, *tdot, *dot;
...@@ -850,7 +868,7 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, ...@@ -850,7 +868,7 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
} }
inode = ctl_indexed_node(node->children); inode = ctl_indexed_node(node->children);
node = inode->index(mibp, *depthp, (size_t)index); node = inode->index(tsdn, mibp, *depthp, (size_t)index);
if (node == NULL) { if (node == NULL) {
ret = ENOENT; ret = ENOENT;
goto label_return; goto label_return;
...@@ -894,8 +912,8 @@ label_return: ...@@ -894,8 +912,8 @@ label_return:
} }
int int
ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
size_t depth; size_t depth;
...@@ -903,19 +921,19 @@ ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, ...@@ -903,19 +921,19 @@ ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t mib[CTL_MAX_DEPTH]; size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node; const ctl_named_node_t *node;
if (!ctl_initialized && ctl_init()) { if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
depth = CTL_MAX_DEPTH; depth = CTL_MAX_DEPTH;
ret = ctl_lookup(name, nodes, mib, &depth); ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
if (ret != 0) if (ret != 0)
goto label_return; goto label_return;
node = ctl_named_node(nodes[depth-1]); node = ctl_named_node(nodes[depth-1]);
if (node != NULL && node->ctl) if (node != NULL && node->ctl)
ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen); ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
else { else {
/* The name refers to a partial path through the ctl tree. */ /* The name refers to a partial path through the ctl tree. */
ret = ENOENT; ret = ENOENT;
...@@ -926,29 +944,29 @@ label_return: ...@@ -926,29 +944,29 @@ label_return:
} }
int int
ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp)
{ {
int ret; int ret;
if (!ctl_initialized && ctl_init()) { if (!ctl_initialized && ctl_init(tsdn)) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
ret = ctl_lookup(name, NULL, mibp, miblenp); ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp);
label_return: label_return:
return(ret); return(ret);
} }
int int
ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
const ctl_named_node_t *node; const ctl_named_node_t *node;
size_t i; size_t i;
if (!ctl_initialized && ctl_init()) { if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
...@@ -960,7 +978,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -960,7 +978,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
assert(node->nchildren > 0); assert(node->nchildren > 0);
if (ctl_named_node(node->children) != NULL) { if (ctl_named_node(node->children) != NULL) {
/* Children are named. */ /* Children are named. */
if (node->nchildren <= mib[i]) { if (node->nchildren <= (unsigned)mib[i]) {
ret = ENOENT; ret = ENOENT;
goto label_return; goto label_return;
} }
...@@ -970,7 +988,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -970,7 +988,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
/* Indexed element. */ /* Indexed element. */
inode = ctl_indexed_node(node->children); inode = ctl_indexed_node(node->children);
node = inode->index(mib, miblen, mib[i]); node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
if (node == NULL) { if (node == NULL) {
ret = ENOENT; ret = ENOENT;
goto label_return; goto label_return;
...@@ -980,7 +998,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -980,7 +998,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
/* Call the ctl function. */ /* Call the ctl function. */
if (node && node->ctl) if (node && node->ctl)
ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
else { else {
/* Partial MIB. */ /* Partial MIB. */
ret = ENOENT; ret = ENOENT;
...@@ -994,7 +1012,7 @@ bool ...@@ -994,7 +1012,7 @@ bool
ctl_boot(void) ctl_boot(void)
{ {
if (malloc_mutex_init(&ctl_mtx)) if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL))
return (true); return (true);
ctl_initialized = false; ctl_initialized = false;
...@@ -1003,24 +1021,24 @@ ctl_boot(void) ...@@ -1003,24 +1021,24 @@ ctl_boot(void)
} }
void void
ctl_prefork(void) ctl_prefork(tsdn_t *tsdn)
{ {
malloc_mutex_prefork(&ctl_mtx); malloc_mutex_prefork(tsdn, &ctl_mtx);
} }
void void
ctl_postfork_parent(void) ctl_postfork_parent(tsdn_t *tsdn)
{ {
malloc_mutex_postfork_parent(&ctl_mtx); malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
} }
void void
ctl_postfork_child(void) ctl_postfork_child(tsdn_t *tsdn)
{ {
malloc_mutex_postfork_child(&ctl_mtx); malloc_mutex_postfork_child(tsdn, &ctl_mtx);
} }
/******************************************************************************/ /******************************************************************************/
...@@ -1077,8 +1095,8 @@ ctl_postfork_child(void) ...@@ -1077,8 +1095,8 @@ ctl_postfork_child(void)
*/ */
#define CTL_RO_CLGEN(c, l, n, v, t) \ #define CTL_RO_CLGEN(c, l, n, v, t) \
static int \ static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
void *newp, size_t newlen) \ size_t *oldlenp, void *newp, size_t newlen) \
{ \ { \
int ret; \ int ret; \
t oldval; \ t oldval; \
...@@ -1086,7 +1104,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ ...@@ -1086,7 +1104,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
if (!(c)) \ if (!(c)) \
return (ENOENT); \ return (ENOENT); \
if (l) \ if (l) \
malloc_mutex_lock(&ctl_mtx); \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \ READONLY(); \
oldval = (v); \ oldval = (v); \
READ(oldval, t); \ READ(oldval, t); \
...@@ -1094,47 +1112,47 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ ...@@ -1094,47 +1112,47 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
if (l) \ if (l) \
malloc_mutex_unlock(&ctl_mtx); \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \ return (ret); \
} }
#define CTL_RO_CGEN(c, n, v, t) \ #define CTL_RO_CGEN(c, n, v, t) \
static int \ static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
void *newp, size_t newlen) \ size_t *oldlenp, void *newp, size_t newlen) \
{ \ { \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
if (!(c)) \ if (!(c)) \
return (ENOENT); \ return (ENOENT); \
malloc_mutex_lock(&ctl_mtx); \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \ READONLY(); \
oldval = (v); \ oldval = (v); \
READ(oldval, t); \ READ(oldval, t); \
\ \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
malloc_mutex_unlock(&ctl_mtx); \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \ return (ret); \
} }
#define CTL_RO_GEN(n, v, t) \ #define CTL_RO_GEN(n, v, t) \
static int \ static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
void *newp, size_t newlen) \ size_t *oldlenp, void *newp, size_t newlen) \
{ \ { \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
malloc_mutex_lock(&ctl_mtx); \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \ READONLY(); \
oldval = (v); \ oldval = (v); \
READ(oldval, t); \ READ(oldval, t); \
\ \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
malloc_mutex_unlock(&ctl_mtx); \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \ return (ret); \
} }
...@@ -1144,8 +1162,8 @@ label_return: \ ...@@ -1144,8 +1162,8 @@ label_return: \
*/ */
#define CTL_RO_NL_CGEN(c, n, v, t) \ #define CTL_RO_NL_CGEN(c, n, v, t) \
static int \ static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
void *newp, size_t newlen) \ size_t *oldlenp, void *newp, size_t newlen) \
{ \ { \
int ret; \ int ret; \
t oldval; \ t oldval; \
...@@ -1163,8 +1181,8 @@ label_return: \ ...@@ -1163,8 +1181,8 @@ label_return: \
#define CTL_RO_NL_GEN(n, v, t) \ #define CTL_RO_NL_GEN(n, v, t) \
static int \ static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
void *newp, size_t newlen) \ size_t *oldlenp, void *newp, size_t newlen) \
{ \ { \
int ret; \ int ret; \
t oldval; \ t oldval; \
...@@ -1180,17 +1198,15 @@ label_return: \ ...@@ -1180,17 +1198,15 @@ label_return: \
#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ #define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \ static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
void *newp, size_t newlen) \ size_t *oldlenp, void *newp, size_t newlen) \
{ \ { \
int ret; \ int ret; \
t oldval; \ t oldval; \
tsd_t *tsd; \
\ \
if (!(c)) \ if (!(c)) \
return (ENOENT); \ return (ENOENT); \
READONLY(); \ READONLY(); \
tsd = tsd_fetch(); \
oldval = (m(tsd)); \ oldval = (m(tsd)); \
READ(oldval, t); \ READ(oldval, t); \
\ \
...@@ -1199,17 +1215,17 @@ label_return: \ ...@@ -1199,17 +1215,17 @@ label_return: \
return (ret); \ return (ret); \
} }
#define CTL_RO_BOOL_CONFIG_GEN(n) \ #define CTL_RO_CONFIG_GEN(n, t) \
static int \ static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
void *newp, size_t newlen) \ size_t *oldlenp, void *newp, size_t newlen) \
{ \ { \
int ret; \ int ret; \
bool oldval; \ t oldval; \
\ \
READONLY(); \ READONLY(); \
oldval = n; \ oldval = n; \
READ(oldval, bool); \ READ(oldval, t); \
\ \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
...@@ -1221,48 +1237,51 @@ label_return: \ ...@@ -1221,48 +1237,51 @@ label_return: \
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int static int
epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
UNUSED uint64_t newval; UNUSED uint64_t newval;
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(newval, uint64_t); WRITE(newval, uint64_t);
if (newp != NULL) if (newp != NULL)
ctl_refresh(); ctl_refresh(tsd_tsdn(tsd));
READ(ctl_epoch, uint64_t); READ(ctl_epoch, uint64_t);
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret); return (ret);
} }
/******************************************************************************/ /******************************************************************************/
CTL_RO_BOOL_CONFIG_GEN(config_cache_oblivious) CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
CTL_RO_BOOL_CONFIG_GEN(config_debug) CTL_RO_CONFIG_GEN(config_debug, bool)
CTL_RO_BOOL_CONFIG_GEN(config_fill) CTL_RO_CONFIG_GEN(config_fill, bool)
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock) CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
CTL_RO_BOOL_CONFIG_GEN(config_munmap) CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
CTL_RO_BOOL_CONFIG_GEN(config_prof) CTL_RO_CONFIG_GEN(config_munmap, bool)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc) CTL_RO_CONFIG_GEN(config_prof, bool)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind) CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
CTL_RO_BOOL_CONFIG_GEN(config_stats) CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
CTL_RO_BOOL_CONFIG_GEN(config_tcache) CTL_RO_CONFIG_GEN(config_stats, bool)
CTL_RO_BOOL_CONFIG_GEN(config_tls) CTL_RO_CONFIG_GEN(config_tcache, bool)
CTL_RO_BOOL_CONFIG_GEN(config_utrace) CTL_RO_CONFIG_GEN(config_tls, bool)
CTL_RO_BOOL_CONFIG_GEN(config_valgrind) CTL_RO_CONFIG_GEN(config_utrace, bool)
CTL_RO_BOOL_CONFIG_GEN(config_xmalloc) CTL_RO_CONFIG_GEN(config_valgrind, bool)
CTL_RO_CONFIG_GEN(config_xmalloc, bool)
/******************************************************************************/ /******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool) CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
...@@ -1287,20 +1306,18 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) ...@@ -1287,20 +1306,18 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
/******************************************************************************/ /******************************************************************************/
static int static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
tsd_t *tsd;
arena_t *oldarena; arena_t *oldarena;
unsigned newind, oldind; unsigned newind, oldind;
tsd = tsd_fetch();
oldarena = arena_choose(tsd, NULL); oldarena = arena_choose(tsd, NULL);
if (oldarena == NULL) if (oldarena == NULL)
return (EAGAIN); return (EAGAIN);
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
newind = oldind = oldarena->ind; newind = oldind = oldarena->ind;
WRITE(newind, unsigned); WRITE(newind, unsigned);
READ(oldind, unsigned); READ(oldind, unsigned);
...@@ -1314,7 +1331,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1314,7 +1331,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
} }
/* Initialize arena if necessary. */ /* Initialize arena if necessary. */
newarena = arena_get(tsd, newind, true, true); newarena = arena_get(tsd_tsdn(tsd), newind, true);
if (newarena == NULL) { if (newarena == NULL) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
...@@ -1324,15 +1341,15 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1324,15 +1341,15 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
if (config_tcache) { if (config_tcache) {
tcache_t *tcache = tsd_tcache_get(tsd); tcache_t *tcache = tsd_tcache_get(tsd);
if (tcache != NULL) { if (tcache != NULL) {
tcache_arena_reassociate(tcache, oldarena, tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
newarena); oldarena, newarena);
} }
} }
} }
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret); return (ret);
} }
...@@ -1346,8 +1363,8 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, ...@@ -1346,8 +1363,8 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
tsd_thread_deallocatedp_get, uint64_t *) tsd_thread_deallocatedp_get, uint64_t *)
static int static int
thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
bool oldval; bool oldval;
...@@ -1371,8 +1388,8 @@ label_return: ...@@ -1371,8 +1388,8 @@ label_return:
} }
static int static int
thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
...@@ -1390,7 +1407,7 @@ label_return: ...@@ -1390,7 +1407,7 @@ label_return:
} }
static int static int
thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp, thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
...@@ -1401,20 +1418,16 @@ thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp, ...@@ -1401,20 +1418,16 @@ thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
READ_XOR_WRITE(); READ_XOR_WRITE();
if (newp != NULL) { if (newp != NULL) {
tsd_t *tsd;
if (newlen != sizeof(const char *)) { if (newlen != sizeof(const char *)) {
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
tsd = tsd_fetch();
if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
0) 0)
goto label_return; goto label_return;
} else { } else {
const char *oldname = prof_thread_name_get(); const char *oldname = prof_thread_name_get(tsd);
READ(oldname, const char *); READ(oldname, const char *);
} }
...@@ -1424,7 +1437,7 @@ label_return: ...@@ -1424,7 +1437,7 @@ label_return:
} }
static int static int
thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
...@@ -1433,13 +1446,13 @@ thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, ...@@ -1433,13 +1446,13 @@ thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp,
if (!config_prof) if (!config_prof)
return (ENOENT); return (ENOENT);
oldval = prof_thread_active_get(); oldval = prof_thread_active_get(tsd);
if (newp != NULL) { if (newp != NULL) {
if (newlen != sizeof(bool)) { if (newlen != sizeof(bool)) {
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
if (prof_thread_active_set(*(bool *)newp)) { if (prof_thread_active_set(tsd, *(bool *)newp)) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
...@@ -1454,19 +1467,16 @@ label_return: ...@@ -1454,19 +1467,16 @@ label_return:
/******************************************************************************/ /******************************************************************************/
static int static int
tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
tsd_t *tsd;
unsigned tcache_ind; unsigned tcache_ind;
if (!config_tcache) if (!config_tcache)
return (ENOENT); return (ENOENT);
tsd = tsd_fetch(); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
malloc_mutex_lock(&ctl_mtx);
READONLY(); READONLY();
if (tcaches_create(tsd, &tcache_ind)) { if (tcaches_create(tsd, &tcache_ind)) {
ret = EFAULT; ret = EFAULT;
...@@ -1476,23 +1486,20 @@ tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1476,23 +1486,20 @@ tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret); return (ret);
} }
static int static int
tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
tsd_t *tsd;
unsigned tcache_ind; unsigned tcache_ind;
if (!config_tcache) if (!config_tcache)
return (ENOENT); return (ENOENT);
tsd = tsd_fetch();
WRITEONLY(); WRITEONLY();
tcache_ind = UINT_MAX; tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned); WRITE(tcache_ind, unsigned);
...@@ -1508,18 +1515,15 @@ label_return: ...@@ -1508,18 +1515,15 @@ label_return:
} }
static int static int
tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp, tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
tsd_t *tsd;
unsigned tcache_ind; unsigned tcache_ind;
if (!config_tcache) if (!config_tcache)
return (ENOENT); return (ENOENT);
tsd = tsd_fetch();
WRITEONLY(); WRITEONLY();
tcache_ind = UINT_MAX; tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned); WRITE(tcache_ind, unsigned);
...@@ -1536,48 +1540,56 @@ label_return: ...@@ -1536,48 +1540,56 @@ label_return:
/******************************************************************************/ /******************************************************************************/
/* ctl_mutex must be held during execution of this function. */
static void static void
arena_purge(unsigned arena_ind) arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
{ {
tsd_t *tsd;
unsigned i;
bool refreshed;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
tsd = tsd_fetch(); malloc_mutex_lock(tsdn, &ctl_mtx);
for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) { {
tarenas[i] = arena_get(tsd, i, false, false); unsigned narenas = ctl_stats.narenas;
if (tarenas[i] == NULL && !refreshed) {
tarenas[i] = arena_get(tsd, i, false, true);
refreshed = true;
}
}
if (arena_ind == ctl_stats.narenas) { if (arena_ind == narenas) {
unsigned i; unsigned i;
for (i = 0; i < ctl_stats.narenas; i++) { VARIABLE_ARRAY(arena_t *, tarenas, narenas);
if (tarenas[i] != NULL)
arena_purge_all(tarenas[i]); for (i = 0; i < narenas; i++)
tarenas[i] = arena_get(tsdn, i, false);
/*
* No further need to hold ctl_mtx, since narenas and
* tarenas contain everything needed below.
*/
malloc_mutex_unlock(tsdn, &ctl_mtx);
for (i = 0; i < narenas; i++) {
if (tarenas[i] != NULL)
arena_purge(tsdn, tarenas[i], all);
}
} else {
arena_t *tarena;
assert(arena_ind < narenas);
tarena = arena_get(tsdn, arena_ind, false);
/* No further need to hold ctl_mtx. */
malloc_mutex_unlock(tsdn, &ctl_mtx);
if (tarena != NULL)
arena_purge(tsdn, tarena, all);
} }
} else {
assert(arena_ind < ctl_stats.narenas);
if (tarenas[arena_ind] != NULL)
arena_purge_all(tarenas[arena_ind]);
} }
} }
static int static int
arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
READONLY(); READONLY();
WRITEONLY(); WRITEONLY();
malloc_mutex_lock(&ctl_mtx); arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true);
arena_purge(mib[1]);
malloc_mutex_unlock(&ctl_mtx);
ret = 0; ret = 0;
label_return: label_return:
...@@ -1585,16 +1597,65 @@ label_return: ...@@ -1585,16 +1597,65 @@ label_return:
} }
static int static int
arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
READONLY();
WRITEONLY();
arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false);
ret = 0;
label_return:
return (ret);
}
static int
arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned arena_ind;
arena_t *arena;
READONLY();
WRITEONLY();
if ((config_valgrind && unlikely(in_valgrind)) || (config_fill &&
unlikely(opt_quarantine))) {
ret = EFAULT;
goto label_return;
}
arena_ind = (unsigned)mib[1];
if (config_debug) {
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
assert(arena_ind < ctl_stats.narenas);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
}
assert(arena_ind >= opt_narenas);
arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
arena_reset(tsd, arena);
ret = 0;
label_return:
return (ret);
}
static int
arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
const char *dss = NULL; const char *dss = NULL;
unsigned arena_ind = mib[1]; unsigned arena_ind = (unsigned)mib[1];
dss_prec_t dss_prec_old = dss_prec_limit; dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit; dss_prec_t dss_prec = dss_prec_limit;
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(dss, const char *); WRITE(dss, const char *);
if (dss != NULL) { if (dss != NULL) {
int i; int i;
...@@ -1615,13 +1676,13 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1615,13 +1676,13 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
} }
if (arena_ind < ctl_stats.narenas) { if (arena_ind < ctl_stats.narenas) {
arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true); arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL || (dss_prec != dss_prec_limit && if (arena == NULL || (dss_prec != dss_prec_limit &&
arena_dss_prec_set(arena, dss_prec))) { arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
dss_prec_old = arena_dss_prec_get(arena); dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena);
} else { } else {
if (dss_prec != dss_prec_limit && if (dss_prec != dss_prec_limit &&
chunk_dss_prec_set(dss_prec)) { chunk_dss_prec_set(dss_prec)) {
...@@ -1636,26 +1697,26 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1636,26 +1697,26 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret); return (ret);
} }
static int static int
arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
unsigned arena_ind = mib[1]; unsigned arena_ind = (unsigned)mib[1];
arena_t *arena; arena_t *arena;
arena = arena_get(tsd_fetch(), arena_ind, false, true); arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL) { if (arena == NULL) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
if (oldp != NULL && oldlenp != NULL) { if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_lg_dirty_mult_get(arena); size_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena);
READ(oldval, ssize_t); READ(oldval, ssize_t);
} }
if (newp != NULL) { if (newp != NULL) {
...@@ -1663,7 +1724,8 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, ...@@ -1663,7 +1724,8 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) { if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena,
*(ssize_t *)newp)) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
...@@ -1675,24 +1737,60 @@ label_return: ...@@ -1675,24 +1737,60 @@ label_return:
} }
static int static int
arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp, arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
unsigned arena_ind = mib[1]; unsigned arena_ind = (unsigned)mib[1];
arena_t *arena; arena_t *arena;
malloc_mutex_lock(&ctl_mtx); arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena);
READ(oldval, ssize_t);
}
if (newp != NULL) {
if (newlen != sizeof(ssize_t)) {
ret = EINVAL;
goto label_return;
}
if (arena_decay_time_set(tsd_tsdn(tsd), arena,
*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
}
ret = 0;
label_return:
return (ret);
}
static int
arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned arena_ind = (unsigned)mib[1];
arena_t *arena;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
if (arena_ind < narenas_total_get() && (arena = if (arena_ind < narenas_total_get() && (arena =
arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) { arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
if (newp != NULL) { if (newp != NULL) {
chunk_hooks_t old_chunk_hooks, new_chunk_hooks; chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
WRITE(new_chunk_hooks, chunk_hooks_t); WRITE(new_chunk_hooks, chunk_hooks_t);
old_chunk_hooks = chunk_hooks_set(arena, old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena,
&new_chunk_hooks); &new_chunk_hooks);
READ(old_chunk_hooks, chunk_hooks_t); READ(old_chunk_hooks, chunk_hooks_t);
} else { } else {
chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena); chunk_hooks_t old_chunk_hooks =
chunk_hooks_get(tsd_tsdn(tsd), arena);
READ(old_chunk_hooks, chunk_hooks_t); READ(old_chunk_hooks, chunk_hooks_t);
} }
} else { } else {
...@@ -1701,16 +1799,16 @@ arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp, ...@@ -1701,16 +1799,16 @@ arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
} }
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret); return (ret);
} }
static const ctl_named_node_t * static const ctl_named_node_t *
arena_i_index(const size_t *mib, size_t miblen, size_t i) arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{ {
const ctl_named_node_t * ret; const ctl_named_node_t *ret;
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(tsdn, &ctl_mtx);
if (i > ctl_stats.narenas) { if (i > ctl_stats.narenas) {
ret = NULL; ret = NULL;
goto label_return; goto label_return;
...@@ -1718,20 +1816,20 @@ arena_i_index(const size_t *mib, size_t miblen, size_t i) ...@@ -1718,20 +1816,20 @@ arena_i_index(const size_t *mib, size_t miblen, size_t i)
ret = super_arena_i_node; ret = super_arena_i_node;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret); return (ret);
} }
/******************************************************************************/ /******************************************************************************/
static int static int
arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
unsigned narenas; unsigned narenas;
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY(); READONLY();
if (*oldlenp != sizeof(unsigned)) { if (*oldlenp != sizeof(unsigned)) {
ret = EINVAL; ret = EINVAL;
...@@ -1742,23 +1840,23 @@ arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, ...@@ -1742,23 +1840,23 @@ arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret); return (ret);
} }
static int static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
unsigned nread, i; unsigned nread, i;
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY(); READONLY();
if (*oldlenp != ctl_stats.narenas * sizeof(bool)) { if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
ret = EINVAL; ret = EINVAL;
nread = (*oldlenp < ctl_stats.narenas * sizeof(bool)) nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
? (*oldlenp / sizeof(bool)) : ctl_stats.narenas; ? (unsigned)(*oldlenp / sizeof(bool)) : ctl_stats.narenas;
} else { } else {
ret = 0; ret = 0;
nread = ctl_stats.narenas; nread = ctl_stats.narenas;
...@@ -1768,13 +1866,13 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, ...@@ -1768,13 +1866,13 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret); return (ret);
} }
static int static int
arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
...@@ -1798,6 +1896,32 @@ label_return: ...@@ -1798,6 +1896,32 @@ label_return:
return (ret); return (ret);
} }
static int
arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_decay_time_default_get();
READ(oldval, ssize_t);
}
if (newp != NULL) {
if (newlen != sizeof(ssize_t)) {
ret = EINVAL;
goto label_return;
}
if (arena_decay_time_default_set(*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
}
ret = 0;
label_return:
return (ret);
}
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t) CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
...@@ -1807,7 +1931,7 @@ CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) ...@@ -1807,7 +1931,7 @@ CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{ {
if (i > NBINS) if (i > NBINS)
...@@ -1816,9 +1940,9 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) ...@@ -1816,9 +1940,9 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
} }
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned) CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t) CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{ {
if (i > nlclasses) if (i > nlclasses)
...@@ -1827,9 +1951,10 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) ...@@ -1827,9 +1951,10 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
} }
CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned) CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t) CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i) arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{ {
if (i > nhclasses) if (i > nhclasses)
...@@ -1838,15 +1963,15 @@ arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i) ...@@ -1838,15 +1963,15 @@ arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
} }
static int static int
arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
unsigned narenas; unsigned narenas;
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY(); READONLY();
if (ctl_grow()) { if (ctl_grow(tsd_tsdn(tsd))) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
...@@ -1855,15 +1980,15 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1855,15 +1980,15 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret); return (ret);
} }
/******************************************************************************/ /******************************************************************************/
static int static int
prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp, prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
bool oldval; bool oldval;
...@@ -1876,9 +2001,10 @@ prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp, ...@@ -1876,9 +2001,10 @@ prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
oldval = prof_thread_active_init_set(*(bool *)newp); oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
*(bool *)newp);
} else } else
oldval = prof_thread_active_init_get(); oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
READ(oldval, bool); READ(oldval, bool);
ret = 0; ret = 0;
...@@ -1887,8 +2013,8 @@ label_return: ...@@ -1887,8 +2013,8 @@ label_return:
} }
static int static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
bool oldval; bool oldval;
...@@ -1901,9 +2027,9 @@ prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1901,9 +2027,9 @@ prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
oldval = prof_active_set(*(bool *)newp); oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
} else } else
oldval = prof_active_get(); oldval = prof_active_get(tsd_tsdn(tsd));
READ(oldval, bool); READ(oldval, bool);
ret = 0; ret = 0;
...@@ -1912,8 +2038,8 @@ label_return: ...@@ -1912,8 +2038,8 @@ label_return:
} }
static int static int
prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
const char *filename = NULL; const char *filename = NULL;
...@@ -1924,7 +2050,7 @@ prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1924,7 +2050,7 @@ prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
WRITEONLY(); WRITEONLY();
WRITE(filename, const char *); WRITE(filename, const char *);
if (prof_mdump(filename)) { if (prof_mdump(tsd, filename)) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
...@@ -1935,8 +2061,8 @@ label_return: ...@@ -1935,8 +2061,8 @@ label_return:
} }
static int static int
prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
bool oldval; bool oldval;
...@@ -1949,9 +2075,9 @@ prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1949,9 +2075,9 @@ prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
oldval = prof_gdump_set(*(bool *)newp); oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
} else } else
oldval = prof_gdump_get(); oldval = prof_gdump_get(tsd_tsdn(tsd));
READ(oldval, bool); READ(oldval, bool);
ret = 0; ret = 0;
...@@ -1960,12 +2086,11 @@ label_return: ...@@ -1960,12 +2086,11 @@ label_return:
} }
static int static int
prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
size_t lg_sample = lg_prof_sample; size_t lg_sample = lg_prof_sample;
tsd_t *tsd;
if (!config_prof) if (!config_prof)
return (ENOENT); return (ENOENT);
...@@ -1975,8 +2100,6 @@ prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ...@@ -1975,8 +2100,6 @@ prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
if (lg_sample >= (sizeof(uint64_t) << 3)) if (lg_sample >= (sizeof(uint64_t) << 3))
lg_sample = (sizeof(uint64_t) << 3) - 1; lg_sample = (sizeof(uint64_t) << 3) - 1;
tsd = tsd_fetch();
prof_reset(tsd, lg_sample); prof_reset(tsd, lg_sample);
ret = 0; ret = 0;
...@@ -1995,15 +2118,20 @@ CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) ...@@ -1995,15 +2118,20 @@ CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t) CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t) CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_retained, ctl_stats.retained, size_t)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult, CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
ssize_t) ssize_t)
CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time,
ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
ctl_stats.arenas[mib[2]].astats.mapped, size_t) ctl_stats.arenas[mib[2]].astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
ctl_stats.arenas[mib[2]].astats.retained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
...@@ -2060,7 +2188,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, ...@@ -2060,7 +2188,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j)
{ {
if (j > NBINS) if (j > NBINS)
...@@ -2078,7 +2207,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, ...@@ -2078,7 +2207,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j)
{ {
if (j > nlclasses) if (j > nlclasses)
...@@ -2097,7 +2227,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks, ...@@ -2097,7 +2227,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t) ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j) stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j)
{ {
if (j > nhclasses) if (j > nhclasses)
...@@ -2106,11 +2237,11 @@ stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j) ...@@ -2106,11 +2237,11 @@ stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j)
} }
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{ {
const ctl_named_node_t * ret; const ctl_named_node_t * ret;
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(tsdn, &ctl_mtx);
if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) { if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
ret = NULL; ret = NULL;
goto label_return; goto label_return;
...@@ -2118,6 +2249,6 @@ stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) ...@@ -2118,6 +2249,6 @@ stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
ret = super_stats_arenas_i_node; ret = super_stats_arenas_i_node;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx); malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret); return (ret);
} }
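The ctl.c changes above thread a tsd_t */tsdn_t * argument through every ctl handler and register new mallctl names such as "stats.retained", "opt.decay_time" and "arena.<i>.decay". A minimal consumer-side sketch of reading the new retained-memory statistic through the public mallctl() interface follows; this is not part of the diff, and it assumes an unprefixed jemalloc build (the Redis bundle exposes the same calls with a je_ prefix).

/*
 * Hedged usage sketch: refresh the stats epoch (which calls
 * ctl_refresh() internally), then read the mapped and retained
 * counters added/updated by this change.
 */
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t sz, mapped, retained;

	/* Writing "epoch" forces the ctl layer to refresh its snapshot. */
	sz = sizeof(epoch);
	if (mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch)) != 0)
		return (1);

	sz = sizeof(size_t);
	if (mallctl("stats.mapped", &mapped, &sz, NULL, 0) != 0)
		return (1);
	if (mallctl("stats.retained", &retained, &sz, NULL, 0) != 0)
		return (1);

	printf("mapped: %zu, retained: %zu\n", mapped, retained);
	return (0);
}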
...@@ -3,45 +3,48 @@ ...@@ -3,45 +3,48 @@
/******************************************************************************/ /******************************************************************************/
/*
* Round down to the nearest chunk size that can actually be requested during
* normal huge allocation.
*/
JEMALLOC_INLINE_C size_t JEMALLOC_INLINE_C size_t
extent_quantize(size_t size) extent_quantize(size_t size)
{ {
size_t ret;
szind_t ind;
/* assert(size > 0);
* Round down to the nearest chunk size that can actually be requested
* during normal huge allocation. ind = size2index(size + 1);
*/ if (ind == 0) {
return (index2size(size2index(size + 1) - 1)); /* Avoid underflow. */
return (index2size(0));
}
ret = index2size(ind - 1);
assert(ret <= size);
return (ret);
} }
JEMALLOC_INLINE_C int JEMALLOC_INLINE_C int
extent_szad_comp(extent_node_t *a, extent_node_t *b) extent_sz_comp(const extent_node_t *a, const extent_node_t *b)
{ {
int ret;
size_t a_qsize = extent_quantize(extent_node_size_get(a)); size_t a_qsize = extent_quantize(extent_node_size_get(a));
size_t b_qsize = extent_quantize(extent_node_size_get(b)); size_t b_qsize = extent_quantize(extent_node_size_get(b));
/* return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
* Compare based on quantized size rather than size, in order to sort }
* equally useful extents only by address.
*/
ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
if (ret == 0) {
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
ret = (a_addr > b_addr) - (a_addr < b_addr); JEMALLOC_INLINE_C int
} extent_sn_comp(const extent_node_t *a, const extent_node_t *b)
{
size_t a_sn = extent_node_sn_get(a);
size_t b_sn = extent_node_sn_get(b);
return (ret); return ((a_sn > b_sn) - (a_sn < b_sn));
} }
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
extent_szad_comp)
JEMALLOC_INLINE_C int JEMALLOC_INLINE_C int
extent_ad_comp(extent_node_t *a, extent_node_t *b) extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
{ {
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
...@@ -49,5 +52,26 @@ extent_ad_comp(extent_node_t *a, extent_node_t *b) ...@@ -49,5 +52,26 @@ extent_ad_comp(extent_node_t *a, extent_node_t *b)
return ((a_addr > b_addr) - (a_addr < b_addr)); return ((a_addr > b_addr) - (a_addr < b_addr));
} }
JEMALLOC_INLINE_C int
extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b)
{
int ret;
ret = extent_sz_comp(a, b);
if (ret != 0)
return (ret);
ret = extent_sn_comp(a, b);
if (ret != 0)
return (ret);
ret = extent_ad_comp(a, b);
return (ret);
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link,
extent_szsnad_comp)
/* Generate red-black tree functions. */ /* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp) rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
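The extent.c changes above replace the size/address (szad) tree with a size/serial-number/address (szsnad) tree: three branch-free sub-comparators composed lexicographically, each using the "(a > b) - (a < b)" idiom. A standalone illustration of that composition follows; the struct and field names here are hypothetical and exist only to show the ordering, not jemalloc's actual data layout.

/*
 * Illustrative sketch only: a three-key lexicographic comparator in
 * the same style as extent_szsnad_comp() above.
 */
#include <stddef.h>
#include <stdint.h>

struct demo_node {
	size_t    qsize; /* quantized size (primary key)   */
	size_t    sn;    /* serial number  (secondary key) */
	uintptr_t addr;  /* base address   (tertiary key)  */
};

static int
demo_key_comp(size_t a, size_t b)
{
	/* Returns -1, 0 or 1 without risking signed overflow. */
	return ((a > b) - (a < b));
}

static int
demo_szsnad_comp(const struct demo_node *a, const struct demo_node *b)
{
	int ret;

	ret = demo_key_comp(a->qsize, b->qsize);
	if (ret != 0)
		return (ret);
	ret = demo_key_comp(a->sn, b->sn);
	if (ret != 0)
		return (ret);
	return (demo_key_comp((size_t)a->addr, (size_t)b->addr));
}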