Commit 7178cac0 authored by antirez

Revert "Jemalloc updated to 4.4.0."

This reverts commit 153f2f00.

Jemalloc 4.4.0 is apparently causing deadlocks in certain
systems. See for example https://github.com/antirez/redis/issues/3799.
As a cautionary step we are reverting the commit and
releasing a new stable Redis version.
parent 33fad43c
@@ -6,7 +6,7 @@ install_suffix=@install_suffix@
 Name: jemalloc
 Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support.
-URL: http://jemalloc.net/
+URL: http://www.canonware.com/jemalloc
 Version: @jemalloc_version@
 Cflags: -I${includedir}
 Libs: -L${libdir} -ljemalloc${install_suffix}
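For context, this hunk only swaps the project homepage in the pkg-config metadata; the fields consumers actually build against (Cflags/Libs) are unchanged. A minimal sketch of a consumer on a Unix build, assuming a default configure (unprefixed public API) and an installed jemalloc.pc:

// demo.cpp -- compile with: c++ demo.cpp $(pkg-config --cflags --libs jemalloc)
#include <jemalloc/jemalloc.h>
#include <cstdio>

int main() {
    const char *version;
    size_t sz = sizeof(version);
    // "version" is a read-only string control exposed through mallctl().
    if (mallctl("version", &version, &sz, NULL, 0) == 0)
        std::printf("linked against jemalloc %s\n", version);
    return 0;
}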
How to build jemalloc for Windows
=================================
1. Install Cygwin with at least the following packages:
* autoconf
* autogen
* gawk
* grep
* sed
2. Install Visual Studio 2015 with Visual C++
3. Add Cygwin\bin to the PATH environment variable
4. Open "VS2015 x86 Native Tools Command Prompt"
(note: x86/x64 doesn't matter at this point)
5. Generate header files:
sh -c "CC=cl ./autogen.sh"
6. Now the project can be opened and built in Visual Studio:
msvc\jemalloc_vc2015.sln
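Once step 6 produces a library, a short smoke test confirms the binary really routes allocations through jemalloc. This is a hypothetical check, not part of the tree; it uses the je_ prefix that this MSVC build applies to the public API (the same convention test_threads.cpp relies on below):

// smoke.cpp -- link against the jemalloc .lib produced by the solution.
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main() {
    void *p = je_malloc(1024);               // allocate through jemalloc
    if (p == NULL) return 1;
    je_malloc_stats_print(NULL, NULL, NULL); // dump allocator statistics
    je_free(p);
    return 0;
}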

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 14
VisualStudioVersion = 14.0.24720.0
MinimumVisualStudioVersion = 10.0.40219.1
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}"
ProjectSection(SolutionItems) = preProject
ReadMe.txt = ReadMe.txt
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2015\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2015\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|x64 = Debug|x64
Debug|x86 = Debug|x86
Debug-static|x64 = Debug-static|x64
Debug-static|x86 = Debug-static|x86
Release|x64 = Release|x64
Release|x86 = Release|x86
Release-static|x64 = Release-static|x64
Release-static|x86 = Release-static|x86
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
EndGlobal
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug-static|Win32">
<Configuration>Debug-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug-static|x64">
<Configuration>Debug-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|Win32">
<Configuration>Release-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|x64">
<Configuration>Release-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\arena.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\assert.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_dss.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_mmap.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\hash.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\huge.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ph.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\quarantine.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\smoothstep.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\spin.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\witness.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h" />
<ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdbool.h" />
<ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdint.h" />
<ClInclude Include="..\..\..\..\include\msvc_compat\strings.h" />
<ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\..\src\arena.c" />
<ClCompile Include="..\..\..\..\src\atomic.c" />
<ClCompile Include="..\..\..\..\src\base.c" />
<ClCompile Include="..\..\..\..\src\bitmap.c" />
<ClCompile Include="..\..\..\..\src\chunk.c" />
<ClCompile Include="..\..\..\..\src\chunk_dss.c" />
<ClCompile Include="..\..\..\..\src\chunk_mmap.c" />
<ClCompile Include="..\..\..\..\src\ckh.c" />
<ClCompile Include="..\..\..\..\src\ctl.c" />
<ClCompile Include="..\..\..\..\src\extent.c" />
<ClCompile Include="..\..\..\..\src\hash.c" />
<ClCompile Include="..\..\..\..\src\huge.c" />
<ClCompile Include="..\..\..\..\src\jemalloc.c" />
<ClCompile Include="..\..\..\..\src\mb.c" />
<ClCompile Include="..\..\..\..\src\mutex.c" />
<ClCompile Include="..\..\..\..\src\nstime.c" />
<ClCompile Include="..\..\..\..\src\pages.c" />
<ClCompile Include="..\..\..\..\src\prng.c" />
<ClCompile Include="..\..\..\..\src\prof.c" />
<ClCompile Include="..\..\..\..\src\quarantine.c" />
<ClCompile Include="..\..\..\..\src\rtree.c" />
<ClCompile Include="..\..\..\..\src\spin.c" />
<ClCompile Include="..\..\..\..\src\stats.c" />
<ClCompile Include="..\..\..\..\src\tcache.c" />
<ClCompile Include="..\..\..\..\src\ticker.c" />
<ClCompile Include="..\..\..\..\src\tsd.c" />
<ClCompile Include="..\..\..\..\src\util.c" />
<ClCompile Include="..\..\..\..\src\witness.c" />
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>jemalloc</RootNamespace>
<WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)d</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)d</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<DebugInformationFormat>OldStyle</DebugInformationFormat>
<MinimalRebuild>false</MinimalRebuild>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<DebugInformationFormat>OldStyle</DebugInformationFormat>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
<Filter Include="Header Files\internal">
<UniqueIdentifier>{5697dfa3-16cf-4932-b428-6e0ec6e9f98e}</UniqueIdentifier>
</Filter>
<Filter Include="Header Files\msvc_compat">
<UniqueIdentifier>{0cbd2ca6-42a7-4f82-8517-d7e7a14fd986}</UniqueIdentifier>
</Filter>
<Filter Include="Header Files\msvc_compat\C99">
<UniqueIdentifier>{0abe6f30-49b5-46dd-8aca-6e33363fa52c}</UniqueIdentifier>
</Filter>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\arena.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\assert.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_dss.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_mmap.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\hash.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\huge.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ph.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\quarantine.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\smoothstep.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\spin.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\witness.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\msvc_compat\strings.h">
<Filter>Header Files\msvc_compat</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h">
<Filter>Header Files\msvc_compat</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdbool.h">
<Filter>Header Files\msvc_compat\C99</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdint.h">
<Filter>Header Files\msvc_compat\C99</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\..\src\arena.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\atomic.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\base.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\bitmap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\chunk.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\chunk_dss.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\chunk_mmap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ckh.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ctl.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\extent.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hash.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\huge.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\jemalloc.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\mb.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\mutex.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\nstime.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\pages.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prng.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prof.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\quarantine.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\rtree.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\spin.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\stats.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\tcache.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ticker.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\tsd.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\util.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\witness.c">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
</Project>
// jemalloc C++ threaded test
// Author: Rustam Abdullaev
// Public Domain
#include <atomic>
#include <functional>
#include <future>
#include <random>
#include <thread>
#include <vector>
#include <stdio.h>
#include <jemalloc/jemalloc.h>
using std::vector;
using std::thread;
using std::uniform_int_distribution;
using std::minstd_rand;
int test_threads()
{
je_malloc_conf = "narenas:3";
int narenas = 0;
size_t sz = sizeof(narenas);
je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0);
if (narenas != 3) {
printf("Error: unexpected number of arenas: %d\n", narenas);
return 1;
}
static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 };
static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0]));
vector<thread> workers;
static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50;
je_malloc_stats_print(NULL, NULL, NULL);
size_t allocated1;
size_t sz1 = sizeof(allocated1);
je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0);
printf("\nPress Enter to start threads...\n");
getchar();
printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2);
for (int i = 0; i < numThreads; i++) {
workers.emplace_back([tid=i]() {
uniform_int_distribution<int> sizeDist(0, numSizes - 1);
minstd_rand rnd(tid * 17);
uint8_t* ptrs[numAllocsMax];
int ptrsz[numAllocsMax];
for (int i = 0; i < numIter1; ++i) {
thread t([&]() {
for (int i = 0; i < numIter2; ++i) {
const int numAllocs = numAllocsMax - sizeDist(rnd);
for (int j = 0; j < numAllocs; j += 64) {
const int x = sizeDist(rnd);
const int sz = sizes[x];
ptrsz[j] = sz;
ptrs[j] = (uint8_t*)je_malloc(sz);
if (!ptrs[j]) {
printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. %d\n", sz, tid, i, j, x);
exit(1);
}
for (int k = 0; k < sz; k++)
ptrs[j][k] = tid + k;
}
for (int j = 0; j < numAllocs; j += 64) {
for (int k = 0, sz = ptrsz[j]; k < sz; k++)
if (ptrs[j][k] != (uint8_t)(tid + k)) {
printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k));
exit(1);
}
je_free(ptrs[j]);
}
}
});
t.join();
}
});
}
for (thread& t : workers) {
t.join();
}
je_malloc_stats_print(NULL, NULL, NULL);
size_t allocated2;
je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0);
size_t leaked = allocated2 - allocated1;
printf("\nDone. Leaked: %zd bytes\n", leaked);
bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet)
printf("\nTest %s!\n", (failed ? "FAILED" : "successful"));
printf("\nPress Enter to continue...\n");
getchar();
return failed ? 1 : 0;
}
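One subtlety the test above glosses over: jemalloc serves the "stats.*" controls from a cached snapshot, so a reader normally bumps the "epoch" control first to refresh it. A hedged sketch of that idiom (not in the file above; same je_-prefixed API):

// stats_active.cpp -- sketch of the documented epoch-refresh idiom.
#include <jemalloc/jemalloc.h>
#include <stdint.h>

static size_t stats_active_bytes(void) {
    uint64_t epoch = 1;
    size_t esz = sizeof(epoch);
    je_mallctl("epoch", &epoch, &esz, &epoch, esz);    // refresh cached stats
    size_t active = 0, sz = sizeof(active);
    je_mallctl("stats.active", &active, &sz, NULL, 0); // bytes in active pages
    return active;
}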
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug-static|Win32">
<Configuration>Debug-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug-static|x64">
<Configuration>Debug-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|Win32">
<Configuration>Release-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|x64">
<Configuration>Release-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{09028CFD-4EB7-491D-869C-0708DB97ED44}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>test_threads</RootNamespace>
<WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="test_threads.cpp" />
<ClCompile Include="test_threads_main.cpp" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\jemalloc\jemalloc.vcxproj">
<Project>{8d6bb292-9e1c-413d-9f98-4864bdc1514a}</Project>
</ProjectReference>
</ItemGroup>
<ItemGroup>
<ClInclude Include="test_threads.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="test_threads.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="test_threads_main.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="test_threads.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
</Project>
\ No newline at end of file
#include "test_threads.h"
#include <future>
#include <functional>
#include <chrono>
using namespace std::chrono_literals;
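// main() is just a shim: the multithreaded allocation exercise itself
// lives in test_threads() (declared in test_threads.h).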
int main(int argc, char** argv)
{
int rc = test_threads();
return rc;
}
@@ -4,23 +4,16 @@
/******************************************************************************/ /******************************************************************************/
/* Data. */ /* Data. */
purge_mode_t opt_purge = PURGE_DEFAULT;
const char *purge_mode_names[] = {
"ratio",
"decay",
"N/A"
};
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t lg_dirty_mult_default; static ssize_t lg_dirty_mult_default;
ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t decay_time_default;
arena_bin_info_t arena_bin_info[NBINS]; arena_bin_info_t arena_bin_info[NBINS];
size_t map_bias; size_t map_bias;
size_t map_misc_offset; size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */ size_t arena_maxrun; /* Max run size for arenas. */
size_t large_maxclass; /* Max large size class. */ size_t large_maxclass; /* Max large size class. */
static size_t small_maxrun; /* Max run size used for small size classes. */
static bool *small_run_tab; /* Valid small run page multiples. */
unsigned nlclasses; /* Number of large size classes. */ unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */ unsigned nhclasses; /* Number of huge size classes. */
@@ -30,57 +23,60 @@ unsigned nhclasses; /* Number of huge size classes. */
* definition. * definition.
*/ */
static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, static void arena_purge(arena_t *arena, bool all);
arena_chunk_t *chunk); static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, bool cleaned, bool decommitted);
size_t ndirty_limit); static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, arena_run_t *run, arena_bin_t *bin);
bool dirty, bool cleaned, bool decommitted); static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, arena_bin_t *bin);
arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_run_t *run,
arena_bin_t *bin);
/******************************************************************************/ /******************************************************************************/
JEMALLOC_INLINE_C size_t #define CHUNK_MAP_KEY ((uintptr_t)0x1U)
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
JEMALLOC_INLINE_C arena_chunk_map_misc_t *
arena_miscelm_key_create(size_t size)
{ {
arena_chunk_t *chunk;
size_t pageind, mapbits;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) |
pageind = arena_miscelm_to_pageind(miscelm); CHUNK_MAP_KEY));
mapbits = arena_mapbits_get(chunk, pageind);
return (arena_mapbits_size_decode(mapbits));
} }
JEMALLOC_INLINE_C const extent_node_t * JEMALLOC_INLINE_C bool
arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm) arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm)
{ {
arena_chunk_t *chunk;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0);
return (&chunk->node);
} }
JEMALLOC_INLINE_C int #undef CHUNK_MAP_KEY
arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
JEMALLOC_INLINE_C size_t
arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
{ {
size_t a_sn, b_sn;
assert(a != NULL); assert(arena_miscelm_is_key(miscelm));
assert(b != NULL);
return (arena_mapbits_size_decode((uintptr_t)miscelm));
}
a_sn = extent_node_sn_get(arena_miscelm_extent_get(a)); JEMALLOC_INLINE_C size_t
b_sn = extent_node_sn_get(arena_miscelm_extent_get(b)); arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk;
size_t pageind, mapbits;
assert(!arena_miscelm_is_key(miscelm));
return ((a_sn > b_sn) - (a_sn < b_sn)); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
pageind = arena_miscelm_to_pageind(miscelm);
mapbits = arena_mapbits_get(chunk, pageind);
return (arena_mapbits_size_decode(mapbits));
} }
JEMALLOC_INLINE_C int JEMALLOC_INLINE_C int
arena_ad_comp(const arena_chunk_map_misc_t *a, arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
const arena_chunk_map_misc_t *b)
{ {
uintptr_t a_miscelm = (uintptr_t)a; uintptr_t a_miscelm = (uintptr_t)a;
uintptr_t b_miscelm = (uintptr_t)b; uintptr_t b_miscelm = (uintptr_t)b;
@@ -91,79 +87,74 @@ arena_ad_comp(const arena_chunk_map_misc_t *a,
return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
} }
JEMALLOC_INLINE_C int /* Generate red-black tree functions. */
arena_snad_comp(const arena_chunk_map_misc_t *a, rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
const arena_chunk_map_misc_t *b) rb_link, arena_run_comp)
static size_t
run_quantize(size_t size)
{ {
int ret; size_t qsize;
assert(a != NULL); assert(size != 0);
assert(b != NULL); assert(size == PAGE_CEILING(size));
ret = arena_sn_comp(a, b); /* Don't change sizes that are valid small run sizes. */
if (ret != 0) if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
return (ret); return (size);
ret = arena_ad_comp(a, b); /*
return (ret); * Round down to the nearest run size that can actually be requested
* during normal large allocation. Add large_pad so that cache index
* randomization can offset the allocation from the page boundary.
*/
qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
if (qsize <= SMALL_MAXCLASS + large_pad)
return (run_quantize(size - large_pad));
assert(qsize <= size);
return (qsize);
} }
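Both run_quantize() variants map a request through the size-class tables
(size2index()/index2size()) rather than doing open-coded arithmetic. As a
toy stand-in that only shows the floor/ceiling rounding shape, assuming
4 KiB pages rather than jemalloc's actual run size classes:

#include <stddef.h>

#define LG_PAGE	12			/* assumed 4 KiB pages */
#define PAGE	((size_t)1 << LG_PAGE)

/* Toy stand-ins: quantize at page granularity instead of to the real
 * run size classes. */
static size_t
quantize_floor(size_t size)
{

	return (size & ~(PAGE - 1));
}

static size_t
quantize_ceil(size_t size)
{

	return ((size + PAGE - 1) & ~(PAGE - 1));
}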
/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
ph_link, arena_snad_comp)
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t static size_t
run_quantize_floor(size_t size) run_quantize_next(size_t size)
{ {
size_t ret; size_t large_run_size_next;
pszind_t pind;
assert(size > 0);
assert(size <= HUGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
assert(size != 0); assert(size != 0);
assert(size == PAGE_CEILING(size)); assert(size == PAGE_CEILING(size));
pind = psz2ind(size - large_pad + 1); /*
if (pind == 0) { * Return the next quantized size greater than the input size.
/* * Quantized sizes comprise the union of run sizes that back small
* Avoid underflow. This short-circuit would also do the right * region runs, and run sizes that back large regions with no explicit
* thing for all sizes in the range for which there are * alignment constraints.
* PAGE-spaced size classes, but it's simplest to just handle */
* the one case that would cause erroneous results.
*/ if (size > SMALL_MAXCLASS) {
return (size); large_run_size_next = PAGE_CEILING(index2size(size2index(size -
large_pad) + 1) + large_pad);
} else
large_run_size_next = SIZE_T_MAX;
if (size >= small_maxrun)
return (large_run_size_next);
while (true) {
size += PAGE;
assert(size <= small_maxrun);
if (small_run_tab[size >> LG_PAGE]) {
if (large_run_size_next < size)
return (large_run_size_next);
return (size);
}
} }
ret = pind2sz(pind - 1) + large_pad;
assert(ret <= size);
return (ret);
} }
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t static size_t
run_quantize_ceil(size_t size) run_quantize_first(size_t size)
{ {
size_t ret; size_t qsize = run_quantize(size);
assert(size > 0); if (qsize < size) {
assert(size <= HUGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
ret = run_quantize_floor(size);
if (ret < size) {
/* /*
* Skip a quantization that may have an adequately large run, * Skip a quantization that may have an adequately large run,
* because under-sized runs may be mixed in. This only happens * because under-sized runs may be mixed in. This only happens
@@ -172,50 +163,72 @@ run_quantize_ceil(size_t size)
* search would potentially find sufficiently aligned available * search would potentially find sufficiently aligned available
* memory somewhere lower. * memory somewhere lower.
*/ */
ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad; qsize = run_quantize_next(size);
}
return (qsize);
}
JEMALLOC_INLINE_C int
arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
int ret;
uintptr_t a_miscelm = (uintptr_t)a;
size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ?
arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a));
size_t b_qsize = run_quantize(arena_miscelm_size_get(b));
/*
* Compare based on quantized size rather than size, in order to sort
* equally useful runs only by address.
*/
ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
if (ret == 0) {
if (!arena_miscelm_is_key(a)) {
uintptr_t b_miscelm = (uintptr_t)b;
ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
} else {
/*
* Treat keys as if they are lower than anything else.
*/
ret = -1;
}
} }
return (ret); return (ret);
} }
#ifdef JEMALLOC_JET
#undef run_quantize_ceil /* Generate red-black tree functions. */
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil) rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil); arena_chunk_map_misc_t, rb_link, arena_avail_comp)
#endif
static void static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages) size_t npages)
{ {
pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
arena_miscelm_get_const(chunk, pageind))));
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE)); LG_PAGE));
assert((npages << LG_PAGE) < chunksize); arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
assert(pind2sz(pind) <= chunksize); pageind));
arena_run_heap_insert(&arena->runs_avail[pind],
arena_miscelm_get_mutable(chunk, pageind));
} }
static void static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages) size_t npages)
{ {
pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
arena_miscelm_get_const(chunk, pageind))));
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE)); LG_PAGE));
assert((npages << LG_PAGE) < chunksize); arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
assert(pind2sz(pind) <= chunksize); pageind));
arena_run_heap_remove(&arena->runs_avail[pind],
arena_miscelm_get_mutable(chunk, pageind));
} }
static void static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages) size_t npages)
{ {
arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE)); LG_PAGE));
@@ -232,8 +245,7 @@ static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages) size_t npages)
{ {
arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE)); LG_PAGE));
@@ -280,14 +292,14 @@ JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{ {
void *ret; void *ret;
size_t regind; unsigned regind;
arena_chunk_map_misc_t *miscelm; arena_chunk_map_misc_t *miscelm;
void *rpages; void *rpages;
assert(run->nfree > 0); assert(run->nfree > 0);
assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info); regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
miscelm = arena_run_to_miscelm(run); miscelm = arena_run_to_miscelm(run);
rpages = arena_miscelm_to_rpages(miscelm); rpages = arena_miscelm_to_rpages(miscelm);
ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
@@ -304,7 +316,7 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
size_t mapbits = arena_mapbits_get(chunk, pageind); size_t mapbits = arena_mapbits_get(chunk, pageind);
szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind]; arena_bin_info_t *bin_info = &arena_bin_info[binind];
size_t regind = arena_run_regind(run, bin_info, ptr); unsigned regind = arena_run_regind(run, bin_info, ptr);
assert(run->nfree < bin_info->nregs); assert(run->nfree < bin_info->nregs);
/* Freeing an interior pointer can cause assertion failure. */ /* Freeing an interior pointer can cause assertion failure. */
@@ -352,30 +364,16 @@ arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
} }
static void static void
arena_nactive_add(arena_t *arena, size_t add_pages) arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{ {
if (config_stats) { if (config_stats) {
size_t cactive_add = CHUNK_CEILING((arena->nactive + ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
LG_PAGE); LG_PAGE);
if (cactive_add != 0) if (cactive_diff != 0)
stats_cactive_add(cactive_add); stats_cactive_add(cactive_diff);
} }
arena->nactive += add_pages;
}
static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{
if (config_stats) {
size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
if (cactive_sub != 0)
stats_cactive_sub(cactive_sub);
}
arena->nactive -= sub_pages;
} }
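In both layouts the cactive statistic is charged in chunk-sized steps:
the delta is the difference between the chunk-ceilinged active footprint
before and after the update. A worked example, assuming 2 MiB chunks and
4 KiB pages:

#include <stddef.h>

#define LG_PAGE		12			/* assumed 4 KiB pages */
#define CHUNKSIZE	((size_t)1 << 21)	/* assumed 2 MiB chunks */
#define CHUNK_CEILING(s) (((s) + CHUNKSIZE - 1) & ~(CHUNKSIZE - 1))

static size_t
cactive_add_example(void)
{
	size_t nactive = 500;			/* pages */
	size_t add_pages = 200;
	size_t before = CHUNK_CEILING(nactive << LG_PAGE);	/* 2 MiB */
	size_t after = CHUNK_CEILING((nactive + add_pages) << LG_PAGE);

	return (after - before);	/* one 2 MiB chunk becomes active */
}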
static void static void
@@ -396,7 +394,8 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
arena_avail_remove(arena, chunk, run_ind, total_pages); arena_avail_remove(arena, chunk, run_ind, total_pages);
if (flag_dirty != 0) if (flag_dirty != 0)
arena_run_dirty_remove(arena, chunk, run_ind, total_pages); arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
arena_nactive_add(arena, need_pages); arena_cactive_update(arena, need_pages, 0);
arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */ /* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) { if (rem_pages > 0) {
@@ -568,8 +567,7 @@ arena_chunk_init_spare(arena_t *arena)
} }
static bool static bool
arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
size_t sn, bool zero)
{ {
/* /*
@@ -578,67 +576,64 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
* of runs is tracked individually, and upon chunk deallocation the * of runs is tracked individually, and upon chunk deallocation the
* entire chunk is in a consistent commit state. * entire chunk is in a consistent commit state.
*/ */
extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true); extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
extent_node_achunk_set(&chunk->node, true); extent_node_achunk_set(&chunk->node, true);
return (chunk_register(tsdn, chunk, &chunk->node)); return (chunk_register(chunk, &chunk->node));
} }
static arena_chunk_t * static arena_chunk_t *
arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena, arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_hooks_t *chunk_hooks, bool *zero, bool *commit) bool *zero, bool *commit)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
size_t sn;
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks, chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
NULL, chunksize, chunksize, &sn, zero, commit); chunksize, chunksize, zero, commit);
if (chunk != NULL && !*commit) { if (chunk != NULL && !*commit) {
/* Commit header. */ /* Commit header. */
if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind)) { LG_PAGE, arena->ind)) {
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, chunk_dalloc_wrapper(arena, chunk_hooks,
(void *)chunk, chunksize, sn, *zero, *commit); (void *)chunk, chunksize, *commit);
chunk = NULL; chunk = NULL;
} }
} }
if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn, if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
*zero)) {
if (!*commit) { if (!*commit) {
/* Undo commit of header. */ /* Undo commit of header. */
chunk_hooks->decommit(chunk, chunksize, 0, map_bias << chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind); LG_PAGE, arena->ind);
} }
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk, chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
chunksize, sn, *zero, *commit); chunksize, *commit);
chunk = NULL; chunk = NULL;
} }
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
return (chunk); return (chunk);
} }
static arena_chunk_t * static arena_chunk_t *
arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero, arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
bool *commit)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t sn;
chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize, chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
chunksize, &sn, zero, commit, true); chunksize, zero, true);
if (chunk != NULL) { if (chunk != NULL) {
if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) { if (arena_chunk_register(arena, chunk, *zero)) {
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, chunk_dalloc_cache(arena, &chunk_hooks, chunk,
chunksize, sn, true); chunksize, true);
return (NULL); return (NULL);
} }
*commit = true;
} }
if (chunk == NULL) { if (chunk == NULL) {
chunk = arena_chunk_alloc_internal_hard(tsdn, arena, chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
&chunk_hooks, zero, commit); zero, commit);
} }
if (config_stats && chunk != NULL) { if (config_stats && chunk != NULL) {
@@ -650,7 +645,7 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
} }
static arena_chunk_t * static arena_chunk_t *
arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena) arena_chunk_init_hard(arena_t *arena)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
bool zero, commit; bool zero, commit;
@@ -660,16 +655,14 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
zero = false; zero = false;
commit = false; commit = false;
chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit); chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
if (chunk == NULL) if (chunk == NULL)
return (NULL); return (NULL);
chunk->hugepage = true;
/* /*
* Initialize the map to contain one maximal free untouched run. Mark * Initialize the map to contain one maximal free untouched run. Mark
* the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
* or decommitted chunk. * chunk.
*/ */
flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED; flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED; flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
@@ -681,18 +674,17 @@ */
*/ */
if (!zero) { if (!zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
(void *)arena_bitselm_get_const(chunk, map_bias+1), (void *)arena_bitselm_get(chunk, map_bias+1),
(size_t)((uintptr_t)arena_bitselm_get_const(chunk, (size_t)((uintptr_t) arena_bitselm_get(chunk,
chunk_npages-1) - chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
(uintptr_t)arena_bitselm_get_const(chunk, map_bias+1))); map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++) for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_internal_set(chunk, i, flag_unzeroed); arena_mapbits_internal_set(chunk, i, flag_unzeroed);
} else { } else {
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
*)arena_bitselm_get_const(chunk, map_bias+1), *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
(size_t)((uintptr_t)arena_bitselm_get_const(chunk, arena_bitselm_get(chunk, chunk_npages-1) -
chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
(uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
if (config_debug) { if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) { for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) == assert(arena_mapbits_unzeroed_get(chunk, i) ==
@@ -707,84 +699,27 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
} }
static arena_chunk_t * static arena_chunk_t *
arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena) arena_chunk_alloc(arena_t *arena)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
if (arena->spare != NULL) if (arena->spare != NULL)
chunk = arena_chunk_init_spare(arena); chunk = arena_chunk_init_spare(arena);
else { else {
chunk = arena_chunk_init_hard(tsdn, arena); chunk = arena_chunk_init_hard(arena);
if (chunk == NULL) if (chunk == NULL)
return (NULL); return (NULL);
} }
ql_elm_new(&chunk->node, ql_link); /* Insert the run into the runs_avail tree. */
ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
return (chunk); return (chunk);
} }
static void static void
arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{
size_t sn, hugepage;
bool committed;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
chunk_deregister(chunk, &chunk->node);
sn = extent_node_sn_get(&chunk->node);
hugepage = chunk->hugepage;
committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
if (!committed) {
/*
* Decommit the header. Mark the chunk as decommitted even if
* header decommit fails, since treating a partially committed
* chunk as committed has a high potential for causing later
* access of decommitted memory.
*/
chunk_hooks = chunk_hooks_get(tsdn, arena);
chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
arena->ind);
}
if (!hugepage) {
/*
* Convert chunk back to the default state, so that all
* subsequent chunk allocations start out with chunks that can
* be backed by transparent huge pages.
*/
pages_huge(chunk, chunksize);
}
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
sn, committed);
if (config_stats) {
arena->stats.mapped -= chunksize;
arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
}
}
static void
arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
{
assert(arena->spare != spare);
if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
arena_run_dirty_remove(arena, spare, map_bias,
chunk_npages-map_bias);
}
arena_chunk_discard(tsdn, arena, spare);
}
static void
arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{ {
arena_chunk_t *spare;
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
@@ -797,14 +732,49 @@ arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
assert(arena_mapbits_decommitted_get(chunk, map_bias) == assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
arena_mapbits_decommitted_get(chunk, chunk_npages-1)); arena_mapbits_decommitted_get(chunk, chunk_npages-1));
/* Remove run from runs_avail, so that the arena does not use it. */ /*
* Remove run from the runs_avail tree, so that the arena does not use
* it.
*/
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
ql_remove(&arena->achunks, &chunk->node, ql_link); if (arena->spare != NULL) {
spare = arena->spare; arena_chunk_t *spare = arena->spare;
arena->spare = chunk; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
if (spare != NULL) bool committed;
arena_spare_discard(tsdn, arena, spare);
arena->spare = chunk;
if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
arena_run_dirty_remove(arena, spare, map_bias,
chunk_npages-map_bias);
}
chunk_deregister(spare, &spare->node);
committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
0);
if (!committed) {
/*
* Decommit the header. Mark the chunk as decommitted
* even if header decommit fails, since treating a
* partially committed chunk as committed has a high
* potential for causing later access of decommitted
* memory.
*/
chunk_hooks = chunk_hooks_get(arena);
chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
LG_PAGE, arena->ind);
}
chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
chunksize, committed);
if (config_stats) {
arena->stats.mapped -= chunksize;
arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
}
} else
arena->spare = chunk;
} }
static void static void
@@ -846,17 +816,6 @@ arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
arena->stats.hstats[index].curhchunks--; arena->stats.hstats[index].curhchunks--;
} }
static void
arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
{
szind_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
arena->stats.ndalloc_huge++;
arena->stats.hstats[index].ndalloc--;
}
static void static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{ {
@@ -888,240 +847,243 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
} }
extent_node_t * extent_node_t *
arena_node_alloc(tsdn_t *tsdn, arena_t *arena) arena_node_alloc(arena_t *arena)
{ {
extent_node_t *node; extent_node_t *node;
malloc_mutex_lock(tsdn, &arena->node_cache_mtx); malloc_mutex_lock(&arena->node_cache_mtx);
node = ql_last(&arena->node_cache, ql_link); node = ql_last(&arena->node_cache, ql_link);
if (node == NULL) { if (node == NULL) {
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); malloc_mutex_unlock(&arena->node_cache_mtx);
return (base_alloc(tsdn, sizeof(extent_node_t))); return (base_alloc(sizeof(extent_node_t)));
} }
ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); malloc_mutex_unlock(&arena->node_cache_mtx);
return (node); return (node);
} }
void void
arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node) arena_node_dalloc(arena_t *arena, extent_node_t *node)
{ {
malloc_mutex_lock(tsdn, &arena->node_cache_mtx); malloc_mutex_lock(&arena->node_cache_mtx);
ql_elm_new(node, ql_link); ql_elm_new(node, ql_link);
ql_tail_insert(&arena->node_cache, node, ql_link); ql_tail_insert(&arena->node_cache, node, ql_link);
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); malloc_mutex_unlock(&arena->node_cache_mtx);
} }
static void * static void *
arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena, arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn, size_t usize, size_t alignment, bool *zero, size_t csize)
bool *zero, size_t csize)
{ {
void *ret; void *ret;
bool commit = true; bool commit = true;
ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize, ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
alignment, sn, zero, &commit); zero, &commit);
if (ret == NULL) { if (ret == NULL) {
/* Revert optimistic stats updates. */ /* Revert optimistic stats updates. */
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
if (config_stats) { if (config_stats) {
arena_huge_malloc_stats_update_undo(arena, usize); arena_huge_malloc_stats_update_undo(arena, usize);
arena->stats.mapped -= usize; arena->stats.mapped -= usize;
} }
arena_nactive_sub(arena, usize >> LG_PAGE); arena->nactive -= (usize >> LG_PAGE);
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
} }
return (ret); return (ret);
} }
void * void *
arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
size_t alignment, size_t *sn, bool *zero) bool *zero)
{ {
void *ret; void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize = CHUNK_CEILING(usize); size_t csize = CHUNK_CEILING(usize);
bool commit = true;
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
/* Optimistically update stats. */ /* Optimistically update stats. */
if (config_stats) { if (config_stats) {
arena_huge_malloc_stats_update(arena, usize); arena_huge_malloc_stats_update(arena, usize);
arena->stats.mapped += usize; arena->stats.mapped += usize;
} }
arena_nactive_add(arena, usize >> LG_PAGE); arena->nactive += (usize >> LG_PAGE);
ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize, ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
alignment, sn, zero, &commit, true); zero, true);
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
if (ret == NULL) { if (ret == NULL) {
ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks, ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
usize, alignment, sn, zero, csize); alignment, zero, csize);
} }
if (config_stats && ret != NULL)
stats_cactive_add(usize);
return (ret); return (ret);
} }
void void
arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize, arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
size_t sn)
{ {
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize; size_t csize;
csize = CHUNK_CEILING(usize); csize = CHUNK_CEILING(usize);
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
if (config_stats) { if (config_stats) {
arena_huge_dalloc_stats_update(arena, usize); arena_huge_dalloc_stats_update(arena, usize);
arena->stats.mapped -= usize; arena->stats.mapped -= usize;
stats_cactive_sub(usize);
} }
arena_nactive_sub(arena, usize >> LG_PAGE); arena->nactive -= (usize >> LG_PAGE);
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true); chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
} }
void void
arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk, arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
size_t oldsize, size_t usize) size_t usize)
{ {
assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
assert(oldsize != usize); assert(oldsize != usize);
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
if (config_stats) if (config_stats)
arena_huge_ralloc_stats_update(arena, oldsize, usize); arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (oldsize < usize) if (oldsize < usize) {
arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE); size_t udiff = usize - oldsize;
else arena->nactive += udiff >> LG_PAGE;
arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE); if (config_stats)
malloc_mutex_unlock(tsdn, &arena->lock); stats_cactive_add(udiff);
} else {
size_t udiff = oldsize - usize;
arena->nactive -= udiff >> LG_PAGE;
if (config_stats)
stats_cactive_sub(udiff);
}
malloc_mutex_unlock(&arena->lock);
} }
void void
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk, arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
size_t oldsize, size_t usize, size_t sn) size_t usize)
{ {
size_t udiff = oldsize - usize; size_t udiff = oldsize - usize;
size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
if (config_stats) { if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize); arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (cdiff != 0) if (cdiff != 0) {
arena->stats.mapped -= cdiff; arena->stats.mapped -= cdiff;
stats_cactive_sub(udiff);
}
} }
arena_nactive_sub(arena, udiff >> LG_PAGE); arena->nactive -= udiff >> LG_PAGE;
if (cdiff != 0) { if (cdiff != 0) {
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
void *nchunk = (void *)((uintptr_t)chunk + void *nchunk = (void *)((uintptr_t)chunk +
CHUNK_CEILING(usize)); CHUNK_CEILING(usize));
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
sn, true);
} }
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
} }
static bool static bool
arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena, arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize, void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff) size_t udiff, size_t cdiff)
{ {
bool err; bool err;
bool commit = true; bool commit = true;
err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
chunksize, sn, zero, &commit) == NULL); zero, &commit) == NULL);
if (err) { if (err) {
/* Revert optimistic stats updates. */ /* Revert optimistic stats updates. */
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
if (config_stats) { if (config_stats) {
arena_huge_ralloc_stats_update_undo(arena, oldsize, arena_huge_ralloc_stats_update_undo(arena, oldsize,
usize); usize);
arena->stats.mapped -= cdiff; arena->stats.mapped -= cdiff;
} }
arena_nactive_sub(arena, udiff >> LG_PAGE); arena->nactive -= (udiff >> LG_PAGE);
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) { cdiff, true, arena->ind)) {
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
*sn, *zero, true); true);
err = true; err = true;
} }
return (err); return (err);
} }
bool bool
arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk, arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
size_t oldsize, size_t usize, bool *zero) size_t usize, bool *zero)
{ {
bool err; bool err;
chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena); chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
size_t udiff = usize - oldsize; size_t udiff = usize - oldsize;
size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
size_t sn;
bool commit = true;
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
/* Optimistically update stats. */ /* Optimistically update stats. */
if (config_stats) { if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize); arena_huge_ralloc_stats_update(arena, oldsize, usize);
arena->stats.mapped += cdiff; arena->stats.mapped += cdiff;
} }
arena_nactive_add(arena, udiff >> LG_PAGE); arena->nactive += (udiff >> LG_PAGE);
err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
chunksize, &sn, zero, &commit, true) == NULL); chunksize, zero, true) == NULL);
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
if (err) { if (err) {
err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena, err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
&chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk, chunk, oldsize, usize, zero, nchunk, udiff,
udiff, cdiff); cdiff);
} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) { cdiff, true, arena->ind)) {
chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff, chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
sn, *zero, true); true);
err = true; err = true;
} }
if (config_stats && !err)
stats_cactive_add(udiff);
return (err); return (err);
} }
/* /*
* Do first-best-fit run selection, i.e. select the lowest run that best fits. * Do first-best-fit run selection, i.e. select the lowest run that best fits.
* Run sizes are indexed, so not all candidate runs are necessarily exactly the * Run sizes are quantized, so not all candidate runs are necessarily exactly
* same size. * the same size.
*/ */
static arena_run_t * static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size) arena_run_first_best_fit(arena_t *arena, size_t size)
{ {
pszind_t pind, i; size_t search_size = run_quantize_first(size);
arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size);
pind = psz2ind(run_quantize_ceil(size)); arena_chunk_map_misc_t *miscelm =
arena_avail_tree_nsearch(&arena->runs_avail, key);
for (i = pind; pind2sz(i) <= chunksize; i++) { if (miscelm == NULL)
arena_chunk_map_misc_t *miscelm = arena_run_heap_first( return (NULL);
&arena->runs_avail[i]); return (&miscelm->run);
if (miscelm != NULL)
return (&miscelm->run);
}
return (NULL);
} }
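Both implementations follow the same policy: among all runs large enough,
prefer the smallest quantized size class, and within a class the lowest
run. A toy rendering over per-class free lists (the names here are
illustrative, not jemalloc's):

#include <stddef.h>

#define NCLASSES 8

typedef struct toy_run_s toy_run_t;
struct toy_run_s {
	toy_run_t *next;
};

/* avail[i] holds free runs of quantized class i, kept lowest-first. */
static toy_run_t *avail[NCLASSES];

static toy_run_t *
first_best_fit(unsigned min_class)
{
	unsigned i;

	for (i = min_class; i < NCLASSES; i++) {
		if (avail[i] != NULL)
			return (avail[i]);	/* smallest class that fits */
	}
	return (NULL);
}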
static arena_run_t * static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{ {
arena_run_t *run = arena_run_first_best_fit(arena, size); arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
if (run != NULL) { if (run != NULL) {
if (arena_run_split_large(arena, run, size, zero)) if (arena_run_split_large(arena, run, size, zero))
run = NULL; run = NULL;
@@ -1130,7 +1092,7 @@ arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
} }
static arena_run_t * static arena_run_t *
arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero) arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
arena_run_t *run; arena_run_t *run;
@@ -1146,9 +1108,9 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
/* /*
* No usable runs. Create a new chunk from which to allocate the run. * No usable runs. Create a new chunk from which to allocate the run.
*/ */
chunk = arena_chunk_alloc(tsdn, arena); chunk = arena_chunk_alloc(arena);
if (chunk != NULL) { if (chunk != NULL) {
run = &arena_miscelm_get_mutable(chunk, map_bias)->run; run = &arena_miscelm_get(chunk, map_bias)->run;
if (arena_run_split_large(arena, run, size, zero)) if (arena_run_split_large(arena, run, size, zero))
run = NULL; run = NULL;
return (run); return (run);
@@ -1174,7 +1136,7 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
} }
static arena_run_t * static arena_run_t *
arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind) arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
arena_run_t *run; arena_run_t *run;
@@ -1191,9 +1153,9 @@ arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
/* /*
* No usable runs. Create a new chunk from which to allocate the run. * No usable runs. Create a new chunk from which to allocate the run.
*/ */
chunk = arena_chunk_alloc(tsdn, arena); chunk = arena_chunk_alloc(arena);
if (chunk != NULL) { if (chunk != NULL) {
run = &arena_miscelm_get_mutable(chunk, map_bias)->run; run = &arena_miscelm_get(chunk, map_bias)->run;
if (arena_run_split_small(arena, run, size, binind)) if (arena_run_split_small(arena, run, size, binind))
run = NULL; run = NULL;
return (run); return (run);
@@ -1216,239 +1178,42 @@ arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
} }
ssize_t ssize_t
arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena) arena_lg_dirty_mult_get(arena_t *arena)
{ {
ssize_t lg_dirty_mult; ssize_t lg_dirty_mult;
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
lg_dirty_mult = arena->lg_dirty_mult; lg_dirty_mult = arena->lg_dirty_mult;
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
return (lg_dirty_mult); return (lg_dirty_mult);
} }
bool bool
arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult) arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
{ {
if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
return (true); return (true);
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
arena->lg_dirty_mult = lg_dirty_mult; arena->lg_dirty_mult = lg_dirty_mult;
arena_maybe_purge(tsdn, arena); arena_maybe_purge(arena);
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
return (false); return (false);
} }
static void void
arena_decay_deadline_init(arena_t *arena) arena_maybe_purge(arena_t *arena)
{
assert(opt_purge == purge_mode_decay);
/*
* Generate a new deadline that is uniformly random within the next
* epoch after the current one.
*/
nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
nstime_add(&arena->decay.deadline, &arena->decay.interval);
if (arena->decay.time > 0) {
nstime_t jitter;
nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
nstime_ns(&arena->decay.interval)));
nstime_add(&arena->decay.deadline, &jitter);
}
}
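A sketch of the jittered deadline computed above, flattened to plain
nanosecond counters; rand() is a deliberately weak stand-in for
prng_range_u64(), and the names are illustrative only:

#include <stdint.h>
#include <stdlib.h>

static uint64_t
next_deadline_ns(uint64_t epoch_ns, uint64_t interval_ns)
{
	/*
	 * Uniform jitter in [0, interval_ns), so the deadline lands at a
	 * random point within the next epoch (assumes interval_ns > 0).
	 */
	uint64_t jitter_ns = (uint64_t)rand() % interval_ns;

	return (epoch_ns + interval_ns + jitter_ns);
}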
static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{
assert(opt_purge == purge_mode_decay);
return (nstime_compare(&arena->decay.deadline, time) <= 0);
}
static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
static const uint64_t h_steps[] = {
#define STEP(step, h, x, y) \
h,
SMOOTHSTEP
#undef STEP
};
uint64_t sum;
size_t npages_limit_backlog;
unsigned i;
assert(opt_purge == purge_mode_decay);
/*
* For each element of decay_backlog, multiply by the corresponding
* fixed-point smoothstep decay factor. Sum the products, then divide
* to round down to the nearest whole number of pages.
*/
sum = 0;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
sum += arena->decay.backlog[i] * h_steps[i];
npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
return (npages_limit_backlog);
}
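The limit is a fixed-point weighted sum: each backlog slot is scaled by
its smoothstep factor, the products are summed, and the total is shifted
back down by SMOOTHSTEP_BFP bits. The same arithmetic in isolation, with
the factor table left abstract (jemalloc generates h_steps[] from the
smoothstep polynomial; the BFP value below is an assumption):

#include <stddef.h>
#include <stdint.h>

#define BFP 24	/* fixed-point scale: 1.0 == (1 << BFP) */

static size_t
backlog_limit(const size_t *backlog, const uint64_t *h_steps, unsigned n)
{
	uint64_t sum = 0;
	unsigned i;

	for (i = 0; i < n; i++)
		sum += (uint64_t)backlog[i] * h_steps[i];
	return ((size_t)(sum >> BFP));	/* round down to whole pages */
}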
static void
arena_decay_backlog_update_last(arena_t *arena)
{
size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
arena->ndirty - arena->decay.ndirty : 0;
arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
}
static void
arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
{
if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
sizeof(size_t));
} else {
size_t nadvance_z = (size_t)nadvance_u64;
assert((uint64_t)nadvance_z == nadvance_u64);
memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
(SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
if (nadvance_z > 1) {
memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
}
}
arena_decay_backlog_update_last(arena);
}
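The backlog behaves as a fixed-length sliding window over the last
SMOOTHSTEP_NSTEPS epochs: advancing k epochs shifts everything left k
slots and zeroes the vacated tail. The same motion in isolation (the
caller then refills the newest slot, as arena_decay_backlog_update_last()
does above):

#include <stddef.h>
#include <string.h>

static void
window_advance(size_t *w, size_t len, size_t k)
{

	if (k >= len) {
		memset(w, 0, len * sizeof(size_t));
		return;
	}
	memmove(w, w + k, (len - k) * sizeof(size_t));
	memset(w + (len - k), 0, k * sizeof(size_t));
}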
static void
arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
{
uint64_t nadvance_u64;
nstime_t delta;
assert(opt_purge == purge_mode_decay);
assert(arena_decay_deadline_reached(arena, time));
nstime_copy(&delta, time);
nstime_subtract(&delta, &arena->decay.epoch);
nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
assert(nadvance_u64 > 0);
/* Add nadvance_u64 decay intervals to epoch. */
nstime_copy(&delta, &arena->decay.interval);
nstime_imultiply(&delta, nadvance_u64);
nstime_add(&arena->decay.epoch, &delta);
/* Set a new deadline. */
arena_decay_deadline_init(arena);
/* Update the backlog. */
arena_decay_backlog_update(arena, nadvance_u64);
}
static void
arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
{
size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
if (arena->ndirty > ndirty_limit)
arena_purge_to_limit(tsdn, arena, ndirty_limit);
arena->decay.ndirty = arena->ndirty;
}
static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
{
arena_decay_epoch_advance_helper(arena, time);
arena_decay_epoch_advance_purge(tsdn, arena);
}
static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{
arena->decay.time = decay_time;
if (decay_time > 0) {
nstime_init2(&arena->decay.interval, decay_time, 0);
nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
}
nstime_init(&arena->decay.epoch, 0);
nstime_update(&arena->decay.epoch);
arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
arena_decay_deadline_init(arena);
arena->decay.ndirty = arena->ndirty;
memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}
static bool
arena_decay_time_valid(ssize_t decay_time)
{
if (decay_time < -1)
return (false);
if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
return (true);
return (false);
}
ssize_t
arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
{
ssize_t decay_time;
malloc_mutex_lock(tsdn, &arena->lock);
decay_time = arena->decay.time;
malloc_mutex_unlock(tsdn, &arena->lock);
return (decay_time);
}
bool
arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
{
if (!arena_decay_time_valid(decay_time))
return (true);
malloc_mutex_lock(tsdn, &arena->lock);
/*
* Restart decay backlog from scratch, which may cause many dirty pages
* to be immediately purged. It would conceptually be possible to map
* the old backlog onto the new backlog, but there is no justification
* for such complexity since decay_time changes are intended to be
* infrequent, either between the {-1, 0, >0} states, or a one-time
* arbitrary change during initial arena configuration.
*/
arena_decay_init(arena, decay_time);
arena_maybe_purge(tsdn, arena);
malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
}
static void
arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
{ {
assert(opt_purge == purge_mode_ratio);
/* Don't purge if the option is disabled. */ /* Don't purge if the option is disabled. */
if (arena->lg_dirty_mult < 0) if (arena->lg_dirty_mult < 0)
return; return;
/* Don't recursively purge. */
if (arena->purging)
return;
/* /*
* Iterate, since preventing recursive purging could otherwise leave too * Iterate, since preventing recursive purging could otherwise leave too
* many dirty pages. * many dirty pages.
@@ -1463,68 +1228,10 @@ arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
*/ */
if (arena->ndirty <= threshold) if (arena->ndirty <= threshold)
return; return;
arena_purge_to_limit(tsdn, arena, threshold); arena_purge(arena, false);
} }
} }
static void
arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
{
nstime_t time;
assert(opt_purge == purge_mode_decay);
/* Purge all or nothing if the option is disabled. */
if (arena->decay.time <= 0) {
if (arena->decay.time == 0)
arena_purge_to_limit(tsdn, arena, 0);
return;
}
nstime_init(&time, 0);
nstime_update(&time);
if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
&time) > 0)) {
/*
* Time went backwards. Move the epoch back in time and
* generate a new deadline, with the expectation that time
* typically flows forward for long enough periods of time that
* epochs complete. Unfortunately, this strategy is susceptible
* to clock jitter triggering premature epoch advances, but
* clock jitter estimation and compensation isn't feasible here
* because calls into this code are event-driven.
*/
nstime_copy(&arena->decay.epoch, &time);
arena_decay_deadline_init(arena);
} else {
/* Verify that time does not go backwards. */
assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
}
/*
* If the deadline has been reached, advance to the current epoch and
* purge to the new limit if necessary. Note that dirty pages created
* during the current epoch are not subject to purge until a future
* epoch, so as a result purging only happens during epoch advances.
*/
if (arena_decay_deadline_reached(arena, &time))
arena_decay_epoch_advance(tsdn, arena, &time);
}
void
arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
{
/* Don't recursively purge. */
if (arena->purging)
return;
if (opt_purge == purge_mode_ratio)
arena_maybe_purge_ratio(tsdn, arena);
else
arena_maybe_purge_decay(tsdn, arena);
}
static size_t static size_t
arena_dirty_count(arena_t *arena) arena_dirty_count(arena_t *arena)
{ {
@@ -1556,19 +1263,39 @@ arena_dirty_count(arena_t *arena)
ndirty += npages; ndirty += npages;
} }
return (ndirty); return (ndirty);
}
static size_t
arena_compute_npurge(arena_t *arena, bool all)
{
size_t npurge;
/*
* Compute the minimum number of pages that this thread should try to
* purge.
*/
if (!all) {
size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
threshold = threshold < chunk_npages ? chunk_npages : threshold;
npurge = arena->ndirty - threshold;
} else
npurge = arena->ndirty;
return (npurge);
} }
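Concretely: the ratio threshold keeps up to nactive >> lg_dirty_mult
dirty pages around (but never less than one chunk's worth), and npurge is
whatever exceeds that. A worked example with assumed numbers:

#include <stddef.h>

static size_t
npurge_example(void)
{
	size_t nactive = 10000, ndirty = 2000, chunk_npages = 512;
	unsigned lg_dirty_mult = 3;	/* allow 1 dirty page per 8 active */
	size_t threshold = nactive >> lg_dirty_mult;	/* 1250 */

	if (threshold < chunk_npages)
		threshold = chunk_npages;
	return (ndirty - threshold);	/* 2000 - 1250 = 750 pages */
}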
static size_t static size_t
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel, size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel) extent_node_t *purge_chunks_sentinel)
{ {
arena_runs_dirty_link_t *rdelm, *rdelm_next; arena_runs_dirty_link_t *rdelm, *rdelm_next;
extent_node_t *chunkselm; extent_node_t *chunkselm;
size_t nstashed = 0; size_t nstashed = 0;
/* Stash runs/chunks according to ndirty_limit. */ /* Stash at least npurge pages. */
for (rdelm = qr_next(&arena->runs_dirty, rd_link), for (rdelm = qr_next(&arena->runs_dirty, rd_link),
chunkselm = qr_next(&arena->chunks_cache, cc_link); chunkselm = qr_next(&arena->chunks_cache, cc_link);
rdelm != &arena->runs_dirty; rdelm = rdelm_next) { rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
@@ -1577,32 +1304,24 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (rdelm == &chunkselm->rd) { if (rdelm == &chunkselm->rd) {
extent_node_t *chunkselm_next; extent_node_t *chunkselm_next;
size_t sn; bool zero;
bool zero, commit;
UNUSED void *chunk; UNUSED void *chunk;
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
if (opt_purge == purge_mode_decay && arena->ndirty -
(nstashed + npages) < ndirty_limit)
break;
chunkselm_next = qr_next(chunkselm, cc_link); chunkselm_next = qr_next(chunkselm, cc_link);
/* /*
* Allocate. chunkselm remains valid due to the * Allocate. chunkselm remains valid due to the
* dalloc_node=false argument to chunk_alloc_cache(). * dalloc_node=false argument to chunk_alloc_cache().
*/ */
zero = false; zero = false;
commit = false; chunk = chunk_alloc_cache(arena, chunk_hooks,
chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
extent_node_addr_get(chunkselm), extent_node_addr_get(chunkselm),
extent_node_size_get(chunkselm), chunksize, &sn, extent_node_size_get(chunkselm), chunksize, &zero,
&zero, &commit, false); false);
assert(chunk == extent_node_addr_get(chunkselm)); assert(chunk == extent_node_addr_get(chunkselm));
assert(zero == extent_node_zeroed_get(chunkselm)); assert(zero == extent_node_zeroed_get(chunkselm));
extent_node_dirty_insert(chunkselm, purge_runs_sentinel, extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
purge_chunks_sentinel); purge_chunks_sentinel);
assert(npages == (extent_node_size_get(chunkselm) >> npages = extent_node_size_get(chunkselm) >> LG_PAGE;
LG_PAGE));
chunkselm = chunkselm_next; chunkselm = chunkselm_next;
} else { } else {
arena_chunk_t *chunk = arena_chunk_t *chunk =
@@ -1615,9 +1334,6 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_mapbits_unallocated_size_get(chunk, pageind); arena_mapbits_unallocated_size_get(chunk, pageind);
npages = run_size >> LG_PAGE; npages = run_size >> LG_PAGE;
if (opt_purge == purge_mode_decay && arena->ndirty -
(nstashed + npages) < ndirty_limit)
break;
assert(pageind + npages <= chunk_npages); assert(pageind + npages <= chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) == assert(arena_mapbits_dirty_get(chunk, pageind) ==
...@@ -1628,7 +1344,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -1628,7 +1344,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
* prior to allocation. * prior to allocation.
*/ */
if (chunk == arena->spare) if (chunk == arena->spare)
arena_chunk_alloc(tsdn, arena); arena_chunk_alloc(arena);
/* Temporarily allocate the free dirty run. */ /* Temporarily allocate the free dirty run. */
arena_run_split_large(arena, run, run_size, false); arena_run_split_large(arena, run, run_size, false);
...@@ -1643,8 +1359,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -1643,8 +1359,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
} }
nstashed += npages; nstashed += npages;
if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <= if (!all && nstashed >= npurge)
ndirty_limit)
break; break;
} }
...@@ -1652,7 +1367,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -1652,7 +1367,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
} }
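The loop's stop condition is the visible behavioral change in this function: 4.4.0 stops relative to ndirty_limit (with the direction depending on opt_purge), while the restored code simply stops once npurge pages are stashed, unless a full purge was requested. A side-by-side sketch, with plain parameters standing in for the arena state:

    #include <stdbool.h>
    #include <stddef.h>

    /* 4.4.0 ratio-mode rule: stop once the remaining dirty pages would
     * drop to the limit. */
    bool
    stop_ratio(size_t ndirty, size_t nstashed, size_t ndirty_limit)
    {
        return (ndirty - nstashed <= ndirty_limit);
    }

    /* Restored rule: stop once the precomputed quota is met, unless the
     * caller asked to purge everything. */
    bool
    stop_npurge(size_t nstashed, size_t npurge, bool all)
    {
        return (!all && nstashed >= npurge);
    }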
 static size_t
-arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
     arena_runs_dirty_link_t *purge_runs_sentinel,
     extent_node_t *purge_chunks_sentinel)
 {
@@ -1664,7 +1379,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	nmadvise = 0;
 	npurged = 0;
-	malloc_mutex_unlock(tsdn, &arena->lock);
+	malloc_mutex_unlock(&arena->lock);
 	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
 	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
 	    rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
@@ -1693,17 +1408,6 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 			run_size = arena_mapbits_large_size_get(chunk, pageind);
 			npages = run_size >> LG_PAGE;
 
-			/*
-			 * If this is the first run purged within chunk, mark
-			 * the chunk as non-huge.  This will prevent all use of
-			 * transparent huge pages for this chunk until the chunk
-			 * as a whole is deallocated.
-			 */
-			if (chunk->hugepage) {
-				pages_nohuge(chunk, chunksize);
-				chunk->hugepage = false;
-			}
-
 			assert(pageind + npages <= chunk_npages);
 			assert(!arena_mapbits_decommitted_get(chunk, pageind));
 			assert(!arena_mapbits_decommitted_get(chunk,
@@ -1714,7 +1418,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 				flag_unzeroed = 0;
 				flags = CHUNK_MAP_DECOMMITTED;
 			} else {
-				flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
+				flag_unzeroed = chunk_purge_wrapper(arena,
 				    chunk_hooks, chunk, chunksize, pageind <<
 				    LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
 				flags = flag_unzeroed;
@@ -1745,7 +1449,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 		if (config_stats)
 			nmadvise++;
 	}
-	malloc_mutex_lock(tsdn, &arena->lock);
+	malloc_mutex_lock(&arena->lock);
 
 	if (config_stats) {
 		arena->stats.nmadvise += nmadvise;
@@ -1756,7 +1460,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 }
 
 static void
-arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
     arena_runs_dirty_link_t *purge_runs_sentinel,
     extent_node_t *purge_chunks_sentinel)
 {
@@ -1773,14 +1477,13 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 			    cc_link);
 			void *addr = extent_node_addr_get(chunkselm);
 			size_t size = extent_node_size_get(chunkselm);
-			size_t sn = extent_node_sn_get(chunkselm);
 			bool zeroed = extent_node_zeroed_get(chunkselm);
 			bool committed = extent_node_committed_get(chunkselm);
 			extent_node_dirty_remove(chunkselm);
-			arena_node_dalloc(tsdn, arena, chunkselm);
+			arena_node_dalloc(arena, chunkselm);
 			chunkselm = chunkselm_next;
-			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
-			    size, sn, zeroed, committed);
+			chunk_dalloc_arena(arena, chunk_hooks, addr, size,
+			    zeroed, committed);
 		} else {
 			arena_chunk_t *chunk =
 			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
@@ -1791,26 +1494,16 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 			    pageind) != 0);
 			arena_run_t *run = &miscelm->run;
 			qr_remove(rdelm, rd_link);
-			arena_run_dalloc(tsdn, arena, run, false, true,
-			    decommitted);
+			arena_run_dalloc(arena, run, false, true, decommitted);
 		}
 	}
 }
 
-/*
- * NB: ndirty_limit is interpreted differently depending on opt_purge:
- *   - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
- *     desired state:
- *       (arena->ndirty <= ndirty_limit)
- *   - purge_mode_decay: Purge as many dirty runs/chunks as possible without
- *     violating the invariant:
- *       (arena->ndirty >= ndirty_limit)
- */
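The comment removed above is the clearest statement of the two purge-mode contracts in 4.4.0. Expressed as post-condition checks (a hypothetical helper for illustration, not code from either tree):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Post-conditions described by the removed NB comment: ratio mode
     * purges down to at most ndirty_limit dirty pages; decay mode purges
     * as much as it can without going below ndirty_limit. */
    void
    check_purge_postcondition(size_t ndirty, size_t ndirty_limit,
        bool ratio_mode)
    {
        if (ratio_mode)
            assert(ndirty <= ndirty_limit);
        else
            assert(ndirty >= ndirty_limit);
    }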
 static void
-arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
+arena_purge(arena_t *arena, bool all)
 {
-	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
-	size_t npurge, npurged;
+	chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
+	size_t npurge, npurgeable, npurged;
 	arena_runs_dirty_link_t purge_runs_sentinel;
 	extent_node_t purge_chunks_sentinel;
@@ -1824,183 +1517,34 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
 		size_t ndirty = arena_dirty_count(arena);
 		assert(ndirty == arena->ndirty);
 	}
-	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
-	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
+	assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all);
+
+	if (config_stats)
+		arena->stats.npurge++;
+
+	npurge = arena_compute_npurge(arena, all);
 	qr_new(&purge_runs_sentinel, rd_link);
 	extent_node_dirty_linkage_init(&purge_chunks_sentinel);
-
-	npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
+	npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge,
 	    &purge_runs_sentinel, &purge_chunks_sentinel);
-	if (npurge == 0)
-		goto label_return;
-	npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
-	    &purge_runs_sentinel, &purge_chunks_sentinel);
-	assert(npurged == npurge);
-	arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
+	assert(npurgeable >= npurge);
+	npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
+	    &purge_chunks_sentinel);
+	assert(npurged == npurgeable);
+	arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
 	    &purge_chunks_sentinel);
-
-	if (config_stats)
-		arena->stats.npurge++;
-
-label_return:
 	arena->purging = false;
 }
 
 void
-arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
-{
-	malloc_mutex_lock(tsdn, &arena->lock);
-	if (all)
-		arena_purge_to_limit(tsdn, arena, 0);
-	else
-		arena_maybe_purge(tsdn, arena);
-	malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-static void
-arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
-{
-	size_t pageind, npages;
-
-	cassert(config_prof);
-	assert(opt_prof);
-
-	/*
-	 * Iterate over the allocated runs and remove profiled allocations from
-	 * the sample set.
-	 */
-	for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
-		if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
-			if (arena_mapbits_large_get(chunk, pageind) != 0) {
-				void *ptr = (void *)((uintptr_t)chunk + (pageind
-				    << LG_PAGE));
-				size_t usize = isalloc(tsd_tsdn(tsd), ptr,
-				    config_prof);
-
-				prof_free(tsd, ptr, usize);
-				npages = arena_mapbits_large_size_get(chunk,
-				    pageind) >> LG_PAGE;
-			} else {
-				/* Skip small run. */
-				size_t binind = arena_mapbits_binind_get(chunk,
-				    pageind);
-				arena_bin_info_t *bin_info =
-				    &arena_bin_info[binind];
-				npages = bin_info->run_size >> LG_PAGE;
-			}
-		} else {
-			/* Skip unallocated run. */
-			npages = arena_mapbits_unallocated_size_get(chunk,
-			    pageind) >> LG_PAGE;
-		}
-		assert(pageind + npages <= chunk_npages);
-	}
-}
-
-void
-arena_reset(tsd_t *tsd, arena_t *arena)
+arena_purge_all(arena_t *arena)
 {
-	unsigned i;
-	extent_node_t *node;
-
-	/*
-	 * Locking in this function is unintuitive.  The caller guarantees that
-	 * no concurrent operations are happening in this arena, but there are
-	 * still reasons that some locking is necessary:
-	 *
-	 * - Some of the functions in the transitive closure of calls assume
-	 *   appropriate locks are held, and in some cases these locks are
-	 *   temporarily dropped to avoid lock order reversal or deadlock due to
-	 *   reentry.
-	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
-	 *   strictly speaking this is a "concurrent operation", disallowing
-	 *   stats refreshes would impose an inconvenient burden.
-	 */
-
-	/* Remove large allocations from prof sample set. */
-	if (config_prof && opt_prof) {
-		ql_foreach(node, &arena->achunks, ql_link) {
-			arena_achunk_prof_reset(tsd, arena,
-			    extent_node_addr_get(node));
-		}
-	}
-
-	/* Reset curruns for large size classes. */
-	if (config_stats) {
-		for (i = 0; i < nlclasses; i++)
-			arena->stats.lstats[i].curruns = 0;
-	}
-
-	/* Huge allocations. */
-	malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
-	for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
-	    ql_last(&arena->huge, ql_link)) {
-		void *ptr = extent_node_addr_get(node);
-		size_t usize;
-
-		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
-		if (config_stats || (config_prof && opt_prof))
-			usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
-		/* Remove huge allocation from prof sample set. */
-		if (config_prof && opt_prof)
-			prof_free(tsd, ptr, usize);
-		huge_dalloc(tsd_tsdn(tsd), ptr);
-		malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
-		/* Cancel out unwanted effects on stats. */
-		if (config_stats)
-			arena_huge_reset_stats_cancel(arena, usize);
-	}
-	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
-
-	malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
-
-	/* Bins. */
-	for (i = 0; i < NBINS; i++) {
-		arena_bin_t *bin = &arena->bins[i];
-		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-		bin->runcur = NULL;
-		arena_run_heap_new(&bin->runs);
-		if (config_stats) {
-			bin->stats.curregs = 0;
-			bin->stats.curruns = 0;
-		}
-		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-	}
-
-	/*
-	 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
-	 * chains directly correspond.
-	 */
-	qr_new(&arena->runs_dirty, rd_link);
-	for (node = qr_next(&arena->chunks_cache, cc_link);
-	    node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
-		qr_new(&node->rd, rd_link);
-		qr_meld(&arena->runs_dirty, &node->rd, rd_link);
-	}
-
-	/* Arena chunks. */
-	for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
-	    ql_last(&arena->achunks, ql_link)) {
-		ql_remove(&arena->achunks, node, ql_link);
-		arena_chunk_discard(tsd_tsdn(tsd), arena,
-		    extent_node_addr_get(node));
-	}
-
-	/* Spare. */
-	if (arena->spare != NULL) {
-		arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
-		arena->spare = NULL;
-	}
-
-	assert(!arena->purging);
-	arena->nactive = 0;
-
-	for (i = 0; i < NPSIZES; i++)
-		arena_run_heap_new(&arena->runs_avail[i]);
-
-	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+	malloc_mutex_lock(&arena->lock);
+	arena_purge(arena, true);
+	malloc_mutex_unlock(&arena->lock);
 }
 
 static void
@@ -2116,9 +1660,21 @@ arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 	return (size);
 }
 
+static bool
+arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run)
+{
+	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+	size_t run_ind = arena_miscelm_to_pageind(miscelm);
+	size_t offset = run_ind << LG_PAGE;
+	size_t length = arena_run_size_get(arena, chunk, run, run_ind);
+
+	return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length,
+	    arena->ind));
+}
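arena_run_decommit(), added back above, converts a run's page index into the byte range handed to the chunk decommit hook; in the real function the length comes from arena_run_size_get(), which already returns bytes. A minimal sketch of the index arithmetic, assuming LG_PAGE = 12 (4 KiB pages); the helper name is hypothetical:

    #include <stddef.h>

    #define LG_PAGE 12 /* assumed: 4 KiB pages */

    /* A run's first page index and page count become the byte offset and
     * length within its chunk. */
    void
    run_byte_range(size_t run_ind, size_t run_npages,
        size_t *offset, size_t *length)
    {
        *offset = run_ind << LG_PAGE;
        *length = run_npages << LG_PAGE;
    }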
 static void
-arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
-    bool cleaned, bool decommitted)
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
+    bool decommitted)
 {
 	arena_chunk_t *chunk;
 	arena_chunk_map_misc_t *miscelm;
@@ -2131,7 +1687,8 @@ arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
 	assert(run_ind < chunk_npages);
 	size = arena_run_size_get(arena, chunk, run, run_ind);
 	run_pages = (size >> LG_PAGE);
-	arena_nactive_sub(arena, run_pages);
+	arena_cactive_update(arena, 0, run_pages);
+	arena->nactive -= run_pages;
 
 	/*
 	 * The run is dirty if the caller claims to have dirtied it, as well as
@@ -2178,7 +1735,7 @@ arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
 	if (size == arena_maxrun) {
 		assert(run_ind == map_bias);
 		assert(run_pages == (arena_maxrun >> LG_PAGE));
-		arena_chunk_dalloc(tsdn, arena, chunk);
+		arena_chunk_dalloc(arena, chunk);
 	}
 
 	/*
@@ -2189,12 +1746,21 @@ arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
 	 * chances of spuriously crossing the dirty page purging threshold.
 	 */
 	if (dirty)
-		arena_maybe_purge(tsdn, arena);
+		arena_maybe_purge(arena);
+}
+
+static void
+arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk,
+    arena_run_t *run)
+{
+	bool committed = arena_run_decommit(arena, chunk, run);
+
+	arena_run_dalloc(arena, run, committed, false, !committed);
 }
 
 static void
-arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
-    arena_run_t *run, size_t oldsize, size_t newsize)
+arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+    size_t oldsize, size_t newsize)
 {
 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
 	size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -2229,13 +1795,12 @@ arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
 	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
 	    pageind+head_npages)));
 
-	arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
-	    0));
+	arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
 }
 
 static void
-arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
-    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
+arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+    size_t oldsize, size_t newsize, bool dirty)
 {
 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
 	size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -2272,10 +1837,20 @@ arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
 	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
 	    pageind+head_npages)));
 
-	tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
+	tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
 	tail_run = &tail_miscelm->run;
-	arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
-	    != 0));
+	arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
+	    0));
+}
+
+static arena_run_t *
+arena_bin_runs_first(arena_bin_t *bin)
+{
+	arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
+	if (miscelm != NULL)
+		return (&miscelm->run);
+
+	return (NULL);
 }
 
 static void
@@ -2283,25 +1858,35 @@ arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
 {
 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
 
-	arena_run_heap_insert(&bin->runs, miscelm);
+	assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
+
+	arena_run_tree_insert(&bin->runs, miscelm);
 }
 
-static arena_run_t *
-arena_bin_nonfull_run_tryget(arena_bin_t *bin)
+static void
+arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
 {
-	arena_chunk_map_misc_t *miscelm;
+	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
 
-	miscelm = arena_run_heap_remove_first(&bin->runs);
-	if (miscelm == NULL)
-		return (NULL);
-	if (config_stats)
-		bin->stats.reruns++;
+	assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
 
-	return (&miscelm->run);
+	arena_run_tree_remove(&bin->runs, miscelm);
 }
 
 static arena_run_t *
-arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
+arena_bin_nonfull_run_tryget(arena_bin_t *bin)
+{
+	arena_run_t *run = arena_bin_runs_first(bin);
+	if (run != NULL) {
+		arena_bin_runs_remove(bin, run);
+		if (config_stats)
+			bin->stats.reruns++;
+	}
+
+	return (run);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 {
 	arena_run_t *run;
 	szind_t binind;
@@ -2317,19 +1902,19 @@ arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
 	bin_info = &arena_bin_info[binind];
 
 	/* Allocate a new run. */
-	malloc_mutex_unlock(tsdn, &bin->lock);
+	malloc_mutex_unlock(&bin->lock);
 	/******************************/
-	malloc_mutex_lock(tsdn, &arena->lock);
-	run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
+	malloc_mutex_lock(&arena->lock);
+	run = arena_run_alloc_small(arena, bin_info->run_size, binind);
 	if (run != NULL) {
 		/* Initialize run internals. */
 		run->binind = binind;
 		run->nfree = bin_info->nregs;
 		bitmap_init(run->bitmap, &bin_info->bitmap_info);
 	}
-	malloc_mutex_unlock(tsdn, &arena->lock);
+	malloc_mutex_unlock(&arena->lock);
 	/********************************/
-	malloc_mutex_lock(tsdn, &bin->lock);
+	malloc_mutex_lock(&bin->lock);
 	if (run != NULL) {
 		if (config_stats) {
 			bin->stats.nruns++;
@@ -2352,7 +1937,7 @@ arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
 /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
 static void *
-arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
+arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
 {
 	szind_t binind;
 	arena_bin_info_t *bin_info;
@@ -2361,7 +1946,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
 	binind = arena_bin_index(arena, bin);
 	bin_info = &arena_bin_info[binind];
 	bin->runcur = NULL;
-	run = arena_bin_nonfull_run_get(tsdn, arena, bin);
+	run = arena_bin_nonfull_run_get(arena, bin);
 	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
 		/*
 		 * Another thread updated runcur while this one ran without the
@@ -2382,11 +1967,10 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
 		 * were just deallocated from the run.
 		 */
 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-		if (run->nfree == bin_info->nregs) {
-			arena_dalloc_bin_run(tsdn, arena, chunk, run,
-			    bin);
-		} else
-			arena_bin_lower_run(arena, run, bin);
+		if (run->nfree == bin_info->nregs)
+			arena_dalloc_bin_run(arena, chunk, run, bin);
+		else
+			arena_bin_lower_run(arena, chunk, run, bin);
 	}
 
 	return (ret);
 }
@@ -2402,18 +1986,18 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
 }
 
 void
-arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
-    szind_t binind, uint64_t prof_accumbytes)
+arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
+    uint64_t prof_accumbytes)
 {
 	unsigned i, nfill;
 	arena_bin_t *bin;
 
 	assert(tbin->ncached == 0);
 
-	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
-		prof_idump(tsdn);
+	if (config_prof && arena_prof_accum(arena, prof_accumbytes))
+		prof_idump();
 	bin = &arena->bins[binind];
-	malloc_mutex_lock(tsdn, &bin->lock);
+	malloc_mutex_lock(&bin->lock);
 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
 	    tbin->lg_fill_div); i < nfill; i++) {
 		arena_run_t *run;
@@ -2421,15 +2005,16 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
 		if ((run = bin->runcur) != NULL && run->nfree > 0)
 			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
 		else
-			ptr = arena_bin_malloc_hard(tsdn, arena, bin);
+			ptr = arena_bin_malloc_hard(arena, bin);
 		if (ptr == NULL) {
 			/*
 			 * OOM.  tbin->avail isn't yet filled down to its first
 			 * element, so the successful allocations (if any) must
-			 * be moved just before tbin->avail before bailing out.
+			 * be moved to the base of tbin->avail before bailing
+			 * out.
 			 */
 			if (i > 0) {
-				memmove(tbin->avail - i, tbin->avail - nfill,
+				memmove(tbin->avail, &tbin->avail[nfill - i],
 				    i * sizeof(void *));
 			}
 			break;
@@ -2439,7 +2024,7 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
 			    true);
 		}
 		/* Insert such that low regions get used first. */
-		*(tbin->avail - nfill + i) = ptr;
+		tbin->avail[nfill - 1 - i] = ptr;
 	}
 	if (config_stats) {
 		bin->stats.nmalloc += i;
@@ -2448,31 +2033,29 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
 		bin->stats.nfills++;
 		tbin->tstats.nrequests = 0;
 	}
-	malloc_mutex_unlock(tsdn, &bin->lock);
+	malloc_mutex_unlock(&bin->lock);
 	tbin->ncached = i;
-	arena_decay_tick(tsdn, arena);
 }
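The avail hunks above are the subtle part of this function: 4.4.0 treats tbin->avail as a pointer just past the cache storage and fills downward from it, while the restored code indexes a plain array from the top. In both layouts the lowest-addressed regions end up popped first. A minimal sketch of the restored ordering; fake_region() is a hypothetical stand-in for arena_run_reg_alloc():

    #include <stdio.h>

    /* Stand-in allocator that hands out ascending addresses. */
    static void *
    fake_region(int i)
    {
        static char pool[4][64];
        return (pool[i]);
    }

    int
    main(void)
    {
        void *avail[4];
        int nfill = 4;

        /* Restored indexing: the i-th (lowest) region lands in the highest
         * remaining slot, so pops from the top return low regions first. */
        for (int i = 0; i < nfill; i++)
            avail[nfill - 1 - i] = fake_region(i);
        for (int i = nfill - 1; i >= 0; i--)
            printf("pop -> %p\n", avail[i]);
        return (0);
    }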
 void
 arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
 {
-	size_t redzone_size = bin_info->redzone_size;
-
 	if (zero) {
-		memset((void *)((uintptr_t)ptr - redzone_size),
-		    JEMALLOC_ALLOC_JUNK, redzone_size);
-		memset((void *)((uintptr_t)ptr + bin_info->reg_size),
-		    JEMALLOC_ALLOC_JUNK, redzone_size);
+		size_t redzone_size = bin_info->redzone_size;
+		memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
+		    redzone_size);
+		memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
+		    redzone_size);
 	} else {
-		memset((void *)((uintptr_t)ptr - redzone_size),
-		    JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
+		memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
+		    bin_info->reg_interval);
 	}
 }
 
 #ifdef JEMALLOC_JET
 #undef arena_redzone_corruption
-#define	arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
+#define	arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
 #endif
 static void
 arena_redzone_corruption(void *ptr, size_t usize, bool after,
@@ -2487,7 +2070,7 @@ arena_redzone_corruption(void *ptr, size_t usize, bool after,
 #undef arena_redzone_corruption
 #define	arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
 arena_redzone_corruption_t *arena_redzone_corruption =
-    JEMALLOC_N(n_arena_redzone_corruption);
+    JEMALLOC_N(arena_redzone_corruption_impl);
 #endif
 
 static void
@@ -2502,22 +2085,22 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
 		for (i = 1; i <= redzone_size; i++) {
 			uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
-			if (*byte != JEMALLOC_ALLOC_JUNK) {
+			if (*byte != 0xa5) {
 				error = true;
 				arena_redzone_corruption(ptr, size, false, i,
 				    *byte);
 				if (reset)
-					*byte = JEMALLOC_ALLOC_JUNK;
+					*byte = 0xa5;
 			}
 		}
 		for (i = 0; i < redzone_size; i++) {
 			uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
-			if (*byte != JEMALLOC_ALLOC_JUNK) {
+			if (*byte != 0xa5) {
 				error = true;
 				arena_redzone_corruption(ptr, size, true, i,
 				    *byte);
 				if (reset)
-					*byte = JEMALLOC_ALLOC_JUNK;
+					*byte = 0xa5;
 			}
 		}
 	}
@@ -2528,7 +2111,7 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
 #ifdef JEMALLOC_JET
 #undef arena_dalloc_junk_small
-#define	arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
+#define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
 #endif
 void
 arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
@@ -2536,14 +2119,14 @@ arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
 	size_t redzone_size = bin_info->redzone_size;
 
 	arena_redzones_validate(ptr, bin_info, false);
-	memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
+	memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
 	    bin_info->reg_interval);
 }
 #ifdef JEMALLOC_JET
 #undef arena_dalloc_junk_small
 #define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
 arena_dalloc_junk_small_t *arena_dalloc_junk_small =
-    JEMALLOC_N(n_arena_dalloc_junk_small);
+    JEMALLOC_N(arena_dalloc_junk_small_impl);
 #endif
 
 void
@@ -2561,26 +2144,27 @@ arena_quarantine_junk_small(void *ptr, size_t usize)
 	arena_redzones_validate(ptr, bin_info, true);
 }
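Both versions implement the same junk-byte convention the hunks above spell out: 0xa5 marks freshly allocated bytes and redzones, 0x5a marks freed bytes; 4.4.0 merely names these JEMALLOC_ALLOC_JUNK and JEMALLOC_FREE_JUNK. A minimal check in the spirit of arena_redzones_validate() (the macro names here are illustrative, not jemalloc's):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define ALLOC_JUNK 0xa5 /* fresh allocations and redzones */
    #define FREE_JUNK  0x5a /* freed memory */

    /* Returns true iff every redzone byte still holds the alloc-junk
     * pattern, i.e. nothing wrote past the region bounds. */
    bool
    redzone_intact(const uint8_t *rz, size_t len)
    {
        for (size_t i = 0; i < len; i++) {
            if (rz[i] != ALLOC_JUNK)
                return (false);
        }
        return (true);
    }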
-static void *
-arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
+void *
+arena_malloc_small(arena_t *arena, size_t size, bool zero)
 {
 	void *ret;
 	arena_bin_t *bin;
-	size_t usize;
 	arena_run_t *run;
+	szind_t binind;
 
+	binind = size2index(size);
 	assert(binind < NBINS);
 	bin = &arena->bins[binind];
-	usize = index2size(binind);
-	malloc_mutex_lock(tsdn, &bin->lock);
+	size = index2size(binind);
+	malloc_mutex_lock(&bin->lock);
 	if ((run = bin->runcur) != NULL && run->nfree > 0)
 		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
 	else
-		ret = arena_bin_malloc_hard(tsdn, arena, bin);
+		ret = arena_bin_malloc_hard(arena, bin);
 
 	if (ret == NULL) {
-		malloc_mutex_unlock(tsdn, &bin->lock);
+		malloc_mutex_unlock(&bin->lock);
 		return (NULL);
 	}
@@ -2589,9 +2173,9 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
 		bin->stats.nrequests++;
 		bin->stats.curregs++;
 	}
-	malloc_mutex_unlock(tsdn, &bin->lock);
-	if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
-		prof_idump(tsdn);
+	malloc_mutex_unlock(&bin->lock);
+	if (config_prof && !isthreaded && arena_prof_accum(arena, size))
+		prof_idump();
 
 	if (!zero) {
 		if (config_fill) {
@@ -2599,35 +2183,34 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
 				arena_alloc_junk_small(ret,
 				    &arena_bin_info[binind], false);
 			} else if (unlikely(opt_zero))
-				memset(ret, 0, usize);
+				memset(ret, 0, size);
 		}
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
+		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	} else {
 		if (config_fill && unlikely(opt_junk_alloc)) {
 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
 			    true);
 		}
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
-		memset(ret, 0, usize);
+		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+		memset(ret, 0, size);
 	}
-	arena_decay_tick(tsdn, arena);
 
 	return (ret);
 }
 
 void *
-arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
+arena_malloc_large(arena_t *arena, size_t size, bool zero)
 {
 	void *ret;
 	size_t usize;
 	uintptr_t random_offset;
 	arena_run_t *run;
 	arena_chunk_map_misc_t *miscelm;
-	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
+	UNUSED bool idump;
 
 	/* Large allocation. */
-	usize = index2size(binind);
-	malloc_mutex_lock(tsdn, &arena->lock);
+	usize = s2u(size);
+	malloc_mutex_lock(&arena->lock);
 	if (config_cache_oblivious) {
 		uint64_t r;
@@ -2636,21 +2219,22 @@ arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
 		 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
 		 * for 4 KiB pages and 64-byte cachelines.
 		 */
-		r = prng_lg_range_zu(&arena->offset_state, LG_PAGE -
-		    LG_CACHELINE, false);
+		prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state,
+		    UINT64_C(6364136223846793009),
+		    UINT64_C(1442695040888963409));
 		random_offset = ((uintptr_t)r) << LG_CACHELINE;
 	} else
 		random_offset = 0;
-	run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
+	run = arena_run_alloc_large(arena, usize + large_pad, zero);
 	if (run == NULL) {
-		malloc_mutex_unlock(tsdn, &arena->lock);
+		malloc_mutex_unlock(&arena->lock);
 		return (NULL);
 	}
 	miscelm = arena_run_to_miscelm(run);
 	ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
 	    random_offset);
 	if (config_stats) {
-		szind_t index = binind - NBINS;
+		szind_t index = size2index(usize) - NBINS;
 
 		arena->stats.nmalloc_large++;
 		arena->stats.nrequests_large++;
@@ -2661,45 +2245,25 @@ arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
 	}
 	if (config_prof)
 		idump = arena_prof_accum_locked(arena, usize);
-	malloc_mutex_unlock(tsdn, &arena->lock);
+	malloc_mutex_unlock(&arena->lock);
 	if (config_prof && idump)
-		prof_idump(tsdn);
+		prof_idump();
 
 	if (!zero) {
 		if (config_fill) {
 			if (unlikely(opt_junk_alloc))
-				memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+				memset(ret, 0xa5, usize);
 			else if (unlikely(opt_zero))
 				memset(ret, 0, usize);
 		}
 	}
-	arena_decay_tick(tsdn, arena);
 
 	return (ret);
 }
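The cache-oblivious branch above restores jemalloc's prng64() macro, a 64-bit linear congruential generator: the two UINT64_C constants in the hunk are its multiplier and increment, and the top LG_PAGE - LG_CACHELINE bits of the advanced state select one of the cacheline-aligned offsets within a page. A standalone sketch, assuming 4 KiB pages (LG_PAGE = 12) and 64-byte cachelines (LG_CACHELINE = 6):

    #include <stdint.h>

    #define LG_PAGE      12 /* assumed: 4 KiB pages */
    #define LG_CACHELINE 6  /* assumed: 64-byte cachelines */

    /* One LCG step; the high lg_range bits of the state become the random
     * value, as in the restored prng64() macro. */
    static uint64_t
    lcg_next(uint64_t *state, unsigned lg_range)
    {
        *state = *state * UINT64_C(6364136223846793009) +
            UINT64_C(1442695040888963409);
        return (*state >> (64 - lg_range));
    }

    /* Picks one of 64 cacheline-aligned offsets in [0, 4096). */
    uintptr_t
    random_large_offset(uint64_t *offset_state)
    {
        uint64_t r = lcg_next(offset_state, LG_PAGE - LG_CACHELINE);
        return ((uintptr_t)r << LG_CACHELINE);
    }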
-void *
-arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
-    bool zero)
-{
-	assert(!tsdn_null(tsdn) || arena != NULL);
-
-	if (likely(!tsdn_null(tsdn)))
-		arena = arena_choose(tsdn_tsd(tsdn), arena);
-	if (unlikely(arena == NULL))
-		return (NULL);
-
-	if (likely(size <= SMALL_MAXCLASS))
-		return (arena_malloc_small(tsdn, arena, ind, zero));
-	if (likely(size <= large_maxclass))
-		return (arena_malloc_large(tsdn, arena, ind, zero));
-	return (huge_malloc(tsdn, arena, index2size(ind), zero));
-}
-
 /* Only handles large allocations that require more than page alignment. */
 static void *
-arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
     bool zero)
 {
 	void *ret;
@@ -2709,21 +2273,19 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	arena_chunk_map_misc_t *miscelm;
 	void *rpages;
 
-	assert(!tsdn_null(tsdn) || arena != NULL);
 	assert(usize == PAGE_CEILING(usize));
 
-	if (likely(!tsdn_null(tsdn)))
-		arena = arena_choose(tsdn_tsd(tsdn), arena);
+	arena = arena_choose(tsd, arena);
 	if (unlikely(arena == NULL))
 		return (NULL);
 
 	alignment = PAGE_CEILING(alignment);
 	alloc_size = usize + large_pad + alignment - PAGE;
 
-	malloc_mutex_lock(tsdn, &arena->lock);
-	run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
+	malloc_mutex_lock(&arena->lock);
+	run = arena_run_alloc_large(arena, alloc_size, false);
 	if (run == NULL) {
-		malloc_mutex_unlock(tsdn, &arena->lock);
+		malloc_mutex_unlock(&arena->lock);
 		return (NULL);
 	}
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
@@ -2738,16 +2300,16 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 		arena_chunk_map_misc_t *head_miscelm = miscelm;
 		arena_run_t *head_run = run;
 
-		miscelm = arena_miscelm_get_mutable(chunk,
+		miscelm = arena_miscelm_get(chunk,
 		    arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
 		    LG_PAGE));
 		run = &miscelm->run;
 
-		arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
+		arena_run_trim_head(arena, chunk, head_run, alloc_size,
 		    alloc_size - leadsize);
 	}
 	if (trailsize != 0) {
-		arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
+		arena_run_trim_tail(arena, chunk, run, usize + large_pad +
 		    trailsize, usize + large_pad, false);
 	}
 	if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
@@ -2758,8 +2320,8 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 		    run_ind) != 0);
 
 		assert(decommitted); /* Cause of OOM. */
-		arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
-		malloc_mutex_unlock(tsdn, &arena->lock);
+		arena_run_dalloc(arena, run, dirty, false, decommitted);
+		malloc_mutex_unlock(&arena->lock);
 		return (NULL);
 	}
 	ret = arena_miscelm_to_rpages(miscelm);
@@ -2774,20 +2336,19 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 		arena->stats.lstats[index].nrequests++;
 		arena->stats.lstats[index].curruns++;
 	}
-	malloc_mutex_unlock(tsdn, &arena->lock);
+	malloc_mutex_unlock(&arena->lock);
 
 	if (config_fill && !zero) {
 		if (unlikely(opt_junk_alloc))
-			memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+			memset(ret, 0xa5, usize);
 		else if (unlikely(opt_zero))
 			memset(ret, 0, usize);
 	}
-	arena_decay_tick(tsdn, arena);
 	return (ret);
 }
 
 void *
-arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
     bool zero, tcache_t *tcache)
 {
 	void *ret;
@@ -2795,8 +2356,7 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
 	    && (usize & PAGE_MASK) == 0))) {
 		/* Small; alignment doesn't require special run placement. */
-		ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
-		    tcache, true);
+		ret = arena_malloc(tsd, arena, usize, zero, tcache);
 	} else if (usize <= large_maxclass && alignment <= PAGE) {
 		/*
 		 * Large; alignment doesn't require special run placement.
@@ -2804,25 +2364,25 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 		 * the base of the run, so do some bit manipulation to retrieve
 		 * the base.
 		 */
-		ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
-		    tcache, true);
+		ret = arena_malloc(tsd, arena, usize, zero, tcache);
 		if (config_cache_oblivious)
 			ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
 	} else {
 		if (likely(usize <= large_maxclass)) {
-			ret = arena_palloc_large(tsdn, arena, usize, alignment,
+			ret = arena_palloc_large(tsd, arena, usize, alignment,
 			    zero);
 		} else if (likely(alignment <= chunksize))
-			ret = huge_malloc(tsdn, arena, usize, zero);
+			ret = huge_malloc(tsd, arena, usize, zero, tcache);
 		else {
-			ret = huge_palloc(tsdn, arena, usize, alignment, zero);
+			ret = huge_palloc(tsd, arena, usize, alignment, zero,
+			    tcache);
 		}
 	}
 	return (ret);
 }
 
 void
-arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
+arena_prof_promoted(const void *ptr, size_t size)
 {
 	arena_chunk_t *chunk;
 	size_t pageind;
@@ -2831,8 +2391,8 @@ arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
 	cassert(config_prof);
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
-	assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
-	assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
+	assert(isalloc(ptr, false) == LARGE_MINCLASS);
+	assert(isalloc(ptr, true) == LARGE_MINCLASS);
 	assert(size <= SMALL_MAXCLASS);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@@ -2841,8 +2401,8 @@ arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
 	assert(binind < NBINS);
 	arena_mapbits_large_binind_set(chunk, pageind, binind);
 
-	assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
-	assert(isalloc(tsdn, ptr, true) == size);
+	assert(isalloc(ptr, false) == LARGE_MINCLASS);
+	assert(isalloc(ptr, true) == size);
 }
 
 static void
@@ -2858,51 +2418,48 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
 		    &chunk->node), bin);
 		arena_bin_info_t *bin_info = &arena_bin_info[binind];
 
-		/*
-		 * The following block's conditional is necessary because if the
-		 * run only contains one region, then it never gets inserted
-		 * into the non-full runs tree.
-		 */
 		if (bin_info->nregs != 1) {
-			arena_chunk_map_misc_t *miscelm =
-			    arena_run_to_miscelm(run);
-
-			arena_run_heap_remove(&bin->runs, miscelm);
+			/*
+			 * This block's conditional is necessary because if the
+			 * run only contains one region, then it never gets
+			 * inserted into the non-full runs tree.
+			 */
+			arena_bin_runs_remove(bin, run);
 		}
 	}
 }
 
 static void
-arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
-    arena_run_t *run, arena_bin_t *bin)
+arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+    arena_bin_t *bin)
 {
 	assert(run != bin->runcur);
+	assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
+	    NULL);
 
-	malloc_mutex_unlock(tsdn, &bin->lock);
+	malloc_mutex_unlock(&bin->lock);
 	/******************************/
-	malloc_mutex_lock(tsdn, &arena->lock);
-	arena_run_dalloc(tsdn, arena, run, true, false, false);
-	malloc_mutex_unlock(tsdn, &arena->lock);
+	malloc_mutex_lock(&arena->lock);
+	arena_run_dalloc_decommit(arena, chunk, run);
+	malloc_mutex_unlock(&arena->lock);
 	/****************************/
-	malloc_mutex_lock(tsdn, &bin->lock);
+	malloc_mutex_lock(&bin->lock);
 	if (config_stats)
 		bin->stats.curruns--;
 }
 
 static void
-arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
+arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+    arena_bin_t *bin)
 {
 	/*
-	 * Make sure that if bin->runcur is non-NULL, it refers to the
-	 * oldest/lowest non-full run.  It is okay to NULL runcur out rather
-	 * than proactively keeping it pointing at the oldest/lowest non-full
-	 * run.
+	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
+	 * non-full run.  It is okay to NULL runcur out rather than proactively
+	 * keeping it pointing at the lowest non-full run.
 	 */
-	if (bin->runcur != NULL &&
-	    arena_snad_comp(arena_run_to_miscelm(bin->runcur),
-	    arena_run_to_miscelm(run)) > 0) {
+	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
 		/* Switch runcur. */
 		if (bin->runcur->nfree > 0)
 			arena_bin_runs_insert(bin, bin->runcur);
@@ -2914,8 +2471,8 @@ arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
 }
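The restored runcur policy is purely address-ordered: a run replaces runcur whenever it sits lower in memory, which biases allocation toward low addresses and lets high runs empty out. (4.4.0 instead compares serial number and address via arena_snad_comp(), preferring the oldest/lowest run.) As a plain-pointer sketch, with a hypothetical helper name:

    #include <stdbool.h>
    #include <stdint.h>

    /* Restored policy: prefer the lowest-addressed non-full run as
     * runcur.  The comparison also tolerates a NULL runcur, since an
     * unsigned value is never less than zero. */
    bool
    should_switch_runcur(const void *run, const void *runcur)
    {
        return ((uintptr_t)run < (uintptr_t)runcur);
    }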
static void static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
void *ptr, arena_chunk_map_bits_t *bitselm, bool junked) arena_chunk_map_bits_t *bitselm, bool junked)
{ {
size_t pageind, rpages_ind; size_t pageind, rpages_ind;
arena_run_t *run; arena_run_t *run;
...@@ -2925,7 +2482,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, ...@@ -2925,7 +2482,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; run = &arena_miscelm_get(chunk, rpages_ind)->run;
binind = run->binind; binind = run->binind;
bin = &arena->bins[binind]; bin = &arena->bins[binind];
bin_info = &arena_bin_info[binind]; bin_info = &arena_bin_info[binind];
...@@ -2936,9 +2493,9 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, ...@@ -2936,9 +2493,9 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena_run_reg_dalloc(run, ptr); arena_run_reg_dalloc(run, ptr);
if (run->nfree == bin_info->nregs) { if (run->nfree == bin_info->nregs) {
arena_dissociate_bin_run(chunk, run, bin); arena_dissociate_bin_run(chunk, run, bin);
arena_dalloc_bin_run(tsdn, arena, chunk, run, bin); arena_dalloc_bin_run(arena, chunk, run, bin);
} else if (run->nfree == 1 && run != bin->runcur) } else if (run->nfree == 1 && run != bin->runcur)
arena_bin_lower_run(arena, run, bin); arena_bin_lower_run(arena, chunk, run, bin);
if (config_stats) { if (config_stats) {
bin->stats.ndalloc++; bin->stats.ndalloc++;
...@@ -2947,15 +2504,15 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, ...@@ -2947,15 +2504,15 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
} }
void void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm) arena_chunk_map_bits_t *bitselm)
{ {
arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true); arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
} }
void void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_bits_t *bitselm) size_t pageind, arena_chunk_map_bits_t *bitselm)
{ {
arena_run_t *run; arena_run_t *run;
...@@ -2963,16 +2520,16 @@ arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, ...@@ -2963,16 +2520,16 @@ arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t rpages_ind; size_t rpages_ind;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; run = &arena_miscelm_get(chunk, rpages_ind)->run;
bin = &arena->bins[run->binind]; bin = &arena->bins[run->binind];
malloc_mutex_lock(tsdn, &bin->lock); malloc_mutex_lock(&bin->lock);
arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false); arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
malloc_mutex_unlock(tsdn, &bin->lock); malloc_mutex_unlock(&bin->lock);
} }
void void
arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
void *ptr, size_t pageind) size_t pageind)
{ {
arena_chunk_map_bits_t *bitselm; arena_chunk_map_bits_t *bitselm;
...@@ -2981,36 +2538,34 @@ arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, ...@@ -2981,36 +2538,34 @@ arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind)) != BININD_INVALID); pageind)) != BININD_INVALID);
} }
bitselm = arena_bitselm_get_mutable(chunk, pageind); bitselm = arena_bitselm_get(chunk, pageind);
arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm); arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
arena_decay_tick(tsdn, arena);
} }
#ifdef JEMALLOC_JET #ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large #undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large) #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
#endif #endif
void void
arena_dalloc_junk_large(void *ptr, size_t usize) arena_dalloc_junk_large(void *ptr, size_t usize)
{ {
if (config_fill && unlikely(opt_junk_free)) if (config_fill && unlikely(opt_junk_free))
memset(ptr, JEMALLOC_FREE_JUNK, usize); memset(ptr, 0x5a, usize);
} }
#ifdef JEMALLOC_JET #ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large #undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large = arena_dalloc_junk_large_t *arena_dalloc_junk_large =
JEMALLOC_N(n_arena_dalloc_junk_large); JEMALLOC_N(arena_dalloc_junk_large_impl);
#endif #endif
static void static void
arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
arena_chunk_t *chunk, void *ptr, bool junked) void *ptr, bool junked)
{ {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
pageind);
arena_run_t *run = &miscelm->run; arena_run_t *run = &miscelm->run;
if (config_fill || config_stats) { if (config_fill || config_stats) {
...@@ -3029,35 +2584,32 @@ arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena, ...@@ -3029,35 +2584,32 @@ arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
} }
} }
arena_run_dalloc(tsdn, arena, run, true, false, false); arena_run_dalloc_decommit(arena, chunk, run);
} }
void void
arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
arena_chunk_t *chunk, void *ptr) void *ptr)
{ {
arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true); arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
} }
void void
arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
void *ptr)
{ {
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false); arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
arena_decay_tick(tsdn, arena);
} }
static void static void
arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
void *ptr, size_t oldsize, size_t size) size_t oldsize, size_t size)
{ {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
pageind);
arena_run_t *run = &miscelm->run; arena_run_t *run = &miscelm->run;
assert(size < oldsize); assert(size < oldsize);
...@@ -3066,8 +2618,8 @@ arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, ...@@ -3066,8 +2618,8 @@ arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
* Shrink the run, and make trailing pages available for other * Shrink the run, and make trailing pages available for other
* allocations. * allocations.
*/ */
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size + arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
large_pad, true); large_pad, true);
if (config_stats) { if (config_stats) {
szind_t oldindex = size2index(oldsize) - NBINS; szind_t oldindex = size2index(oldsize) - NBINS;
@@ -3085,12 +2637,12 @@ arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++; arena->stats.lstats[index].curruns++;
} }
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
} }
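Both versions locate run metadata from the raw pointer with the same arithmetic: the byte offset of ptr within its chunk, shifted right by LG_PAGE, gives the page index used to fetch the map element. A self-contained illustration of that arithmetic, assuming 4 KiB pages and made-up addresses:

#include <stdint.h>
#include <stdio.h>

#define LG_PAGE 12    /* assumption: 4 KiB pages */

int
main(void)
{
    uintptr_t chunk = 0x200000;                    /* hypothetical chunk base */
    uintptr_t ptr = chunk + (5 << LG_PAGE) + 7;    /* somewhere in page 5 */
    size_t pageind = (ptr - chunk) >> LG_PAGE;

    printf("pageind = %zu\n", pageind);    /* prints 5 */
    return 0;
}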
static bool static bool
arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
{ {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t npages = (oldsize + large_pad) >> LG_PAGE; size_t npages = (oldsize + large_pad) >> LG_PAGE;
@@ -3100,7 +2652,7 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
large_pad); large_pad);
/* Try to extend the run. */ /* Try to extend the run. */
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
pageind+npages) != 0) pageind+npages) != 0)
goto label_fail; goto label_fail;
@@ -3123,7 +2675,7 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
if (splitsize == 0) if (splitsize == 0)
goto label_fail; goto label_fail;
run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run; run = &arena_miscelm_get(chunk, pageind+npages)->run;
if (arena_run_split_large(arena, run, splitsize, zero)) if (arena_run_split_large(arena, run, splitsize, zero))
goto label_fail; goto label_fail;
@@ -3131,16 +2683,10 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
/* /*
* Zero the trailing bytes of the original allocation's * Zero the trailing bytes of the original allocation's
* last page, since they are in an indeterminate state. * last page, since they are in an indeterminate state.
* There will always be trailing bytes, because ptr's
* offset from the beginning of the run is a multiple of
* CACHELINE in [0 .. PAGE).
*/ */
void *zbase = (void *)((uintptr_t)ptr + oldsize); assert(PAGE_CEILING(oldsize) == oldsize);
void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + memset((void *)((uintptr_t)ptr + oldsize), 0,
PAGE)); PAGE_CEILING((uintptr_t)ptr) - (uintptr_t)ptr);
size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
assert(nzero > 0);
memset(zbase, 0, nzero);
} }
size = oldsize + splitsize; size = oldsize + splitsize;
@@ -3180,24 +2726,24 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++; arena->stats.lstats[index].curruns++;
} }
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
return (false); return (false);
} }
label_fail: label_fail:
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
return (true); return (true);
} }
#ifdef JEMALLOC_JET #ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large #undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large) #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
#endif #endif
static void static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{ {
if (config_fill && unlikely(opt_junk_free)) { if (config_fill && unlikely(opt_junk_free)) {
memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK, memset((void *)((uintptr_t)ptr + usize), 0x5a,
old_usize - usize); old_usize - usize);
} }
} }
@@ -3205,7 +2751,7 @@ arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
#undef arena_ralloc_junk_large #undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large = arena_ralloc_junk_large_t *arena_ralloc_junk_large =
JEMALLOC_N(n_arena_ralloc_junk_large); JEMALLOC_N(arena_ralloc_junk_large_impl);
#endif #endif
/* /*
@@ -3213,7 +2759,7 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large =
* always fail if growing an object, and the following run is already in use. * always fail if growing an object, and the following run is already in use.
*/ */
static bool static bool
arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero) size_t usize_max, bool zero)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
@@ -3228,16 +2774,15 @@ arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
arena = extent_node_arena_get(&chunk->node); arena = extent_node_arena_get(&chunk->node);
if (oldsize < usize_max) { if (oldsize < usize_max) {
bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr, bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
oldsize, usize_min, usize_max, zero); usize_min, usize_max, zero);
if (config_fill && !ret && !zero) { if (config_fill && !ret && !zero) {
if (unlikely(opt_junk_alloc)) { if (unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK, isalloc(tsdn, ptr, config_prof) - oldsize); memset((void *)((uintptr_t)ptr + oldsize), 0xa5, isalloc(ptr, config_prof) - oldsize);
} else if (unlikely(opt_zero)) { } else if (unlikely(opt_zero)) {
memset((void *)((uintptr_t)ptr + oldsize), 0, memset((void *)((uintptr_t)ptr + oldsize), 0,
isalloc(tsdn, ptr, config_prof) - oldsize); isalloc(ptr, config_prof) - oldsize);
} }
} }
return (ret); return (ret);
@@ -3246,27 +2791,19 @@ arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
assert(oldsize > usize_max); assert(oldsize > usize_max);
/* Fill before shrinking in order to avoid a race. */ /* Fill before shrinking in order to avoid a race. */
arena_ralloc_junk_large(ptr, oldsize, usize_max); arena_ralloc_junk_large(ptr, oldsize, usize_max);
arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max); arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
return (false); return (false);
} }
bool bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t extra, bool zero) bool zero)
{ {
size_t usize_min, usize_max; size_t usize_min, usize_max;
/* Calls with non-zero extra had to clamp extra. */
assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
if (unlikely(size > HUGE_MAXCLASS))
return (true);
usize_min = s2u(size); usize_min = s2u(size);
usize_max = s2u(size + extra); usize_max = s2u(size + extra);
if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
arena_chunk_t *chunk;
/* /*
* Avoid moving the allocation if the size class can be left the * Avoid moving the allocation if the size class can be left the
* same. * same.
@@ -3274,39 +2811,37 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
if (oldsize <= SMALL_MAXCLASS) { if (oldsize <= SMALL_MAXCLASS) {
assert(arena_bin_info[size2index(oldsize)].reg_size == assert(arena_bin_info[size2index(oldsize)].reg_size ==
oldsize); oldsize);
if ((usize_max > SMALL_MAXCLASS || if ((usize_max <= SMALL_MAXCLASS &&
size2index(usize_max) != size2index(oldsize)) && size2index(usize_max) == size2index(oldsize)) ||
(size > oldsize || usize_max < oldsize)) (size <= oldsize && usize_max >= oldsize))
return (true); return (false);
} else { } else {
if (usize_max <= SMALL_MAXCLASS) if (usize_max > SMALL_MAXCLASS) {
return (true); if (!arena_ralloc_large(ptr, oldsize, usize_min,
if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min, usize_max, zero))
usize_max, zero)) return (false);
return (true); }
} }
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); /* Reallocation would require a move. */
arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node)); return (true);
return (false);
} else { } else {
return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min, return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max,
usize_max, zero)); zero));
} }
} }
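For small allocations the branch above reduces to one question: does the rounded-up request map to the same size class as the old allocation? If so, the pointer can be reused without copying. A toy model of that test, using plain power-of-two classes instead of jemalloc's real class table:

#include <stdbool.h>
#include <stddef.h>

/* Toy stand-in for s2u(): round a request up to a power-of-two class. */
static size_t
toy_s2u(size_t size)
{
    size_t u = 8;

    while (u < size)
        u <<= 1;
    return u;
}

/* True means a move is required, matching the true-on-failure convention
 * of arena_ralloc_no_move() above. */
static bool
toy_ralloc_no_move(size_t oldsize, size_t newsize)
{
    return (toy_s2u(newsize) != toy_s2u(oldsize));
}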
static void * static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache) size_t alignment, bool zero, tcache_t *tcache)
{ {
if (alignment == 0) if (alignment == 0)
return (arena_malloc(tsdn, arena, usize, size2index(usize), zero, tcache, true)); return (arena_malloc(tsd, arena, usize, zero, tcache));
usize = sa2u(usize, alignment); usize = sa2u(usize, alignment);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) if (usize == 0)
return (NULL); return (NULL);
return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
} }
void * void *
@@ -3317,15 +2852,14 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t usize; size_t usize;
usize = s2u(size); usize = s2u(size);
if (unlikely(usize == 0 || size > HUGE_MAXCLASS)) if (usize == 0)
return (NULL); return (NULL);
if (likely(usize <= large_maxclass)) { if (likely(usize <= large_maxclass)) {
size_t copysize; size_t copysize;
/* Try to avoid moving the allocation. */ /* Try to avoid moving the allocation. */
if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0, zero)) if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero))
return (ptr); return (ptr);
/* /*
@@ -3333,8 +2867,8 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
* the object. In that case, fall back to allocating new space * the object. In that case, fall back to allocating new space
* and copying. * and copying.
*/ */
ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
alignment, zero, tcache); zero, tcache);
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);
@@ -3346,7 +2880,7 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
copysize = (usize < oldsize) ? usize : oldsize; copysize = (usize < oldsize) ? usize : oldsize;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize); memcpy(ret, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache, true); isqalloc(tsd, ptr, oldsize, tcache);
} else { } else {
ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
zero, tcache); zero, tcache);
@@ -3355,25 +2889,25 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
} }
dss_prec_t dss_prec_t
arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) arena_dss_prec_get(arena_t *arena)
{ {
dss_prec_t ret; dss_prec_t ret;
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
ret = arena->dss_prec; ret = arena->dss_prec;
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
return (ret); return (ret);
} }
bool bool
arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
{ {
if (!have_dss) if (!have_dss)
return (dss_prec != dss_prec_disabled); return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(tsdn, &arena->lock); malloc_mutex_lock(&arena->lock);
arena->dss_prec = dss_prec; arena->dss_prec = dss_prec;
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
return (false); return (false);
} }
@@ -3388,76 +2922,27 @@ bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
{ {
if (opt_purge != purge_mode_ratio)
return (true);
if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
return (true); return (true);
atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
return (false); return (false);
} }
ssize_t
arena_decay_time_default_get(void)
{
return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
}
bool
arena_decay_time_default_set(ssize_t decay_time)
{
if (opt_purge != purge_mode_decay)
return (true);
if (!arena_decay_time_valid(decay_time))
return (true);
atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
return (false);
}
static void
arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
size_t *nactive, size_t *ndirty)
{
*nthreads += arena_nthreads_get(arena, false);
*dss = dss_prec_names[arena->dss_prec];
*lg_dirty_mult = arena->lg_dirty_mult;
*decay_time = arena->decay.time;
*nactive += arena->nactive;
*ndirty += arena->ndirty;
}
void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
size_t *nactive, size_t *ndirty)
{
malloc_mutex_lock(tsdn, &arena->lock);
arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
decay_time, nactive, ndirty);
malloc_mutex_unlock(tsdn, &arena->lock);
}
void void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
size_t *nactive, size_t *ndirty, arena_stats_t *astats, size_t *nactive, size_t *ndirty, arena_stats_t *astats,
malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
malloc_huge_stats_t *hstats) malloc_huge_stats_t *hstats)
{ {
unsigned i; unsigned i;
cassert(config_stats); malloc_mutex_lock(&arena->lock);
*dss = dss_prec_names[arena->dss_prec];
malloc_mutex_lock(tsdn, &arena->lock); *lg_dirty_mult = arena->lg_dirty_mult;
arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, *nactive += arena->nactive;
decay_time, nactive, ndirty); *ndirty += arena->ndirty;
astats->mapped += arena->stats.mapped; astats->mapped += arena->stats.mapped;
astats->retained += arena->stats.retained;
astats->npurge += arena->stats.npurge; astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise; astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged; astats->purged += arena->stats.purged;
@@ -3483,12 +2968,12 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
} }
malloc_mutex_unlock(tsdn, &arena->lock); malloc_mutex_unlock(&arena->lock);
for (i = 0; i < NBINS; i++) { for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i]; arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(tsdn, &bin->lock); malloc_mutex_lock(&bin->lock);
bstats[i].nmalloc += bin->stats.nmalloc; bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc; bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests; bstats[i].nrequests += bin->stats.nrequests;
@@ -3500,61 +2985,33 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
bstats[i].nruns += bin->stats.nruns; bstats[i].nruns += bin->stats.nruns;
bstats[i].reruns += bin->stats.reruns; bstats[i].reruns += bin->stats.reruns;
bstats[i].curruns += bin->stats.curruns; bstats[i].curruns += bin->stats.curruns;
malloc_mutex_unlock(tsdn, &bin->lock); malloc_mutex_unlock(&bin->lock);
} }
} }
unsigned
arena_nthreads_get(arena_t *arena, bool internal)
{
return (atomic_read_u(&arena->nthreads[internal]));
}
void
arena_nthreads_inc(arena_t *arena, bool internal)
{
atomic_add_u(&arena->nthreads[internal], 1);
}
void
arena_nthreads_dec(arena_t *arena, bool internal)
{
atomic_sub_u(&arena->nthreads[internal], 1);
}
size_t
arena_extent_sn_next(arena_t *arena)
{
return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
}
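arena_extent_sn_next(), removed by this revert along with the rest of the extent serial-number machinery, hands out a strictly increasing serial number per arena: atomic_add_z() returns the post-increment value, so subtracting 1 recovers the value that was current before the add. With C11 atomics the same operation is a single fetch-add, which returns the prior value directly:

#include <stdatomic.h>
#include <stddef.h>

static _Atomic size_t extent_sn_next;

/* Equivalent of atomic_add_z(&x, 1) - 1: return the pre-increment value. */
static size_t
next_sn(void)
{
    return atomic_fetch_add(&extent_sn_next, 1);
}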
arena_t * arena_t *
arena_new(tsdn_t *tsdn, unsigned ind) arena_new(unsigned ind)
{ {
arena_t *arena; arena_t *arena;
unsigned i; unsigned i;
arena_bin_t *bin;
/* /*
* Allocate arena, arena->lstats, and arena->hstats contiguously, mainly * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
* because there is no way to clean up if base_alloc() OOMs. * because there is no way to clean up if base_alloc() OOMs.
*/ */
if (config_stats) { if (config_stats) {
arena = (arena_t *)base_alloc(tsdn, arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t))
CACHELINE_CEILING(sizeof(arena_t)) + + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) +
QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t))) nhclasses) * sizeof(malloc_huge_stats_t));
+ (nhclasses * sizeof(malloc_huge_stats_t)));
} else } else
arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t)); arena = (arena_t *)base_alloc(sizeof(arena_t));
if (arena == NULL) if (arena == NULL)
return (NULL); return (NULL);
arena->ind = ind; arena->ind = ind;
arena->nthreads[0] = arena->nthreads[1] = 0; arena->nthreads = 0;
if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA)) if (malloc_mutex_init(&arena->lock))
return (NULL); return (NULL);
if (config_stats) { if (config_stats) {
@@ -3584,15 +3041,11 @@ arena_new(tsdn_t *tsdn, unsigned ind)
* deterministic seed. * deterministic seed.
*/ */
arena->offset_state = config_debug ? ind : arena->offset_state = config_debug ? ind :
(size_t)(uintptr_t)arena; (uint64_t)(uintptr_t)arena;
} }
arena->dss_prec = chunk_dss_prec_get(); arena->dss_prec = chunk_dss_prec_get();
ql_new(&arena->achunks);
arena->extent_sn_next = 0;
arena->spare = NULL; arena->spare = NULL;
arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
@@ -3600,42 +3053,33 @@ arena_new(tsdn_t *tsdn, unsigned ind)
arena->nactive = 0; arena->nactive = 0;
arena->ndirty = 0; arena->ndirty = 0;
for (i = 0; i < NPSIZES; i++) arena_avail_tree_new(&arena->runs_avail);
arena_run_heap_new(&arena->runs_avail[i]);
qr_new(&arena->runs_dirty, rd_link); qr_new(&arena->runs_dirty, rd_link);
qr_new(&arena->chunks_cache, cc_link); qr_new(&arena->chunks_cache, cc_link);
if (opt_purge == purge_mode_decay)
arena_decay_init(arena, arena_decay_time_default_get());
ql_new(&arena->huge); ql_new(&arena->huge);
if (malloc_mutex_init(&arena->huge_mtx, "arena_huge", WITNESS_RANK_ARENA_HUGE)) if (malloc_mutex_init(&arena->huge_mtx))
return (NULL); return (NULL);
extent_tree_szsnad_new(&arena->chunks_szsnad_cached); extent_tree_szad_new(&arena->chunks_szad_cached);
extent_tree_ad_new(&arena->chunks_ad_cached); extent_tree_ad_new(&arena->chunks_ad_cached);
extent_tree_szsnad_new(&arena->chunks_szsnad_retained); extent_tree_szad_new(&arena->chunks_szad_retained);
extent_tree_ad_new(&arena->chunks_ad_retained); extent_tree_ad_new(&arena->chunks_ad_retained);
if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks", WITNESS_RANK_ARENA_CHUNKS)) if (malloc_mutex_init(&arena->chunks_mtx))
return (NULL); return (NULL);
ql_new(&arena->node_cache); ql_new(&arena->node_cache);
if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache", WITNESS_RANK_ARENA_NODE_CACHE)) if (malloc_mutex_init(&arena->node_cache_mtx))
return (NULL); return (NULL);
arena->chunk_hooks = chunk_hooks_default; arena->chunk_hooks = chunk_hooks_default;
/* Initialize bins. */ /* Initialize bins. */
for (i = 0; i < NBINS; i++) { for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i]; bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock, "arena_bin", WITNESS_RANK_ARENA_BIN)) if (malloc_mutex_init(&bin->lock))
return (NULL); return (NULL);
bin->runcur = NULL; bin->runcur = NULL;
arena_run_heap_new(&bin->runs); arena_run_tree_new(&bin->runs);
if (config_stats) if (config_stats)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
} }
@@ -3667,7 +3111,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
* be twice as large in order to maintain alignment. * be twice as large in order to maintain alignment.
*/ */
if (config_fill && unlikely(opt_redzone)) { if (config_fill && unlikely(opt_redzone)) {
size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1); size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - 1);
if (align_min <= REDZONE_MINSIZE) { if (align_min <= REDZONE_MINSIZE) {
bin_info->redzone_size = REDZONE_MINSIZE; bin_info->redzone_size = REDZONE_MINSIZE;
pad_size = 0; pad_size = 0;
@@ -3687,19 +3132,18 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
* size). * size).
*/ */
try_run_size = PAGE; try_run_size = PAGE;
try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); try_nregs = try_run_size / bin_info->reg_size;
do { do {
perfect_run_size = try_run_size; perfect_run_size = try_run_size;
perfect_nregs = try_nregs; perfect_nregs = try_nregs;
try_run_size += PAGE; try_run_size += PAGE;
try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); try_nregs = try_run_size / bin_info->reg_size;
} while (perfect_run_size != perfect_nregs * bin_info->reg_size); } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
assert(perfect_nregs <= RUN_MAXREGS); assert(perfect_nregs <= RUN_MAXREGS);
actual_run_size = perfect_run_size; actual_run_size = perfect_run_size;
actual_nregs = (uint32_t)((actual_run_size - pad_size) / actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
bin_info->reg_interval);
/* /*
* Redzones can require enough padding that not even a single region can * Redzones can require enough padding that not even a single region can
@@ -3711,8 +3155,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
assert(config_fill && unlikely(opt_redzone)); assert(config_fill && unlikely(opt_redzone));
actual_run_size += PAGE; actual_run_size += PAGE;
actual_nregs = (uint32_t)((actual_run_size - pad_size) / actual_nregs = (actual_run_size - pad_size) /
bin_info->reg_interval); bin_info->reg_interval;
} }
/* /*
@@ -3720,8 +3164,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
*/ */
while (actual_run_size > arena_maxrun) { while (actual_run_size > arena_maxrun) {
actual_run_size -= PAGE; actual_run_size -= PAGE;
actual_nregs = (uint32_t)((actual_run_size - pad_size) / actual_nregs = (actual_run_size - pad_size) /
bin_info->reg_interval); bin_info->reg_interval;
} }
assert(actual_nregs > 0); assert(actual_nregs > 0);
assert(actual_run_size == s2u(actual_run_size)); assert(actual_run_size == s2u(actual_run_size));
@@ -3729,8 +3173,11 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
/* Copy final settings. */ /* Copy final settings. */
bin_info->run_size = actual_run_size; bin_info->run_size = actual_run_size;
bin_info->nregs = actual_nregs; bin_info->nregs = actual_nregs;
bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs * bin_info->reg0_offset = actual_run_size - (actual_nregs *
bin_info->reg_interval) - pad_size + bin_info->redzone_size); bin_info->reg_interval) - pad_size + bin_info->redzone_size;
if (actual_run_size > small_maxrun)
small_maxrun = actual_run_size;
assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
* bin_info->reg_interval) + pad_size == bin_info->run_size); * bin_info->reg_interval) + pad_size == bin_info->run_size);
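The do/while loop above searches for the smallest run size that is both a multiple of the page size and an exact multiple of the region size, i.e. (ignoring redzone padding) the least common multiple of PAGE and reg_size. The search compacts to a short loop:

#include <stddef.h>

#define PAGE ((size_t)4096)    /* assumption: 4 KiB pages */

/* Smallest multiple of PAGE that reg_size divides evenly. */
static size_t
perfect_run_size(size_t reg_size)
{
    size_t run = PAGE;

    while (run % reg_size != 0)
        run += PAGE;
    return run;    /* e.g. reg_size == 96 -> 12288: 3 pages, 128 regions */
}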
@@ -3747,7 +3194,7 @@ bin_info_init(void)
bin_info_run_size_calc(bin_info); \ bin_info_run_size_calc(bin_info); \
bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define BIN_INFO_INIT_bin_no(index, size) #define BIN_INFO_INIT_bin_no(index, size)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
SIZE_CLASSES SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes #undef BIN_INFO_INIT_bin_yes
@@ -3755,13 +3202,38 @@ bin_info_init(void)
#undef SC #undef SC
} }
void static bool
small_run_size_init(void)
{
assert(small_maxrun != 0);
small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
LG_PAGE));
if (small_run_tab == NULL)
return (true);
#define TAB_INIT_bin_yes(index, size) { \
arena_bin_info_t *bin_info = &arena_bin_info[index]; \
small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
}
#define TAB_INIT_bin_no(index, size)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
SIZE_CLASSES
#undef TAB_INIT_bin_yes
#undef TAB_INIT_bin_no
#undef SC
return (false);
}
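small_run_size_init(), restored on the right-hand side, precomputes a boolean table indexed by run size in pages, so that later code can ask "is this a valid small run size?" with a single array load instead of consulting the size-class metadata. The shape of the table, with hypothetical sizes and bounds:

#include <stdbool.h>
#include <stddef.h>

#define LG_PAGE 12         /* assumption: 4 KiB pages */
#define MAX_RUN_PAGES 8    /* hypothetical small_maxrun >> LG_PAGE */

static bool small_run_tab[MAX_RUN_PAGES + 1];

/* Mark each bin's run size as valid; caller guarantees every size is a
 * page multiple no larger than MAX_RUN_PAGES pages. */
static void
small_run_tab_init(const size_t *run_sizes, size_t nbins)
{
    size_t i;

    for (i = 0; i < nbins; i++)
        small_run_tab[run_sizes[i] >> LG_PAGE] = true;
}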
bool
arena_boot(void) arena_boot(void)
{ {
unsigned i; unsigned i;
arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
arena_decay_time_default_set(opt_decay_time);
/* /*
* Compute the header size such that it is large enough to contain the * Compute the header size such that it is large enough to contain the
@@ -3803,61 +3275,44 @@ arena_boot(void)
nhclasses = NSIZES - nlclasses - NBINS; nhclasses = NSIZES - nlclasses - NBINS;
bin_info_init(); bin_info_init();
return (small_run_size_init());
} }
void void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) arena_prefork(arena_t *arena)
{
malloc_mutex_prefork(tsdn, &arena->lock);
}
void
arena_prefork1(tsdn_t *tsdn, arena_t *arena)
{
malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
}
void
arena_prefork2(tsdn_t *tsdn, arena_t *arena)
{
malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
}
void
arena_prefork3(tsdn_t *tsdn, arena_t *arena)
{ {
unsigned i; unsigned i;
malloc_mutex_prefork(&arena->lock);
malloc_mutex_prefork(&arena->huge_mtx);
malloc_mutex_prefork(&arena->chunks_mtx);
malloc_mutex_prefork(&arena->node_cache_mtx);
for (i = 0; i < NBINS; i++) for (i = 0; i < NBINS; i++)
malloc_mutex_prefork(tsdn, &arena->bins[i].lock); malloc_mutex_prefork(&arena->bins[i].lock);
malloc_mutex_prefork(tsdn, &arena->huge_mtx);
} }
void void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) arena_postfork_parent(arena_t *arena)
{ {
unsigned i; unsigned i;
malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++) for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock); malloc_mutex_postfork_parent(&arena->bins[i].lock);
malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx); malloc_mutex_postfork_parent(&arena->node_cache_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx); malloc_mutex_postfork_parent(&arena->chunks_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->lock); malloc_mutex_postfork_parent(&arena->huge_mtx);
malloc_mutex_postfork_parent(&arena->lock);
} }
void void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) arena_postfork_child(arena_t *arena)
{ {
unsigned i; unsigned i;
malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++) for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock); malloc_mutex_postfork_child(&arena->bins[i].lock);
malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx); malloc_mutex_postfork_child(&arena->node_cache_mtx);
malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx); malloc_mutex_postfork_child(&arena->chunks_mtx);
malloc_mutex_postfork_child(tsdn, &arena->lock); malloc_mutex_postfork_child(&arena->huge_mtx);
malloc_mutex_postfork_child(&arena->lock);
} }
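The prefork/postfork trio exists because fork() duplicates only the calling thread: any mutex held by another thread at fork time would be stuck locked forever in the child. Acquiring every arena mutex before the fork and releasing it on both sides afterwards guarantees the child inherits them unlocked. A compact sketch of the protocol with two pthread mutexes and pthread_atfork():

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{
    /* Always the same order, to avoid deadlock with regular lock users. */
    pthread_mutex_lock(&lock_a);
    pthread_mutex_lock(&lock_b);
}

static void
postfork(void)
{
    /* Reverse order; runs in both the parent and the child. */
    pthread_mutex_unlock(&lock_b);
    pthread_mutex_unlock(&lock_a);
}

/* Call once at startup. */
void
install_fork_handlers(void)
{
    pthread_atfork(prefork, postfork, postfork);
}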
@@ -5,8 +5,7 @@
/* Data. */ /* Data. */
static malloc_mutex_t base_mtx; static malloc_mutex_t base_mtx;
static size_t base_extent_sn_next; static extent_tree_t base_avail_szad;
static extent_tree_t base_avail_szsnad;
static extent_node_t *base_nodes; static extent_node_t *base_nodes;
static size_t base_allocated; static size_t base_allocated;
static size_t base_resident; static size_t base_resident;
@@ -14,13 +13,12 @@ static size_t base_mapped;
/******************************************************************************/ /******************************************************************************/
/* base_mtx must be held. */
static extent_node_t * static extent_node_t *
base_node_try_alloc(tsdn_t *tsdn) base_node_try_alloc(void)
{ {
extent_node_t *node; extent_node_t *node;
malloc_mutex_assert_owner(tsdn, &base_mtx);
if (base_nodes == NULL) if (base_nodes == NULL)
return (NULL); return (NULL);
node = base_nodes; node = base_nodes;
@@ -29,42 +27,33 @@ base_node_try_alloc(tsdn_t *tsdn)
return (node); return (node);
} }
/* base_mtx must be held. */
static void static void
base_node_dalloc(tsdn_t *tsdn, extent_node_t *node) base_node_dalloc(extent_node_t *node)
{ {
malloc_mutex_assert_owner(tsdn, &base_mtx);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
*(extent_node_t **)node = base_nodes; *(extent_node_t **)node = base_nodes;
base_nodes = node; base_nodes = node;
} }
static void /* base_mtx must be held. */
base_extent_node_init(extent_node_t *node, void *addr, size_t size)
{
size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;
extent_node_init(node, NULL, addr, size, sn, true, true);
}
static extent_node_t * static extent_node_t *
base_chunk_alloc(tsdn_t *tsdn, size_t minsize) base_chunk_alloc(size_t minsize)
{ {
extent_node_t *node; extent_node_t *node;
size_t csize, nsize; size_t csize, nsize;
void *addr; void *addr;
malloc_mutex_assert_owner(tsdn, &base_mtx);
assert(minsize != 0); assert(minsize != 0);
node = base_node_try_alloc(tsdn); node = base_node_try_alloc();
/* Allocate enough space to also carve a node out if necessary. */ /* Allocate enough space to also carve a node out if necessary. */
nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0; nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
csize = CHUNK_CEILING(minsize + nsize); csize = CHUNK_CEILING(minsize + nsize);
addr = chunk_alloc_base(csize); addr = chunk_alloc_base(csize);
if (addr == NULL) { if (addr == NULL) {
if (node != NULL) if (node != NULL)
base_node_dalloc(tsdn, node); base_node_dalloc(node);
return (NULL); return (NULL);
} }
base_mapped += csize; base_mapped += csize;
@@ -77,7 +66,7 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
base_resident += PAGE_CEILING(nsize); base_resident += PAGE_CEILING(nsize);
} }
} }
base_extent_node_init(node, addr, csize); extent_node_init(node, NULL, addr, csize, true, true);
return (node); return (node);
} }
@@ -87,7 +76,7 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
* physical memory usage. * physical memory usage.
*/ */
void * void *
base_alloc(tsdn_t *tsdn, size_t size) base_alloc(size_t size)
{ {
void *ret; void *ret;
size_t csize, usize; size_t csize, usize;
@@ -101,15 +90,15 @@ base_alloc(tsdn_t *tsdn, size_t size)
csize = CACHELINE_CEILING(size); csize = CACHELINE_CEILING(size);
usize = s2u(csize); usize = s2u(csize);
extent_node_init(&key, NULL, NULL, usize, 0, false, false); extent_node_init(&key, NULL, NULL, usize, false, false);
malloc_mutex_lock(tsdn, &base_mtx); malloc_mutex_lock(&base_mtx);
node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key); node = extent_tree_szad_nsearch(&base_avail_szad, &key);
if (node != NULL) { if (node != NULL) {
/* Use existing space. */ /* Use existing space. */
extent_tree_szsnad_remove(&base_avail_szsnad, node); extent_tree_szad_remove(&base_avail_szad, node);
} else { } else {
/* Try to allocate more space. */ /* Try to allocate more space. */
node = base_chunk_alloc(tsdn, csize); node = base_chunk_alloc(csize);
} }
if (node == NULL) { if (node == NULL) {
ret = NULL; ret = NULL;
@@ -120,9 +109,9 @@ base_alloc(tsdn_t *tsdn, size_t size)
if (extent_node_size_get(node) > csize) { if (extent_node_size_get(node) > csize) {
extent_node_addr_set(node, (void *)((uintptr_t)ret + csize)); extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
extent_node_size_set(node, extent_node_size_get(node) - csize); extent_node_size_set(node, extent_node_size_get(node) - csize);
extent_tree_szsnad_insert(&base_avail_szsnad, node); extent_tree_szad_insert(&base_avail_szad, node);
} else } else
base_node_dalloc(tsdn, node); base_node_dalloc(node);
if (config_stats) { if (config_stats) {
base_allocated += csize; base_allocated += csize;
/* /*
@@ -134,54 +123,52 @@ base_alloc(tsdn_t *tsdn, size_t size)
} }
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize); JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return: label_return:
malloc_mutex_unlock(tsdn, &base_mtx); malloc_mutex_unlock(&base_mtx);
return (ret); return (ret);
} }
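The core of base_alloc() is visible above: find a free extent of at least csize bytes, hand out its front, and if anything is left over, shrink the node and reinsert it into the tree. The carve step in isolation, with a plain struct standing in for extent_node_t:

#include <stdint.h>
#include <stddef.h>

struct extent { void *addr; size_t size; };    /* stand-in for extent_node_t */

/* Take csize bytes off the front; caller guarantees node->size >= csize. */
static void *
carve(struct extent *node, size_t csize)
{
    void *ret = node->addr;

    node->addr = (void *)((uintptr_t)node->addr + csize);
    node->size -= csize;
    return ret;    /* if node->size is now 0, the node itself is recycled */
}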
void void
base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident, size_t *mapped) base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
{ {
malloc_mutex_lock(tsdn, &base_mtx); malloc_mutex_lock(&base_mtx);
assert(base_allocated <= base_resident); assert(base_allocated <= base_resident);
assert(base_resident <= base_mapped); assert(base_resident <= base_mapped);
*allocated = base_allocated; *allocated = base_allocated;
*resident = base_resident; *resident = base_resident;
*mapped = base_mapped; *mapped = base_mapped;
malloc_mutex_unlock(tsdn, &base_mtx); malloc_mutex_unlock(&base_mtx);
} }
bool bool
base_boot(void) base_boot(void)
{ {
if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE)) if (malloc_mutex_init(&base_mtx))
return (true); return (true);
base_extent_sn_next = 0; extent_tree_szad_new(&base_avail_szad);
extent_tree_szsnad_new(&base_avail_szsnad);
base_nodes = NULL; base_nodes = NULL;
return (false); return (false);
} }
void void
base_prefork(tsdn_t *tsdn) base_prefork(void)
{ {
malloc_mutex_prefork(tsdn, &base_mtx); malloc_mutex_prefork(&base_mtx);
} }
void void
base_postfork_parent(tsdn_t *tsdn) base_postfork_parent(void)
{ {
malloc_mutex_postfork_parent(tsdn, &base_mtx); malloc_mutex_postfork_parent(&base_mtx);
} }
void void
base_postfork_child(tsdn_t *tsdn) base_postfork_child(void)
{ {
malloc_mutex_postfork_child(tsdn, &base_mtx); malloc_mutex_postfork_child(&base_mtx);
} }
@@ -3,8 +3,6 @@
/******************************************************************************/ /******************************************************************************/
#ifdef USE_TREE
void void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{ {
@@ -34,11 +32,20 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
binfo->nbits = nbits; binfo->nbits = nbits;
} }
static size_t size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) bitmap_info_ngroups(const bitmap_info_t *binfo)
{ {
return (binfo->levels[binfo->nlevels].group_offset); return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
}
size_t
bitmap_size(size_t nbits)
{
bitmap_info_t binfo;
bitmap_info_init(&binfo, nbits);
return (bitmap_info_ngroups(&binfo));
} }
void void
@@ -54,7 +61,8 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
* correspond to the first logical bit in the group, so extra bits * correspond to the first logical bit in the group, so extra bits
* are the most significant bits of the last group. * are the most significant bits of the last group.
*/ */
memset(bitmap, 0xffU, bitmap_size(binfo)); memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK; & BITMAP_GROUP_NBITS_MASK;
if (extra != 0) if (extra != 0)
@@ -68,44 +76,3 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
} }
} }
#else /* USE_TREE */
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
binfo->nbits = nbits;
}
static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo)
{
return (binfo->ngroups);
}
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
size_t extra;
memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
bitmap[binfo->ngroups - 1] >>= extra;
}
#endif /* USE_TREE */
size_t
bitmap_size(const bitmap_info_t *binfo)
{
return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
}
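The left column's non-tree (flat) bitmap branch, deleted by the revert, reduces the bookkeeping to its essentials: pack nbits into machine-word groups, set every bit to 1, and shift the unused high bits of the last group back to 0 so they are never mistaken for live bits. A self-contained version of that flat case, assuming 64-bit groups:

#include <stdint.h>
#include <string.h>
#include <stddef.h>

#define GROUP_NBITS 64    /* assumption: 64-bit bitmap groups */

static size_t
bits2groups(size_t nbits)
{
    return (nbits + GROUP_NBITS - 1) / GROUP_NBITS;
}

/* Set all nbits to 1, with the unused high bits of the last group cleared. */
static void
bitmap_init_flat(uint64_t *bitmap, size_t nbits)
{
    size_t ngroups = bits2groups(nbits);
    size_t extra = (GROUP_NBITS - (nbits % GROUP_NBITS)) % GROUP_NBITS;

    memset(bitmap, 0xff, ngroups * sizeof(uint64_t));
    if (extra != 0)
        bitmap[ngroups - 1] >>= extra;
}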
@@ -49,10 +49,9 @@ const chunk_hooks_t chunk_hooks_default = {
* definition. * definition.
*/ */
static void chunk_record(tsdn_t *tsdn, arena_t *arena, static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn, void *chunk, size_t size, bool zeroed, bool committed);
bool zeroed, bool committed);
/******************************************************************************/ /******************************************************************************/
@@ -64,23 +63,23 @@ chunk_hooks_get_locked(arena_t *arena)
} }
chunk_hooks_t chunk_hooks_t
chunk_hooks_get(tsdn_t *tsdn, arena_t *arena) chunk_hooks_get(arena_t *arena)
{ {
chunk_hooks_t chunk_hooks; chunk_hooks_t chunk_hooks;
malloc_mutex_lock(tsdn, &arena->chunks_mtx); malloc_mutex_lock(&arena->chunks_mtx);
chunk_hooks = chunk_hooks_get_locked(arena); chunk_hooks = chunk_hooks_get_locked(arena);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx); malloc_mutex_unlock(&arena->chunks_mtx);
return (chunk_hooks); return (chunk_hooks);
} }
chunk_hooks_t chunk_hooks_t
chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks) chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
{ {
chunk_hooks_t old_chunk_hooks; chunk_hooks_t old_chunk_hooks;
malloc_mutex_lock(tsdn, &arena->chunks_mtx); malloc_mutex_lock(&arena->chunks_mtx);
old_chunk_hooks = arena->chunk_hooks; old_chunk_hooks = arena->chunk_hooks;
/* /*
* Copy each field atomically so that it is impossible for readers to * Copy each field atomically so that it is impossible for readers to
@@ -105,14 +104,14 @@ chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
ATOMIC_COPY_HOOK(split); ATOMIC_COPY_HOOK(split);
ATOMIC_COPY_HOOK(merge); ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK #undef ATOMIC_COPY_HOOK
malloc_mutex_unlock(tsdn, &arena->chunks_mtx); malloc_mutex_unlock(&arena->chunks_mtx);
return (old_chunk_hooks); return (old_chunk_hooks);
} }
static void static void
chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena, chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_hooks_t *chunk_hooks, bool locked) bool locked)
{ {
static const chunk_hooks_t uninitialized_hooks = static const chunk_hooks_t uninitialized_hooks =
CHUNK_HOOKS_INITIALIZER; CHUNK_HOOKS_INITIALIZER;
@@ -120,28 +119,27 @@ chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) == if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
0) { 0) {
*chunk_hooks = locked ? chunk_hooks_get_locked(arena) : *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
chunk_hooks_get(tsdn, arena); chunk_hooks_get(arena);
} }
} }
static void static void
chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena, chunk_hooks_assure_initialized_locked(arena_t *arena,
chunk_hooks_t *chunk_hooks) chunk_hooks_t *chunk_hooks)
{ {
chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true); chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
} }
static void static void
chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena, chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
chunk_hooks_t *chunk_hooks)
{ {
chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false); chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
} }
bool bool
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node) chunk_register(const void *chunk, const extent_node_t *node)
{ {
assert(extent_node_addr_get(node) == chunk); assert(extent_node_addr_get(node) == chunk);
@@ -161,7 +159,7 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
high = atomic_read_z(&highchunks); high = atomic_read_z(&highchunks);
} }
if (cur > high && prof_gdump_get_unlocked()) if (cur > high && prof_gdump_get_unlocked())
prof_gdump(tsdn); prof_gdump();
} }
return (false); return (false);
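The surrounding code, partially elided in this hunk, maintains a high-water mark of registered chunks and triggers prof_gdump() only when a new maximum is set. A C11 sketch of how such a racy-but-monotonic maximum is typically maintained; the details of jemalloc's actual loop differ:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static _Atomic size_t highchunks;

/* Returns true iff cur established a new high-water mark. */
static bool
record_high(size_t cur)
{
    size_t high = atomic_load(&highchunks);

    while (cur > high) {
        /* On failure, high is reloaded with the current value. */
        if (atomic_compare_exchange_weak(&highchunks, &high, cur))
            return true;
    }
    return false;
}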
@@ -183,35 +181,33 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
} }
/* /*
* Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
* best fits. * fits.
*/ */
static extent_node_t * static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size) chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size)
{ {
extent_node_t key; extent_node_t key;
assert(size == CHUNK_CEILING(size)); assert(size == CHUNK_CEILING(size));
extent_node_init(&key, arena, NULL, size, 0, false, false); extent_node_init(&key, arena, NULL, size, false, false);
return (extent_tree_szsnad_nsearch(chunks_szsnad, &key)); return (extent_tree_szad_nsearch(chunks_szad, &key));
} }
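chunk_first_best_fit() delegates the policy to the tree's nsearch: among all extents large enough, return the one that sorts first, meaning the smallest adequate size (and, in the left column's szsnad ordering, the oldest serial number) at the lowest address. The same policy as a linear scan over an array, standing in for the red-black tree:

#include <stddef.h>
#include <stdint.h>

struct free_chunk { void *addr; size_t size; };

static struct free_chunk *
first_best_fit(struct free_chunk *v, size_t n, size_t size)
{
    struct free_chunk *best = NULL;
    size_t i;

    for (i = 0; i < n; i++) {
        if (v[i].size < size)
            continue;    /* too small for the request */
        if (best == NULL || v[i].size < best->size ||
            (v[i].size == best->size &&
            (uintptr_t)v[i].addr < (uintptr_t)best->addr))
            best = &v[i];
    }
    return best;    /* NULL if nothing fits */
}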
static void * static void *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
bool *commit, bool dalloc_node) bool dalloc_node)
{ {
void *ret; void *ret;
extent_node_t *node; extent_node_t *node;
size_t alloc_size, leadsize, trailsize; size_t alloc_size, leadsize, trailsize;
bool zeroed, committed; bool zeroed, committed;
assert(CHUNK_CEILING(size) == size);
assert(alignment > 0);
assert(new_addr == NULL || alignment == chunksize); assert(new_addr == NULL || alignment == chunksize);
assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
/* /*
* Cached chunks use the node linkage embedded in their headers, in * Cached chunks use the node linkage embedded in their headers, in
* which case dalloc_node is true, and new_addr is non-NULL because * which case dalloc_node is true, and new_addr is non-NULL because
@@ -219,23 +215,24 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
*/ */
assert(dalloc_node || new_addr != NULL); assert(dalloc_node || new_addr != NULL);
alloc_size = size + CHUNK_CEILING(alignment) - chunksize; alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
/* Beware size_t wrap-around. */ /* Beware size_t wrap-around. */
if (alloc_size < size) if (alloc_size < size)
return (NULL); return (NULL);
malloc_mutex_lock(tsdn, &arena->chunks_mtx); malloc_mutex_lock(&arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks); chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
if (new_addr != NULL) { if (new_addr != NULL) {
extent_node_t key; extent_node_t key;
extent_node_init(&key, arena, new_addr, alloc_size, 0, false, extent_node_init(&key, arena, new_addr, alloc_size, false,
false); false);
node = extent_tree_ad_search(chunks_ad, &key); node = extent_tree_ad_search(chunks_ad, &key);
} else { } else {
node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size); node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
alloc_size);
} }
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) < if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
size)) { size)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx); malloc_mutex_unlock(&arena->chunks_mtx);
return (NULL); return (NULL);
} }
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node), leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
@@ -244,7 +241,6 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(extent_node_size_get(node) >= leadsize + size); assert(extent_node_size_get(node) >= leadsize + size);
trailsize = extent_node_size_get(node) - leadsize - size; trailsize = extent_node_size_get(node) - leadsize - size;
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize); ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
*sn = extent_node_sn_get(node);
zeroed = extent_node_zeroed_get(node); zeroed = extent_node_zeroed_get(node);
if (zeroed) if (zeroed)
*zero = true; *zero = true;
@@ -255,17 +251,17 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (leadsize != 0 && if (leadsize != 0 &&
chunk_hooks->split(extent_node_addr_get(node), chunk_hooks->split(extent_node_addr_get(node),
extent_node_size_get(node), leadsize, size, false, arena->ind)) { extent_node_size_get(node), leadsize, size, false, arena->ind)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx); malloc_mutex_unlock(&arena->chunks_mtx);
return (NULL); return (NULL);
} }
/* Remove node from the tree. */ /* Remove node from the tree. */
extent_tree_szsnad_remove(chunks_szsnad, node); extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node); extent_tree_ad_remove(chunks_ad, node);
arena_chunk_cache_maybe_remove(arena, node, cache); arena_chunk_cache_maybe_remove(arena, node, cache);
if (leadsize != 0) { if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */ /* Insert the leading space as a smaller chunk. */
extent_node_size_set(node, leadsize); extent_node_size_set(node, leadsize);
extent_tree_szsnad_insert(chunks_szsnad, node); extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node); extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache); arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL; node = NULL;
@@ -275,42 +271,41 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (chunk_hooks->split(ret, size + trailsize, size, if (chunk_hooks->split(ret, size + trailsize, size,
trailsize, false, arena->ind)) { trailsize, false, arena->ind)) {
if (dalloc_node && node != NULL) if (dalloc_node && node != NULL)
arena_node_dalloc(tsdn, arena, node); arena_node_dalloc(arena, node);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx); malloc_mutex_unlock(&arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
chunks_ad, cache, ret, size + trailsize, *sn, cache, ret, size + trailsize, zeroed, committed);
zeroed, committed);
return (NULL); return (NULL);
} }
/* Insert the trailing space as a smaller chunk. */ /* Insert the trailing space as a smaller chunk. */
if (node == NULL) { if (node == NULL) {
node = arena_node_alloc(tsdn, arena); node = arena_node_alloc(arena);
if (node == NULL) { if (node == NULL) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx); malloc_mutex_unlock(&arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunk_record(arena, chunk_hooks, chunks_szad,
chunks_szsnad, chunks_ad, cache, ret, size chunks_ad, cache, ret, size + trailsize,
+ trailsize, *sn, zeroed, committed); zeroed, committed);
return (NULL); return (NULL);
} }
} }
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size), extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
trailsize, *sn, zeroed, committed); trailsize, zeroed, committed);
extent_tree_szsnad_insert(chunks_szsnad, node); extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node); extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache); arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL; node = NULL;
} }
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) { if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx); malloc_mutex_unlock(&arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad, chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
cache, ret, size, *sn, zeroed, committed); ret, size, zeroed, committed);
return (NULL); return (NULL);
} }
malloc_mutex_unlock(tsdn, &arena->chunks_mtx); malloc_mutex_unlock(&arena->chunks_mtx);
assert(dalloc_node || node != NULL); assert(dalloc_node || node != NULL);
if (dalloc_node && node != NULL) if (dalloc_node && node != NULL)
arena_node_dalloc(tsdn, arena, node); arena_node_dalloc(arena, node);
if (*zero) { if (*zero) {
if (!zeroed) if (!zeroed)
memset(ret, 0, size); memset(ret, 0, size);
@@ -318,11 +313,10 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t i; size_t i;
size_t *p = (size_t *)(uintptr_t)ret; size_t *p = (size_t *)(uintptr_t)ret;
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++) for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0); assert(p[i] == 0);
} }
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
} }
return (ret); return (ret);
} }
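Most of chunk_recycle() is tree bookkeeping around one piece of arithmetic: align the candidate address up within the chosen free extent, then split off whatever lies before (leadsize) and after (trailsize) the requested block. That arithmetic in isolation, assuming a power-of-two alignment:

#include <stdint.h>
#include <stddef.h>

/* Round addr up to a power-of-two alignment. */
#define ALIGNMENT_CEILING(a, align) \
    (((a) + ((uintptr_t)(align) - 1)) & ~((uintptr_t)(align) - 1))

struct span { uintptr_t addr; size_t size; };

/* Caller guarantees ext->size >= *leadsize + size. */
static uintptr_t
carve_aligned(const struct span *ext, size_t size, size_t alignment,
    size_t *leadsize, size_t *trailsize)
{
    uintptr_t ret = ALIGNMENT_CEILING(ext->addr, alignment);

    *leadsize = ret - ext->addr;
    *trailsize = ext->size - *leadsize - size;
    return ret;
}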
@@ -334,29 +328,39 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
* them if they are returned. * them if they are returned.
*/ */
static void * static void *
chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) bool *zero, bool *commit, dss_prec_t dss_prec)
{ {
void *ret; void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
assert(size != 0); assert(size != 0);
assert((size & chunksize_mask) == 0); assert((size & chunksize_mask) == 0);
assert(alignment != 0); assert(alignment != 0);
assert((alignment & chunksize_mask) == 0); assert((alignment & chunksize_mask) == 0);
/* Retained. */
if ((ret = chunk_recycle(arena, &chunk_hooks,
&arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
new_addr, size, alignment, zero, commit, true)) != NULL)
return (ret);
/* "primary" dss. */ /* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret = if (have_dss && dss_prec == dss_prec_primary && (ret =
chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
commit)) != NULL)
return (ret);
/* mmap. */
if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
NULL) NULL)
return (ret); return (ret);
/*
* mmap. Requesting an address is not implemented for
* chunk_alloc_mmap(), so only call it if (new_addr == NULL).
*/
if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
commit)) != NULL)
return (ret);
/* "secondary" dss. */ /* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret = if (have_dss && dss_prec == dss_prec_secondary && (ret =
chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
commit)) != NULL) NULL)
return (ret); return (ret);
/* All strategies for allocation failed. */ /* All strategies for allocation failed. */
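The two versions distribute the retained-chunk attempt differently (inline on the right, via chunk_alloc_retained() further down on the left), but chunk_alloc_core() is in both cases a straight precedence chain: try each backend in turn and return the first non-NULL result. Generically:

#include <stddef.h>

typedef void *(*chunk_backend_t)(size_t size);

/* Try backends in precedence order; NULL means every strategy failed. */
static void *
alloc_with_fallback(chunk_backend_t *backends, size_t n, size_t size)
{
    size_t i;

    for (i = 0; i < n; i++) {
        void *ret = backends[i](size);

        if (ret != NULL)
            return ret;
    }
    return NULL;
}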
@@ -376,7 +380,7 @@ chunk_alloc_base(size_t size)
*/ */
zero = true; zero = true;
commit = true; commit = true;
ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit); ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);
if (config_valgrind) if (config_valgrind)
@@ -386,33 +390,37 @@ chunk_alloc_base(size_t size)
} }
void * void *
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, size_t size, size_t alignment, bool *zero, bool dalloc_node)
bool *commit, bool dalloc_node)
{ {
void *ret; void *ret;
bool commit;
assert(size != 0); assert(size != 0);
assert((size & chunksize_mask) == 0); assert((size & chunksize_mask) == 0);
assert(alignment != 0); assert(alignment != 0);
assert((alignment & chunksize_mask) == 0); assert((alignment & chunksize_mask) == 0);
ret = chunk_recycle(tsdn, arena, chunk_hooks, commit = true;
&arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true, ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
new_addr, size, alignment, sn, zero, commit, dalloc_node); &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
&commit, dalloc_node);
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);
assert(commit);
if (config_valgrind) if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret); return (ret);
} }
static arena_t * static arena_t *
chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind) chunk_arena_get(unsigned arena_ind)
{ {
arena_t *arena; arena_t *arena;
arena = arena_get(tsdn, arena_ind, false); /* Dodge tsd for a0 in order to avoid bootstrapping issues. */
arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
false, true);
/* /*
* The arena we're allocating on behalf of must have been initialized * The arena we're allocating on behalf of must have been initialized
* already. * already.
...@@ -422,12 +430,14 @@ chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind) ...@@ -422,12 +430,14 @@ chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
} }
static void * static void *
chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
size_t size, size_t alignment, bool *zero, bool *commit) bool *commit, unsigned arena_ind)
{ {
void *ret; void *ret;
arena_t *arena;
ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero, arena = chunk_arena_get(arena_ind);
ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
commit, arena->dss_prec); commit, arena->dss_prec);
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);
...@@ -437,80 +447,26 @@ chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, ...@@ -437,80 +447,26 @@ chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
return (ret); return (ret);
} }
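Both sides keep the public chunk-hook signature keyed by an arena index rather than an arena_t *: the hooks are installable via the "arena.<i>.chunk_hooks" mallctl, so they expose only the index, and the default implementation looks the arena back up (4.4.0 via a fetched tsdn, the reverted code dodging tsd for arena 0 during bootstrap). A sketch of that shape, with invented names:

#include <stddef.h>

typedef struct { unsigned ind; } arena_sketch_t;

static arena_sketch_t arenas_sketch[4]; /* hypothetical arena registry */

/* Stand-in for chunk_arena_get(): recover the arena from its index. */
static arena_sketch_t *
arena_lookup_sketch(unsigned arena_ind)
{
    return (&arenas_sketch[arena_ind]);
}

/* Shape of the default hook: the index is the only arena identity passed. */
static void *
chunk_alloc_hook_sketch(size_t size, unsigned arena_ind)
{
    arena_sketch_t *arena = arena_lookup_sketch(arena_ind);

    (void)arena; (void)size;
    return (NULL);  /* the real hook forwards to chunk_alloc_core() */
}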
static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit, unsigned arena_ind)
{
tsdn_t *tsdn;
arena_t *arena;
tsdn = tsdn_fetch();
arena = chunk_arena_get(tsdn, arena_ind);
return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
zero, commit));
}
static void *
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
bool *commit)
{
void *ret;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
ret = chunk_recycle(tsdn, arena, chunk_hooks,
&arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
new_addr, size, alignment, sn, zero, commit, true);
if (config_stats && ret != NULL)
arena->stats.retained -= size;
return (ret);
}
void * void *
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, size_t size, size_t alignment, bool *zero, bool *commit)
bool *commit)
{ {
void *ret; void *ret;
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); chunk_hooks_assure_initialized(arena, chunk_hooks);
ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size, arena->ind);
alignment, sn, zero, commit); if (ret == NULL)
if (ret == NULL) { return (NULL);
if (chunk_hooks->alloc == chunk_alloc_default) { if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
/* Call directly to propagate tsdn. */ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
size, alignment, zero, commit);
} else {
ret = chunk_hooks->alloc(new_addr, size, alignment,
zero, commit, arena->ind);
}
if (ret == NULL)
return (NULL);
*sn = arena_extent_sn_next(arena);
if (config_valgrind && chunk_hooks->alloc !=
chunk_alloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
}
return (ret); return (ret);
} }
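The 4.4.0 side of chunk_alloc_wrapper() first tries chunk_alloc_retained(), then special-cases the default hook: when chunk_hooks->alloc still points at chunk_alloc_default, it calls the internal implementation directly so the already-fetched tsdn can be passed through rather than re-fetched behind the public hook signature. That dispatch pattern, in sketch form:

#include <stddef.h>

typedef void *(alloc_hook_t)(size_t size, unsigned arena_ind);

/* Stand-in for the built-in default hook. */
static void *
alloc_default_sketch(size_t size, unsigned arena_ind)
{
    (void)size; (void)arena_ind;
    return (NULL);
}

/*
 * Compare the installed hook against the known default; if it matches,
 * call the implementation directly (in 4.4.0, with extra internal
 * arguments such as tsdn), otherwise honor the user's hook.
 */
static void *
alloc_via_hook_sketch(alloc_hook_t *hook, size_t size, unsigned arena_ind)
{
    if (hook == alloc_default_sketch)
        return (alloc_default_sketch(size, arena_ind));
    return (hook(size, arena_ind));
}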
static void static void
chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *chunk, size_t size, size_t sn, bool zeroed, bool committed) void *chunk, size_t size, bool zeroed, bool committed)
{ {
bool unzeroed; bool unzeroed;
extent_node_t *node, *prev; extent_node_t *node, *prev;
...@@ -520,9 +476,9 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -520,9 +476,9 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
unzeroed = cache || !zeroed; unzeroed = cache || !zeroed;
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
malloc_mutex_lock(tsdn, &arena->chunks_mtx); malloc_mutex_lock(&arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks); chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0, extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
false, false); false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key); node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */ /* Try to coalesce forward. */
...@@ -534,21 +490,19 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -534,21 +490,19 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/* /*
* Coalesce chunk with the following address range. This does * Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only * not change the position within chunks_ad, so only
* remove/insert from/into chunks_szsnad. * remove/insert from/into chunks_szad.
*/ */
extent_tree_szsnad_remove(chunks_szsnad, node); extent_tree_szad_remove(chunks_szad, node);
arena_chunk_cache_maybe_remove(arena, node, cache); arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, chunk); extent_node_addr_set(node, chunk);
extent_node_size_set(node, size + extent_node_size_get(node)); extent_node_size_set(node, size + extent_node_size_get(node));
if (sn < extent_node_sn_get(node))
extent_node_sn_set(node, sn);
extent_node_zeroed_set(node, extent_node_zeroed_get(node) && extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
!unzeroed); !unzeroed);
extent_tree_szsnad_insert(chunks_szsnad, node); extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache); arena_chunk_cache_maybe_insert(arena, node, cache);
} else { } else {
/* Coalescing forward failed, so insert a new node. */ /* Coalescing forward failed, so insert a new node. */
node = arena_node_alloc(tsdn, arena); node = arena_node_alloc(arena);
if (node == NULL) { if (node == NULL) {
/* /*
* Node allocation failed, which is an exceedingly * Node allocation failed, which is an exceedingly
...@@ -557,15 +511,15 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -557,15 +511,15 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
* a virtual memory leak. * a virtual memory leak.
*/ */
if (cache) { if (cache) {
chunk_purge_wrapper(tsdn, arena, chunk_hooks, chunk_purge_wrapper(arena, chunk_hooks, chunk,
chunk, size, 0, size); size, 0, size);
} }
goto label_return; goto label_return;
} }
extent_node_init(node, arena, chunk, size, sn, !unzeroed, extent_node_init(node, arena, chunk, size, !unzeroed,
committed); committed);
extent_tree_ad_insert(chunks_ad, node); extent_tree_ad_insert(chunks_ad, node);
extent_tree_szsnad_insert(chunks_szsnad, node); extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache); arena_chunk_cache_maybe_insert(arena, node, cache);
} }
...@@ -579,33 +533,31 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -579,33 +533,31 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/* /*
* Coalesce chunk with the previous address range. This does * Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only * not change the position within chunks_ad, so only
* remove/insert node from/into chunks_szsnad. * remove/insert node from/into chunks_szad.
*/ */
extent_tree_szsnad_remove(chunks_szsnad, prev); extent_tree_szad_remove(chunks_szad, prev);
extent_tree_ad_remove(chunks_ad, prev); extent_tree_ad_remove(chunks_ad, prev);
arena_chunk_cache_maybe_remove(arena, prev, cache); arena_chunk_cache_maybe_remove(arena, prev, cache);
extent_tree_szsnad_remove(chunks_szsnad, node); extent_tree_szad_remove(chunks_szad, node);
arena_chunk_cache_maybe_remove(arena, node, cache); arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, extent_node_addr_get(prev)); extent_node_addr_set(node, extent_node_addr_get(prev));
extent_node_size_set(node, extent_node_size_get(prev) + extent_node_size_set(node, extent_node_size_get(prev) +
extent_node_size_get(node)); extent_node_size_get(node));
if (extent_node_sn_get(prev) < extent_node_sn_get(node))
extent_node_sn_set(node, extent_node_sn_get(prev));
extent_node_zeroed_set(node, extent_node_zeroed_get(prev) && extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
extent_node_zeroed_get(node)); extent_node_zeroed_get(node));
extent_tree_szsnad_insert(chunks_szsnad, node); extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache); arena_chunk_cache_maybe_insert(arena, node, cache);
arena_node_dalloc(tsdn, arena, prev); arena_node_dalloc(arena, prev);
} }
label_return: label_return:
malloc_mutex_unlock(tsdn, &arena->chunks_mtx); malloc_mutex_unlock(&arena->chunks_mtx);
} }
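chunk_record() attempts to merge the freed chunk with both of its neighbors in the address-ordered tree; a merge is legal only when the spans are exactly adjacent, and on the 4.4.0 side the coalesced node additionally keeps the smaller serial number of the pair. The adjacency tests in isolation, over an illustrative span type (extent_node_t carries more state than this):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    void   *addr;
    size_t  size;
} span_sketch_t;

/* Forward merge: the successor begins exactly where the chunk ends. */
static bool
can_merge_forward(const span_sketch_t *chunk, const span_sketch_t *next)
{
    return ((uintptr_t)chunk->addr + chunk->size == (uintptr_t)next->addr);
}

/* Backward merge: the predecessor ends exactly where the chunk begins. */
static bool
can_merge_backward(const span_sketch_t *chunk, const span_sketch_t *prev)
{
    return ((uintptr_t)prev->addr + prev->size == (uintptr_t)chunk->addr);
}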
void void
chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
void *chunk, size_t size, size_t sn, bool committed) size_t size, bool committed)
{ {
assert(chunk != NULL); assert(chunk != NULL);
...@@ -613,49 +565,24 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -613,49 +565,24 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(size != 0); assert(size != 0);
assert((size & chunksize_mask) == 0); assert((size & chunksize_mask) == 0);
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached, chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
&arena->chunks_ad_cached, true, chunk, size, sn, false, &arena->chunks_ad_cached, true, chunk, size, false, committed);
committed); arena_maybe_purge(arena);
arena_maybe_purge(tsdn, arena);
}
static bool
chunk_dalloc_default_impl(void *chunk, size_t size)
{
if (!have_dss || !chunk_in_dss(chunk))
return (chunk_dalloc_mmap(chunk, size));
return (true);
}
static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{
return (chunk_dalloc_default_impl(chunk, size));
} }
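Both sides keep the same default deallocation rule: DSS (sbrk) memory cannot be returned to the kernel piecemeal, so the hook reports failure for DSS chunks, leaving them to the decommit/purge/retain path, while mmap()ed chunks are actually unmapped. In sketch form, with stand-ins for chunk_in_dss() and chunk_dalloc_mmap():

#include <stdbool.h>
#include <stddef.h>

static bool in_dss_sketch(void *chunk)             { (void)chunk; return (false); }
static bool unmap_sketch(void *chunk, size_t size) { (void)chunk; (void)size; return (false); }

/* Returns true when the chunk was NOT deallocated (the hook convention). */
static bool
dalloc_default_sketch(void *chunk, size_t size)
{
    if (in_dss_sketch(chunk))
        return (true);  /* sbrk memory: cannot be unmapped */
    return (unmap_sketch(chunk, size));
}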
void void
chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
void *chunk, size_t size, size_t sn, bool zeroed, bool committed) size_t size, bool zeroed, bool committed)
{ {
bool err;
assert(chunk != NULL); assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk); assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0); assert(size != 0);
assert((size & chunksize_mask) == 0); assert((size & chunksize_mask) == 0);
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); chunk_hooks_assure_initialized(arena, chunk_hooks);
/* Try to deallocate. */ /* Try to deallocate. */
if (chunk_hooks->dalloc == chunk_dalloc_default) { if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
/* Call directly to propagate tsdn. */
err = chunk_dalloc_default_impl(chunk, size);
} else
err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
if (!err)
return; return;
/* Try to decommit; purge if that fails. */ /* Try to decommit; purge if that fails. */
if (committed) { if (committed) {
...@@ -664,12 +591,29 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, ...@@ -664,12 +591,29 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
} }
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size, zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind); arena->ind);
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained, chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
&arena->chunks_ad_retained, false, chunk, size, sn, zeroed, &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
committed); }
static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{
if (config_stats) if (!have_dss || !chunk_in_dss(chunk))
arena->stats.retained += size; return (chunk_dalloc_mmap(chunk, size));
return (true);
}
void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool committed)
{
chunk_hooks_assure_initialized(arena, chunk_hooks);
chunk_hooks->dalloc(chunk, size, committed, arena->ind);
if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
} }
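Whatever the hook wiring, both versions walk the same deallocation ladder: try the dalloc hook outright; if that "fails" (returns true), try to decommit; if that also fails, purge, after which the pages read back as zero; whatever is still mapped is recorded in the retained trees for reuse. Condensed, with the same true-on-failure hook convention:

#include <stdbool.h>
#include <stddef.h>

/* Hook stand-ins; returning true means "operation not performed". */
static bool hook_dalloc(void *c, size_t s)   { (void)c; (void)s; return (true); }
static bool hook_decommit(void *c, size_t s) { (void)c; (void)s; return (true); }
static bool hook_purge(void *c, size_t s)    { (void)c; (void)s; return (true); }

/* Stand-in for chunk_record() into the retained trees. */
static void
retain_sketch(void *chunk, size_t size, bool zeroed, bool committed)
{
    (void)chunk; (void)size; (void)zeroed; (void)committed;
}

static void
dalloc_ladder_sketch(void *chunk, size_t size, bool committed)
{
    bool zeroed;

    if (!hook_dalloc(chunk, size))
        return;     /* fully returned to the OS */
    if (committed)
        committed = hook_decommit(chunk, size);
    /* Decommitted or purged pages will read back as zero. */
    zeroed = !committed || !hook_purge(chunk, size);
    retain_sketch(chunk, size, zeroed, committed);
}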
static bool static bool
...@@ -690,9 +634,8 @@ chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length, ...@@ -690,9 +634,8 @@ chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
length)); length));
} }
static bool bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length, chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
unsigned arena_ind)
{ {
assert(chunk != NULL); assert(chunk != NULL);
...@@ -705,12 +648,21 @@ chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length, ...@@ -705,12 +648,21 @@ chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
length)); length));
} }
static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
length));
}
bool bool
chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
void *chunk, size_t size, size_t offset, size_t length) size_t size, size_t offset, size_t length)
{ {
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); chunk_hooks_assure_initialized(arena, chunk_hooks);
return (chunk_hooks->purge(chunk, size, offset, length, arena->ind)); return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
} }
...@@ -725,30 +677,23 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b, ...@@ -725,30 +677,23 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
} }
static bool static bool
chunk_merge_default_impl(void *chunk_a, void *chunk_b) chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
bool committed, unsigned arena_ind)
{ {
if (!maps_coalesce) if (!maps_coalesce)
return (true); return (true);
if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b)) if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
return (true); return (true);
return (false); return (false);
} }
static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
bool committed, unsigned arena_ind)
{
return (chunk_merge_default_impl(chunk_a, chunk_b));
}
static rtree_node_elm_t * static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms) chunks_rtree_node_alloc(size_t nelms)
{ {
return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms * return ((rtree_node_elm_t *)base_alloc(nelms *
sizeof(rtree_node_elm_t))); sizeof(rtree_node_elm_t)));
} }
...@@ -771,7 +716,7 @@ chunk_boot(void) ...@@ -771,7 +716,7 @@ chunk_boot(void)
* so pages_map will always take fast path. * so pages_map will always take fast path.
*/ */
if (!opt_lg_chunk) { if (!opt_lg_chunk) {
opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity) opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
- 1; - 1;
} }
#else #else
...@@ -785,11 +730,32 @@ chunk_boot(void) ...@@ -785,11 +730,32 @@ chunk_boot(void)
chunksize_mask = chunksize - 1; chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> LG_PAGE); chunk_npages = (chunksize >> LG_PAGE);
if (have_dss) if (have_dss && chunk_dss_boot())
chunk_dss_boot(); return (true);
if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) - if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk), chunks_rtree_node_alloc, NULL)) opt_lg_chunk, chunks_rtree_node_alloc, NULL))
return (true); return (true);
return (false); return (false);
} }
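On Windows, chunk_boot() derives the default opt_lg_chunk from the system allocation granularity: the granularity is a power of two, so its base-2 logarithm is the index of its lowest set bit, i.e. ffs(x) - 1 (the two sides only differ in which ffs wrapper they call). A standalone check, assuming the GCC/Clang __builtin_ffs:

#include <assert.h>
#include <stdio.h>

/* lg(x) for power-of-two x, via find-first-set. */
static int
lg_pow2(unsigned x)
{
    assert(x != 0 && (x & (x - 1)) == 0);
    return (__builtin_ffs((int)x) - 1);
}

int
main(void)
{
    /* 64 KiB, a typical dwAllocationGranularity value. */
    printf("lg(65536) = %d\n", lg_pow2(65536)); /* prints 16 */
    return (0);
}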
void
chunk_prefork(void)
{
chunk_dss_prefork();
}
void
chunk_postfork_parent(void)
{
chunk_dss_postfork_parent();
}
void
chunk_postfork_child(void)
{
chunk_dss_postfork_child();
}
...@@ -10,19 +10,20 @@ const char *dss_prec_names[] = { ...@@ -10,19 +10,20 @@ const char *dss_prec_names[] = {
"N/A" "N/A"
}; };
/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
/* /*
* Current dss precedence default, used when creating new arenas. NB: This is * Protects sbrk() calls. This avoids malloc races among threads, though it
* stored as unsigned rather than dss_prec_t because in principle there's no * does not protect against races with threads that call sbrk() directly.
* guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
* atomic operations to synchronize the setting.
*/ */
static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT; static malloc_mutex_t dss_mtx;
/* Base address of the DSS. */ /* Base address of the DSS. */
static void *dss_base; static void *dss_base;
/* Atomic boolean indicating whether the DSS is exhausted. */ /* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static unsigned dss_exhausted; static void *dss_prev;
/* Atomic current upper limit on DSS addresses. */ /* Current upper limit on DSS addresses. */
static void *dss_max; static void *dss_max;
/******************************************************************************/ /******************************************************************************/
...@@ -46,7 +47,9 @@ chunk_dss_prec_get(void) ...@@ -46,7 +47,9 @@ chunk_dss_prec_get(void)
if (!have_dss) if (!have_dss)
return (dss_prec_disabled); return (dss_prec_disabled);
ret = (dss_prec_t)atomic_read_u(&dss_prec_default); malloc_mutex_lock(&dss_mtx);
ret = dss_prec_default;
malloc_mutex_unlock(&dss_mtx);
return (ret); return (ret);
} }
...@@ -56,46 +59,15 @@ chunk_dss_prec_set(dss_prec_t dss_prec) ...@@ -56,46 +59,15 @@ chunk_dss_prec_set(dss_prec_t dss_prec)
if (!have_dss) if (!have_dss)
return (dss_prec != dss_prec_disabled); return (dss_prec != dss_prec_disabled);
atomic_write_u(&dss_prec_default, (unsigned)dss_prec); malloc_mutex_lock(&dss_mtx);
dss_prec_default = dss_prec;
malloc_mutex_unlock(&dss_mtx);
return (false); return (false);
} }
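This pair of hunks is the heart of the dss synchronization change: 4.4.0 stores the default precedence as an unsigned and reads/writes it with atomics (the comment above explains why it is not stored as dss_prec_t directly), while the reverted code serializes every access through dss_mtx. A C11 <stdatomic.h> rendering of the 4.4.0 scheme, purely as a sketch; jemalloc uses its own atomic_read_u()/atomic_write_u() primitives, and the initial value below is illustrative:

#include <stdatomic.h>

typedef enum {
    dss_prec_disabled,
    dss_prec_primary,
    dss_prec_secondary
} dss_prec_t;

/* Stored as unsigned so a fixed-width atomic integer can hold it. */
static _Atomic unsigned dss_prec_default_sketch =
    (unsigned)dss_prec_secondary;

static dss_prec_t
prec_get_sketch(void)
{
    return ((dss_prec_t)atomic_load(&dss_prec_default_sketch));
}

static void
prec_set_sketch(dss_prec_t prec)
{
    atomic_store(&dss_prec_default_sketch, (unsigned)prec);
}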
static void *
chunk_dss_max_update(void *new_addr)
{
void *max_cur;
spin_t spinner;
/*
* Get the current end of the DSS as max_cur and assure that dss_max is
* up to date.
*/
spin_init(&spinner);
while (true) {
void *max_prev = atomic_read_p(&dss_max);
max_cur = chunk_dss_sbrk(0);
if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
/*
* Another thread optimistically updated dss_max. Wait
* for it to finish.
*/
spin_adaptive(&spinner);
continue;
}
if (!atomic_cas_p(&dss_max, max_prev, max_cur))
break;
}
/* Fixed new_addr can only be supported if it is at the edge of DSS. */
if (new_addr != NULL && max_cur != new_addr)
return (NULL);
return (max_cur);
}
void * void *
chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
size_t alignment, bool *zero, bool *commit) bool *zero, bool *commit)
{ {
cassert(have_dss); cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0); assert(size > 0 && (size & chunksize_mask) == 0);
...@@ -108,20 +80,28 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, ...@@ -108,20 +80,28 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
if ((intptr_t)size < 0) if ((intptr_t)size < 0)
return (NULL); return (NULL);
if (!atomic_read_u(&dss_exhausted)) { malloc_mutex_lock(&dss_mtx);
if (dss_prev != (void *)-1) {
/* /*
* The loop is necessary to recover from races with other * The loop is necessary to recover from races with other
* threads that are using the DSS for something other than * threads that are using the DSS for something other than
* malloc. * malloc.
*/ */
while (true) { do {
void *ret, *cpad, *max_cur, *dss_next, *dss_prev; void *ret, *cpad, *dss_next;
size_t gap_size, cpad_size; size_t gap_size, cpad_size;
intptr_t incr; intptr_t incr;
/* Avoid an unnecessary system call. */
if (new_addr != NULL && dss_max != new_addr)
break;
/* Get the current end of the DSS. */
dss_max = chunk_dss_sbrk(0);
max_cur = chunk_dss_max_update(new_addr); /* Make sure the earlier condition still holds. */
if (max_cur == NULL) if (new_addr != NULL && dss_max != new_addr)
goto label_oom; break;
/* /*
* Calculate how much padding is necessary to * Calculate how much padding is necessary to
...@@ -140,29 +120,22 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, ...@@ -140,29 +120,22 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
cpad_size = (uintptr_t)ret - (uintptr_t)cpad; cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
dss_next = (void *)((uintptr_t)ret + size); dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)dss_max || if ((uintptr_t)ret < (uintptr_t)dss_max ||
(uintptr_t)dss_next < (uintptr_t)dss_max) (uintptr_t)dss_next < (uintptr_t)dss_max) {
goto label_oom; /* Wrap-around. */ /* Wrap-around. */
malloc_mutex_unlock(&dss_mtx);
return (NULL);
}
incr = gap_size + cpad_size + size; incr = gap_size + cpad_size + size;
/*
* Optimistically update dss_max, and roll back below if
* sbrk() fails. No other thread will try to extend the
* DSS while dss_max is greater than the current DSS
* max reported by sbrk(0).
*/
if (atomic_cas_p(&dss_max, max_cur, dss_next))
continue;
/* Try to allocate. */
dss_prev = chunk_dss_sbrk(incr); dss_prev = chunk_dss_sbrk(incr);
if (dss_prev == max_cur) { if (dss_prev == dss_max) {
/* Success. */ /* Success. */
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0) { if (cpad_size != 0) {
chunk_hooks_t chunk_hooks = chunk_hooks_t chunk_hooks =
CHUNK_HOOKS_INITIALIZER; CHUNK_HOOKS_INITIALIZER;
chunk_dalloc_wrapper(tsdn, arena, chunk_dalloc_wrapper(arena,
&chunk_hooks, cpad, cpad_size, &chunk_hooks, cpad, cpad_size,
arena_extent_sn_next(arena), false,
true); true);
} }
if (*zero) { if (*zero) {
...@@ -174,65 +147,68 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, ...@@ -174,65 +147,68 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
*commit = pages_decommit(ret, size); *commit = pages_decommit(ret, size);
return (ret); return (ret);
} }
} while (dss_prev != (void *)-1);
/*
* Failure, whether due to OOM or a race with a raw
* sbrk() call from outside the allocator. Try to roll
* back optimistic dss_max update; if rollback fails,
* it's due to another caller of this function having
* succeeded since this invocation started, in which
* case rollback is not necessary.
*/
atomic_cas_p(&dss_max, dss_next, max_cur);
if (dss_prev == (void *)-1) {
/* OOM. */
atomic_write_u(&dss_exhausted, (unsigned)true);
goto label_oom;
}
}
} }
label_oom: malloc_mutex_unlock(&dss_mtx);
return (NULL);
}
static bool
chunk_in_dss_helper(void *chunk, void *max)
{
return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk < return (NULL);
(uintptr_t)max);
} }
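Both versions of chunk_alloc_dss() share the same alignment arithmetic once the current break is known: compute a gap that carries the break up to a chunk boundary, treat any further alignment padding (cpad) as a chunk to be freed back through chunk_dalloc_wrapper(), and detect address wrap-around before committing to sbrk(). A simplified, self-contained rendering with made-up addresses (the real code also handles the new_addr fast path and asserts alignment is a multiple of the chunk size):

#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT_CEILING(s, a) \
    (((s) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))

int
main(void)
{
    uintptr_t dss_max = 0x200123;    /* hypothetical current break */
    uintptr_t chunksize = 0x100000;  /* 1 MiB chunks */
    uintptr_t alignment = 0x200000;  /* 2 MiB requested alignment */

    /* Gap that brings the break up to the next chunk boundary. */
    uintptr_t gap_size = (chunksize - (dss_max & (chunksize - 1))) &
        (chunksize - 1);
    uintptr_t cpad = dss_max + gap_size;    /* padding chunk start */
    uintptr_t ret = ALIGNMENT_CEILING(dss_max, alignment);
    uintptr_t cpad_size = ret - cpad;       /* freed back if nonzero */

    printf("gap=%#lx cpad_size=%#lx ret=%#lx\n", (unsigned long)gap_size,
        (unsigned long)cpad_size, (unsigned long)ret);
    return (0);
}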
bool bool
chunk_in_dss(void *chunk) chunk_in_dss(void *chunk)
{ {
bool ret;
cassert(have_dss); cassert(have_dss);
return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max))); malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max)
ret = true;
else
ret = false;
malloc_mutex_unlock(&dss_mtx);
return (ret);
} }
bool bool
chunk_dss_mergeable(void *chunk_a, void *chunk_b) chunk_dss_boot(void)
{ {
void *max;
cassert(have_dss); cassert(have_dss);
max = atomic_read_p(&dss_max); if (malloc_mutex_init(&dss_mtx))
return (chunk_in_dss_helper(chunk_a, max) == return (true);
chunk_in_dss_helper(chunk_b, max)); dss_base = chunk_dss_sbrk(0);
dss_prev = dss_base;
dss_max = dss_base;
return (false);
} }
void void
chunk_dss_boot(void) chunk_dss_prefork(void)
{ {
cassert(have_dss); if (have_dss)
malloc_mutex_prefork(&dss_mtx);
}
dss_base = chunk_dss_sbrk(0); void
dss_exhausted = (unsigned)(dss_base == (void *)-1); chunk_dss_postfork_parent(void)
dss_max = dss_base; {
if (have_dss)
malloc_mutex_postfork_parent(&dss_mtx);
}
void
chunk_dss_postfork_child(void)
{
if (have_dss)
malloc_mutex_postfork_child(&dss_mtx);
} }
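The reverted code brings dss_mtx back, and with it the fork plumbing above: the mutex must be held across fork() so the child never inherits it in a locked state (a classic source of post-fork deadlocks in allocators), then released on both sides. The same discipline expressed through pthread_atfork(), purely as illustration; jemalloc actually routes these through its own *_prefork()/*_postfork_*() entry points:

#include <pthread.h>

static pthread_mutex_t dss_mtx_sketch = PTHREAD_MUTEX_INITIALIZER;

/* Acquire before fork(); release in both parent and child afterwards. */
static void prefork_sketch(void)         { pthread_mutex_lock(&dss_mtx_sketch); }
static void postfork_parent_sketch(void) { pthread_mutex_unlock(&dss_mtx_sketch); }
static void postfork_child_sketch(void)  { pthread_mutex_unlock(&dss_mtx_sketch); }

static void
register_fork_handlers_sketch(void)
{
    pthread_atfork(prefork_sketch, postfork_parent_sketch,
        postfork_child_sketch);
}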
/******************************************************************************/ /******************************************************************************/
...@@ -16,22 +16,23 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit) ...@@ -16,22 +16,23 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
do { do {
void *pages; void *pages;
size_t leadsize; size_t leadsize;
pages = pages_map(NULL, alloc_size, commit); pages = pages_map(NULL, alloc_size);
if (pages == NULL) if (pages == NULL)
return (NULL); return (NULL);
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
(uintptr_t)pages; (uintptr_t)pages;
ret = pages_trim(pages, alloc_size, leadsize, size, commit); ret = pages_trim(pages, alloc_size, leadsize, size);
} while (ret == NULL); } while (ret == NULL);
assert(ret != NULL); assert(ret != NULL);
*zero = true; *zero = true;
if (!*commit)
*commit = pages_decommit(ret, size);
return (ret); return (ret);
} }
void * void *
chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
bool *commit)
{ {
void *ret; void *ret;
size_t offset; size_t offset;
...@@ -52,10 +53,9 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, ...@@ -52,10 +53,9 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
assert(alignment != 0); assert(alignment != 0);
assert((alignment & chunksize_mask) == 0); assert((alignment & chunksize_mask) == 0);
ret = pages_map(new_addr, size, commit); ret = pages_map(NULL, size);
if (ret == NULL || ret == new_addr) if (ret == NULL)
return (ret); return (NULL);
assert(new_addr == NULL);
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) { if (offset != 0) {
pages_unmap(ret, size); pages_unmap(ret, size);
...@@ -64,6 +64,8 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, ...@@ -64,6 +64,8 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
assert(ret != NULL); assert(ret != NULL);
*zero = true; *zero = true;
if (!*commit)
*commit = pages_decommit(ret, size);
return (ret); return (ret);
} }
......
...@@ -99,8 +99,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, ...@@ -99,8 +99,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* Cycle through the cells in the bucket, starting at a random position. * Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up. * The randomness avoids worst-case search overhead as buckets fill up.
*/ */
offset = (unsigned)prng_lg_range_u64(&ckh->prng_state, prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
LG_CKH_BUCKET_CELLS);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
...@@ -142,8 +141,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, ...@@ -142,8 +141,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
* were an item for which both hashes indicated the same * were an item for which both hashes indicated the same
* bucket. * bucket.
*/ */
i = (unsigned)prng_lg_range_u64(&ckh->prng_state, prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
LG_CKH_BUCKET_CELLS);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL); assert(cell->key != NULL);
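Both prng variants in these hunks are linear congruential generators: advance state = state * a + c, then keep only the top lg_range bits, which are the best-mixed bits of an LCG; 4.4.0 merely switches ckh from the 32-bit macro to the 64-bit inline function. A sketch of the 64-bit form, using the Knuth MMIX constants jemalloc's prng_lg_range_u64() is built on (treat the exact values as illustrative here):

#include <stdint.h>
#include <stdio.h>

static uint64_t
prng_lg_range_sketch(uint64_t *state, unsigned lg_range)
{
    *state = *state * 6364136223846793005ULL + 1442695040888963407ULL;
    return (*state >> (64 - lg_range));     /* top lg_range bits */
}

int
main(void)
{
    uint64_t state = 42;    /* arbitrary seed */

    /* Two draws in [0, 2^4); lg_range = 4 purely for illustration. */
    printf("%llu %llu\n",
        (unsigned long long)prng_lg_range_sketch(&state, 4),
        (unsigned long long)prng_lg_range_sketch(&state, 4));
    return (0);
}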
...@@ -249,7 +247,8 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) ...@@ -249,7 +247,8 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
{ {
bool ret; bool ret;
ckhc_t *tab, *ttab; ckhc_t *tab, *ttab;
unsigned lg_prevbuckets, lg_curcells; size_t lg_curcells;
unsigned lg_prevbuckets;
#ifdef CKH_COUNT #ifdef CKH_COUNT
ckh->ngrows++; ckh->ngrows++;
...@@ -267,12 +266,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) ...@@ -267,12 +266,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
lg_curcells++; lg_curcells++;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { if (usize == 0) {
ret = true; ret = true;
goto label_return; goto label_return;
} }
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
true, NULL, true, arena_ichoose(tsd, NULL)); true, NULL);
if (tab == NULL) { if (tab == NULL) {
ret = true; ret = true;
goto label_return; goto label_return;
...@@ -284,12 +283,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) ...@@ -284,12 +283,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) { if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd_tsdn(tsd), tab, NULL, true, true); idalloctm(tsd, tab, tcache_get(tsd, false), true);
break; break;
} }
/* Rebuilding failed, so back out partially rebuilt table. */ /* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
ckh->tab = tab; ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets; ckh->lg_curbuckets = lg_prevbuckets;
} }
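The unlikely(usize == 0 || usize > HUGE_MAXCLASS) guard the 4.4.0 side adds here exists because the request sizeof(ckhc_t) << lg_curcells can wrap as the table doubles: sa2u() returns 0 on overflow, and the extra bound rejects sizes past the largest size class. One defensive way to compute such a table size, with a made-up limit standing in for HUGE_MAXCLASS:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define HUGE_MAXCLASS_SKETCH ((size_t)1 << 62) /* stand-in limit */

/* Returns true on error, matching the surrounding code's convention. */
static bool
table_size_sketch(size_t elem_size, unsigned lg_cells, size_t *usize)
{
    /* Reject the shift before it can wrap. */
    if (lg_cells >= sizeof(size_t) * 8 ||
        elem_size > (SIZE_MAX >> lg_cells))
        return (true);
    *usize = elem_size << lg_cells;
    return (*usize == 0 || *usize > HUGE_MAXCLASS_SKETCH);
}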
...@@ -303,8 +302,8 @@ static void ...@@ -303,8 +302,8 @@ static void
ckh_shrink(tsd_t *tsd, ckh_t *ckh) ckh_shrink(tsd_t *tsd, ckh_t *ckh)
{ {
ckhc_t *tab, *ttab; ckhc_t *tab, *ttab;
size_t usize; size_t lg_curcells, usize;
unsigned lg_prevbuckets, lg_curcells; unsigned lg_prevbuckets;
/* /*
* It is possible (though unlikely, given well behaved hashes) that the * It is possible (though unlikely, given well behaved hashes) that the
...@@ -313,10 +312,10 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) ...@@ -313,10 +312,10 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
lg_prevbuckets = ckh->lg_curbuckets; lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) if (usize == 0)
return; return;
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
true, arena_ichoose(tsd, NULL)); NULL);
if (tab == NULL) { if (tab == NULL) {
/* /*
* An OOM error isn't worth propagating, since it doesn't * An OOM error isn't worth propagating, since it doesn't
...@@ -331,7 +330,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) ...@@ -331,7 +330,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) { if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd_tsdn(tsd), tab, NULL, true, true); idalloctm(tsd, tab, tcache_get(tsd, false), true);
#ifdef CKH_COUNT #ifdef CKH_COUNT
ckh->nshrinks++; ckh->nshrinks++;
#endif #endif
...@@ -339,7 +338,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) ...@@ -339,7 +338,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
} }
/* Rebuilding failed, so back out partially rebuilt table. */ /* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
ckh->tab = tab; ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets; ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT #ifdef CKH_COUNT
...@@ -388,12 +387,12 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ...@@ -388,12 +387,12 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh->keycomp = keycomp; ckh->keycomp = keycomp;
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { if (usize == 0) {
ret = true; ret = true;
goto label_return; goto label_return;
} }
ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
NULL, true, arena_ichoose(tsd, NULL)); NULL);
if (ckh->tab == NULL) { if (ckh->tab == NULL) {
ret = true; ret = true;
goto label_return; goto label_return;
...@@ -422,9 +421,9 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh) ...@@ -422,9 +421,9 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
(unsigned long long)ckh->nrelocs); (unsigned long long)ckh->nrelocs);
#endif #endif
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
if (config_debug) if (config_debug)
memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); memset(ckh, 0x5a, sizeof(ckh_t));
} }
size_t size_t
......
...@@ -24,7 +24,7 @@ ctl_named_node(const ctl_node_t *node) ...@@ -24,7 +24,7 @@ ctl_named_node(const ctl_node_t *node)
} }
JEMALLOC_INLINE_C const ctl_named_node_t * JEMALLOC_INLINE_C const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, size_t index) ctl_named_children(const ctl_named_node_t *node, int index)
{ {
const ctl_named_node_t *children = ctl_named_node(node->children); const ctl_named_node_t *children = ctl_named_node(node->children);
...@@ -42,25 +42,25 @@ ctl_indexed_node(const ctl_node_t *node) ...@@ -42,25 +42,25 @@ ctl_indexed_node(const ctl_node_t *node)
/* Function prototypes for non-inline static functions. */ /* Function prototypes for non-inline static functions. */
#define CTL_PROTO(n) \ #define CTL_PROTO(n) \
static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen); size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \ #define INDEX_PROTO(n) \
static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \ static const ctl_named_node_t *n##_index(const size_t *mib, \
const size_t *mib, size_t miblen, size_t i); size_t miblen, size_t i);
static bool ctl_arena_init(ctl_arena_stats_t *astats); static bool ctl_arena_init(ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats); static void ctl_arena_clear(ctl_arena_stats_t *astats);
static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
arena_t *arena); arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats); ctl_arena_stats_t *astats);
static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i); static void ctl_arena_refresh(arena_t *arena, unsigned i);
static bool ctl_grow(tsdn_t *tsdn); static bool ctl_grow(void);
static void ctl_refresh(tsdn_t *tsdn); static void ctl_refresh(void);
static bool ctl_init(tsdn_t *tsdn); static bool ctl_init(void);
static int ctl_lookup(tsdn_t *tsdn, const char *name, static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
ctl_node_t const **nodesp, size_t *mibp, size_t *depthp); size_t *mibp, size_t *depthp);
CTL_PROTO(version) CTL_PROTO(version)
CTL_PROTO(epoch) CTL_PROTO(epoch)
...@@ -77,7 +77,6 @@ CTL_PROTO(config_cache_oblivious) ...@@ -77,7 +77,6 @@ CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug) CTL_PROTO(config_debug)
CTL_PROTO(config_fill) CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock) CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_munmap) CTL_PROTO(config_munmap)
CTL_PROTO(config_prof) CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc) CTL_PROTO(config_prof_libgcc)
...@@ -92,9 +91,7 @@ CTL_PROTO(opt_abort) ...@@ -92,9 +91,7 @@ CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss) CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk) CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas) CTL_PROTO(opt_narenas)
CTL_PROTO(opt_purge)
CTL_PROTO(opt_lg_dirty_mult) CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_decay_time)
CTL_PROTO(opt_stats_print) CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk) CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero) CTL_PROTO(opt_zero)
...@@ -117,13 +114,10 @@ CTL_PROTO(opt_prof_accum) ...@@ -117,13 +114,10 @@ CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create) CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush) CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy) CTL_PROTO(tcache_destroy)
static void arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all);
CTL_PROTO(arena_i_purge) CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_decay) static void arena_purge(unsigned arena_ind);
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_dss) CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_lg_dirty_mult) CTL_PROTO(arena_i_lg_dirty_mult)
CTL_PROTO(arena_i_decay_time)
CTL_PROTO(arena_i_chunk_hooks) CTL_PROTO(arena_i_chunk_hooks)
INDEX_PROTO(arena_i) INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size) CTL_PROTO(arenas_bin_i_size)
...@@ -137,7 +131,6 @@ INDEX_PROTO(arenas_hchunk_i) ...@@ -137,7 +131,6 @@ INDEX_PROTO(arenas_hchunk_i)
CTL_PROTO(arenas_narenas) CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized) CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_lg_dirty_mult) CTL_PROTO(arenas_lg_dirty_mult)
CTL_PROTO(arenas_decay_time)
CTL_PROTO(arenas_quantum) CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page) CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max) CTL_PROTO(arenas_tcache_max)
...@@ -188,11 +181,9 @@ INDEX_PROTO(stats_arenas_i_hchunks_j) ...@@ -188,11 +181,9 @@ INDEX_PROTO(stats_arenas_i_hchunks_j)
CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss) CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_lg_dirty_mult) CTL_PROTO(stats_arenas_i_lg_dirty_mult)
CTL_PROTO(stats_arenas_i_decay_time)
CTL_PROTO(stats_arenas_i_pactive) CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty) CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped) CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_npurge) CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise) CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged) CTL_PROTO(stats_arenas_i_purged)
...@@ -205,7 +196,6 @@ CTL_PROTO(stats_active) ...@@ -205,7 +196,6 @@ CTL_PROTO(stats_active)
CTL_PROTO(stats_metadata) CTL_PROTO(stats_metadata)
CTL_PROTO(stats_resident) CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped) CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
/******************************************************************************/ /******************************************************************************/
/* mallctl tree. */ /* mallctl tree. */
...@@ -251,7 +241,6 @@ static const ctl_named_node_t config_node[] = { ...@@ -251,7 +241,6 @@ static const ctl_named_node_t config_node[] = {
{NAME("debug"), CTL(config_debug)}, {NAME("debug"), CTL(config_debug)},
{NAME("fill"), CTL(config_fill)}, {NAME("fill"), CTL(config_fill)},
{NAME("lazy_lock"), CTL(config_lazy_lock)}, {NAME("lazy_lock"), CTL(config_lazy_lock)},
{NAME("malloc_conf"), CTL(config_malloc_conf)},
{NAME("munmap"), CTL(config_munmap)}, {NAME("munmap"), CTL(config_munmap)},
{NAME("prof"), CTL(config_prof)}, {NAME("prof"), CTL(config_prof)},
{NAME("prof_libgcc"), CTL(config_prof_libgcc)}, {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
...@@ -269,9 +258,7 @@ static const ctl_named_node_t opt_node[] = { ...@@ -269,9 +258,7 @@ static const ctl_named_node_t opt_node[] = {
{NAME("dss"), CTL(opt_dss)}, {NAME("dss"), CTL(opt_dss)},
{NAME("lg_chunk"), CTL(opt_lg_chunk)}, {NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)}, {NAME("narenas"), CTL(opt_narenas)},
{NAME("purge"), CTL(opt_purge)},
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
{NAME("decay_time"), CTL(opt_decay_time)},
{NAME("stats_print"), CTL(opt_stats_print)}, {NAME("stats_print"), CTL(opt_stats_print)},
{NAME("junk"), CTL(opt_junk)}, {NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)}, {NAME("zero"), CTL(opt_zero)},
...@@ -301,11 +288,8 @@ static const ctl_named_node_t tcache_node[] = { ...@@ -301,11 +288,8 @@ static const ctl_named_node_t tcache_node[] = {
static const ctl_named_node_t arena_i_node[] = { static const ctl_named_node_t arena_i_node[] = {
{NAME("purge"), CTL(arena_i_purge)}, {NAME("purge"), CTL(arena_i_purge)},
{NAME("decay"), CTL(arena_i_decay)},
{NAME("reset"), CTL(arena_i_reset)},
{NAME("dss"), CTL(arena_i_dss)}, {NAME("dss"), CTL(arena_i_dss)},
{NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)}, {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
{NAME("decay_time"), CTL(arena_i_decay_time)},
{NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)} {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)}
}; };
static const ctl_named_node_t super_arena_i_node[] = { static const ctl_named_node_t super_arena_i_node[] = {
...@@ -355,7 +339,6 @@ static const ctl_named_node_t arenas_node[] = { ...@@ -355,7 +339,6 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("narenas"), CTL(arenas_narenas)}, {NAME("narenas"), CTL(arenas_narenas)},
{NAME("initialized"), CTL(arenas_initialized)}, {NAME("initialized"), CTL(arenas_initialized)},
{NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)}, {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)},
{NAME("decay_time"), CTL(arenas_decay_time)},
{NAME("quantum"), CTL(arenas_quantum)}, {NAME("quantum"), CTL(arenas_quantum)},
{NAME("page"), CTL(arenas_page)}, {NAME("page"), CTL(arenas_page)},
{NAME("tcache_max"), CTL(arenas_tcache_max)}, {NAME("tcache_max"), CTL(arenas_tcache_max)},
...@@ -456,11 +439,9 @@ static const ctl_named_node_t stats_arenas_i_node[] = { ...@@ -456,11 +439,9 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("dss"), CTL(stats_arenas_i_dss)}, {NAME("dss"), CTL(stats_arenas_i_dss)},
{NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)}, {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)},
{NAME("decay_time"), CTL(stats_arenas_i_decay_time)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)}, {NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)}, {NAME("mapped"), CTL(stats_arenas_i_mapped)},
{NAME("retained"), CTL(stats_arenas_i_retained)},
{NAME("npurge"), CTL(stats_arenas_i_npurge)}, {NAME("npurge"), CTL(stats_arenas_i_npurge)},
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
{NAME("purged"), CTL(stats_arenas_i_purged)}, {NAME("purged"), CTL(stats_arenas_i_purged)},
...@@ -487,7 +468,6 @@ static const ctl_named_node_t stats_node[] = { ...@@ -487,7 +468,6 @@ static const ctl_named_node_t stats_node[] = {
{NAME("metadata"), CTL(stats_metadata)}, {NAME("metadata"), CTL(stats_metadata)},
{NAME("resident"), CTL(stats_resident)}, {NAME("resident"), CTL(stats_resident)},
{NAME("mapped"), CTL(stats_mapped)}, {NAME("mapped"), CTL(stats_mapped)},
{NAME("retained"), CTL(stats_retained)},
{NAME("arenas"), CHILD(indexed, stats_arenas)} {NAME("arenas"), CHILD(indexed, stats_arenas)}
}; };
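These tables define the mallctl namespace as a static tree: named nodes dispatch on a component string, while indexed nodes (arena_i, stats_arenas_i, ...) consume a numeric path component via their index callback. A query such as "stats.arenas.0.mapped" is resolved one dot-separated element at a time; the tokenization step looks roughly like this, while the actual tree walk is ctl_lookup() below:

#include <stdio.h>
#include <string.h>

int
main(void)
{
    char name[] = "stats.arenas.0.mapped"; /* example mallctl name */
    char *save, *elm;

    for (elm = strtok_r(name, ".", &save); elm != NULL;
        elm = strtok_r(NULL, ".", &save))
        printf("component: %s\n", elm);
    return (0);
}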
...@@ -539,10 +519,8 @@ static void ...@@ -539,10 +519,8 @@ static void
ctl_arena_clear(ctl_arena_stats_t *astats) ctl_arena_clear(ctl_arena_stats_t *astats)
{ {
astats->nthreads = 0;
astats->dss = dss_prec_names[dss_prec_limit]; astats->dss = dss_prec_names[dss_prec_limit];
astats->lg_dirty_mult = -1; astats->lg_dirty_mult = -1;
astats->decay_time = -1;
astats->pactive = 0; astats->pactive = 0;
astats->pdirty = 0; astats->pdirty = 0;
if (config_stats) { if (config_stats) {
...@@ -560,27 +538,20 @@ ctl_arena_clear(ctl_arena_stats_t *astats) ...@@ -560,27 +538,20 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
} }
static void static void
ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena) ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{ {
unsigned i; unsigned i;
if (config_stats) { arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult,
arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss, &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats,
&cstats->lg_dirty_mult, &cstats->decay_time, cstats->lstats, cstats->hstats);
&cstats->pactive, &cstats->pdirty, &cstats->astats,
cstats->bstats, cstats->lstats, cstats->hstats); for (i = 0; i < NBINS; i++) {
cstats->allocated_small += cstats->bstats[i].curregs *
for (i = 0; i < NBINS; i++) { index2size(i);
cstats->allocated_small += cstats->bstats[i].curregs * cstats->nmalloc_small += cstats->bstats[i].nmalloc;
index2size(i); cstats->ndalloc_small += cstats->bstats[i].ndalloc;
cstats->nmalloc_small += cstats->bstats[i].nmalloc; cstats->nrequests_small += cstats->bstats[i].nrequests;
cstats->ndalloc_small += cstats->bstats[i].ndalloc;
cstats->nrequests_small += cstats->bstats[i].nrequests;
}
} else {
arena_basic_stats_merge(tsdn, arena, &cstats->nthreads,
&cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time,
&cstats->pactive, &cstats->pdirty);
} }
} }
...@@ -589,91 +560,89 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) ...@@ -589,91 +560,89 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{ {
unsigned i; unsigned i;
sstats->nthreads += astats->nthreads;
sstats->pactive += astats->pactive; sstats->pactive += astats->pactive;
sstats->pdirty += astats->pdirty; sstats->pdirty += astats->pdirty;
if (config_stats) { sstats->astats.mapped += astats->astats.mapped;
sstats->astats.mapped += astats->astats.mapped; sstats->astats.npurge += astats->astats.npurge;
sstats->astats.retained += astats->astats.retained; sstats->astats.nmadvise += astats->astats.nmadvise;
sstats->astats.npurge += astats->astats.npurge; sstats->astats.purged += astats->astats.purged;
sstats->astats.nmadvise += astats->astats.nmadvise;
sstats->astats.purged += astats->astats.purged; sstats->astats.metadata_mapped += astats->astats.metadata_mapped;
sstats->astats.metadata_allocated += astats->astats.metadata_allocated;
sstats->astats.metadata_mapped +=
astats->astats.metadata_mapped; sstats->allocated_small += astats->allocated_small;
sstats->astats.metadata_allocated += sstats->nmalloc_small += astats->nmalloc_small;
astats->astats.metadata_allocated; sstats->ndalloc_small += astats->ndalloc_small;
sstats->nrequests_small += astats->nrequests_small;
sstats->allocated_small += astats->allocated_small;
sstats->nmalloc_small += astats->nmalloc_small; sstats->astats.allocated_large += astats->astats.allocated_large;
sstats->ndalloc_small += astats->ndalloc_small; sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
sstats->nrequests_small += astats->nrequests_small; sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
sstats->astats.nrequests_large += astats->astats.nrequests_large;
sstats->astats.allocated_large +=
astats->astats.allocated_large; sstats->astats.allocated_huge += astats->astats.allocated_huge;
sstats->astats.nmalloc_large += astats->astats.nmalloc_large; sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
sstats->astats.ndalloc_large += astats->astats.ndalloc_large; sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
sstats->astats.nrequests_large +=
astats->astats.nrequests_large; for (i = 0; i < NBINS; i++) {
sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sstats->astats.allocated_huge += astats->astats.allocated_huge; sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge; sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge; sstats->bstats[i].curregs += astats->bstats[i].curregs;
if (config_tcache) {
for (i = 0; i < NBINS; i++) { sstats->bstats[i].nfills += astats->bstats[i].nfills;
sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; sstats->bstats[i].nflushes +=
sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; astats->bstats[i].nflushes;
sstats->bstats[i].nrequests +=
astats->bstats[i].nrequests;
sstats->bstats[i].curregs += astats->bstats[i].curregs;
if (config_tcache) {
sstats->bstats[i].nfills +=
astats->bstats[i].nfills;
sstats->bstats[i].nflushes +=
astats->bstats[i].nflushes;
}
sstats->bstats[i].nruns += astats->bstats[i].nruns;
sstats->bstats[i].reruns += astats->bstats[i].reruns;
sstats->bstats[i].curruns += astats->bstats[i].curruns;
} }
sstats->bstats[i].nruns += astats->bstats[i].nruns;
sstats->bstats[i].reruns += astats->bstats[i].reruns;
sstats->bstats[i].curruns += astats->bstats[i].curruns;
}
for (i = 0; i < nlclasses; i++) { for (i = 0; i < nlclasses; i++) {
sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
sstats->lstats[i].nrequests += sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
astats->lstats[i].nrequests; sstats->lstats[i].curruns += astats->lstats[i].curruns;
sstats->lstats[i].curruns += astats->lstats[i].curruns; }
}
for (i = 0; i < nhclasses; i++) { for (i = 0; i < nhclasses; i++) {
sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc; sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc; sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
sstats->hstats[i].curhchunks += sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks;
astats->hstats[i].curhchunks;
}
} }
} }
static void static void
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i) ctl_arena_refresh(arena_t *arena, unsigned i)
{ {
ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas]; ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
ctl_arena_clear(astats); ctl_arena_clear(astats);
ctl_arena_stats_amerge(tsdn, astats, arena);
/* Merge into sum stats as well. */ sstats->nthreads += astats->nthreads;
ctl_arena_stats_smerge(sstats, astats); if (config_stats) {
ctl_arena_stats_amerge(astats, arena);
/* Merge into sum stats as well. */
ctl_arena_stats_smerge(sstats, astats);
} else {
astats->pactive += arena->nactive;
astats->pdirty += arena->ndirty;
/* Merge into sum stats as well. */
sstats->pactive += arena->nactive;
sstats->pdirty += arena->ndirty;
}
} }
static bool static bool
ctl_grow(tsdn_t *tsdn) ctl_grow(void)
{ {
ctl_arena_stats_t *astats; ctl_arena_stats_t *astats;
/* Initialize new arena. */ /* Initialize new arena. */
if (arena_init(tsdn, ctl_stats.narenas) == NULL) if (arena_init(ctl_stats.narenas) == NULL)
return (true); return (true);
/* Allocate extended arena stats. */ /* Allocate extended arena stats. */
...@@ -708,32 +677,47 @@ ctl_grow(tsdn_t *tsdn) ...@@ -708,32 +677,47 @@ ctl_grow(tsdn_t *tsdn)
} }
static void static void
ctl_refresh(tsdn_t *tsdn) ctl_refresh(void)
{ {
tsd_t *tsd;
unsigned i; unsigned i;
bool refreshed;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
/* /*
* Clear sum stats, since they will be merged into by * Clear sum stats, since they will be merged into by
* ctl_arena_refresh(). * ctl_arena_refresh().
*/ */
ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]); ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
for (i = 0; i < ctl_stats.narenas; i++) tsd = tsd_fetch();
tarenas[i] = arena_get(tsdn, i, false); for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
tarenas[i] = arena_get(tsd, i, false, false);
if (tarenas[i] == NULL && !refreshed) {
tarenas[i] = arena_get(tsd, i, false, true);
refreshed = true;
}
}
for (i = 0; i < ctl_stats.narenas; i++) {
if (tarenas[i] != NULL)
ctl_stats.arenas[i].nthreads = arena_nbound(i);
else
ctl_stats.arenas[i].nthreads = 0;
}
for (i = 0; i < ctl_stats.narenas; i++) { for (i = 0; i < ctl_stats.narenas; i++) {
bool initialized = (tarenas[i] != NULL); bool initialized = (tarenas[i] != NULL);
ctl_stats.arenas[i].initialized = initialized; ctl_stats.arenas[i].initialized = initialized;
if (initialized) if (initialized)
ctl_arena_refresh(tsdn, tarenas[i], i); ctl_arena_refresh(tarenas[i], i);
} }
if (config_stats) { if (config_stats) {
size_t base_allocated, base_resident, base_mapped; size_t base_allocated, base_resident, base_mapped;
base_stats_get(tsdn, &base_allocated, &base_resident, base_stats_get(&base_allocated, &base_resident, &base_mapped);
&base_mapped);
ctl_stats.allocated = ctl_stats.allocated =
ctl_stats.arenas[ctl_stats.narenas].allocated_small + ctl_stats.arenas[ctl_stats.narenas].allocated_small +
ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
...@@ -750,19 +734,17 @@ ctl_refresh(tsdn_t *tsdn) ...@@ -750,19 +734,17 @@ ctl_refresh(tsdn_t *tsdn)
ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE); ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
ctl_stats.mapped = base_mapped + ctl_stats.mapped = base_mapped +
ctl_stats.arenas[ctl_stats.narenas].astats.mapped; ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
ctl_stats.retained =
ctl_stats.arenas[ctl_stats.narenas].astats.retained;
} }
ctl_epoch++; ctl_epoch++;
} }
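ctl_refresh() snapshots all per-arena statistics into ctl_stats and then bumps ctl_epoch; stats readers trigger a refresh by writing to the "epoch" mallctl, so between bumps every query observes one consistent snapshot rather than live counters. In miniature, with a stand-in for the merge work above:

#include <stdint.h>

static uint64_t ctl_epoch_sketch;

/* Stand-in for merging per-arena stats into the cached totals. */
static void
snapshot_stats_sketch(void)
{
}

/* A write to the "epoch" mallctl lands here: refresh, then advance. */
static void
epoch_advance_sketch(void)
{
    snapshot_stats_sketch();
    ctl_epoch_sketch++;
}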
static bool static bool
ctl_init(tsdn_t *tsdn) ctl_init(void)
{ {
bool ret; bool ret;
malloc_mutex_lock(tsdn, &ctl_mtx); malloc_mutex_lock(&ctl_mtx);
if (!ctl_initialized) { if (!ctl_initialized) {
/* /*
* Allocate space for one extra arena stats element, which * Allocate space for one extra arena stats element, which
...@@ -804,19 +786,19 @@ ctl_init(tsdn_t *tsdn) ...@@ -804,19 +786,19 @@ ctl_init(tsdn_t *tsdn)
ctl_stats.arenas[ctl_stats.narenas].initialized = true; ctl_stats.arenas[ctl_stats.narenas].initialized = true;
ctl_epoch = 0; ctl_epoch = 0;
ctl_refresh(tsdn); ctl_refresh();
ctl_initialized = true; ctl_initialized = true;
} }
ret = false; ret = false;
label_return: label_return:
malloc_mutex_unlock(tsdn, &ctl_mtx); malloc_mutex_unlock(&ctl_mtx);
return (ret); return (ret);
} }
static int static int
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
size_t *mibp, size_t *depthp) size_t *depthp)
{ {
int ret; int ret;
const char *elm, *tdot, *dot; const char *elm, *tdot, *dot;
...@@ -868,7 +850,7 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, ...@@ -868,7 +850,7 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
} }
inode = ctl_indexed_node(node->children); inode = ctl_indexed_node(node->children);
node = inode->index(tsdn, mibp, *depthp, (size_t)index); node = inode->index(mibp, *depthp, (size_t)index);
if (node == NULL) { if (node == NULL) {
ret = ENOENT; ret = ENOENT;
goto label_return; goto label_return;
...@@ -912,8 +894,8 @@ label_return: ...@@ -912,8 +894,8 @@ label_return:
} }
int int
ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
void *newp, size_t newlen) size_t newlen)
{ {
int ret; int ret;
size_t depth; size_t depth;
...@@ -921,19 +903,19 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, ...@@ -921,19 +903,19 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
size_t mib[CTL_MAX_DEPTH]; size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node; const ctl_named_node_t *node;
if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) { if (!ctl_initialized && ctl_init()) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
depth = CTL_MAX_DEPTH; depth = CTL_MAX_DEPTH;
ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); ret = ctl_lookup(name, nodes, mib, &depth);
if (ret != 0) if (ret != 0)
goto label_return; goto label_return;
node = ctl_named_node(nodes[depth-1]); node = ctl_named_node(nodes[depth-1]);
if (node != NULL && node->ctl) if (node != NULL && node->ctl)
ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
else { else {
/* The name refers to a partial path through the ctl tree. */ /* The name refers to a partial path through the ctl tree. */
ret = ENOENT; ret = ENOENT;
...@@ -944,29 +926,29 @@ label_return: ...@@ -944,29 +926,29 @@ label_return:
} }
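ctl_byname() backs the public mallctl() entry point and re-runs the name lookup on every call. An illustrative call that reads the old value and installs a new one in a single round trip, using the real thread.tcache.enabled ctl (ENOENT on builds without tcache support):

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void)
{
	bool was, enable = false;
	size_t sz = sizeof(bool);

	/* oldp/oldlenp receive the prior value; newp/newlen install
	 * the replacement, all under one name lookup. */
	if (mallctl("thread.tcache.enabled", &was, &sz, &enable, sz) == 0)
		printf("tcache was %s\n", was ? "enabled" : "disabled");
	return (0);
}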
int int
ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp) ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{ {
int ret; int ret;
if (!ctl_initialized && ctl_init(tsdn)) { if (!ctl_initialized && ctl_init()) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp); ret = ctl_lookup(name, NULL, mibp, miblenp);
label_return: label_return:
return(ret); return(ret);
} }
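ctl_nametomib() and ctl_bymib() back the public mallctlnametomib()/mallctlbymib() pair, which let callers pay for the string lookup once. A sketch of the intended hot-path usage:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void)
{
	size_t mib[3];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t allocated, sz = sizeof(size_t);
	int i;

	/* Resolve the dotted name to a MIB once... */
	if (mallctlnametomib("stats.allocated", mib, &miblen) != 0)
		return (1);
	/* ...then query by MIB repeatedly, skipping the ctl_lookup()
	 * that ctl_byname() would redo on every call. */
	for (i = 0; i < 3; i++) {
		if (mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0) == 0)
			printf("allocated=%zu\n", allocated);
	}
	return (0);
}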
int int
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
size_t *oldlenp, void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
const ctl_named_node_t *node; const ctl_named_node_t *node;
size_t i; size_t i;
if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) { if (!ctl_initialized && ctl_init()) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
...@@ -978,7 +960,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -978,7 +960,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
assert(node->nchildren > 0); assert(node->nchildren > 0);
if (ctl_named_node(node->children) != NULL) { if (ctl_named_node(node->children) != NULL) {
/* Children are named. */ /* Children are named. */
if (node->nchildren <= (unsigned)mib[i]) { if (node->nchildren <= mib[i]) {
ret = ENOENT; ret = ENOENT;
goto label_return; goto label_return;
} }
...@@ -988,7 +970,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -988,7 +970,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Indexed element. */ /* Indexed element. */
inode = ctl_indexed_node(node->children); inode = ctl_indexed_node(node->children);
node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); node = inode->index(mib, miblen, mib[i]);
if (node == NULL) { if (node == NULL) {
ret = ENOENT; ret = ENOENT;
goto label_return; goto label_return;
...@@ -998,7 +980,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -998,7 +980,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Call the ctl function. */ /* Call the ctl function. */
if (node && node->ctl) if (node && node->ctl)
ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen); ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
else { else {
/* Partial MIB. */ /* Partial MIB. */
ret = ENOENT; ret = ENOENT;
...@@ -1012,7 +994,7 @@ bool ...@@ -1012,7 +994,7 @@ bool
ctl_boot(void) ctl_boot(void)
{ {
if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL)) if (malloc_mutex_init(&ctl_mtx))
return (true); return (true);
ctl_initialized = false; ctl_initialized = false;
...@@ -1021,24 +1003,24 @@ ctl_boot(void) ...@@ -1021,24 +1003,24 @@ ctl_boot(void)
} }
void void
ctl_prefork(tsdn_t *tsdn) ctl_prefork(void)
{ {
malloc_mutex_prefork(tsdn, &ctl_mtx); malloc_mutex_prefork(&ctl_mtx);
} }
void void
ctl_postfork_parent(tsdn_t *tsdn) ctl_postfork_parent(void)
{ {
malloc_mutex_postfork_parent(tsdn, &ctl_mtx); malloc_mutex_postfork_parent(&ctl_mtx);
} }
void void
ctl_postfork_child(tsdn_t *tsdn) ctl_postfork_child(void)
{ {
malloc_mutex_postfork_child(tsdn, &ctl_mtx); malloc_mutex_postfork_child(&ctl_mtx);
} }
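These three hooks run around fork() so that no thread in the child inherits ctl_mtx in a locked state. A simplified stand-alone sketch of the same pthread_atfork() pattern (illustrative only: jemalloc's real hooks also cover the arena and base mutexes, and the child side may reinitialize mutexes rather than unlock them):

#include <pthread.h>

/* A process-wide lock standing in for ctl_mtx. */
static pthread_mutex_t demo_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Acquire before fork() so the child never snapshots the lock held
 * by a thread that does not exist on its side. */
static void
demo_prefork(void)
{
	pthread_mutex_lock(&demo_mtx);
}

static void
demo_postfork_parent(void)
{
	pthread_mutex_unlock(&demo_mtx);
}

static void
demo_postfork_child(void)
{
	pthread_mutex_unlock(&demo_mtx);
}

int main(void)
{
	return (pthread_atfork(demo_prefork, demo_postfork_parent,
	    demo_postfork_child));
}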
/******************************************************************************/ /******************************************************************************/
...@@ -1095,8 +1077,8 @@ ctl_postfork_child(tsdn_t *tsdn) ...@@ -1095,8 +1077,8 @@ ctl_postfork_child(tsdn_t *tsdn)
*/ */
#define CTL_RO_CLGEN(c, l, n, v, t) \ #define CTL_RO_CLGEN(c, l, n, v, t) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
size_t *oldlenp, void *newp, size_t newlen) \ void *newp, size_t newlen) \
{ \ { \
int ret; \ int ret; \
t oldval; \ t oldval; \
...@@ -1104,7 +1086,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ ...@@ -1104,7 +1086,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
if (!(c)) \ if (!(c)) \
return (ENOENT); \ return (ENOENT); \
if (l) \ if (l) \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ malloc_mutex_lock(&ctl_mtx); \
READONLY(); \ READONLY(); \
oldval = (v); \ oldval = (v); \
READ(oldval, t); \ READ(oldval, t); \
...@@ -1112,47 +1094,47 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ ...@@ -1112,47 +1094,47 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
if (l) \ if (l) \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ malloc_mutex_unlock(&ctl_mtx); \
return (ret); \ return (ret); \
} }
#define CTL_RO_CGEN(c, n, v, t) \ #define CTL_RO_CGEN(c, n, v, t) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
size_t *oldlenp, void *newp, size_t newlen) \ void *newp, size_t newlen) \
{ \ { \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
if (!(c)) \ if (!(c)) \
return (ENOENT); \ return (ENOENT); \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ malloc_mutex_lock(&ctl_mtx); \
READONLY(); \ READONLY(); \
oldval = (v); \ oldval = (v); \
READ(oldval, t); \ READ(oldval, t); \
\ \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ malloc_mutex_unlock(&ctl_mtx); \
return (ret); \ return (ret); \
} }
#define CTL_RO_GEN(n, v, t) \ #define CTL_RO_GEN(n, v, t) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
size_t *oldlenp, void *newp, size_t newlen) \ void *newp, size_t newlen) \
{ \ { \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ malloc_mutex_lock(&ctl_mtx); \
READONLY(); \ READONLY(); \
oldval = (v); \ oldval = (v); \
READ(oldval, t); \ READ(oldval, t); \
\ \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ malloc_mutex_unlock(&ctl_mtx); \
return (ret); \ return (ret); \
} }
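Since these generators are dense, here is roughly what a single CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) instantiation expands to, with the READONLY()/READ() helpers inlined by hand. This is a sketch against this file's own declarations, not the literal preprocessor output (the real READ() tolerates short buffers while still returning EINVAL):

static int
stats_arenas_i_pactive_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	size_t oldval;

	malloc_mutex_lock(&ctl_mtx);
	/* READONLY(): reject any attempt to write. */
	if (newp != NULL || newlen != 0) {
		ret = EPERM;
		goto label_return;
	}
	oldval = ctl_stats.arenas[mib[2]].pactive;
	/* READ(): validate the output buffer and copy the value out. */
	if (oldp != NULL && oldlenp != NULL) {
		if (*oldlenp != sizeof(size_t)) {
			ret = EINVAL;
			goto label_return;
		}
		*(size_t *)oldp = oldval;
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}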
...@@ -1162,8 +1144,8 @@ label_return: \ ...@@ -1162,8 +1144,8 @@ label_return: \
*/ */
#define CTL_RO_NL_CGEN(c, n, v, t) \ #define CTL_RO_NL_CGEN(c, n, v, t) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
size_t *oldlenp, void *newp, size_t newlen) \ void *newp, size_t newlen) \
{ \ { \
int ret; \ int ret; \
t oldval; \ t oldval; \
...@@ -1181,8 +1163,8 @@ label_return: \ ...@@ -1181,8 +1163,8 @@ label_return: \
#define CTL_RO_NL_GEN(n, v, t) \ #define CTL_RO_NL_GEN(n, v, t) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
size_t *oldlenp, void *newp, size_t newlen) \ void *newp, size_t newlen) \
{ \ { \
int ret; \ int ret; \
t oldval; \ t oldval; \
...@@ -1198,15 +1180,17 @@ label_return: \ ...@@ -1198,15 +1180,17 @@ label_return: \
#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ #define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
size_t *oldlenp, void *newp, size_t newlen) \ void *newp, size_t newlen) \
{ \ { \
int ret; \ int ret; \
t oldval; \ t oldval; \
tsd_t *tsd; \
\ \
if (!(c)) \ if (!(c)) \
return (ENOENT); \ return (ENOENT); \
READONLY(); \ READONLY(); \
tsd = tsd_fetch(); \
oldval = (m(tsd)); \ oldval = (m(tsd)); \
READ(oldval, t); \ READ(oldval, t); \
\ \
...@@ -1215,17 +1199,17 @@ label_return: \ ...@@ -1215,17 +1199,17 @@ label_return: \
return (ret); \ return (ret); \
} }
#define CTL_RO_CONFIG_GEN(n, t) \ #define CTL_RO_BOOL_CONFIG_GEN(n) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
size_t *oldlenp, void *newp, size_t newlen) \ void *newp, size_t newlen) \
{ \ { \
int ret; \ int ret; \
t oldval; \ bool oldval; \
\ \
READONLY(); \ READONLY(); \
oldval = n; \ oldval = n; \
READ(oldval, t); \ READ(oldval, bool); \
\ \
ret = 0; \ ret = 0; \
label_return: \ label_return: \
...@@ -1237,51 +1221,48 @@ label_return: \ ...@@ -1237,51 +1221,48 @@ label_return: \
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int static int
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
size_t *oldlenp, void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
UNUSED uint64_t newval; UNUSED uint64_t newval;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_lock(&ctl_mtx);
WRITE(newval, uint64_t); WRITE(newval, uint64_t);
if (newp != NULL) if (newp != NULL)
ctl_refresh(tsd_tsdn(tsd)); ctl_refresh();
READ(ctl_epoch, uint64_t); READ(ctl_epoch, uint64_t);
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_unlock(&ctl_mtx);
return (ret); return (ret);
} }
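epoch_ctl() is the only writer that drives ctl_refresh(), which is why jemalloc's stats protocol is "bump epoch, then read". A minimal sketch:

#include <inttypes.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void)
{
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);

	/* Any write to "epoch" runs ctl_refresh(); the readback is
	 * the incremented ctl_epoch counter. */
	if (mallctl("epoch", &epoch, &sz, &epoch, sz) == 0)
		printf("epoch is now %" PRIu64 "\n", epoch);
	return (0);
}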
/******************************************************************************/ /******************************************************************************/
CTL_RO_CONFIG_GEN(config_cache_oblivious, bool) CTL_RO_BOOL_CONFIG_GEN(config_cache_oblivious)
CTL_RO_CONFIG_GEN(config_debug, bool) CTL_RO_BOOL_CONFIG_GEN(config_debug)
CTL_RO_CONFIG_GEN(config_fill, bool) CTL_RO_BOOL_CONFIG_GEN(config_fill)
CTL_RO_CONFIG_GEN(config_lazy_lock, bool) CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
CTL_RO_CONFIG_GEN(config_malloc_conf, const char *) CTL_RO_BOOL_CONFIG_GEN(config_munmap)
CTL_RO_CONFIG_GEN(config_munmap, bool) CTL_RO_BOOL_CONFIG_GEN(config_prof)
CTL_RO_CONFIG_GEN(config_prof, bool) CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool) CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool) CTL_RO_BOOL_CONFIG_GEN(config_stats)
CTL_RO_CONFIG_GEN(config_stats, bool) CTL_RO_BOOL_CONFIG_GEN(config_tcache)
CTL_RO_CONFIG_GEN(config_tcache, bool) CTL_RO_BOOL_CONFIG_GEN(config_tls)
CTL_RO_CONFIG_GEN(config_tls, bool) CTL_RO_BOOL_CONFIG_GEN(config_utrace)
CTL_RO_CONFIG_GEN(config_utrace, bool) CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
CTL_RO_CONFIG_GEN(config_valgrind, bool) CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
CTL_RO_CONFIG_GEN(config_xmalloc, bool)
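Each generated config.* ctl exposes one compile-time feature flag, so callers can probe the build before relying on optional behavior. For example, assuming only the public header:

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void)
{
	bool stats;
	size_t sz = sizeof(bool);

	/* False means jemalloc was built without --enable-stats and
	 * the stats.* ctls will not report meaningful values. */
	if (mallctl("config.stats", &stats, &sz, NULL, 0) == 0 && !stats)
		fprintf(stderr, "built without --enable-stats\n");
	return (0);
}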
/******************************************************************************/ /******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool) CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
...@@ -1306,18 +1287,20 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) ...@@ -1306,18 +1287,20 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
/******************************************************************************/ /******************************************************************************/
static int static int
thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
size_t *oldlenp, void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
tsd_t *tsd;
arena_t *oldarena; arena_t *oldarena;
unsigned newind, oldind; unsigned newind, oldind;
tsd = tsd_fetch();
oldarena = arena_choose(tsd, NULL); oldarena = arena_choose(tsd, NULL);
if (oldarena == NULL) if (oldarena == NULL)
return (EAGAIN); return (EAGAIN);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_lock(&ctl_mtx);
newind = oldind = oldarena->ind; newind = oldind = oldarena->ind;
WRITE(newind, unsigned); WRITE(newind, unsigned);
READ(oldind, unsigned); READ(oldind, unsigned);
...@@ -1331,7 +1314,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -1331,7 +1314,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
} }
/* Initialize arena if necessary. */ /* Initialize arena if necessary. */
newarena = arena_get(tsd_tsdn(tsd), newind, true); newarena = arena_get(tsd, newind, true, true);
if (newarena == NULL) { if (newarena == NULL) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
...@@ -1341,15 +1324,15 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -1341,15 +1324,15 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (config_tcache) { if (config_tcache) {
tcache_t *tcache = tsd_tcache_get(tsd); tcache_t *tcache = tsd_tcache_get(tsd);
if (tcache != NULL) { if (tcache != NULL) {
tcache_arena_reassociate(tsd_tsdn(tsd), tcache, tcache_arena_reassociate(tcache, oldarena,
oldarena, newarena); newarena);
} }
} }
} }
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_unlock(&ctl_mtx);
return (ret); return (ret);
} }
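thread_arena_ctl() backs the thread.arena mallctl: reading returns the calling thread's arena index, and writing rebinds the thread (reassociating its tcache when one exists). An illustrative rebind:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void)
{
	unsigned old, target = 0;
	size_t sz = sizeof(unsigned);

	/* Rebind the calling thread to arena 0; the prior index comes
	 * back through oldp. */
	if (mallctl("thread.arena", &old, &sz, &target, sz) == 0)
		printf("arena %u -> %u\n", old, target);
	return (0);
}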
...@@ -1363,8 +1346,8 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, ...@@ -1363,8 +1346,8 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
tsd_thread_deallocatedp_get, uint64_t *) tsd_thread_deallocatedp_get, uint64_t *)
static int static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
bool oldval; bool oldval;
...@@ -1388,8 +1371,8 @@ label_return: ...@@ -1388,8 +1371,8 @@ label_return:
} }
static int static int
thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
...@@ -1407,7 +1390,7 @@ label_return: ...@@ -1407,7 +1390,7 @@ label_return:
} }
static int static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
...@@ -1418,16 +1401,20 @@ thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -1418,16 +1401,20 @@ thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
READ_XOR_WRITE(); READ_XOR_WRITE();
if (newp != NULL) { if (newp != NULL) {
tsd_t *tsd;
if (newlen != sizeof(const char *)) { if (newlen != sizeof(const char *)) {
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
tsd = tsd_fetch();
if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
0) 0)
goto label_return; goto label_return;
} else { } else {
const char *oldname = prof_thread_name_get(tsd); const char *oldname = prof_thread_name_get();
READ(oldname, const char *); READ(oldname, const char *);
} }
...@@ -1437,7 +1424,7 @@ label_return: ...@@ -1437,7 +1424,7 @@ label_return:
} }
static int static int
thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
...@@ -1446,13 +1433,13 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -1446,13 +1433,13 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (!config_prof) if (!config_prof)
return (ENOENT); return (ENOENT);
oldval = prof_thread_active_get(tsd); oldval = prof_thread_active_get();
if (newp != NULL) { if (newp != NULL) {
if (newlen != sizeof(bool)) { if (newlen != sizeof(bool)) {
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
if (prof_thread_active_set(tsd, *(bool *)newp)) { if (prof_thread_active_set(*(bool *)newp)) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
...@@ -1467,16 +1454,19 @@ label_return: ...@@ -1467,16 +1454,19 @@ label_return:
/******************************************************************************/ /******************************************************************************/
static int static int
tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
size_t *oldlenp, void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
tsd_t *tsd;
unsigned tcache_ind; unsigned tcache_ind;
if (!config_tcache) if (!config_tcache)
return (ENOENT); return (ENOENT);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); tsd = tsd_fetch();
malloc_mutex_lock(&ctl_mtx);
READONLY(); READONLY();
if (tcaches_create(tsd, &tcache_ind)) { if (tcaches_create(tsd, &tcache_ind)) {
ret = EFAULT; ret = EFAULT;
...@@ -1486,20 +1476,23 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -1486,20 +1476,23 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_unlock(&ctl_mtx);
return (ret); return (ret);
} }
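Together these ctls manage explicit thread caches: tcache.create hands back an index usable with MALLOCX_TCACHE(), and tcache.flush/tcache.destroy take that index through newp. A sketch of the full lifecycle:

#include <jemalloc/jemalloc.h>

int main(void)
{
	unsigned tc;
	size_t sz = sizeof(unsigned);
	void *p;

	/* tcache_create_ctl(): returns a cache index through oldp. */
	if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
		return (1);
	/* Route an allocation through the explicit cache. */
	p = mallocx(64, MALLOCX_TCACHE(tc));
	if (p != NULL)
		dallocx(p, MALLOCX_TCACHE(tc));
	/* tcache_flush_ctl()/tcache_destroy_ctl() take the index via
	 * newp. */
	mallctl("tcache.flush", NULL, NULL, &tc, sz);
	mallctl("tcache.destroy", NULL, NULL, &tc, sz);
	return (0);
}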
static int static int
tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
size_t *oldlenp, void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
tsd_t *tsd;
unsigned tcache_ind; unsigned tcache_ind;
if (!config_tcache) if (!config_tcache)
return (ENOENT); return (ENOENT);
tsd = tsd_fetch();
WRITEONLY(); WRITEONLY();
tcache_ind = UINT_MAX; tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned); WRITE(tcache_ind, unsigned);
...@@ -1515,15 +1508,18 @@ label_return: ...@@ -1515,15 +1508,18 @@ label_return:
} }
static int static int
tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
tsd_t *tsd;
unsigned tcache_ind; unsigned tcache_ind;
if (!config_tcache) if (!config_tcache)
return (ENOENT); return (ENOENT);
tsd = tsd_fetch();
WRITEONLY(); WRITEONLY();
tcache_ind = UINT_MAX; tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned); WRITE(tcache_ind, unsigned);
...@@ -1540,105 +1536,48 @@ label_return: ...@@ -1540,105 +1536,48 @@ label_return:
/******************************************************************************/ /******************************************************************************/
/* ctl_mutex must be held during execution of this function. */
static void static void
arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all) arena_purge(unsigned arena_ind)
{ {
tsd_t *tsd;
unsigned i;
bool refreshed;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
malloc_mutex_lock(tsdn, &ctl_mtx); tsd = tsd_fetch();
{ for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
unsigned narenas = ctl_stats.narenas; tarenas[i] = arena_get(tsd, i, false, false);
if (tarenas[i] == NULL && !refreshed) {
if (arena_ind == narenas) { tarenas[i] = arena_get(tsd, i, false, true);
unsigned i; refreshed = true;
VARIABLE_ARRAY(arena_t *, tarenas, narenas);
for (i = 0; i < narenas; i++)
tarenas[i] = arena_get(tsdn, i, false);
/*
* No further need to hold ctl_mtx, since narenas and
* tarenas contain everything needed below.
*/
malloc_mutex_unlock(tsdn, &ctl_mtx);
for (i = 0; i < narenas; i++) {
if (tarenas[i] != NULL)
arena_purge(tsdn, tarenas[i], all);
}
} else {
arena_t *tarena;
assert(arena_ind < narenas);
tarena = arena_get(tsdn, arena_ind, false);
/* No further need to hold ctl_mtx. */
malloc_mutex_unlock(tsdn, &ctl_mtx);
if (tarena != NULL)
arena_purge(tsdn, tarena, all);
} }
} }
}
static int
arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
READONLY(); if (arena_ind == ctl_stats.narenas) {
WRITEONLY(); unsigned i;
arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true); for (i = 0; i < ctl_stats.narenas; i++) {
if (tarenas[i] != NULL)
ret = 0; arena_purge_all(tarenas[i]);
label_return: }
return (ret); } else {
} assert(arena_ind < ctl_stats.narenas);
if (tarenas[arena_ind] != NULL)
static int arena_purge_all(tarenas[arena_ind]);
arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, }
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
READONLY();
WRITEONLY();
arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false);
ret = 0;
label_return:
return (ret);
} }
static int static int
arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
size_t *oldlenp, void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
unsigned arena_ind;
arena_t *arena;
READONLY(); READONLY();
WRITEONLY(); WRITEONLY();
malloc_mutex_lock(&ctl_mtx);
if ((config_valgrind && unlikely(in_valgrind)) || (config_fill && arena_purge(mib[1]);
unlikely(opt_quarantine))) { malloc_mutex_unlock(&ctl_mtx);
ret = EFAULT;
goto label_return;
}
arena_ind = (unsigned)mib[1];
if (config_debug) {
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
assert(arena_ind < ctl_stats.narenas);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
}
assert(arena_ind >= opt_narenas);
arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
arena_reset(tsd, arena);
ret = 0; ret = 0;
label_return: label_return:
...@@ -1646,16 +1585,16 @@ label_return: ...@@ -1646,16 +1585,16 @@ label_return:
} }
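arena_i_purge_ctl() accepts either a concrete arena index or arenas.narenas to mean every arena. An illustrative purge-everything call built on that convention:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void)
{
	unsigned narenas;
	size_t sz = sizeof(unsigned);
	char name[64];

	if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) != 0)
		return (1);
	/* Index == narenas means "all arenas"; a smaller index would
	 * purge only that arena. */
	snprintf(name, sizeof(name), "arena.%u.purge", narenas);
	return (mallctl(name, NULL, NULL, NULL, 0) != 0);
}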
static int static int
arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
size_t *oldlenp, void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
const char *dss = NULL; const char *dss = NULL;
unsigned arena_ind = (unsigned)mib[1]; unsigned arena_ind = mib[1];
dss_prec_t dss_prec_old = dss_prec_limit; dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit; dss_prec_t dss_prec = dss_prec_limit;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_lock(&ctl_mtx);
WRITE(dss, const char *); WRITE(dss, const char *);
if (dss != NULL) { if (dss != NULL) {
int i; int i;
...@@ -1676,13 +1615,13 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -1676,13 +1615,13 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
} }
if (arena_ind < ctl_stats.narenas) { if (arena_ind < ctl_stats.narenas) {
arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true);
if (arena == NULL || (dss_prec != dss_prec_limit && if (arena == NULL || (dss_prec != dss_prec_limit &&
arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) { arena_dss_prec_set(arena, dss_prec))) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena); dss_prec_old = arena_dss_prec_get(arena);
} else { } else {
if (dss_prec != dss_prec_limit && if (dss_prec != dss_prec_limit &&
chunk_dss_prec_set(dss_prec)) { chunk_dss_prec_set(dss_prec)) {
...@@ -1697,61 +1636,26 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -1697,61 +1636,26 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_unlock(&ctl_mtx);
return (ret); return (ret);
} }
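The same ctl serves reads and writes of an arena's dss (sbrk) precedence; a read-only sketch:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void)
{
	const char *dss;
	size_t sz = sizeof(const char *);

	/* Returns "disabled", "primary", or "secondary". */
	if (mallctl("arena.0.dss", &dss, &sz, NULL, 0) == 0)
		printf("arena 0 dss: %s\n", dss);
	return (0);
}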
static int static int
arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned arena_ind = (unsigned)mib[1];
arena_t *arena;
arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena);
READ(oldval, ssize_t);
}
if (newp != NULL) {
if (newlen != sizeof(ssize_t)) {
ret = EINVAL;
goto label_return;
}
if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena,
*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
}
ret = 0;
label_return:
return (ret);
}
static int
arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
unsigned arena_ind = (unsigned)mib[1]; unsigned arena_ind = mib[1];
arena_t *arena; arena_t *arena;
arena = arena_get(tsd_tsdn(tsd), arena_ind, false); arena = arena_get(tsd_fetch(), arena_ind, false, true);
if (arena == NULL) { if (arena == NULL) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
if (oldp != NULL && oldlenp != NULL) { if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena); size_t oldval = arena_lg_dirty_mult_get(arena);
READ(oldval, ssize_t); READ(oldval, ssize_t);
} }
if (newp != NULL) { if (newp != NULL) {
...@@ -1759,8 +1663,7 @@ arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -1759,8 +1663,7 @@ arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
if (arena_decay_time_set(tsd_tsdn(tsd), arena, if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) {
*(ssize_t *)newp)) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
...@@ -1772,25 +1675,24 @@ label_return: ...@@ -1772,25 +1675,24 @@ label_return:
} }
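arena.<i>.lg_dirty_mult bounds unpurged dirty pages at active >> lg_dirty_mult, with -1 disabling purging. A write-only sketch that tightens arena 0 to active/32 (the value 5 here is just an example):

#include <sys/types.h>
#include <jemalloc/jemalloc.h>

int main(void)
{
	ssize_t mult = 5;	/* example: cap dirty pages at active/32 */

	/* Larger values purge more aggressively; -1 would disable
	 * purging for the arena entirely. */
	return (mallctl("arena.0.lg_dirty_mult", NULL, NULL, &mult,
	    sizeof(ssize_t)) != 0);
}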
static int static int
arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
unsigned arena_ind = (unsigned)mib[1]; unsigned arena_ind = mib[1];
arena_t *arena; arena_t *arena;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_lock(&ctl_mtx);
if (arena_ind < narenas_total_get() && (arena = if (arena_ind < narenas_total_get() && (arena =
arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) {
if (newp != NULL) { if (newp != NULL) {
chunk_hooks_t old_chunk_hooks, new_chunk_hooks; chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
WRITE(new_chunk_hooks, chunk_hooks_t); WRITE(new_chunk_hooks, chunk_hooks_t);
old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena, old_chunk_hooks = chunk_hooks_set(arena,
&new_chunk_hooks); &new_chunk_hooks);
READ(old_chunk_hooks, chunk_hooks_t); READ(old_chunk_hooks, chunk_hooks_t);
} else { } else {
chunk_hooks_t old_chunk_hooks = chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena);
chunk_hooks_get(tsd_tsdn(tsd), arena);
READ(old_chunk_hooks, chunk_hooks_t); READ(old_chunk_hooks, chunk_hooks_t);
} }
} else { } else {
...@@ -1799,16 +1701,16 @@ arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, ...@@ -1799,16 +1701,16 @@ arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
} }
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_unlock(&ctl_mtx);
return (ret); return (ret);
} }
static const ctl_named_node_t * static const ctl_named_node_t *
arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) arena_i_index(const size_t *mib, size_t miblen, size_t i)
{ {
const ctl_named_node_t *ret; const ctl_named_node_t * ret;
malloc_mutex_lock(tsdn, &ctl_mtx); malloc_mutex_lock(&ctl_mtx);
if (i > ctl_stats.narenas) { if (i > ctl_stats.narenas) {
ret = NULL; ret = NULL;
goto label_return; goto label_return;
...@@ -1816,20 +1718,20 @@ arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) ...@@ -1816,20 +1718,20 @@ arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
ret = super_arena_i_node; ret = super_arena_i_node;
label_return: label_return:
malloc_mutex_unlock(tsdn, &ctl_mtx); malloc_mutex_unlock(&ctl_mtx);
return (ret); return (ret);
} }
/******************************************************************************/ /******************************************************************************/
static int static int
arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
unsigned narenas; unsigned narenas;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_lock(&ctl_mtx);
READONLY(); READONLY();
if (*oldlenp != sizeof(unsigned)) { if (*oldlenp != sizeof(unsigned)) {
ret = EINVAL; ret = EINVAL;
...@@ -1840,23 +1742,23 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -1840,23 +1742,23 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_unlock(&ctl_mtx);
return (ret); return (ret);
} }
static int static int
arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
unsigned nread, i; unsigned nread, i;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_lock(&ctl_mtx);
READONLY(); READONLY();
if (*oldlenp != ctl_stats.narenas * sizeof(bool)) { if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
ret = EINVAL; ret = EINVAL;
nread = (*oldlenp < ctl_stats.narenas * sizeof(bool)) nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
? (unsigned)(*oldlenp / sizeof(bool)) : ctl_stats.narenas; ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
} else { } else {
ret = 0; ret = 0;
nread = ctl_stats.narenas; nread = ctl_stats.narenas;
...@@ -1866,13 +1768,13 @@ arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -1866,13 +1768,13 @@ arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
label_return: label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_unlock(&ctl_mtx);
return (ret); return (ret);
} }
static int static int
arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
...@@ -1896,32 +1798,6 @@ label_return: ...@@ -1896,32 +1798,6 @@ label_return:
return (ret); return (ret);
} }
static int
arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_decay_time_default_get();
READ(oldval, ssize_t);
}
if (newp != NULL) {
if (newlen != sizeof(ssize_t)) {
ret = EINVAL;
goto label_return;
}
if (arena_decay_time_default_set(*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
}
ret = 0;
label_return:
return (ret);
}
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t) CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
...@@ -1931,7 +1807,7 @@ CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) ...@@ -1931,7 +1807,7 @@ CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{ {
if (i > NBINS) if (i > NBINS)
...@@ -1940,9 +1816,9 @@ arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) ...@@ -1940,9 +1816,9 @@ arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
} }
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned) CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t) CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{ {
if (i > nlclasses) if (i > nlclasses)
...@@ -1951,10 +1827,9 @@ arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) ...@@ -1951,10 +1827,9 @@ arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
} }
CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned) CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]), CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t)
size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
{ {
if (i > nhclasses) if (i > nhclasses)
...@@ -1963,15 +1838,15 @@ arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) ...@@ -1963,15 +1838,15 @@ arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
} }
static int static int
arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
size_t *oldlenp, void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
unsigned narenas; unsigned narenas;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_lock(&ctl_mtx);
READONLY(); READONLY();
if (ctl_grow(tsd_tsdn(tsd))) { if (ctl_grow()) {
ret = EAGAIN; ret = EAGAIN;
goto label_return; goto label_return;
} }
...@@ -1980,15 +1855,15 @@ arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -1980,15 +1855,15 @@ arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); malloc_mutex_unlock(&ctl_mtx);
return (ret); return (ret);
} }
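ctl_grow() is surfaced as arenas.extend, which appends one arena and returns its index, typically for use with MALLOCX_ARENA(). A sketch:

#include <jemalloc/jemalloc.h>

int main(void)
{
	unsigned ind;
	size_t sz = sizeof(unsigned);
	void *p;

	/* ctl_grow() appends an arena; its index comes back via oldp. */
	if (mallctl("arenas.extend", &ind, &sz, NULL, 0) != 0)
		return (1);
	/* Allocate from the new arena explicitly. */
	p = mallocx(4096, MALLOCX_ARENA(ind));
	if (p != NULL)
		dallocx(p, 0);
	return (0);
}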
/******************************************************************************/ /******************************************************************************/
static int static int
prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) size_t *oldlenp, void *newp, size_t newlen)
{ {
int ret; int ret;
bool oldval; bool oldval;
...@@ -2001,10 +1876,9 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, ...@@ -2001,10 +1876,9 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
oldval = prof_thread_active_init_set(tsd_tsdn(tsd), oldval = prof_thread_active_init_set(*(bool *)newp);
*(bool *)newp);
} else } else
oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); oldval = prof_thread_active_init_get();
READ(oldval, bool); READ(oldval, bool);
ret = 0; ret = 0;
...@@ -2013,8 +1887,8 @@ label_return: ...@@ -2013,8 +1887,8 @@ label_return:
} }
static int static int
prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
size_t *oldlenp, void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
bool oldval; bool oldval;
...@@ -2027,9 +1901,9 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -2027,9 +1901,9 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp); oldval = prof_active_set(*(bool *)newp);
} else } else
oldval = prof_active_get(tsd_tsdn(tsd)); oldval = prof_active_get();
READ(oldval, bool); READ(oldval, bool);
ret = 0; ret = 0;
...@@ -2038,8 +1912,8 @@ label_return: ...@@ -2038,8 +1912,8 @@ label_return:
} }
static int static int
prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
size_t *oldlenp, void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
const char *filename = NULL; const char *filename = NULL;
...@@ -2050,7 +1924,7 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -2050,7 +1924,7 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
WRITEONLY(); WRITEONLY();
WRITE(filename, const char *); WRITE(filename, const char *);
if (prof_mdump(tsd, filename)) { if (prof_mdump(filename)) {
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
} }
...@@ -2061,8 +1935,8 @@ label_return: ...@@ -2061,8 +1935,8 @@ label_return:
} }
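prof_dump_ctl() is write-only: passing a filename through newp triggers a heap-profile dump, while a NULL filename lets jemalloc pick a sequenced <prefix>.<pid>.<seq>.m<mseq>.heap name. A sketch with a hypothetical output path, meaningful only on builds with --enable-prof and profiling active:

#include <jemalloc/jemalloc.h>

int main(void)
{
	/* Hypothetical output path for illustration. */
	const char *path = "/tmp/jeprof.out";

	/* Returns ENOENT without --enable-prof, EFAULT if the dump
	 * itself fails. */
	return (mallctl("prof.dump", NULL, NULL, &path,
	    sizeof(const char *)) != 0);
}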
static int static int
prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
size_t *oldlenp, void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
bool oldval; bool oldval;
...@@ -2075,9 +1949,9 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -2075,9 +1949,9 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL; ret = EINVAL;
goto label_return; goto label_return;
} }
oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp); oldval = prof_gdump_set(*(bool *)newp);
} else } else
oldval = prof_gdump_get(tsd_tsdn(tsd)); oldval = prof_gdump_get();
READ(oldval, bool); READ(oldval, bool);
ret = 0; ret = 0;
...@@ -2086,11 +1960,12 @@ label_return: ...@@ -2086,11 +1960,12 @@ label_return:
} }
static int static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
size_t *oldlenp, void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
size_t lg_sample = lg_prof_sample; size_t lg_sample = lg_prof_sample;
tsd_t *tsd;
if (!config_prof) if (!config_prof)
return (ENOENT); return (ENOENT);
...@@ -2100,6 +1975,8 @@ prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ...@@ -2100,6 +1975,8 @@ prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (lg_sample >= (sizeof(uint64_t) << 3)) if (lg_sample >= (sizeof(uint64_t) << 3))
lg_sample = (sizeof(uint64_t) << 3) - 1; lg_sample = (sizeof(uint64_t) << 3) - 1;
tsd = tsd_fetch();
prof_reset(tsd, lg_sample); prof_reset(tsd, lg_sample);
ret = 0; ret = 0;
...@@ -2118,20 +1995,15 @@ CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) ...@@ -2118,20 +1995,15 @@ CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t) CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t) CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_retained, ctl_stats.retained, size_t)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult, CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
ssize_t) ssize_t)
CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time,
ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
ctl_stats.arenas[mib[2]].astats.mapped, size_t) ctl_stats.arenas[mib[2]].astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
ctl_stats.arenas[mib[2]].astats.retained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
...@@ -2188,8 +2060,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, ...@@ -2188,8 +2060,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
size_t j)
{ {
if (j > NBINS) if (j > NBINS)
...@@ -2207,8 +2078,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, ...@@ -2207,8 +2078,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
size_t j)
{ {
if (j > nlclasses) if (j > nlclasses)
...@@ -2227,8 +2097,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks, ...@@ -2227,8 +2097,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t) ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j)
size_t j)
{ {
if (j > nhclasses) if (j > nhclasses)
...@@ -2237,11 +2106,11 @@ stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, ...@@ -2237,11 +2106,11 @@ stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
} }
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{ {
const ctl_named_node_t * ret; const ctl_named_node_t * ret;
malloc_mutex_lock(tsdn, &ctl_mtx); malloc_mutex_lock(&ctl_mtx);
if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) { if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
ret = NULL; ret = NULL;
goto label_return; goto label_return;
...@@ -2249,6 +2118,6 @@ stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) ...@@ -2249,6 +2118,6 @@ stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
ret = super_stats_arenas_i_node; ret = super_stats_arenas_i_node;
label_return: label_return:
malloc_mutex_unlock(tsdn, &ctl_mtx); malloc_mutex_unlock(&ctl_mtx);
return (ret); return (ret);
} }
...@@ -3,48 +3,45 @@ ...@@ -3,48 +3,45 @@
/******************************************************************************/ /******************************************************************************/
/*
* Round down to the nearest chunk size that can actually be requested during
* normal huge allocation.
*/
JEMALLOC_INLINE_C size_t JEMALLOC_INLINE_C size_t
extent_quantize(size_t size) extent_quantize(size_t size)
{ {
size_t ret;
szind_t ind;
assert(size > 0); /*
* Round down to the nearest chunk size that can actually be requested
ind = size2index(size + 1); * during normal huge allocation.
if (ind == 0) { */
/* Avoid underflow. */ return (index2size(size2index(size + 1) - 1));
return (index2size(0));
}
ret = index2size(ind - 1);
assert(ret <= size);
return (ret);
} }
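This hunk, from jemalloc's src/extent.c, reverts extent_quantize() to the form without the explicit underflow guard: it floors a size to the largest size class not exceeding it via index2size(size2index(size + 1) - 1). A stand-alone toy analogue with a hypothetical size-class table (not jemalloc's real classes) to show the arithmetic:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical size-class table standing in for index2size() and
 * size2index(); jemalloc's real classes differ. */
static const size_t classes[] = {4096, 8192, 12288, 16384};
#define NCLASSES (sizeof(classes) / sizeof(classes[0]))

/* Smallest index whose class can hold size (size2index analogue). */
static size_t
toy_size2index(size_t size)
{
	size_t i;

	for (i = 0; i < NCLASSES && classes[i] < size; i++)
		;
	return (i);
}

/* index2size(size2index(size + 1) - 1): the class one below the class
 * needed for size + 1, i.e. the largest class <= size. The guard the
 * 4.4.0 version added protects the ind == 0 case, where this index
 * arithmetic underflows. */
static size_t
toy_quantize(size_t size)
{
	return (classes[toy_size2index(size + 1) - 1]);
}

int main(void)
{
	/* 10000 floors to 8192; an exact class floors to itself. */
	printf("%zu %zu\n", toy_quantize(10000), toy_quantize(8192));
	return (0);
}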
JEMALLOC_INLINE_C int JEMALLOC_INLINE_C int
extent_sz_comp(const extent_node_t *a, const extent_node_t *b) extent_szad_comp(extent_node_t *a, extent_node_t *b)
{ {
int ret;
size_t a_qsize = extent_quantize(extent_node_size_get(a)); size_t a_qsize = extent_quantize(extent_node_size_get(a));
size_t b_qsize = extent_quantize(extent_node_size_get(b)); size_t b_qsize = extent_quantize(extent_node_size_get(b));
return ((a_qsize > b_qsize) - (a_qsize < b_qsize)); /*
} * Compare based on quantized size rather than size, in order to sort
* equally useful extents only by address.
*/
ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
if (ret == 0) {
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
JEMALLOC_INLINE_C int ret = (a_addr > b_addr) - (a_addr < b_addr);
extent_sn_comp(const extent_node_t *a, const extent_node_t *b) }
{
size_t a_sn = extent_node_sn_get(a);
size_t b_sn = extent_node_sn_get(b);
return ((a_sn > b_sn) - (a_sn < b_sn)); return (ret);
} }
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
extent_szad_comp)
JEMALLOC_INLINE_C int JEMALLOC_INLINE_C int
extent_ad_comp(const extent_node_t *a, const extent_node_t *b) extent_ad_comp(extent_node_t *a, extent_node_t *b)
{ {
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
...@@ -52,26 +49,5 @@ extent_ad_comp(const extent_node_t *a, const extent_node_t *b) ...@@ -52,26 +49,5 @@ extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
return ((a_addr > b_addr) - (a_addr < b_addr)); return ((a_addr > b_addr) - (a_addr < b_addr));
} }
JEMALLOC_INLINE_C int
extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b)
{
int ret;
ret = extent_sz_comp(a, b);
if (ret != 0)
return (ret);
ret = extent_sn_comp(a, b);
if (ret != 0)
return (ret);
ret = extent_ad_comp(a, b);
return (ret);
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link,
extent_szsnad_comp)
/* Generate red-black tree functions. */ /* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp) rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
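The comparators above rely on the (a > b) - (a < b) idiom, which yields -1/0/1 without the overflow risk of plain subtraction, and the szad order is size-major with address as tie-breaker so equally sized extents stay address-sorted. A self-contained sketch of the same two-key pattern (toy types, not jemalloc's):

#include <stddef.h>
#include <stdint.h>

/* Toy stand-in for extent_node_t and its accessors. */
struct toy_extent {
	size_t size;
	void *addr;
};

/* Branchless three-way compare yielding -1, 0, or 1; subtracting the
 * operands instead could overflow int. */
static int
cmp3(uintptr_t a, uintptr_t b)
{
	return ((a > b) - (a < b));
}

/* Size-major, address-minor total order: equal-size extents fall back
 * to address order, as in the restored extent_szad_comp(). */
static int
toy_szad_comp(const struct toy_extent *a, const struct toy_extent *b)
{
	int ret = cmp3(a->size, b->size);

	if (ret == 0)
		ret = cmp3((uintptr_t)a->addr, (uintptr_t)b->addr);
	return (ret);
}

int main(void)
{
	struct toy_extent x = {4096, (void *)0x1000};
	struct toy_extent y = {4096, (void *)0x2000};

	return (toy_szad_comp(&x, &y) < 0 ? 0 : 1);	/* x sorts first */
}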