Commit 36c1acc2 authored by antirez

Jemalloc updated to 4.4.0.

The original jemalloc source tree was modified to:

1. Remove the configure error that prevents nested builds.
2. Insert the Redis-private jemalloc API needed by the Redis active
defragmentation function.
parent 37b4c954
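
For context on modification 2: Redis's active defragmentation decides whether to move an allocation by asking the allocator how densely used the memory behind a pointer is. The sketch below illustrates that idea only; the hook name je_get_defrag_hint and its signature are assumptions for illustration, not necessarily the exact private API this commit inserts.

/* Assumed private hook: reports utilization of the bin and of the specific
 * run backing ptr (small allocations only). Name and signature are
 * illustrative, not confirmed by this commit. */
int je_get_defrag_hint(void *ptr, int *bin_util, int *run_util);

/* Move ptr when its run is emptier than its bin on average: reallocating it
 * helps drain that run so whole pages can be returned to the OS. */
static int defrag_should_move(void *ptr) {
    int bin_util, run_util;
    if (!je_get_defrag_hint(ptr, &bin_util, &run_util))
        return 0;  /* not eligible (e.g. a large/huge allocation) */
    return run_util < bin_util;
}
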
#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
#define MSVC_COMPAT_WINDOWS_EXTRA_H
-#ifndef ENOENT
-# define ENOENT ERROR_PATH_NOT_FOUND
-#endif
-#ifndef EINVAL
-# define EINVAL ERROR_BAD_ARGUMENTS
-#endif
-#ifndef EAGAIN
-# define EAGAIN ERROR_OUTOFMEMORY
-#endif
-#ifndef EPERM
-# define EPERM ERROR_WRITE_FAULT
-#endif
-#ifndef EFAULT
-# define EFAULT ERROR_INVALID_ADDRESS
-#endif
-#ifndef ENOMEM
-# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
-#endif
-#ifndef ERANGE
-# define ERANGE ERROR_INVALID_DATA
-#endif
+#include <errno.h>
#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
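
VS2015's C runtime already defines these errno constants, so the ERROR_* fallbacks only matter on toolchains that lack them. A minimal hedged sketch of errno-style use with the je_-prefixed API this MSVC build exposes (hypothetical example file, not part of the commit):

/* errno_demo.c — hypothetical example. ENOMEM comes from <errno.h> on
 * VS2015; msvc_compat only supplies fallbacks where a toolchain lacks it. */
#include <errno.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int alloc_or_errno(size_t n, void **out) {
    void *p = je_malloc(n);
    if (p == NULL)
        return ENOMEM;  /* defined by <errno.h> or by the fallback above */
    *out = p;
    return 0;
}
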
@@ -6,7 +6,7 @@ install_suffix=@install_suffix@
Name: jemalloc
Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support.
-URL: http://www.canonware.com/jemalloc
+URL: http://jemalloc.net/
Version: @jemalloc_version@
Cflags: -I${includedir}
Libs: -L${libdir} -ljemalloc${install_suffix}
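
For reference, a minimal consumer of this pkg-config entry (hypothetical file name; assumes a default Unix install where jemalloc replaces malloc and no install_suffix is set):

/* demo.c — hypothetical example; build with:
 *   cc demo.c $(pkg-config --cflags --libs jemalloc)
 * Cflags above expands to -I${includedir}; Libs to -L${libdir} -ljemalloc. */
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    void *p = malloc(128);                 /* served by jemalloc at link time */
    malloc_stats_print(NULL, NULL, NULL);  /* dump allocator statistics */
    free(p);
    return 0;
}
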
How to build jemalloc for Windows
=================================
1. Install Cygwin with at least the following packages:
* autoconf
* autogen
* gawk
* grep
* sed
2. Install Visual Studio 2015 with Visual C++
3. Add Cygwin\bin to the PATH environment variable
4. Open "VS2015 x86 Native Tools Command Prompt"
(note: x86/x64 doesn't matter at this point)
5. Generate header files:
sh -c "CC=cl ./autogen.sh"
6. Now the project can be opened and built in Visual Studio:
msvc\jemalloc_vc2015.sln
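Note: step 6 can also be scripted from the same prompt; a hypothetical
invocation (msbuild is on PATH inside a VS2015 command prompt) would be:
msbuild msvc\jemalloc_vc2015.sln /p:Configuration=Release /p:Platform=x64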

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 14
VisualStudioVersion = 14.0.24720.0
MinimumVisualStudioVersion = 10.0.40219.1
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}"
ProjectSection(SolutionItems) = preProject
ReadMe.txt = ReadMe.txt
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2015\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2015\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|x64 = Debug|x64
Debug|x86 = Debug|x86
Debug-static|x64 = Debug-static|x64
Debug-static|x86 = Debug-static|x86
Release|x64 = Release|x64
Release|x86 = Release|x86
Release-static|x64 = Release-static|x64
Release-static|x86 = Release-static|x86
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
EndGlobal
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug-static|Win32">
<Configuration>Debug-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug-static|x64">
<Configuration>Debug-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|Win32">
<Configuration>Release-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|x64">
<Configuration>Release-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\arena.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\assert.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_dss.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_mmap.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\hash.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\huge.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ph.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\quarantine.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\smoothstep.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\spin.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\witness.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h" />
<ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdbool.h" />
<ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdint.h" />
<ClInclude Include="..\..\..\..\include\msvc_compat\strings.h" />
<ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\..\src\arena.c" />
<ClCompile Include="..\..\..\..\src\atomic.c" />
<ClCompile Include="..\..\..\..\src\base.c" />
<ClCompile Include="..\..\..\..\src\bitmap.c" />
<ClCompile Include="..\..\..\..\src\chunk.c" />
<ClCompile Include="..\..\..\..\src\chunk_dss.c" />
<ClCompile Include="..\..\..\..\src\chunk_mmap.c" />
<ClCompile Include="..\..\..\..\src\ckh.c" />
<ClCompile Include="..\..\..\..\src\ctl.c" />
<ClCompile Include="..\..\..\..\src\extent.c" />
<ClCompile Include="..\..\..\..\src\hash.c" />
<ClCompile Include="..\..\..\..\src\huge.c" />
<ClCompile Include="..\..\..\..\src\jemalloc.c" />
<ClCompile Include="..\..\..\..\src\mb.c" />
<ClCompile Include="..\..\..\..\src\mutex.c" />
<ClCompile Include="..\..\..\..\src\nstime.c" />
<ClCompile Include="..\..\..\..\src\pages.c" />
<ClCompile Include="..\..\..\..\src\prng.c" />
<ClCompile Include="..\..\..\..\src\prof.c" />
<ClCompile Include="..\..\..\..\src\quarantine.c" />
<ClCompile Include="..\..\..\..\src\rtree.c" />
<ClCompile Include="..\..\..\..\src\spin.c" />
<ClCompile Include="..\..\..\..\src\stats.c" />
<ClCompile Include="..\..\..\..\src\tcache.c" />
<ClCompile Include="..\..\..\..\src\ticker.c" />
<ClCompile Include="..\..\..\..\src\tsd.c" />
<ClCompile Include="..\..\..\..\src\util.c" />
<ClCompile Include="..\..\..\..\src\witness.c" />
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>jemalloc</RootNamespace>
<WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)d</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)d</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<DebugInformationFormat>OldStyle</DebugInformationFormat>
<MinimalRebuild>false</MinimalRebuild>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<DebugInformationFormat>OldStyle</DebugInformationFormat>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
<Filter Include="Header Files\internal">
<UniqueIdentifier>{5697dfa3-16cf-4932-b428-6e0ec6e9f98e}</UniqueIdentifier>
</Filter>
<Filter Include="Header Files\msvc_compat">
<UniqueIdentifier>{0cbd2ca6-42a7-4f82-8517-d7e7a14fd986}</UniqueIdentifier>
</Filter>
<Filter Include="Header Files\msvc_compat\C99">
<UniqueIdentifier>{0abe6f30-49b5-46dd-8aca-6e33363fa52c}</UniqueIdentifier>
</Filter>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\arena.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\assert.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_dss.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_mmap.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\hash.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\huge.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ph.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\quarantine.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\smoothstep.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\spin.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\witness.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\msvc_compat\strings.h">
<Filter>Header Files\msvc_compat</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h">
<Filter>Header Files\msvc_compat</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdbool.h">
<Filter>Header Files\msvc_compat\C99</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdint.h">
<Filter>Header Files\msvc_compat\C99</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\..\src\arena.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\atomic.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\base.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\bitmap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\chunk.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\chunk_dss.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\chunk_mmap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ckh.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ctl.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\extent.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hash.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\huge.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\jemalloc.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\mb.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\mutex.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\nstime.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\pages.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prng.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prof.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\quarantine.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\rtree.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\spin.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\stats.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\tcache.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ticker.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\tsd.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\util.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\witness.c">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
</Project>
// jemalloc C++ threaded test
// Author: Rustam Abdullaev
// Public Domain
#include <atomic>
#include <functional>
#include <future>
#include <random>
#include <thread>
#include <vector>
#include <stdio.h>
#include <stdlib.h> // exit()
#include <jemalloc/jemalloc.h>
using std::vector;
using std::thread;
using std::uniform_int_distribution;
using std::minstd_rand;
int test_threads()
{
  // Force three arenas so the workers are spread across a known number.
  je_malloc_conf = "narenas:3";
  int narenas = 0;
  size_t sz = sizeof(narenas);
  // Verify that the malloc_conf override above actually took effect.
  je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0);
  if (narenas != 3) {
    printf("Error: unexpected number of arenas: %d\n", narenas);
    return 1;
  }
  static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 };
  static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0]));
  vector<thread> workers;
  // With narenas + 1 workers, at least two of them must share an arena.
  static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50;
  je_malloc_stats_print(NULL, NULL, NULL);
  size_t allocated1;
  size_t sz1 = sizeof(allocated1);
  je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0);
  printf("\nPress Enter to start threads...\n");
  getchar();
  printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2);
  // Each worker repeatedly spawns a short-lived thread that allocates
  // random-sized blocks, fills them with a per-thread pattern, verifies the
  // pattern, and frees everything.
  for (int i = 0; i < numThreads; i++) {
    workers.emplace_back([tid=i]() {
      uniform_int_distribution<int> sizeDist(0, numSizes - 1);
      minstd_rand rnd(tid * 17);
      uint8_t* ptrs[numAllocsMax];
      int ptrsz[numAllocsMax];
      for (int i = 0; i < numIter1; ++i) {
        thread t([&]() {
          for (int i = 0; i < numIter2; ++i) {
            const int numAllocs = numAllocsMax - sizeDist(rnd);
            for (int j = 0; j < numAllocs; j += 64) {
              const int x = sizeDist(rnd);
              const int sz = sizes[x];
              ptrsz[j] = sz;
              ptrs[j] = (uint8_t*)je_malloc(sz);
              if (!ptrs[j]) {
                printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. %d\n", sz, tid, i, j, x);
                exit(1);
              }
              for (int k = 0; k < sz; k++)
                ptrs[j][k] = tid + k;
            }
            for (int j = 0; j < numAllocs; j += 64) {
              for (int k = 0, sz = ptrsz[j]; k < sz; k++)
                if (ptrs[j][k] != (uint8_t)(tid + k)) {
                  printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k));
                  exit(1);
                }
              je_free(ptrs[j]);
            }
          }
        });
        t.join();
      }
    });
  }
  for (thread& t : workers) {
    t.join();
  }
  je_malloc_stats_print(NULL, NULL, NULL);
  size_t allocated2;
  je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0);
  size_t leaked = allocated2 - allocated1;
  printf("\nDone. Leaked: %zd bytes\n", leaked);
  bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet)
  printf("\nTest %s!\n", (failed ? "FAILED" : "successful"));
  printf("\nPress Enter to continue...\n");
  getchar();
  return failed ? 1 : 0;
}
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug-static|Win32">
<Configuration>Debug-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug-static|x64">
<Configuration>Debug-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|Win32">
<Configuration>Release-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|x64">
<Configuration>Release-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{09028CFD-4EB7-491D-869C-0708DB97ED44}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>test_threads</RootNamespace>
<WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="test_threads.cpp" />
<ClCompile Include="test_threads_main.cpp" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\jemalloc\jemalloc.vcxproj">
<Project>{8d6bb292-9e1c-413d-9f98-4864bdc1514a}</Project>
</ProjectReference>
</ItemGroup>
<ItemGroup>
<ClInclude Include="test_threads.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="test_threads.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="test_threads_main.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="test_threads.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
</Project>
\ No newline at end of file
#include "test_threads.h"
#include <future>
#include <functional>
#include <chrono>
using namespace std::chrono_literals;
int main(int argc, char** argv)
{
int rc = test_threads();
return rc;
}
@@ -4,16 +4,23 @@
/******************************************************************************/
/* Data. */
purge_mode_t opt_purge = PURGE_DEFAULT;
const char *purge_mode_names[] = {
"ratio",
"decay",
"N/A"
};
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t lg_dirty_mult_default;
ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t decay_time_default;
arena_bin_info_t arena_bin_info[NBINS];
size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
size_t large_maxclass; /* Max large size class. */
static size_t small_maxrun; /* Max run size used for small size classes. */
static bool *small_run_tab; /* Valid small run page multiples. */
unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */
@@ -23,60 +30,57 @@ unsigned nhclasses; /* Number of huge size classes. */
* definition.
*/
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
bool cleaned, bool decommitted);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
arena_chunk_t *chunk);
static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
size_t ndirty_limit);
static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
bool dirty, bool cleaned, bool decommitted);
static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_run_t *run,
arena_bin_t *bin);
/******************************************************************************/
#define CHUNK_MAP_KEY ((uintptr_t)0x1U)
JEMALLOC_INLINE_C arena_chunk_map_misc_t *
arena_miscelm_key_create(size_t size)
JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk;
size_t pageind, mapbits;
return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) |
CHUNK_MAP_KEY));
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
pageind = arena_miscelm_to_pageind(miscelm);
mapbits = arena_mapbits_get(chunk, pageind);
return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_INLINE_C bool
arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm)
JEMALLOC_INLINE_C const extent_node_t *
arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk;
return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
return (&chunk->node);
}
#undef CHUNK_MAP_KEY
JEMALLOC_INLINE_C size_t
arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
JEMALLOC_INLINE_C int
arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
{
size_t a_sn, b_sn;
assert(arena_miscelm_is_key(miscelm));
return (arena_mapbits_size_decode((uintptr_t)miscelm));
}
JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk;
size_t pageind, mapbits;
assert(a != NULL);
assert(b != NULL);
assert(!arena_miscelm_is_key(miscelm));
a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
pageind = arena_miscelm_to_pageind(miscelm);
mapbits = arena_mapbits_get(chunk, pageind);
return (arena_mapbits_size_decode(mapbits));
return ((a_sn > b_sn) - (a_sn < b_sn));
}
JEMALLOC_INLINE_C int
arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
arena_ad_comp(const arena_chunk_map_misc_t *a,
const arena_chunk_map_misc_t *b)
{
uintptr_t a_miscelm = (uintptr_t)a;
uintptr_t b_miscelm = (uintptr_t)b;
@@ -87,74 +91,79 @@ arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}
/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
rb_link, arena_run_comp)
static size_t
run_quantize(size_t size)
JEMALLOC_INLINE_C int
arena_snad_comp(const arena_chunk_map_misc_t *a,
const arena_chunk_map_misc_t *b)
{
size_t qsize;
int ret;
assert(size != 0);
assert(size == PAGE_CEILING(size));
assert(a != NULL);
assert(b != NULL);
/* Don't change sizes that are valid small run sizes. */
if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
return (size);
ret = arena_sn_comp(a, b);
if (ret != 0)
return (ret);
/*
* Round down to the nearest run size that can actually be requested
* during normal large allocation. Add large_pad so that cache index
* randomization can offset the allocation from the page boundary.
*/
qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
if (qsize <= SMALL_MAXCLASS + large_pad)
return (run_quantize(size - large_pad));
assert(qsize <= size);
return (qsize);
ret = arena_ad_comp(a, b);
return (ret);
}
/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
ph_link, arena_snad_comp)
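/*
 * [Editor's note] A minimal standalone sketch of the comparator scheme
 * above, using a toy struct in place of arena_chunk_map_misc_t: the
 * branchless (a > b) - (a < b) idiom yields -1/0/1, and the "snad"
 * ordering compares serial numbers first, falling back to addresses.
 * Illustration only, not part of the jemalloc sources.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
typedef struct { size_t sn; uintptr_t addr; } toy_elm_t;
static int
toy_cmp3(uintptr_t a, uintptr_t b)
{
	/* -1 if a < b, 0 if equal, 1 if a > b, with no branches. */
	return ((a > b) - (a < b));
}
static int
toy_snad_comp(const toy_elm_t *a, const toy_elm_t *b)
{
	int ret = toy_cmp3(a->sn, b->sn); /* primary key: serial number */
	if (ret != 0)
		return (ret);
	return (toy_cmp3(a->addr, b->addr)); /* tie-break: address */
}
int
main(void)
{
	toy_elm_t x = {1, 0x1000}, y = {1, 0x2000};
	printf("%d\n", toy_snad_comp(&x, &y)); /* -1: equal sn, lower addr */
	return (0);
}
/* --- end of editor's sketch --- */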
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t
run_quantize_next(size_t size)
run_quantize_floor(size_t size)
{
size_t large_run_size_next;
size_t ret;
pszind_t pind;
assert(size > 0);
assert(size <= HUGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
assert(size != 0);
assert(size == PAGE_CEILING(size));
/*
* Return the next quantized size greater than the input size.
* Quantized sizes comprise the union of run sizes that back small
* region runs, and run sizes that back large regions with no explicit
* alignment constraints.
*/
if (size > SMALL_MAXCLASS) {
large_run_size_next = PAGE_CEILING(index2size(size2index(size -
large_pad) + 1) + large_pad);
} else
large_run_size_next = SIZE_T_MAX;
if (size >= small_maxrun)
return (large_run_size_next);
while (true) {
size += PAGE;
assert(size <= small_maxrun);
if (small_run_tab[size >> LG_PAGE]) {
if (large_run_size_next < size)
return (large_run_size_next);
return (size);
}
pind = psz2ind(size - large_pad + 1);
if (pind == 0) {
/*
* Avoid underflow. This short-circuit would also do the right
* thing for all sizes in the range for which there are
* PAGE-spaced size classes, but it's simplest to just handle
* the one case that would cause erroneous results.
*/
return (size);
}
ret = pind2sz(pind - 1) + large_pad;
assert(ret <= size);
return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t
run_quantize_first(size_t size)
run_quantize_ceil(size_t size)
{
size_t qsize = run_quantize(size);
size_t ret;
assert(size > 0);
assert(size <= HUGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
if (qsize < size) {
ret = run_quantize_floor(size);
if (ret < size) {
/*
* Skip a quantization that may have an adequately large run,
* because under-sized runs may be mixed in. This only happens
@@ -163,72 +172,50 @@ run_quantize_first(size_t size)
* search would potentially find sufficiently aligned available
* memory somewhere lower.
*/
qsize = run_quantize_next(size);
ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
}
return (qsize);
}
JEMALLOC_INLINE_C int
arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
int ret;
uintptr_t a_miscelm = (uintptr_t)a;
size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ?
arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a));
size_t b_qsize = run_quantize(arena_miscelm_size_get(b));
/*
* Compare based on quantized size rather than size, in order to sort
* equally useful runs only by address.
*/
ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
if (ret == 0) {
if (!arena_miscelm_is_key(a)) {
uintptr_t b_miscelm = (uintptr_t)b;
ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
} else {
/*
* Treat keys as if they are lower than anything else.
*/
ret = -1;
}
}
return (ret);
}
/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
arena_chunk_map_misc_t, rb_link, arena_avail_comp)
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif
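/*
 * [Editor's note] run_quantize_floor()/run_quantize_ceil() map a
 * page-aligned size onto the nearest size class at or below (floor) /
 * at or above (ceil) the input.  A toy standalone version over a
 * hypothetical class table; the real code derives classes via
 * psz2ind()/pind2sz() and accounts for large_pad, both omitted here.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
static const size_t classes[] = {4096, 8192, 12288, 16384, 24576, 32768};
#define NCLASSES (sizeof(classes) / sizeof(classes[0]))
static size_t
toy_quantize_floor(size_t size)
{
	size_t i, ret = classes[0];
	assert(size >= classes[0]);
	for (i = 0; i < NCLASSES && classes[i] <= size; i++)
		ret = classes[i]; /* largest class not exceeding size */
	return (ret);
}
static size_t
toy_quantize_ceil(size_t size)
{
	size_t ret = toy_quantize_floor(size);
	if (ret < size) {
		size_t i;
		for (i = 0; i < NCLASSES; i++) {
			if (classes[i] >= size)
				return (classes[i]); /* next class up */
		}
	}
	return (ret); /* size was already a class boundary */
}
int
main(void)
{
	printf("%zu %zu\n", toy_quantize_floor(20480),
	    toy_quantize_ceil(20480)); /* prints: 16384 24576 */
	return (0);
}
/* --- end of editor's sketch --- */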
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
arena_miscelm_get_const(chunk, pageind))));
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
pageind));
assert((npages << LG_PAGE) < chunksize);
assert(pind2sz(pind) <= chunksize);
arena_run_heap_insert(&arena->runs_avail[pind],
arena_miscelm_get_mutable(chunk, pageind));
}
static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
arena_miscelm_get_const(chunk, pageind))));
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
pageind));
assert((npages << LG_PAGE) < chunksize);
assert(pind2sz(pind) <= chunksize);
arena_run_heap_remove(&arena->runs_avail[pind],
arena_miscelm_get_mutable(chunk, pageind));
}
static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
@@ -245,7 +232,8 @@ static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
@@ -292,14 +280,14 @@ JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
void *ret;
unsigned regind;
size_t regind;
arena_chunk_map_misc_t *miscelm;
void *rpages;
assert(run->nfree > 0);
assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
miscelm = arena_run_to_miscelm(run);
rpages = arena_miscelm_to_rpages(miscelm);
ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
@@ -316,7 +304,7 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
size_t mapbits = arena_mapbits_get(chunk, pageind);
szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind = arena_run_regind(run, bin_info, ptr);
size_t regind = arena_run_regind(run, bin_info, ptr);
assert(run->nfree < bin_info->nregs);
/* Freeing an interior pointer can cause assertion failure. */
@@ -364,16 +352,30 @@ arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
}
static void
arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
arena_nactive_add(arena_t *arena, size_t add_pages)
{
if (config_stats) {
ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
- sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
size_t cactive_add = CHUNK_CEILING((arena->nactive +
add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
LG_PAGE);
if (cactive_diff != 0)
stats_cactive_add(cactive_diff);
if (cactive_add != 0)
stats_cactive_add(cactive_add);
}
arena->nactive += add_pages;
}
static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{
if (config_stats) {
size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
if (cactive_sub != 0)
stats_cactive_sub(cactive_sub);
}
arena->nactive -= sub_pages;
}
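/*
 * [Editor's note] arena_nactive_add()/_sub() account for stats.cactive at
 * whole-chunk granularity: the delta is the change in
 * CHUNK_CEILING(nactive << LG_PAGE) rather than the raw page delta, so
 * cactive only moves when a chunk boundary is crossed.  Standalone
 * illustration assuming 4 KiB pages and 2 MiB chunks.
 */
#include <stdio.h>
#include <stddef.h>
#define TOY_LG_PAGE	12
#define TOY_CHUNKSIZE	((size_t)1 << 21)
#define TOY_CHUNK_CEILING(s) \
	(((s) + TOY_CHUNKSIZE - 1) & ~(TOY_CHUNKSIZE - 1))
int
main(void)
{
	size_t nactive = 500; /* pages currently active */
	size_t add_pages = 20;
	size_t cactive_add =
	    TOY_CHUNK_CEILING((nactive + add_pages) << TOY_LG_PAGE) -
	    TOY_CHUNK_CEILING(nactive << TOY_LG_PAGE);
	/* 500 pages round up to one 2 MiB chunk; 520 pages need two. */
	printf("cactive grows by %zu bytes\n", cactive_add); /* 2097152 */
	return (0);
}
/* --- end of editor's sketch --- */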
static void
@@ -394,8 +396,7 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
arena_avail_remove(arena, chunk, run_ind, total_pages);
if (flag_dirty != 0)
arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
arena_cactive_update(arena, need_pages, 0);
arena->nactive += need_pages;
arena_nactive_add(arena, need_pages);
/* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) {
@@ -567,7 +568,8 @@ arena_chunk_init_spare(arena_t *arena)
}
static bool
arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
size_t sn, bool zero)
{
/*
@@ -576,64 +578,67 @@ arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
* of runs is tracked individually, and upon chunk deallocation the
* entire chunk is in a consistent commit state.
*/
extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
extent_node_achunk_set(&chunk->node, true);
return (chunk_register(chunk, &chunk->node));
return (chunk_register(tsdn, chunk, &chunk->node));
}
static arena_chunk_t *
arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
bool *zero, bool *commit)
arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
arena_chunk_t *chunk;
size_t sn;
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
chunksize, chunksize, zero, commit);
chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
NULL, chunksize, chunksize, &sn, zero, commit);
if (chunk != NULL && !*commit) {
/* Commit header. */
if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind)) {
chunk_dalloc_wrapper(arena, chunk_hooks,
(void *)chunk, chunksize, *commit);
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
(void *)chunk, chunksize, sn, *zero, *commit);
chunk = NULL;
}
}
if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
*zero)) {
if (!*commit) {
/* Undo commit of header. */
chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind);
}
chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
chunksize, *commit);
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
chunksize, sn, *zero, *commit);
chunk = NULL;
}
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
return (chunk);
}
static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
bool *commit)
{
arena_chunk_t *chunk;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t sn;
chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
chunksize, zero, true);
chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
chunksize, &sn, zero, commit, true);
if (chunk != NULL) {
if (arena_chunk_register(arena, chunk, *zero)) {
chunk_dalloc_cache(arena, &chunk_hooks, chunk,
chunksize, true);
if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
chunksize, sn, true);
return (NULL);
}
*commit = true;
}
if (chunk == NULL) {
chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
zero, commit);
chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
&chunk_hooks, zero, commit);
}
if (config_stats && chunk != NULL) {
......@@ -645,7 +650,7 @@ arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
}
static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
{
arena_chunk_t *chunk;
bool zero, commit;
@@ -655,14 +660,16 @@ arena_chunk_init_hard(arena_t *arena)
zero = false;
commit = false;
chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
if (chunk == NULL)
return (NULL);
chunk->hugepage = true;
/*
* Initialize the map to contain one maximal free untouched run. Mark
* the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
* chunk.
* the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
* or decommitted chunk.
*/
flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
@@ -674,17 +681,18 @@
*/
if (!zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
(void *)arena_bitselm_get(chunk, map_bias+1),
(size_t)((uintptr_t) arena_bitselm_get(chunk,
chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
map_bias+1)));
(void *)arena_bitselm_get_const(chunk, map_bias+1),
(size_t)((uintptr_t)arena_bitselm_get_const(chunk,
chunk_npages-1) -
(uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_internal_set(chunk, i, flag_unzeroed);
} else {
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
*)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
arena_bitselm_get(chunk, chunk_npages-1) -
(uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
*)arena_bitselm_get_const(chunk, map_bias+1),
(size_t)((uintptr_t)arena_bitselm_get_const(chunk,
chunk_npages-1) -
(uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
@@ -699,28 +707,85 @@ arena_chunk_init_hard(arena_t *arena)
}
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
{
arena_chunk_t *chunk;
if (arena->spare != NULL)
chunk = arena_chunk_init_spare(arena);
else {
chunk = arena_chunk_init_hard(arena);
chunk = arena_chunk_init_hard(tsdn, arena);
if (chunk == NULL)
return (NULL);
}
/* Insert the run into the runs_avail tree. */
ql_elm_new(&chunk->node, ql_link);
ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
return (chunk);
}
static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
size_t sn, hugepage;
bool committed;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
chunk_deregister(chunk, &chunk->node);
sn = extent_node_sn_get(&chunk->node);
hugepage = chunk->hugepage;
committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
if (!committed) {
/*
* Decommit the header. Mark the chunk as decommitted even if
* header decommit fails, since treating a partially committed
* chunk as committed has a high potential for causing later
* access of decommitted memory.
*/
chunk_hooks = chunk_hooks_get(tsdn, arena);
chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
arena->ind);
}
if (!hugepage) {
/*
* Convert chunk back to the default state, so that all
* subsequent chunk allocations start out with chunks that can
* be backed by transparent huge pages.
*/
pages_huge(chunk, chunksize);
}
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
sn, committed);
if (config_stats) {
arena->stats.mapped -= chunksize;
arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
}
}
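/*
 * [Editor's note] On Linux, pages_nohuge()/pages_huge() boil down to
 * madvise() with MADV_NOHUGEPAGE/MADV_HUGEPAGE: purging a run disables
 * transparent huge pages for its chunk, and arena_chunk_discard() above
 * re-enables them so recycled chunks start in the default state.  A
 * hedged, Linux-only standalone sketch of that mechanism (not jemalloc's
 * actual pages.c implementation):
 */
#include <sys/mman.h>
#include <stddef.h>
#include <stdio.h>
int
main(void)
{
	size_t len = (size_t)1 << 21; /* one 2 MiB "chunk" */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return (1);
#ifdef MADV_HUGEPAGE
	/* Analogous to pages_nohuge(): THP off after a run is purged. */
	if (madvise(p, len, MADV_NOHUGEPAGE) != 0)
		perror("madvise");
	/* Analogous to pages_huge(): restore the default on discard. */
	if (madvise(p, len, MADV_HUGEPAGE) != 0)
		perror("madvise");
#endif
	munmap(p, len);
	return (0);
}
/* --- end of editor's sketch --- */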
static void
arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
{
assert(arena->spare != spare);
if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
arena_run_dirty_remove(arena, spare, map_bias,
chunk_npages-map_bias);
}
arena_chunk_discard(tsdn, arena, spare);
}
static void
arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
arena_chunk_t *spare;
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
@@ -732,49 +797,14 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
arena_mapbits_decommitted_get(chunk, chunk_npages-1));
/*
* Remove run from the runs_avail tree, so that the arena does not use
* it.
*/
/* Remove run from runs_avail, so that the arena does not use it. */
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
if (arena->spare != NULL) {
arena_chunk_t *spare = arena->spare;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
bool committed;
arena->spare = chunk;
if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
arena_run_dirty_remove(arena, spare, map_bias,
chunk_npages-map_bias);
}
chunk_deregister(spare, &spare->node);
committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
0);
if (!committed) {
/*
* Decommit the header. Mark the chunk as decommitted
* even if header decommit fails, since treating a
* partially committed chunk as committed has a high
* potential for causing later access of decommitted
* memory.
*/
chunk_hooks = chunk_hooks_get(arena);
chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
LG_PAGE, arena->ind);
}
chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
chunksize, committed);
if (config_stats) {
arena->stats.mapped -= chunksize;
arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
}
} else
arena->spare = chunk;
ql_remove(&arena->achunks, &chunk->node, ql_link);
spare = arena->spare;
arena->spare = chunk;
if (spare != NULL)
arena_spare_discard(tsdn, arena, spare);
}
static void
@@ -816,6 +846,17 @@ arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
arena->stats.hstats[index].curhchunks--;
}
static void
arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
{
szind_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
arena->stats.ndalloc_huge++;
arena->stats.hstats[index].ndalloc--;
}
static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
@@ -847,243 +888,240 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
}
extent_node_t *
arena_node_alloc(arena_t *arena)
arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
{
extent_node_t *node;
malloc_mutex_lock(&arena->node_cache_mtx);
malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
node = ql_last(&arena->node_cache, ql_link);
if (node == NULL) {
malloc_mutex_unlock(&arena->node_cache_mtx);
return (base_alloc(sizeof(extent_node_t)));
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
return (base_alloc(tsdn, sizeof(extent_node_t)));
}
ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
malloc_mutex_unlock(&arena->node_cache_mtx);
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
return (node);
}
void
arena_node_dalloc(arena_t *arena, extent_node_t *node)
arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
{
malloc_mutex_lock(&arena->node_cache_mtx);
malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->node_cache, node, ql_link);
malloc_mutex_unlock(&arena->node_cache_mtx);
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
}
static void *
arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t usize, size_t alignment, bool *zero, size_t csize)
arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
bool *zero, size_t csize)
{
void *ret;
bool commit = true;
ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
zero, &commit);
ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
alignment, sn, zero, &commit);
if (ret == NULL) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena_huge_malloc_stats_update_undo(arena, usize);
arena->stats.mapped -= usize;
}
arena->nactive -= (usize >> LG_PAGE);
malloc_mutex_unlock(&arena->lock);
arena_nactive_sub(arena, usize >> LG_PAGE);
malloc_mutex_unlock(tsdn, &arena->lock);
}
return (ret);
}
void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
bool *zero)
arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, size_t *sn, bool *zero)
{
void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize = CHUNK_CEILING(usize);
bool commit = true;
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
/* Optimistically update stats. */
if (config_stats) {
arena_huge_malloc_stats_update(arena, usize);
arena->stats.mapped += usize;
}
arena->nactive += (usize >> LG_PAGE);
arena_nactive_add(arena, usize >> LG_PAGE);
ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
zero, true);
malloc_mutex_unlock(&arena->lock);
ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
alignment, sn, zero, &commit, true);
malloc_mutex_unlock(tsdn, &arena->lock);
if (ret == NULL) {
ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
alignment, zero, csize);
ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
usize, alignment, sn, zero, csize);
}
if (config_stats && ret != NULL)
stats_cactive_add(usize);
return (ret);
}
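/*
 * [Editor's note] arena_chunk_alloc_huge() updates stats and nactive
 * optimistically before dropping the arena lock, and the *_hard() path
 * reverts them if the chunk allocation ultimately fails.  The pattern,
 * reduced to a standalone toy with hypothetical names:
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
static size_t g_mapped; /* stands in for arena->stats.mapped */
static void *
alloc_with_optimistic_stats(size_t usize, bool fail)
{
	void *ret;
	g_mapped += usize;         /* optimistic: assume success */
	ret = fail ? NULL : malloc(usize);
	if (ret == NULL)
		g_mapped -= usize; /* revert on failure */
	return (ret);
}
int
main(void)
{
	void *p = alloc_with_optimistic_stats(4096, false);
	(void)alloc_with_optimistic_stats(4096, true);
	printf("mapped=%zu\n", g_mapped); /* 4096: failed call was reverted */
	free(p);
	return (0);
}
/* --- end of editor's sketch --- */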
void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
size_t sn)
{
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize;
csize = CHUNK_CEILING(usize);
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena_huge_dalloc_stats_update(arena, usize);
arena->stats.mapped -= usize;
stats_cactive_sub(usize);
}
arena->nactive -= (usize >> LG_PAGE);
arena_nactive_sub(arena, usize >> LG_PAGE);
chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
malloc_mutex_unlock(&arena->lock);
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
malloc_mutex_unlock(tsdn, &arena->lock);
}
void
arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
size_t usize)
arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
size_t oldsize, size_t usize)
{
assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
assert(oldsize != usize);
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats)
arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (oldsize < usize) {
size_t udiff = usize - oldsize;
arena->nactive += udiff >> LG_PAGE;
if (config_stats)
stats_cactive_add(udiff);
} else {
size_t udiff = oldsize - usize;
arena->nactive -= udiff >> LG_PAGE;
if (config_stats)
stats_cactive_sub(udiff);
}
malloc_mutex_unlock(&arena->lock);
if (oldsize < usize)
arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
else
arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
malloc_mutex_unlock(tsdn, &arena->lock);
}
void
arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
size_t usize)
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
size_t oldsize, size_t usize, size_t sn)
{
size_t udiff = oldsize - usize;
size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (cdiff != 0) {
if (cdiff != 0)
arena->stats.mapped -= cdiff;
stats_cactive_sub(udiff);
}
}
arena->nactive -= udiff >> LG_PAGE;
arena_nactive_sub(arena, udiff >> LG_PAGE);
if (cdiff != 0) {
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
void *nchunk = (void *)((uintptr_t)chunk +
CHUNK_CEILING(usize));
chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
sn, true);
}
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
}
static bool
arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
size_t udiff, size_t cdiff)
arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{
bool err;
bool commit = true;
err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
zero, &commit) == NULL);
err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
chunksize, sn, zero, &commit) == NULL);
if (err) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update_undo(arena, oldsize,
usize);
arena->stats.mapped -= cdiff;
}
arena->nactive -= (udiff >> LG_PAGE);
malloc_mutex_unlock(&arena->lock);
arena_nactive_sub(arena, udiff >> LG_PAGE);
malloc_mutex_unlock(tsdn, &arena->lock);
} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
true);
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
*sn, *zero, true);
err = true;
}
return (err);
}
bool
arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
size_t usize, bool *zero)
arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
size_t oldsize, size_t usize, bool *zero)
{
bool err;
chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
size_t udiff = usize - oldsize;
size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
size_t sn;
bool commit = true;
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
/* Optimistically update stats. */
if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize);
arena->stats.mapped += cdiff;
}
arena->nactive += (udiff >> LG_PAGE);
arena_nactive_add(arena, udiff >> LG_PAGE);
err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
chunksize, zero, true) == NULL);
malloc_mutex_unlock(&arena->lock);
err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
chunksize, &sn, zero, &commit, true) == NULL);
malloc_mutex_unlock(tsdn, &arena->lock);
if (err) {
err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
chunk, oldsize, usize, zero, nchunk, udiff,
cdiff);
err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
&chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
udiff, cdiff);
} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
true);
chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
sn, *zero, true);
err = true;
}
if (config_stats && !err)
stats_cactive_add(udiff);
return (err);
}
/*
* Do first-best-fit run selection, i.e. select the lowest run that best fits.
* Run sizes are quantized, so not all candidate runs are necessarily exactly
* the same size.
* Run sizes are indexed, so not all candidate runs are necessarily exactly the
* same size.
*/
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
size_t search_size = run_quantize_first(size);
arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size);
arena_chunk_map_misc_t *miscelm =
arena_avail_tree_nsearch(&arena->runs_avail, key);
if (miscelm == NULL)
return (NULL);
return (&miscelm->run);
pszind_t pind, i;
pind = psz2ind(run_quantize_ceil(size));
for (i = pind; pind2sz(i) <= chunksize; i++) {
arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
&arena->runs_avail[i]);
if (miscelm != NULL)
return (&miscelm->run);
}
return (NULL);
}
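/*
 * [Editor's note] The rewritten arena_run_first_best_fit() no longer
 * nsearches a red-black tree; it computes the page-size index of the
 * ceil-quantized request and scans the per-index heaps upward, taking
 * the first non-empty one (the lowest run of the smallest adequate
 * class).  Toy standalone version over counters instead of pairing
 * heaps:
 */
#include <stdio.h>
#include <stddef.h>
#define NIDX 6
static const size_t idx2size[NIDX] = {4096, 8192, 12288, 16384, 24576, 32768};
static int navail[NIDX] = {0, 3, 0, 0, 1, 0}; /* runs available per class */
static int
toy_first_best_fit(size_t size)
{
	size_t i;
	for (i = 0; i < NIDX; i++) {
		if (idx2size[i] < size)
			continue; /* class too small for the request */
		if (navail[i] > 0)
			return ((int)i); /* first adequate non-empty class */
	}
	return (-1); /* no usable run; caller allocates a new chunk */
}
int
main(void)
{
	/* 12 KiB request: class 12288 is empty, so class 24576 is used. */
	printf("%d\n", toy_first_best_fit(12288)); /* prints 4 */
	return (0);
}
/* --- end of editor's sketch --- */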
static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
arena_run_t *run = arena_run_first_best_fit(arena, size);
if (run != NULL) {
if (arena_run_split_large(arena, run, size, zero))
run = NULL;
@@ -1092,7 +1130,7 @@ arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
}
static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
{
arena_chunk_t *chunk;
arena_run_t *run;
@@ -1108,9 +1146,9 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
chunk = arena_chunk_alloc(arena);
chunk = arena_chunk_alloc(tsdn, arena);
if (chunk != NULL) {
run = &arena_miscelm_get(chunk, map_bias)->run;
run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
if (arena_run_split_large(arena, run, size, zero))
run = NULL;
return (run);
@@ -1136,7 +1174,7 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
}
static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
{
arena_chunk_t *chunk;
arena_run_t *run;
@@ -1153,9 +1191,9 @@ arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
chunk = arena_chunk_alloc(arena);
chunk = arena_chunk_alloc(tsdn, arena);
if (chunk != NULL) {
run = &arena_miscelm_get(chunk, map_bias)->run;
run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
if (arena_run_split_small(arena, run, size, binind))
run = NULL;
return (run);
@@ -1178,42 +1216,239 @@ arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
}
ssize_t
arena_lg_dirty_mult_get(arena_t *arena)
arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
{
ssize_t lg_dirty_mult;
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
lg_dirty_mult = arena->lg_dirty_mult;
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
return (lg_dirty_mult);
}
bool
arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
{
if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
return (true);
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
arena->lg_dirty_mult = lg_dirty_mult;
arena_maybe_purge(arena);
malloc_mutex_unlock(&arena->lock);
arena_maybe_purge(tsdn, arena);
malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
}
void
arena_maybe_purge(arena_t *arena)
static void
arena_decay_deadline_init(arena_t *arena)
{
assert(opt_purge == purge_mode_decay);
/*
* Generate a new deadline that is uniformly random within the next
* epoch after the current one.
*/
nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
nstime_add(&arena->decay.deadline, &arena->decay.interval);
if (arena->decay.time > 0) {
nstime_t jitter;
nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
nstime_ns(&arena->decay.interval)));
nstime_add(&arena->decay.deadline, &jitter);
}
}
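/*
 * [Editor's note] The deadline is the end of the next epoch plus a
 * uniformly random jitter in [0, interval), so arenas sharing a decay
 * interval do not all purge on the same tick.  Standalone toy using a
 * hypothetical xorshift PRNG in place of prng_range_u64():
 */
#include <stdint.h>
#include <stdio.h>
static uint64_t
toy_prng_range(uint64_t *state, uint64_t range)
{
	*state ^= *state << 13;
	*state ^= *state >> 7;
	*state ^= *state << 17;
	return (*state % range); /* modulo bias is acceptable for a toy */
}
int
main(void)
{
	uint64_t jitter_state = 0x12345678u; /* per-arena seed */
	uint64_t epoch_ns = 1000000000;      /* current epoch start */
	uint64_t interval_ns = 100000000;    /* decay interval: 100 ms */
	uint64_t deadline_ns = epoch_ns + interval_ns +
	    toy_prng_range(&jitter_state, interval_ns);
	/* deadline lies in [epoch + interval, epoch + 2*interval). */
	printf("deadline: %llu ns\n", (unsigned long long)deadline_ns);
	return (0);
}
/* --- end of editor's sketch --- */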
static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{
assert(opt_purge == purge_mode_decay);
return (nstime_compare(&arena->decay.deadline, time) <= 0);
}
static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
static const uint64_t h_steps[] = {
#define STEP(step, h, x, y) \
h,
SMOOTHSTEP
#undef STEP
};
uint64_t sum;
size_t npages_limit_backlog;
unsigned i;
assert(opt_purge == purge_mode_decay);
/*
* For each element of decay_backlog, multiply by the corresponding
* fixed-point smoothstep decay factor. Sum the products, then divide
* to round down to the nearest whole number of pages.
*/
sum = 0;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
sum += arena->decay.backlog[i] * h_steps[i];
npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
return (npages_limit_backlog);
}
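/*
 * [Editor's note] The dirty-page limit is a decaying weighted sum: each
 * backlog slot (dirty pages generated during one past epoch) is scaled
 * by a fixed-point smoothstep coefficient, the products are summed, and
 * the total is shifted down by the fixed-point precision.  Toy
 * standalone version with 4 steps and made-up coefficients; the real
 * SMOOTHSTEP table has SMOOTHSTEP_NSTEPS entries at 2^SMOOTHSTEP_BFP
 * scale.
 */
#include <stdint.h>
#include <stdio.h>
#define TOY_NSTEPS 4
#define TOY_BFP    24 /* binary fixed point: weights scaled by 2^24 */
int
main(void)
{
	/* Hypothetical weights: slot 0 is the oldest epoch and decays most. */
	static const uint64_t h_steps[TOY_NSTEPS] = {
		0x0200000, 0x0800000, 0x0c00000, 0x0f00000
	};
	size_t backlog[TOY_NSTEPS] = {100, 100, 100, 100}; /* pages/epoch */
	uint64_t sum = 0;
	unsigned i;
	for (i = 0; i < TOY_NSTEPS; i++)
		sum += (uint64_t)backlog[i] * h_steps[i];
	/* 400 raw dirty pages decay to a limit of 231. */
	printf("npages_limit = %zu\n", (size_t)(sum >> TOY_BFP));
	return (0);
}
/* --- end of editor's sketch --- */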
static void
arena_decay_backlog_update_last(arena_t *arena)
{
size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
arena->ndirty - arena->decay.ndirty : 0;
arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
}
static void
arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
{
if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
sizeof(size_t));
} else {
size_t nadvance_z = (size_t)nadvance_u64;
assert((uint64_t)nadvance_z == nadvance_u64);
memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
(SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
if (nadvance_z > 1) {
memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
}
}
arena_decay_backlog_update_last(arena);
}
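/*
 * [Editor's note] Advancing the epoch by n slides the backlog window:
 * drop the n oldest slots, shift the rest toward index 0, zero the
 * vacated slots, then record the newest epoch's dirty-page delta in the
 * last slot.  Toy standalone version with a 4-slot window, mirroring the
 * memmove/memset logic above:
 */
#include <stdio.h>
#include <string.h>
#include <stddef.h>
#define TOY_NSTEPS 4
static void
toy_backlog_advance(size_t *backlog, size_t nadvance, size_t newest)
{
	if (nadvance >= TOY_NSTEPS)
		memset(backlog, 0, (TOY_NSTEPS - 1) * sizeof(size_t));
	else {
		memmove(backlog, &backlog[nadvance],
		    (TOY_NSTEPS - nadvance) * sizeof(size_t));
		if (nadvance > 1) {
			memset(&backlog[TOY_NSTEPS - nadvance], 0,
			    (nadvance - 1) * sizeof(size_t));
		}
	}
	backlog[TOY_NSTEPS - 1] = newest; /* newest epoch's dirty pages */
}
int
main(void)
{
	size_t backlog[TOY_NSTEPS] = {10, 20, 30, 40};
	size_t i;
	toy_backlog_advance(backlog, 2, 5);
	for (i = 0; i < TOY_NSTEPS; i++)
		printf("%zu ", backlog[i]); /* prints: 30 40 0 5 */
	printf("\n");
	return (0);
}
/* --- end of editor's sketch --- */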
static void
arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
{
uint64_t nadvance_u64;
nstime_t delta;
assert(opt_purge == purge_mode_decay);
assert(arena_decay_deadline_reached(arena, time));
nstime_copy(&delta, time);
nstime_subtract(&delta, &arena->decay.epoch);
nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
assert(nadvance_u64 > 0);
/* Add nadvance_u64 decay intervals to epoch. */
nstime_copy(&delta, &arena->decay.interval);
nstime_imultiply(&delta, nadvance_u64);
nstime_add(&arena->decay.epoch, &delta);
/* Set a new deadline. */
arena_decay_deadline_init(arena);
/* Update the backlog. */
arena_decay_backlog_update(arena, nadvance_u64);
}
static void
arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
{
size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
if (arena->ndirty > ndirty_limit)
arena_purge_to_limit(tsdn, arena, ndirty_limit);
arena->decay.ndirty = arena->ndirty;
}
static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
{
arena_decay_epoch_advance_helper(arena, time);
arena_decay_epoch_advance_purge(tsdn, arena);
}
static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{
arena->decay.time = decay_time;
if (decay_time > 0) {
nstime_init2(&arena->decay.interval, decay_time, 0);
nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
}
nstime_init(&arena->decay.epoch, 0);
nstime_update(&arena->decay.epoch);
arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
arena_decay_deadline_init(arena);
arena->decay.ndirty = arena->ndirty;
memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}
static bool
arena_decay_time_valid(ssize_t decay_time)
{
if (decay_time < -1)
return (false);
if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
return (true);
return (false);
}
ssize_t
arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
{
ssize_t decay_time;
malloc_mutex_lock(tsdn, &arena->lock);
decay_time = arena->decay.time;
malloc_mutex_unlock(tsdn, &arena->lock);
return (decay_time);
}
bool
arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
{
if (!arena_decay_time_valid(decay_time))
return (true);
malloc_mutex_lock(tsdn, &arena->lock);
/*
* Restart decay backlog from scratch, which may cause many dirty pages
* to be immediately purged. It would conceptually be possible to map
* the old backlog onto the new backlog, but there is no justification
* for such complexity since decay_time changes are intended to be
* infrequent, either between the {-1, 0, >0} states, or a one-time
* arbitrary change during initial arena configuration.
*/
arena_decay_init(arena, decay_time);
arena_maybe_purge(tsdn, arena);
malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
}
static void
arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
{
assert(opt_purge == purge_mode_ratio);
/* Don't purge if the option is disabled. */
if (arena->lg_dirty_mult < 0)
return;
/* Don't recursively purge. */
if (arena->purging)
return;
/*
* Iterate, since preventing recursive purging could otherwise leave too
* many dirty pages.
@@ -1228,8 +1463,66 @@ arena_maybe_purge(arena_t *arena)
*/
if (arena->ndirty <= threshold)
return;
arena_purge(arena, false);
arena_purge_to_limit(tsdn, arena, threshold);
}
}
static void
arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
{
nstime_t time;
assert(opt_purge == purge_mode_decay);
/* Purge all or nothing if the option is disabled. */
if (arena->decay.time <= 0) {
if (arena->decay.time == 0)
arena_purge_to_limit(tsdn, arena, 0);
return;
}
nstime_init(&time, 0);
nstime_update(&time);
if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
&time) > 0)) {
/*
* Time went backwards. Move the epoch back in time and
* generate a new deadline, with the expectation that time
* typically flows forward for long enough periods of time that
* epochs complete. Unfortunately, this strategy is susceptible
* to clock jitter triggering premature epoch advances, but
* clock jitter estimation and compensation isn't feasible here
* because calls into this code are event-driven.
*/
nstime_copy(&arena->decay.epoch, &time);
arena_decay_deadline_init(arena);
} else {
/* Verify that time does not go backwards. */
assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
}
/*
* If the deadline has been reached, advance to the current epoch and
* purge to the new limit if necessary. Note that dirty pages created
* during the current epoch are not subject to purge until a future
* epoch, so as a result purging only happens during epoch advances.
*/
if (arena_decay_deadline_reached(arena, &time))
arena_decay_epoch_advance(tsdn, arena, &time);
}
void
arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
{
/* Don't recursively purge. */
if (arena->purging)
return;
if (opt_purge == purge_mode_ratio)
arena_maybe_purge_ratio(tsdn, arena);
else
arena_maybe_purge_decay(tsdn, arena);
}
static size_t
@@ -1253,49 +1546,29 @@ arena_dirty_count(arena_t *arena)
arena_chunk_map_misc_t *miscelm =
arena_rd_to_miscelm(rdelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
assert(arena_mapbits_allocated_get(chunk, pageind) ==
0);
assert(arena_mapbits_large_get(chunk, pageind) == 0);
assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
npages = arena_mapbits_unallocated_size_get(chunk,
pageind) >> LG_PAGE;
}
ndirty += npages;
}
return (ndirty);
}
static size_t
arena_compute_npurge(arena_t *arena, bool all)
{
size_t npurge;
/*
* Compute the minimum number of pages that this thread should try to
* purge.
*/
if (!all) {
size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
threshold = threshold < chunk_npages ? chunk_npages : threshold;
npurge = arena->ndirty - threshold;
} else
npurge = arena->ndirty;
assert(arena_mapbits_allocated_get(chunk, pageind) ==
0);
assert(arena_mapbits_large_get(chunk, pageind) == 0);
assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
npages = arena_mapbits_unallocated_size_get(chunk,
pageind) >> LG_PAGE;
}
ndirty += npages;
}
return (npurge);
return (ndirty);
}
static size_t
arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel,
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
arena_runs_dirty_link_t *rdelm, *rdelm_next;
extent_node_t *chunkselm;
size_t nstashed = 0;
/* Stash at least npurge pages. */
/* Stash runs/chunks according to ndirty_limit. */
for (rdelm = qr_next(&arena->runs_dirty, rd_link),
chunkselm = qr_next(&arena->chunks_cache, cc_link);
rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
@@ -1304,24 +1577,32 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
if (rdelm == &chunkselm->rd) {
extent_node_t *chunkselm_next;
bool zero;
size_t sn;
bool zero, commit;
UNUSED void *chunk;
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
if (opt_purge == purge_mode_decay && arena->ndirty -
(nstashed + npages) < ndirty_limit)
break;
chunkselm_next = qr_next(chunkselm, cc_link);
/*
* Allocate. chunkselm remains valid due to the
* dalloc_node=false argument to chunk_alloc_cache().
*/
zero = false;
chunk = chunk_alloc_cache(arena, chunk_hooks,
commit = false;
chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
extent_node_addr_get(chunkselm),
extent_node_size_get(chunkselm), chunksize, &zero,
false);
extent_node_size_get(chunkselm), chunksize, &sn,
&zero, &commit, false);
assert(chunk == extent_node_addr_get(chunkselm));
assert(zero == extent_node_zeroed_get(chunkselm));
extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
purge_chunks_sentinel);
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
assert(npages == (extent_node_size_get(chunkselm) >>
LG_PAGE));
chunkselm = chunkselm_next;
} else {
arena_chunk_t *chunk =
@@ -1334,6 +1615,9 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
arena_mapbits_unallocated_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
if (opt_purge == purge_mode_decay && arena->ndirty -
(nstashed + npages) < ndirty_limit)
break;
assert(pageind + npages <= chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) ==
@@ -1344,7 +1628,7 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
* prior to allocation.
*/
if (chunk == arena->spare)
arena_chunk_alloc(arena);
arena_chunk_alloc(tsdn, arena);
/* Temporarily allocate the free dirty run. */
arena_run_split_large(arena, run, run_size, false);
@@ -1359,7 +1643,8 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
}
nstashed += npages;
if (!all && nstashed >= npurge)
if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
ndirty_limit)
break;
}
@@ -1367,7 +1652,7 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
}
static size_t
arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
@@ -1379,7 +1664,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
nmadvise = 0;
npurged = 0;
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
for (rdelm = qr_next(purge_runs_sentinel, rd_link),
chunkselm = qr_next(purge_chunks_sentinel, cc_link);
rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
@@ -1408,6 +1693,17 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
run_size = arena_mapbits_large_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
/*
* If this is the first run purged within chunk, mark
* the chunk as non-huge. This will prevent all use of
* transparent huge pages for this chunk until the chunk
* as a whole is deallocated.
*/
if (chunk->hugepage) {
pages_nohuge(chunk, chunksize);
chunk->hugepage = false;
}
assert(pageind + npages <= chunk_npages);
assert(!arena_mapbits_decommitted_get(chunk, pageind));
assert(!arena_mapbits_decommitted_get(chunk,
@@ -1418,7 +1714,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
flag_unzeroed = 0;
flags = CHUNK_MAP_DECOMMITTED;
} else {
flag_unzeroed = chunk_purge_wrapper(arena,
flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
chunk_hooks, chunk, chunksize, pageind <<
LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
flags = flag_unzeroed;
@@ -1449,7 +1745,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (config_stats)
nmadvise++;
}
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena->stats.nmadvise += nmadvise;
@@ -1460,7 +1756,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
}
static void
arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
@@ -1477,13 +1773,14 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
cc_link);
void *addr = extent_node_addr_get(chunkselm);
size_t size = extent_node_size_get(chunkselm);
size_t sn = extent_node_sn_get(chunkselm);
bool zeroed = extent_node_zeroed_get(chunkselm);
bool committed = extent_node_committed_get(chunkselm);
extent_node_dirty_remove(chunkselm);
arena_node_dalloc(arena, chunkselm);
arena_node_dalloc(tsdn, arena, chunkselm);
chunkselm = chunkselm_next;
chunk_dalloc_arena(arena, chunk_hooks, addr, size,
zeroed, committed);
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
size, sn, zeroed, committed);
} else {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
@@ -1494,16 +1791,26 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
pageind) != 0);
arena_run_t *run = &miscelm->run;
qr_remove(rdelm, rd_link);
arena_run_dalloc(arena, run, false, true, decommitted);
arena_run_dalloc(tsdn, arena, run, false, true,
decommitted);
}
}
}
/*
* NB: ndirty_limit is interpreted differently depending on opt_purge:
* - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
* desired state:
* (arena->ndirty <= ndirty_limit)
* - purge_mode_decay: Purge as many dirty runs/chunks as possible without
* violating the invariant:
* (arena->ndirty >= ndirty_limit)
*/
static void
arena_purge(arena_t *arena, bool all)
arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
{
chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
size_t npurge, npurgeable, npurged;
chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
size_t npurge, npurged;
arena_runs_dirty_link_t purge_runs_sentinel;
extent_node_t purge_chunks_sentinel;
@@ -1517,34 +1824,183 @@ arena_purge(arena_t *arena, bool all)
size_t ndirty = arena_dirty_count(arena);
assert(ndirty == arena->ndirty);
}
assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all);
if (config_stats)
arena->stats.npurge++;
assert(opt_purge != purge_mode_ratio || (arena->nactive >>
arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
npurge = arena_compute_npurge(arena, all);
qr_new(&purge_runs_sentinel, rd_link);
extent_node_dirty_linkage_init(&purge_chunks_sentinel);
npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge,
npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
&purge_runs_sentinel, &purge_chunks_sentinel);
assert(npurgeable >= npurge);
npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
&purge_chunks_sentinel);
assert(npurged == npurgeable);
arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
if (npurge == 0)
goto label_return;
npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
&purge_runs_sentinel, &purge_chunks_sentinel);
assert(npurged == npurge);
arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
&purge_chunks_sentinel);
if (config_stats)
arena->stats.npurge++;
label_return:
arena->purging = false;
}
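/*
 * [Editor's note] How the two purge modes interpret ndirty_limit in the
 * stash loop of arena_stash_dirty() above: ratio mode keeps stashing
 * until ndirty - nstashed reaches the limit (purge as *few* pages as
 * needed), while decay mode skips any batch that would push
 * ndirty - nstashed below the limit (purge as *many* pages as the
 * invariant allows).  Toy standalone loop over hypothetical run sizes:
 */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
static size_t
toy_stash_pages(size_t ndirty, size_t ndirty_limit, bool decay_mode,
    const size_t *batches, size_t nbatches)
{
	size_t nstashed = 0, i;
	for (i = 0; i < nbatches; i++) {
		size_t npages = batches[i];
		if (decay_mode && ndirty - (nstashed + npages) < ndirty_limit)
			break; /* would violate ndirty >= ndirty_limit */
		nstashed += npages;
		if (!decay_mode && ndirty - nstashed <= ndirty_limit)
			break; /* reached ndirty <= ndirty_limit */
	}
	return (nstashed);
}
int
main(void)
{
	const size_t batches[] = {64, 64, 64, 64};
	/* ratio: stop once 256 - n <= 100 -> stashes 192 (3 batches) */
	printf("ratio: %zu\n", toy_stash_pages(256, 100, false, batches, 4));
	/* decay: skip a batch that would drop below 100 -> stashes 128 */
	printf("decay: %zu\n", toy_stash_pages(256, 100, true, batches, 4));
	return (0);
}
/* --- end of editor's sketch --- */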
void
arena_purge_all(arena_t *arena)
arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
{
malloc_mutex_lock(tsdn, &arena->lock);
if (all)
arena_purge_to_limit(tsdn, arena, 0);
else
arena_maybe_purge(tsdn, arena);
malloc_mutex_unlock(tsdn, &arena->lock);
}
static void
arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
{
size_t pageind, npages;
cassert(config_prof);
assert(opt_prof);
/*
* Iterate over the allocated runs and remove profiled allocations from
* the sample set.
*/
for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
if (arena_mapbits_large_get(chunk, pageind) != 0) {
void *ptr = (void *)((uintptr_t)chunk + (pageind
<< LG_PAGE));
size_t usize = isalloc(tsd_tsdn(tsd), ptr,
config_prof);
prof_free(tsd, ptr, usize);
npages = arena_mapbits_large_size_get(chunk,
pageind) >> LG_PAGE;
} else {
/* Skip small run. */
size_t binind = arena_mapbits_binind_get(chunk,
pageind);
arena_bin_info_t *bin_info =
&arena_bin_info[binind];
npages = bin_info->run_size >> LG_PAGE;
}
} else {
/* Skip unallocated run. */
npages = arena_mapbits_unallocated_size_get(chunk,
pageind) >> LG_PAGE;
}
assert(pageind + npages <= chunk_npages);
}
}
void
arena_reset(tsd_t *tsd, arena_t *arena)
{
unsigned i;
extent_node_t *node;
/*
* Locking in this function is unintuitive. The caller guarantees that
* no concurrent operations are happening in this arena, but there are
* still reasons that some locking is necessary:
*
* - Some of the functions in the transitive closure of calls assume
* appropriate locks are held, and in some cases these locks are
* temporarily dropped to avoid lock order reversal or deadlock due to
* reentry.
* - mallctl("epoch", ...) may concurrently refresh stats. While
* strictly speaking this is a "concurrent operation", disallowing
* stats refreshes would impose an inconvenient burden.
*/
/* Remove large allocations from prof sample set. */
if (config_prof && opt_prof) {
ql_foreach(node, &arena->achunks, ql_link) {
arena_achunk_prof_reset(tsd, arena,
extent_node_addr_get(node));
}
}
/* Reset curruns for large size classes. */
if (config_stats) {
for (i = 0; i < nlclasses; i++)
arena->stats.lstats[i].curruns = 0;
}
/* Huge allocations. */
malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
ql_last(&arena->huge, ql_link)) {
void *ptr = extent_node_addr_get(node);
size_t usize;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
if (config_stats || (config_prof && opt_prof))
usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
/* Remove huge allocation from prof sample set. */
if (config_prof && opt_prof)
prof_free(tsd, ptr, usize);
huge_dalloc(tsd_tsdn(tsd), ptr);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
/* Cancel out unwanted effects on stats. */
if (config_stats)
arena_huge_reset_stats_cancel(arena, usize);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
/* Bins. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
bin->runcur = NULL;
arena_run_heap_new(&bin->runs);
if (config_stats) {
bin->stats.curregs = 0;
bin->stats.curruns = 0;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
/*
* Re-initialize runs_dirty such that the chunks_cache and runs_dirty
* chains directly correspond.
*/
qr_new(&arena->runs_dirty, rd_link);
for (node = qr_next(&arena->chunks_cache, cc_link);
node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
qr_new(&node->rd, rd_link);
qr_meld(&arena->runs_dirty, &node->rd, rd_link);
}
/* Arena chunks. */
for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
ql_last(&arena->achunks, ql_link)) {
ql_remove(&arena->achunks, node, ql_link);
arena_chunk_discard(tsd_tsdn(tsd), arena,
extent_node_addr_get(node));
}
/* Spare. */
if (arena->spare != NULL) {
arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
arena->spare = NULL;
}
malloc_mutex_lock(&arena->lock);
arena_purge(arena, true);
malloc_mutex_unlock(&arena->lock);
assert(!arena->purging);
arena->nactive = 0;
for (i = 0; i < NPSIZES; i++)
arena_run_heap_new(&arena->runs_avail[i]);
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
static void
@@ -1660,21 +2116,9 @@ arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
return (size);
}
static bool
arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t run_ind = arena_miscelm_to_pageind(miscelm);
size_t offset = run_ind << LG_PAGE;
size_t length = arena_run_size_get(arena, chunk, run, run_ind);
return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length,
arena->ind));
}
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
bool decommitted)
arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
bool cleaned, bool decommitted)
{
arena_chunk_t *chunk;
arena_chunk_map_misc_t *miscelm;
......@@ -1687,8 +2131,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
assert(run_ind < chunk_npages);
size = arena_run_size_get(arena, chunk, run, run_ind);
run_pages = (size >> LG_PAGE);
arena_cactive_update(arena, 0, run_pages);
arena->nactive -= run_pages;
arena_nactive_sub(arena, run_pages);
/*
* The run is dirty if the caller claims to have dirtied it, as well as
......@@ -1735,7 +2178,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
if (size == arena_maxrun) {
assert(run_ind == map_bias);
assert(run_pages == (arena_maxrun >> LG_PAGE));
arena_chunk_dalloc(arena, chunk);
arena_chunk_dalloc(tsdn, arena, chunk);
}
/*
......@@ -1746,21 +2189,12 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
* chances of spuriously crossing the dirty page purging threshold.
*/
if (dirty)
arena_maybe_purge(arena);
}
static void
arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run)
{
bool committed = arena_run_decommit(arena, chunk, run);
arena_run_dalloc(arena, run, committed, false, !committed);
arena_maybe_purge(tsdn, arena);
}
static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t oldsize, size_t newsize)
arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, size_t oldsize, size_t newsize)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t pageind = arena_miscelm_to_pageind(miscelm);
......@@ -1795,12 +2229,13 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
pageind+head_npages)));
arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
0));
}
static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t oldsize, size_t newsize, bool dirty)
arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t pageind = arena_miscelm_to_pageind(miscelm);
......@@ -1837,20 +2272,10 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
pageind+head_npages)));
tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
tail_run = &tail_miscelm->run;
arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
0));
}
static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
if (miscelm != NULL)
return (&miscelm->run);
return (NULL);
arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
!= 0));
}
static void
......@@ -1858,35 +2283,25 @@ arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
arena_run_tree_insert(&bin->runs, miscelm);
}
static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
arena_run_tree_remove(&bin->runs, miscelm);
arena_run_heap_insert(&bin->runs, miscelm);
}
static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
arena_run_t *run = arena_bin_runs_first(bin);
if (run != NULL) {
arena_bin_runs_remove(bin, run);
if (config_stats)
bin->stats.reruns++;
}
return (run);
arena_chunk_map_misc_t *miscelm;
miscelm = arena_run_heap_remove_first(&bin->runs);
if (miscelm == NULL)
return (NULL);
if (config_stats)
bin->stats.reruns++;
return (&miscelm->run);
}
static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
{
arena_run_t *run;
szind_t binind;
......@@ -1902,19 +2317,19 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
bin_info = &arena_bin_info[binind];
/* Allocate a new run. */
malloc_mutex_unlock(&bin->lock);
malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
malloc_mutex_lock(&arena->lock);
run = arena_run_alloc_small(arena, bin_info->run_size, binind);
malloc_mutex_lock(tsdn, &arena->lock);
run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
if (run != NULL) {
/* Initialize run internals. */
run->binind = binind;
run->nfree = bin_info->nregs;
bitmap_init(run->bitmap, &bin_info->bitmap_info);
}
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
/********************************/
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsdn, &bin->lock);
if (run != NULL) {
if (config_stats) {
bin->stats.nruns++;
......@@ -1937,7 +2352,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
{
szind_t binind;
arena_bin_info_t *bin_info;
......@@ -1946,7 +2361,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
bin->runcur = NULL;
run = arena_bin_nonfull_run_get(arena, bin);
run = arena_bin_nonfull_run_get(tsdn, arena, bin);
if (bin->runcur != NULL && bin->runcur->nfree > 0) {
/*
* Another thread updated runcur while this one ran without the
......@@ -1967,10 +2382,11 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
* were just deallocated from the run.
*/
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
if (run->nfree == bin_info->nregs)
arena_dalloc_bin_run(arena, chunk, run, bin);
else
arena_bin_lower_run(arena, chunk, run, bin);
if (run->nfree == bin_info->nregs) {
arena_dalloc_bin_run(tsdn, arena, chunk, run,
bin);
} else
arena_bin_lower_run(arena, run, bin);
}
return (ret);
}
......@@ -1986,18 +2402,18 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
}
void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
uint64_t prof_accumbytes)
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
szind_t binind, uint64_t prof_accumbytes)
{
unsigned i, nfill;
arena_bin_t *bin;
assert(tbin->ncached == 0);
if (config_prof && arena_prof_accum(arena, prof_accumbytes))
prof_idump();
if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
prof_idump(tsdn);
bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsdn, &bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
tbin->lg_fill_div); i < nfill; i++) {
arena_run_t *run;
......@@ -2005,16 +2421,15 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
if ((run = bin->runcur) != NULL && run->nfree > 0)
ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
ptr = arena_bin_malloc_hard(arena, bin);
ptr = arena_bin_malloc_hard(tsdn, arena, bin);
if (ptr == NULL) {
/*
* OOM. tbin->avail isn't yet filled down to its first
* element, so the successful allocations (if any) must
* be moved to the base of tbin->avail before bailing
* out.
* be moved just before tbin->avail before bailing out.
*/
if (i > 0) {
memmove(tbin->avail, &tbin->avail[nfill - i],
memmove(tbin->avail - i, tbin->avail - nfill,
i * sizeof(void *));
}
break;
......@@ -2024,7 +2439,7 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
true);
}
/* Insert such that low regions get used first. */
tbin->avail[nfill - 1 - i] = ptr;
*(tbin->avail - nfill + i) = ptr;
}
if (config_stats) {
bin->stats.nmalloc += i;
......@@ -2033,29 +2448,31 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
bin->stats.nfills++;
tbin->tstats.nrequests = 0;
}
malloc_mutex_unlock(&bin->lock);
malloc_mutex_unlock(tsdn, &bin->lock);
tbin->ncached = i;
arena_decay_tick(tsdn, arena);
}
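/*
 * Sketch, not part of the diff, of the tcache stack layout this hunk
 * converts to: tbin->avail now points one element past the stack, so
 * slot k of an nfill-element fill lives at avail[-nfill + k].  After an
 * OOM with only i successes, those i pointers sit at the bottom of the
 * fill region and are slid up to end flush against avail, mirroring the
 * memmove above.
 */
#include <string.h>

static void
fill_then_compact(void **avail, void **ptrs, unsigned i, unsigned nfill)
{
	unsigned k;

	for (k = 0; k < i; k++)
		*(avail - nfill + k) = ptrs[k];	/* low slots fill first */
	memmove(avail - i, avail - nfill, i * sizeof(void *));
}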
void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{
size_t redzone_size = bin_info->redzone_size;
if (zero) {
size_t redzone_size = bin_info->redzone_size;
memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
redzone_size);
memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
redzone_size);
memset((void *)((uintptr_t)ptr - redzone_size),
JEMALLOC_ALLOC_JUNK, redzone_size);
memset((void *)((uintptr_t)ptr + bin_info->reg_size),
JEMALLOC_ALLOC_JUNK, redzone_size);
} else {
memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
bin_info->reg_interval);
memset((void *)((uintptr_t)ptr - redzone_size),
JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
}
}
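/*
 * Layout sketch, not part of the diff, for the junk fill above: each
 * region is flanked by redzones, and JEMALLOC_ALLOC_JUNK replaces the
 * previously hard-coded 0xa5 byte.
 *
 *	ptr - redzone_size        ptr         ptr + reg_size
 *	      |<-- redzone -->|<-- region -->|<-- redzone -->|
 */
#include <stdint.h>
#include <string.h>

#define ALLOC_JUNK 0xa5	/* stand-in for JEMALLOC_ALLOC_JUNK */

static void
junk_redzones(void *ptr, size_t reg_size, size_t redzone_size)
{
	memset((uint8_t *)ptr - redzone_size, ALLOC_JUNK, redzone_size);
	memset((uint8_t *)ptr + reg_size, ALLOC_JUNK, redzone_size);
}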
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
#endif
static void
arena_redzone_corruption(void *ptr, size_t usize, bool after,
......@@ -2070,7 +2487,7 @@ arena_redzone_corruption(void *ptr, size_t usize, bool after,
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
JEMALLOC_N(arena_redzone_corruption_impl);
JEMALLOC_N(n_arena_redzone_corruption);
#endif
static void
......@@ -2085,22 +2502,22 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
for (i = 1; i <= redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
if (*byte != 0xa5) {
if (*byte != JEMALLOC_ALLOC_JUNK) {
error = true;
arena_redzone_corruption(ptr, size, false, i,
*byte);
if (reset)
*byte = 0xa5;
*byte = JEMALLOC_ALLOC_JUNK;
}
}
for (i = 0; i < redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
if (*byte != 0xa5) {
if (*byte != JEMALLOC_ALLOC_JUNK) {
error = true;
arena_redzone_corruption(ptr, size, true, i,
*byte);
if (reset)
*byte = 0xa5;
*byte = JEMALLOC_ALLOC_JUNK;
}
}
}
......@@ -2111,7 +2528,7 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
#endif
void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
......@@ -2119,14 +2536,14 @@ arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
size_t redzone_size = bin_info->redzone_size;
arena_redzones_validate(ptr, bin_info, false);
memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
bin_info->reg_interval);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
JEMALLOC_N(arena_dalloc_junk_small_impl);
JEMALLOC_N(n_arena_dalloc_junk_small);
#endif
void
......@@ -2144,27 +2561,26 @@ arena_quarantine_junk_small(void *ptr, size_t usize)
arena_redzones_validate(ptr, bin_info, true);
}
void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
{
void *ret;
arena_bin_t *bin;
size_t usize;
arena_run_t *run;
szind_t binind;
binind = size2index(size);
assert(binind < NBINS);
bin = &arena->bins[binind];
size = index2size(binind);
usize = index2size(binind);
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsdn, &bin->lock);
if ((run = bin->runcur) != NULL && run->nfree > 0)
ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
ret = arena_bin_malloc_hard(arena, bin);
ret = arena_bin_malloc_hard(tsdn, arena, bin);
if (ret == NULL) {
malloc_mutex_unlock(&bin->lock);
malloc_mutex_unlock(tsdn, &bin->lock);
return (NULL);
}
......@@ -2173,9 +2589,9 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
bin->stats.nrequests++;
bin->stats.curregs++;
}
malloc_mutex_unlock(&bin->lock);
if (config_prof && !isthreaded && arena_prof_accum(arena, size))
prof_idump();
malloc_mutex_unlock(tsdn, &bin->lock);
if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
prof_idump(tsdn);
if (!zero) {
if (config_fill) {
......@@ -2183,34 +2599,35 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
} else if (unlikely(opt_zero))
memset(ret, 0, size);
memset(ret, 0, usize);
}
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
} else {
if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
memset(ret, 0, usize);
}
arena_decay_tick(tsdn, arena);
return (ret);
}
void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
{
void *ret;
size_t usize;
uintptr_t random_offset;
arena_run_t *run;
arena_chunk_map_misc_t *miscelm;
UNUSED bool idump;
UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
/* Large allocation. */
usize = s2u(size);
malloc_mutex_lock(&arena->lock);
usize = index2size(binind);
malloc_mutex_lock(tsdn, &arena->lock);
if (config_cache_oblivious) {
uint64_t r;
......@@ -2219,22 +2636,21 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
* that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
* for 4 KiB pages and 64-byte cachelines.
*/
prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state,
UINT64_C(6364136223846793009),
UINT64_C(1442695040888963409));
r = prng_lg_range_zu(&arena->offset_state, LG_PAGE -
LG_CACHELINE, false);
random_offset = ((uintptr_t)r) << LG_CACHELINE;
} else
random_offset = 0;
run = arena_run_alloc_large(arena, usize + large_pad, zero);
run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
if (run == NULL) {
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
return (NULL);
}
miscelm = arena_run_to_miscelm(run);
ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
random_offset);
if (config_stats) {
szind_t index = size2index(usize) - NBINS;
szind_t index = binind - NBINS;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
......@@ -2245,25 +2661,45 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
}
if (config_prof)
idump = arena_prof_accum_locked(arena, usize);
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
if (config_prof && idump)
prof_idump();
prof_idump(tsdn);
if (!zero) {
if (config_fill) {
if (unlikely(opt_junk_alloc))
memset(ret, 0xa5, usize);
memset(ret, JEMALLOC_ALLOC_JUNK, usize);
else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
}
arena_decay_tick(tsdn, arena);
return (ret);
}
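/*
 * Sketch, not part of the diff, of the cache-oblivious offset drawn
 * above: prng_lg_range_zu() yields LG_PAGE - LG_CACHELINE random bits,
 * placing the returned pointer at a random cacheline within the first
 * page.  rand() stands in for jemalloc's PRNG; the page and cacheline
 * sizes below are assumptions.
 */
#include <stdint.h>
#include <stdlib.h>

#define LG_PAGE_	12	/* 4 KiB pages */
#define LG_CACHELINE_	6	/* 64-byte cachelines */

static uintptr_t
random_cacheline_offset(void)
{
	/* r in [0 .. 64): one of the 64 cachelines in a page. */
	uintptr_t r = (uintptr_t)rand() &
	    ((1u << (LG_PAGE_ - LG_CACHELINE_)) - 1);

	return (r << LG_CACHELINE_);
}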
void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
bool zero)
{
assert(!tsdn_null(tsdn) || arena != NULL);
if (likely(!tsdn_null(tsdn)))
arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL))
return (NULL);
if (likely(size <= SMALL_MAXCLASS))
return (arena_malloc_small(tsdn, arena, ind, zero));
if (likely(size <= large_maxclass))
return (arena_malloc_large(tsdn, arena, ind, zero));
return (huge_malloc(tsdn, arena, index2size(ind), zero));
}
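/*
 * Sketch, not part of the diff, of the three-way dispatch above.  The
 * boundary values are passed in because SMALL_MAXCLASS and
 * large_maxclass are derived from the build/runtime configuration.
 */
#include <stddef.h>

typedef enum { ALLOC_SMALL, ALLOC_LARGE, ALLOC_HUGE } alloc_path_t;

static alloc_path_t
classify(size_t size, size_t small_maxclass, size_t large_maxclass)
{
	if (size <= small_maxclass)
		return (ALLOC_SMALL);	/* region in a bin-managed run */
	if (size <= large_maxclass)
		return (ALLOC_LARGE);	/* page run within a chunk */
	return (ALLOC_HUGE);		/* dedicated chunk(s) */
}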
/* Only handles large allocations that require more than page alignment. */
static void *
arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero)
{
void *ret;
......@@ -2273,19 +2709,21 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena_chunk_map_misc_t *miscelm;
void *rpages;
assert(!tsdn_null(tsdn) || arena != NULL);
assert(usize == PAGE_CEILING(usize));
arena = arena_choose(tsd, arena);
if (likely(!tsdn_null(tsdn)))
arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL))
return (NULL);
alignment = PAGE_CEILING(alignment);
alloc_size = usize + large_pad + alignment - PAGE;
malloc_mutex_lock(&arena->lock);
run = arena_run_alloc_large(arena, alloc_size, false);
malloc_mutex_lock(tsdn, &arena->lock);
run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
if (run == NULL) {
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
return (NULL);
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
......@@ -2300,16 +2738,16 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena_chunk_map_misc_t *head_miscelm = miscelm;
arena_run_t *head_run = run;
miscelm = arena_miscelm_get(chunk,
miscelm = arena_miscelm_get_mutable(chunk,
arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
LG_PAGE));
run = &miscelm->run;
arena_run_trim_head(arena, chunk, head_run, alloc_size,
arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
alloc_size - leadsize);
}
if (trailsize != 0) {
arena_run_trim_tail(arena, chunk, run, usize + large_pad +
arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
trailsize, usize + large_pad, false);
}
if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
......@@ -2320,8 +2758,8 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
run_ind) != 0);
assert(decommitted); /* Cause of OOM. */
arena_run_dalloc(arena, run, dirty, false, decommitted);
malloc_mutex_unlock(&arena->lock);
arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
malloc_mutex_unlock(tsdn, &arena->lock);
return (NULL);
}
ret = arena_miscelm_to_rpages(miscelm);
......@@ -2336,19 +2774,20 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
if (config_fill && !zero) {
if (unlikely(opt_junk_alloc))
memset(ret, 0xa5, usize);
memset(ret, JEMALLOC_ALLOC_JUNK, usize);
else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
arena_decay_tick(tsdn, arena);
return (ret);
}
void *
arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero, tcache_t *tcache)
{
void *ret;
......@@ -2356,7 +2795,8 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
&& (usize & PAGE_MASK) == 0))) {
/* Small; alignment doesn't require special run placement. */
ret = arena_malloc(tsd, arena, usize, zero, tcache);
ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
tcache, true);
} else if (usize <= large_maxclass && alignment <= PAGE) {
/*
* Large; alignment doesn't require special run placement.
......@@ -2364,25 +2804,25 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
* the base of the run, so do some bit manipulation to retrieve
* the base.
*/
ret = arena_malloc(tsd, arena, usize, zero, tcache);
ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
tcache, true);
if (config_cache_oblivious)
ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
} else {
if (likely(usize <= large_maxclass)) {
ret = arena_palloc_large(tsd, arena, usize, alignment,
ret = arena_palloc_large(tsdn, arena, usize, alignment,
zero);
} else if (likely(alignment <= chunksize))
ret = huge_malloc(tsd, arena, usize, zero, tcache);
ret = huge_malloc(tsdn, arena, usize, zero);
else {
ret = huge_palloc(tsd, arena, usize, alignment, zero,
tcache);
ret = huge_palloc(tsdn, arena, usize, alignment, zero);
}
}
return (ret);
}
void
arena_prof_promoted(const void *ptr, size_t size)
arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
{
arena_chunk_t *chunk;
size_t pageind;
......@@ -2391,8 +2831,8 @@ arena_prof_promoted(const void *ptr, size_t size)
cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
assert(isalloc(ptr, false) == LARGE_MINCLASS);
assert(isalloc(ptr, true) == LARGE_MINCLASS);
assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
assert(size <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
......@@ -2401,8 +2841,8 @@ arena_prof_promoted(const void *ptr, size_t size)
assert(binind < NBINS);
arena_mapbits_large_binind_set(chunk, pageind, binind);
assert(isalloc(ptr, false) == LARGE_MINCLASS);
assert(isalloc(ptr, true) == size);
assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
assert(isalloc(tsdn, ptr, true) == size);
}
static void
......@@ -2418,48 +2858,51 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
&chunk->node), bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
/*
* The following block's conditional is necessary because if the
* run only contains one region, then it never gets inserted
* into the non-full runs tree.
*/
if (bin_info->nregs != 1) {
/*
* This block's conditional is necessary because if the
* run only contains one region, then it never gets
* inserted into the non-full runs tree.
*/
arena_bin_runs_remove(bin, run);
arena_chunk_map_misc_t *miscelm =
arena_run_to_miscelm(run);
arena_run_heap_remove(&bin->runs, miscelm);
}
}
}
static void
arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin)
{
assert(run != bin->runcur);
assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
NULL);
malloc_mutex_unlock(&bin->lock);
malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
malloc_mutex_lock(&arena->lock);
arena_run_dalloc_decommit(arena, chunk, run);
malloc_mutex_unlock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
arena_run_dalloc(tsdn, arena, run, true, false, false);
malloc_mutex_unlock(tsdn, &arena->lock);
/****************************/
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsdn, &bin->lock);
if (config_stats)
bin->stats.curruns--;
}
static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
{
/*
* Make sure that if bin->runcur is non-NULL, it refers to the lowest
* non-full run. It is okay to NULL runcur out rather than proactively
* keeping it pointing at the lowest non-full run.
* Make sure that if bin->runcur is non-NULL, it refers to the
* oldest/lowest non-full run. It is okay to NULL runcur out rather
* than proactively keeping it pointing at the oldest/lowest non-full
* run.
*/
if ((uintptr_t)run < (uintptr_t)bin->runcur) {
if (bin->runcur != NULL &&
arena_snad_comp(arena_run_to_miscelm(bin->runcur),
arena_run_to_miscelm(run)) > 0) {
/* Switch runcur. */
if (bin->runcur->nfree > 0)
arena_bin_runs_insert(bin, bin->runcur);
......@@ -2471,8 +2914,8 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
}
static void
arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_bits_t *bitselm, bool junked)
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
{
size_t pageind, rpages_ind;
arena_run_t *run;
......@@ -2482,7 +2925,7 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
run = &arena_miscelm_get(chunk, rpages_ind)->run;
run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
binind = run->binind;
bin = &arena->bins[binind];
bin_info = &arena_bin_info[binind];
......@@ -2493,9 +2936,9 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_run_reg_dalloc(run, ptr);
if (run->nfree == bin_info->nregs) {
arena_dissociate_bin_run(chunk, run, bin);
arena_dalloc_bin_run(arena, chunk, run, bin);
arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
} else if (run->nfree == 1 && run != bin->runcur)
arena_bin_lower_run(arena, chunk, run, bin);
arena_bin_lower_run(arena, run, bin);
if (config_stats) {
bin->stats.ndalloc++;
......@@ -2504,15 +2947,15 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
}
void
arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_bits_t *bitselm)
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
{
arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
}
void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_bits_t *bitselm)
{
arena_run_t *run;
......@@ -2520,16 +2963,16 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t rpages_ind;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
run = &arena_miscelm_get(chunk, rpages_ind)->run;
run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
bin = &arena->bins[run->binind];
malloc_mutex_lock(&bin->lock);
arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
malloc_mutex_unlock(&bin->lock);
malloc_mutex_lock(tsdn, &bin->lock);
arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
malloc_mutex_unlock(tsdn, &bin->lock);
}
void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind)
arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t pageind)
{
arena_chunk_map_bits_t *bitselm;
......@@ -2538,34 +2981,36 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind)) != BININD_INVALID);
}
bitselm = arena_bitselm_get(chunk, pageind);
arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
bitselm = arena_bitselm_get_mutable(chunk, pageind);
arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
arena_decay_tick(tsdn, arena);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
#endif
void
arena_dalloc_junk_large(void *ptr, size_t usize)
{
if (config_fill && unlikely(opt_junk_free))
memset(ptr, 0x5a, usize);
memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
JEMALLOC_N(arena_dalloc_junk_large_impl);
JEMALLOC_N(n_arena_dalloc_junk_large);
#endif
static void
arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
void *ptr, bool junked)
arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
arena_chunk_t *chunk, void *ptr, bool junked)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
pageind);
arena_run_t *run = &miscelm->run;
if (config_fill || config_stats) {
......@@ -2584,32 +3029,35 @@ arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
}
}
arena_run_dalloc_decommit(arena, chunk, run);
arena_run_dalloc(tsdn, arena, run, true, false, false);
}
void
arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr)
arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
arena_chunk_t *chunk, void *ptr)
{
arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
}
void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
void *ptr)
{
malloc_mutex_lock(&arena->lock);
arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
malloc_mutex_unlock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
malloc_mutex_unlock(tsdn, &arena->lock);
arena_decay_tick(tsdn, arena);
}
static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t oldsize, size_t size)
arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t oldsize, size_t size)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
pageind);
arena_run_t *run = &miscelm->run;
assert(size < oldsize);
......@@ -2618,8 +3066,8 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
* Shrink the run, and make trailing pages available for other
* allocations.
*/
malloc_mutex_lock(&arena->lock);
arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
malloc_mutex_lock(tsdn, &arena->lock);
arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
large_pad, true);
if (config_stats) {
szind_t oldindex = size2index(oldsize) - NBINS;
......@@ -2637,12 +3085,12 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
}
static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t npages = (oldsize + large_pad) >> LG_PAGE;
......@@ -2652,7 +3100,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
large_pad);
/* Try to extend the run. */
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
pageind+npages) != 0)
goto label_fail;
......@@ -2675,7 +3123,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
if (splitsize == 0)
goto label_fail;
run = &arena_miscelm_get(chunk, pageind+npages)->run;
run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
if (arena_run_split_large(arena, run, splitsize, zero))
goto label_fail;
......@@ -2683,10 +3131,16 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
/*
* Zero the trailing bytes of the original allocation's
* last page, since they are in an indeterminate state.
* There will always be trailing bytes, because ptr's
* offset from the beginning of the run is a multiple of
* CACHELINE in [0 .. PAGE).
*/
assert(PAGE_CEILING(oldsize) == oldsize);
memset((void *)((uintptr_t)ptr + oldsize), 0,
PAGE_CEILING((uintptr_t)ptr) - (uintptr_t)ptr);
void *zbase = (void *)((uintptr_t)ptr + oldsize);
void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
PAGE));
size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
assert(nzero > 0);
memset(zbase, 0, nzero);
}
size = oldsize + splitsize;
......@@ -2726,24 +3180,24 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
}
label_fail:
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
return (true);
}
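/*
 * Worked sketch, not part of the diff, of the trailing-byte zeroing
 * introduced above.  PAGE_ADDR2BASE rounds down to a page boundary, so
 * the span runs from the end of the old allocation to the end of that
 * page.  With 4 KiB pages and an allocation ending 0x140 bytes into a
 * page, nzero = 0x1000 - 0x140 = 0xec0.
 */
#include <stddef.h>
#include <stdint.h>

#define PAGE_	((uintptr_t)4096)	/* assumed page size */

static size_t
trailing_zero_len(uintptr_t ptr, size_t oldsize)
{
	uintptr_t zbase = ptr + oldsize;
	uintptr_t zpast = (zbase + PAGE_) & ~(PAGE_ - 1);

	return ((size_t)(zpast - zbase));
}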
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{
if (config_fill && unlikely(opt_junk_free)) {
memset((void *)((uintptr_t)ptr + usize), 0x5a,
memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
old_usize - usize);
}
}
......@@ -2751,7 +3205,7 @@ arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
JEMALLOC_N(arena_ralloc_junk_large_impl);
JEMALLOC_N(n_arena_ralloc_junk_large);
#endif
/*
......@@ -2759,7 +3213,7 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large =
* always fail if growing an object, and the following run is already in use.
*/
static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
arena_chunk_t *chunk;
......@@ -2774,15 +3228,16 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
arena = extent_node_arena_get(&chunk->node);
if (oldsize < usize_max) {
bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
usize_min, usize_max, zero);
bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
oldsize, usize_min, usize_max, zero);
if (config_fill && !ret && !zero) {
if (unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
isalloc(ptr, config_prof) - oldsize);
memset((void *)((uintptr_t)ptr + oldsize),
JEMALLOC_ALLOC_JUNK,
isalloc(tsdn, ptr, config_prof) - oldsize);
} else if (unlikely(opt_zero)) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
isalloc(ptr, config_prof) - oldsize);
isalloc(tsdn, ptr, config_prof) - oldsize);
}
}
return (ret);
......@@ -2791,19 +3246,27 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
assert(oldsize > usize_max);
/* Fill before shrinking in order to avoid a race. */
arena_ralloc_junk_large(ptr, oldsize, usize_max);
arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
return (false);
}
bool
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero)
{
size_t usize_min, usize_max;
/* Calls with non-zero extra had to clamp extra. */
assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
if (unlikely(size > HUGE_MAXCLASS))
return (true);
usize_min = s2u(size);
usize_max = s2u(size + extra);
if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
arena_chunk_t *chunk;
/*
* Avoid moving the allocation if the size class can be left the
* same.
......@@ -2811,37 +3274,39 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
if (oldsize <= SMALL_MAXCLASS) {
assert(arena_bin_info[size2index(oldsize)].reg_size ==
oldsize);
if ((usize_max <= SMALL_MAXCLASS &&
size2index(usize_max) == size2index(oldsize)) ||
(size <= oldsize && usize_max >= oldsize))
return (false);
if ((usize_max > SMALL_MAXCLASS ||
size2index(usize_max) != size2index(oldsize)) &&
(size > oldsize || usize_max < oldsize))
return (true);
} else {
if (usize_max > SMALL_MAXCLASS) {
if (!arena_ralloc_large(ptr, oldsize, usize_min,
usize_max, zero))
return (false);
}
if (usize_max <= SMALL_MAXCLASS)
return (true);
if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
usize_max, zero))
return (true);
}
/* Reallocation would require a move. */
return (true);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
return (false);
} else {
return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max,
zero));
return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
usize_max, zero));
}
}
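/*
 * Sketch, not part of the diff, of the small-class no-move test after
 * the inversion above: staying in place is possible when the requested
 * range maps to the same size class, or when the current size already
 * lies within [size .. usize_max].  A toy power-of-two size2index
 * stands in for jemalloc's size-class lookup.
 */
#include <stdbool.h>
#include <stddef.h>

static unsigned
toy_size2index(size_t size)
{
	unsigned i = 0;
	size_t c = 1;

	while (c < size) {
		c <<= 1;
		i++;
	}
	return (i);
}

static bool
small_can_stay(size_t oldsize, size_t size, size_t usize_max,
    size_t small_maxclass)
{
	if (usize_max <= small_maxclass &&
	    toy_size2index(usize_max) == toy_size2index(oldsize))
		return (true);
	return (size <= oldsize && usize_max >= oldsize);
}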
static void *
arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache)
{
if (alignment == 0)
return (arena_malloc(tsd, arena, usize, zero, tcache));
return (arena_malloc(tsdn, arena, usize, size2index(usize),
zero, tcache, true));
usize = sa2u(usize, alignment);
if (usize == 0)
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return (NULL);
return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
}
void *
......@@ -2852,14 +3317,15 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t usize;
usize = s2u(size);
if (usize == 0)
if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
return (NULL);
if (likely(usize <= large_maxclass)) {
size_t copysize;
/* Try to avoid moving the allocation. */
if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero))
if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
zero))
return (ptr);
/*
......@@ -2867,8 +3333,8 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
* the object. In that case, fall back to allocating new space
* and copying.
*/
ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
zero, tcache);
ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
alignment, zero, tcache);
if (ret == NULL)
return (NULL);
......@@ -2880,7 +3346,7 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
copysize = (usize < oldsize) ? usize : oldsize;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache);
isqalloc(tsd, ptr, oldsize, tcache, true);
} else {
ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
zero, tcache);
......@@ -2889,25 +3355,25 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
}
dss_prec_t
arena_dss_prec_get(arena_t *arena)
arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
{
dss_prec_t ret;
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
ret = arena->dss_prec;
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
return (ret);
}
bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
{
if (!have_dss)
return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
arena->dss_prec = dss_prec;
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
}
......@@ -2922,27 +3388,76 @@ bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
{
if (opt_purge != purge_mode_ratio)
return (true);
if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
return (true);
atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
return (false);
}
ssize_t
arena_decay_time_default_get(void)
{
return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
}
bool
arena_decay_time_default_set(ssize_t decay_time)
{
if (opt_purge != purge_mode_decay)
return (true);
if (!arena_decay_time_valid(decay_time))
return (true);
atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
return (false);
}
static void
arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
size_t *nactive, size_t *ndirty)
{
*nthreads += arena_nthreads_get(arena, false);
*dss = dss_prec_names[arena->dss_prec];
*lg_dirty_mult = arena->lg_dirty_mult;
*decay_time = arena->decay.time;
*nactive += arena->nactive;
*ndirty += arena->ndirty;
}
void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
size_t *nactive, size_t *ndirty)
{
malloc_mutex_lock(tsdn, &arena->lock);
arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
decay_time, nactive, ndirty);
malloc_mutex_unlock(tsdn, &arena->lock);
}
void
arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
size_t *nactive, size_t *ndirty, arena_stats_t *astats,
malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
malloc_huge_stats_t *hstats)
{
unsigned i;
malloc_mutex_lock(&arena->lock);
*dss = dss_prec_names[arena->dss_prec];
*lg_dirty_mult = arena->lg_dirty_mult;
*nactive += arena->nactive;
*ndirty += arena->ndirty;
cassert(config_stats);
malloc_mutex_lock(tsdn, &arena->lock);
arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
decay_time, nactive, ndirty);
astats->mapped += arena->stats.mapped;
astats->retained += arena->stats.retained;
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
......@@ -2968,12 +3483,12 @@ arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
}
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsdn, &bin->lock);
bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests;
......@@ -2985,33 +3500,61 @@ arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
bstats[i].nruns += bin->stats.nruns;
bstats[i].reruns += bin->stats.reruns;
bstats[i].curruns += bin->stats.curruns;
malloc_mutex_unlock(&bin->lock);
malloc_mutex_unlock(tsdn, &bin->lock);
}
}
unsigned
arena_nthreads_get(arena_t *arena, bool internal)
{
return (atomic_read_u(&arena->nthreads[internal]));
}
void
arena_nthreads_inc(arena_t *arena, bool internal)
{
atomic_add_u(&arena->nthreads[internal], 1);
}
void
arena_nthreads_dec(arena_t *arena, bool internal)
{
atomic_sub_u(&arena->nthreads[internal], 1);
}
size_t
arena_extent_sn_next(arena_t *arena)
{
return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
}
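/*
 * Sketch, not part of the diff, of the extent serial-number counter
 * above, restated with C11 atomics.  jemalloc's atomic_add_z() returns
 * the post-add value, hence the "- 1" to recover the pre-increment
 * serial number; atomic_fetch_add() returns it directly.
 */
#include <stdatomic.h>
#include <stddef.h>

static atomic_size_t extent_sn_next;

static size_t
sn_next(void)
{
	return (atomic_fetch_add(&extent_sn_next, 1));
}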
arena_t *
arena_new(unsigned ind)
arena_new(tsdn_t *tsdn, unsigned ind)
{
arena_t *arena;
unsigned i;
arena_bin_t *bin;
/*
* Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
* because there is no way to clean up if base_alloc() OOMs.
*/
if (config_stats) {
arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t))
+ QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) +
nhclasses) * sizeof(malloc_huge_stats_t));
arena = (arena_t *)base_alloc(tsdn,
CACHELINE_CEILING(sizeof(arena_t)) +
QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)))
+ (nhclasses * sizeof(malloc_huge_stats_t)));
} else
arena = (arena_t *)base_alloc(sizeof(arena_t));
arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
if (arena == NULL)
return (NULL);
arena->ind = ind;
arena->nthreads = 0;
if (malloc_mutex_init(&arena->lock))
arena->nthreads[0] = arena->nthreads[1] = 0;
if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
return (NULL);
if (config_stats) {
......@@ -3041,11 +3584,15 @@ arena_new(unsigned ind)
* deterministic seed.
*/
arena->offset_state = config_debug ? ind :
(uint64_t)(uintptr_t)arena;
(size_t)(uintptr_t)arena;
}
arena->dss_prec = chunk_dss_prec_get();
ql_new(&arena->achunks);
arena->extent_sn_next = 0;
arena->spare = NULL;
arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
......@@ -3053,33 +3600,42 @@ arena_new(unsigned ind)
arena->nactive = 0;
arena->ndirty = 0;
arena_avail_tree_new(&arena->runs_avail);
for (i = 0; i < NPSIZES; i++)
arena_run_heap_new(&arena->runs_avail[i]);
qr_new(&arena->runs_dirty, rd_link);
qr_new(&arena->chunks_cache, cc_link);
if (opt_purge == purge_mode_decay)
arena_decay_init(arena, arena_decay_time_default_get());
ql_new(&arena->huge);
if (malloc_mutex_init(&arena->huge_mtx))
if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
WITNESS_RANK_ARENA_HUGE))
return (NULL);
extent_tree_szad_new(&arena->chunks_szad_cached);
extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
extent_tree_ad_new(&arena->chunks_ad_cached);
extent_tree_szad_new(&arena->chunks_szad_retained);
extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
extent_tree_ad_new(&arena->chunks_ad_retained);
if (malloc_mutex_init(&arena->chunks_mtx))
if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
WITNESS_RANK_ARENA_CHUNKS))
return (NULL);
ql_new(&arena->node_cache);
if (malloc_mutex_init(&arena->node_cache_mtx))
if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
WITNESS_RANK_ARENA_NODE_CACHE))
return (NULL);
arena->chunk_hooks = chunk_hooks_default;
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock))
arena_bin_t *bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock, "arena_bin",
WITNESS_RANK_ARENA_BIN))
return (NULL);
bin->runcur = NULL;
arena_run_tree_new(&bin->runs);
arena_run_heap_new(&bin->runs);
if (config_stats)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
......@@ -3111,8 +3667,7 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
* be twice as large in order to maintain alignment.
*/
if (config_fill && unlikely(opt_redzone)) {
size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
1);
size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
if (align_min <= REDZONE_MINSIZE) {
bin_info->redzone_size = REDZONE_MINSIZE;
pad_size = 0;
......@@ -3132,18 +3687,19 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
* size).
*/
try_run_size = PAGE;
try_nregs = try_run_size / bin_info->reg_size;
try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
do {
perfect_run_size = try_run_size;
perfect_nregs = try_nregs;
try_run_size += PAGE;
try_nregs = try_run_size / bin_info->reg_size;
try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
assert(perfect_nregs <= RUN_MAXREGS);
actual_run_size = perfect_run_size;
actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
actual_nregs = (uint32_t)((actual_run_size - pad_size) /
bin_info->reg_interval);
/*
* Redzones can require enough padding that not even a single region can
......@@ -3155,8 +3711,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
assert(config_fill && unlikely(opt_redzone));
actual_run_size += PAGE;
actual_nregs = (actual_run_size - pad_size) /
bin_info->reg_interval;
actual_nregs = (uint32_t)((actual_run_size - pad_size) /
bin_info->reg_interval);
}
/*
......@@ -3164,8 +3720,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
*/
while (actual_run_size > arena_maxrun) {
actual_run_size -= PAGE;
actual_nregs = (actual_run_size - pad_size) /
bin_info->reg_interval;
actual_nregs = (uint32_t)((actual_run_size - pad_size) /
bin_info->reg_interval);
}
assert(actual_nregs > 0);
assert(actual_run_size == s2u(actual_run_size));
......@@ -3173,11 +3729,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
/* Copy final settings. */
bin_info->run_size = actual_run_size;
bin_info->nregs = actual_nregs;
bin_info->reg0_offset = actual_run_size - (actual_nregs *
bin_info->reg_interval) - pad_size + bin_info->redzone_size;
if (actual_run_size > small_maxrun)
small_maxrun = actual_run_size;
bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
bin_info->reg_interval) - pad_size + bin_info->redzone_size);
assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
* bin_info->reg_interval) + pad_size == bin_info->run_size);
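/*
 * Worked sketch, not part of the diff, of the "perfect run size" scan
 * above: grow the candidate run a page at a time until it divides
 * evenly by the region size.  E.g. 96-byte regions settle on 3 pages
 * (12288 = 128 * 96), since one page would leave 64 slack bytes.
 */
#include <stddef.h>

#define PAGE_SZ	4096	/* assumed page size */

static size_t
perfect_run_size(size_t reg_size)
{
	size_t run = PAGE_SZ;

	while (run % reg_size != 0)
		run += PAGE_SZ;
	return (run);
}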
......@@ -3194,7 +3747,7 @@ bin_info_init(void)
bin_info_run_size_calc(bin_info); \
bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define BIN_INFO_INIT_bin_no(index, size)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
......@@ -3202,38 +3755,13 @@ bin_info_init(void)
#undef SC
}
static bool
small_run_size_init(void)
{
assert(small_maxrun != 0);
small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
LG_PAGE));
if (small_run_tab == NULL)
return (true);
#define TAB_INIT_bin_yes(index, size) { \
arena_bin_info_t *bin_info = &arena_bin_info[index]; \
small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
}
#define TAB_INIT_bin_no(index, size)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
SIZE_CLASSES
#undef TAB_INIT_bin_yes
#undef TAB_INIT_bin_no
#undef SC
return (false);
}
bool
void
arena_boot(void)
{
unsigned i;
arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
arena_decay_time_default_set(opt_decay_time);
/*
* Compute the header size such that it is large enough to contain the
......@@ -3275,44 +3803,61 @@ arena_boot(void)
nhclasses = NSIZES - nlclasses - NBINS;
bin_info_init();
return (small_run_size_init());
}
void
arena_prefork(arena_t *arena)
arena_prefork0(tsdn_t *tsdn, arena_t *arena)
{
malloc_mutex_prefork(tsdn, &arena->lock);
}
void
arena_prefork1(tsdn_t *tsdn, arena_t *arena)
{
malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
}
void
arena_prefork2(tsdn_t *tsdn, arena_t *arena)
{
malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
}
void
arena_prefork3(tsdn_t *tsdn, arena_t *arena)
{
unsigned i;
malloc_mutex_prefork(&arena->lock);
malloc_mutex_prefork(&arena->huge_mtx);
malloc_mutex_prefork(&arena->chunks_mtx);
malloc_mutex_prefork(&arena->node_cache_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_prefork(&arena->bins[i].lock);
malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
malloc_mutex_prefork(tsdn, &arena->huge_mtx);
}
void
arena_postfork_parent(arena_t *arena)
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
{
unsigned i;
malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_parent(&arena->bins[i].lock);
malloc_mutex_postfork_parent(&arena->node_cache_mtx);
malloc_mutex_postfork_parent(&arena->chunks_mtx);
malloc_mutex_postfork_parent(&arena->huge_mtx);
malloc_mutex_postfork_parent(&arena->lock);
malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->lock);
}
void
arena_postfork_child(arena_t *arena)
arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
{
unsigned i;
malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_child(&arena->bins[i].lock);
malloc_mutex_postfork_child(&arena->node_cache_mtx);
malloc_mutex_postfork_child(&arena->chunks_mtx);
malloc_mutex_postfork_child(&arena->huge_mtx);
malloc_mutex_postfork_child(&arena->lock);
malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
malloc_mutex_postfork_child(tsdn, &arena->lock);
}
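/*
 * Context sketch, not part of the diff: the staged prefork0..3 hooks
 * above fix a single lock-acquisition order (arena lock, chunk trees,
 * node cache, then bins and huge) so the allocator never forks while
 * holding locks in an inconsistent order.  A minimal wiring via the
 * standard pthread_atfork() interface, with stub hooks:
 */
#include <pthread.h>

static void alloc_prefork(void) { /* take all locks, in rank order */ }
static void alloc_postfork_parent(void) { /* release in reverse order */ }
static void alloc_postfork_child(void) { /* reinitialize locks in child */ }

static void
install_fork_hooks(void)
{
	pthread_atfork(alloc_prefork, alloc_postfork_parent,
	    alloc_postfork_child);
}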
......@@ -5,7 +5,8 @@
/* Data. */
static malloc_mutex_t base_mtx;
static extent_tree_t base_avail_szad;
static size_t base_extent_sn_next;
static extent_tree_t base_avail_szsnad;
static extent_node_t *base_nodes;
static size_t base_allocated;
static size_t base_resident;
......@@ -13,12 +14,13 @@ static size_t base_mapped;
/******************************************************************************/
/* base_mtx must be held. */
static extent_node_t *
base_node_try_alloc(void)
base_node_try_alloc(tsdn_t *tsdn)
{
extent_node_t *node;
malloc_mutex_assert_owner(tsdn, &base_mtx);
if (base_nodes == NULL)
return (NULL);
node = base_nodes;
......@@ -27,33 +29,42 @@ base_node_try_alloc(void)
return (node);
}
/* base_mtx must be held. */
static void
base_node_dalloc(extent_node_t *node)
base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
{
malloc_mutex_assert_owner(tsdn, &base_mtx);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
*(extent_node_t **)node = base_nodes;
base_nodes = node;
}
/* base_mtx must be held. */
static void
base_extent_node_init(extent_node_t *node, void *addr, size_t size)
{
size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;
extent_node_init(node, NULL, addr, size, sn, true, true);
}
static extent_node_t *
base_chunk_alloc(size_t minsize)
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
extent_node_t *node;
size_t csize, nsize;
void *addr;
malloc_mutex_assert_owner(tsdn, &base_mtx);
assert(minsize != 0);
node = base_node_try_alloc();
node = base_node_try_alloc(tsdn);
/* Allocate enough space to also carve a node out if necessary. */
nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
csize = CHUNK_CEILING(minsize + nsize);
addr = chunk_alloc_base(csize);
if (addr == NULL) {
if (node != NULL)
base_node_dalloc(node);
base_node_dalloc(tsdn, node);
return (NULL);
}
base_mapped += csize;
......@@ -66,7 +77,7 @@ base_chunk_alloc(size_t minsize)
base_resident += PAGE_CEILING(nsize);
}
}
extent_node_init(node, NULL, addr, csize, true, true);
base_extent_node_init(node, addr, csize);
return (node);
}
......@@ -76,7 +87,7 @@ base_chunk_alloc(size_t minsize)
* physical memory usage.
*/
void *
base_alloc(size_t size)
base_alloc(tsdn_t *tsdn, size_t size)
{
void *ret;
size_t csize, usize;
......@@ -90,15 +101,15 @@ base_alloc(size_t size)
csize = CACHELINE_CEILING(size);
usize = s2u(csize);
extent_node_init(&key, NULL, NULL, usize, false, false);
malloc_mutex_lock(&base_mtx);
node = extent_tree_szad_nsearch(&base_avail_szad, &key);
extent_node_init(&key, NULL, NULL, usize, 0, false, false);
malloc_mutex_lock(tsdn, &base_mtx);
node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
if (node != NULL) {
/* Use existing space. */
extent_tree_szad_remove(&base_avail_szad, node);
extent_tree_szsnad_remove(&base_avail_szsnad, node);
} else {
/* Try to allocate more space. */
node = base_chunk_alloc(csize);
node = base_chunk_alloc(tsdn, csize);
}
if (node == NULL) {
ret = NULL;
......@@ -109,9 +120,9 @@ base_alloc(size_t size)
if (extent_node_size_get(node) > csize) {
extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
extent_node_size_set(node, extent_node_size_get(node) - csize);
extent_tree_szad_insert(&base_avail_szad, node);
extent_tree_szsnad_insert(&base_avail_szsnad, node);
} else
base_node_dalloc(node);
base_node_dalloc(tsdn, node);
if (config_stats) {
base_allocated += csize;
/*
......@@ -123,52 +134,54 @@ base_alloc(size_t size)
}
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
malloc_mutex_unlock(&base_mtx);
malloc_mutex_unlock(tsdn, &base_mtx);
return (ret);
}
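/*
 * Sketch, not part of the diff, of the carve-and-reinsert step above:
 * a best-fit search returns a free extent of at least csize bytes, the
 * front csize bytes are handed out, and any remainder is shrunk in
 * place and reinserted.  A plain struct stands in for extent_node_t.
 */
#include <stddef.h>
#include <stdint.h>

struct free_extent {
	void	*addr;
	size_t	size;
};

/* Caller reinserts *ext into the free tree iff ext->size != 0. */
static void *
carve(struct free_extent *ext, size_t csize)
{
	void *ret = ext->addr;

	ext->addr = (void *)((uintptr_t)ret + csize);
	ext->size -= csize;
	return (ret);
}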
void
base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
size_t *mapped)
{
malloc_mutex_lock(&base_mtx);
malloc_mutex_lock(tsdn, &base_mtx);
assert(base_allocated <= base_resident);
assert(base_resident <= base_mapped);
*allocated = base_allocated;
*resident = base_resident;
*mapped = base_mapped;
malloc_mutex_unlock(&base_mtx);
malloc_mutex_unlock(tsdn, &base_mtx);
}
bool
base_boot(void)
{
if (malloc_mutex_init(&base_mtx))
if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
return (true);
extent_tree_szad_new(&base_avail_szad);
base_extent_sn_next = 0;
extent_tree_szsnad_new(&base_avail_szsnad);
base_nodes = NULL;
return (false);
}
void
base_prefork(void)
base_prefork(tsdn_t *tsdn)
{
malloc_mutex_prefork(&base_mtx);
malloc_mutex_prefork(tsdn, &base_mtx);
}
void
base_postfork_parent(void)
base_postfork_parent(tsdn_t *tsdn)
{
malloc_mutex_postfork_parent(&base_mtx);
malloc_mutex_postfork_parent(tsdn, &base_mtx);
}
void
base_postfork_child(void)
base_postfork_child(tsdn_t *tsdn)
{
malloc_mutex_postfork_child(&base_mtx);
malloc_mutex_postfork_child(tsdn, &base_mtx);
}
......@@ -3,6 +3,8 @@
/******************************************************************************/
#ifdef USE_TREE
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
......@@ -32,20 +34,11 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
binfo->nbits = nbits;
}
size_t
static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo)
{
return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
}
size_t
bitmap_size(size_t nbits)
{
bitmap_info_t binfo;
bitmap_info_init(&binfo, nbits);
return (bitmap_info_ngroups(&binfo));
return (binfo->levels[binfo->nlevels].group_offset);
}
void
@@ -61,8 +54,7 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
* correspond to the first logical bit in the group, so extra bits
* are the most significant bits of the last group.
*/
memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
LG_SIZEOF_BITMAP);
memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
@@ -76,3 +68,44 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
}
}
#else /* USE_TREE */
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
binfo->nbits = nbits;
}
static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo)
{
return (binfo->ngroups);
}
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
size_t extra;
memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
bitmap[binfo->ngroups - 1] >>= extra;
}
#endif /* USE_TREE */
size_t
bitmap_size(const bitmap_info_t *binfo)
{
return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
}
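/*
 * A standalone sketch of the flat (non-tree) sizing math above, assuming
 * 64-bit bitmap groups, i.e. LG_SIZEOF_BITMAP == 3 (the actual group width
 * is platform-dependent):
 */
#include <assert.h>
#include <stddef.h>
#define SKETCH_LG_SIZEOF_BITMAP 3
#define SKETCH_GROUP_NBITS (1U << (SKETCH_LG_SIZEOF_BITMAP + 3))
#define SKETCH_BITS2GROUPS(nbits) \
    (((nbits) + SKETCH_GROUP_NBITS - 1) / SKETCH_GROUP_NBITS)
int main(void)
{
	/* 100 bits need two 64-bit groups, i.e. 16 bytes of bitmap. */
	assert(SKETCH_BITS2GROUPS(100) == 2);
	assert((SKETCH_BITS2GROUPS(100) << SKETCH_LG_SIZEOF_BITMAP) == 16);
	return (0);
}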
@@ -49,9 +49,10 @@ const chunk_hooks_t chunk_hooks_default = {
* definition.
*/
static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *chunk, size_t size, bool zeroed, bool committed);
static void chunk_record(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
bool zeroed, bool committed);
/******************************************************************************/
@@ -63,23 +64,23 @@ chunk_hooks_get_locked(arena_t *arena)
}
chunk_hooks_t
chunk_hooks_get(arena_t *arena)
chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
{
chunk_hooks_t chunk_hooks;
malloc_mutex_lock(&arena->chunks_mtx);
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks = chunk_hooks_get_locked(arena);
malloc_mutex_unlock(&arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (chunk_hooks);
}
chunk_hooks_t
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
chunk_hooks_t old_chunk_hooks;
malloc_mutex_lock(&arena->chunks_mtx);
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
old_chunk_hooks = arena->chunk_hooks;
/*
* Copy each field atomically so that it is impossible for readers to
@@ -104,14 +105,14 @@ chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
ATOMIC_COPY_HOOK(split);
ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
malloc_mutex_unlock(&arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (old_chunk_hooks);
}
static void
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
bool locked)
chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, bool locked)
{
static const chunk_hooks_t uninitialized_hooks =
CHUNK_HOOKS_INITIALIZER;
@@ -119,27 +120,28 @@ chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
0) {
*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
chunk_hooks_get(arena);
chunk_hooks_get(tsdn, arena);
}
}
static void
chunk_hooks_assure_initialized_locked(arena_t *arena,
chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks)
{
chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
}
static void
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks)
{
chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
}
bool
chunk_register(const void *chunk, const extent_node_t *node)
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
{
assert(extent_node_addr_get(node) == chunk);
@@ -159,7 +161,7 @@ chunk_register(const void *chunk, const extent_node_t *node)
high = atomic_read_z(&highchunks);
}
if (cur > high && prof_gdump_get_unlocked())
prof_gdump();
prof_gdump(tsdn);
}
return (false);
@@ -181,33 +183,35 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
}
/*
* Do first-best-fit chunk selection, i.e. select the lowest chunk that best
* fits.
* Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
* best fits.
*/
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, size_t size)
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
{
extent_node_t key;
assert(size == CHUNK_CEILING(size));
extent_node_init(&key, arena, NULL, size, false, false);
return (extent_tree_szad_nsearch(chunks_szad, &key));
extent_node_init(&key, arena, NULL, size, 0, false, false);
return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
}
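/*
 * The szsnad tree searched above keys extents by (size, serial number,
 * address), so an nsearch returns the oldest, lowest-addressed extent
 * among the smallest that still fit. A minimal sketch of that comparator
 * ordering, using plain fields rather than jemalloc's
 * extent_node_*_get() accessors:
 */
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
typedef struct {
	size_t size;
	size_t sn;	/* serial number: lower means older */
	uintptr_t addr;
} sketch_node_t;
static int
sketch_cmp(size_t a, size_t b)
{
	return ((a > b) - (a < b));
}
static int
sketch_szsnad_comp(const sketch_node_t *a, const sketch_node_t *b)
{
	int ret;

	ret = sketch_cmp(a->size, b->size);	/* primary: size */
	if (ret != 0)
		return (ret);
	ret = sketch_cmp(a->sn, b->sn);		/* secondary: age */
	if (ret != 0)
		return (ret);
	return (sketch_cmp(a->addr, b->addr));	/* tertiary: address */
}
int main(void)
{
	sketch_node_t old = {4096, 1, 0x2000}, young = {4096, 7, 0x1000};

	/* Equal sizes: the older extent sorts first despite a higher address. */
	assert(sketch_szsnad_comp(&old, &young) < 0);
	return (0);
}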
static void *
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
bool dalloc_node)
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
bool *commit, bool dalloc_node)
{
void *ret;
extent_node_t *node;
size_t alloc_size, leadsize, trailsize;
bool zeroed, committed;
assert(CHUNK_CEILING(size) == size);
assert(alignment > 0);
assert(new_addr == NULL || alignment == chunksize);
assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
/*
* Cached chunks use the node linkage embedded in their headers, in
* which case dalloc_node is true, and new_addr is non-NULL because
@@ -215,24 +219,23 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
*/
assert(dalloc_node || new_addr != NULL);
alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
malloc_mutex_lock(&arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
if (new_addr != NULL) {
extent_node_t key;
extent_node_init(&key, arena, new_addr, alloc_size, false,
extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
false);
node = extent_tree_ad_search(chunks_ad, &key);
} else {
node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
alloc_size);
node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
}
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
size)) {
malloc_mutex_unlock(&arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
@@ -241,6 +244,7 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(extent_node_size_get(node) >= leadsize + size);
trailsize = extent_node_size_get(node) - leadsize - size;
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
*sn = extent_node_sn_get(node);
zeroed = extent_node_zeroed_get(node);
if (zeroed)
*zero = true;
@@ -251,17 +255,17 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (leadsize != 0 &&
chunk_hooks->split(extent_node_addr_get(node),
extent_node_size_get(node), leadsize, size, false, arena->ind)) {
malloc_mutex_unlock(&arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_szsnad_remove(chunks_szsnad, node);
extent_tree_ad_remove(chunks_ad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
extent_node_size_set(node, leadsize);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_szsnad_insert(chunks_szsnad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
@@ -271,41 +275,42 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (chunk_hooks->split(ret, size + trailsize, size,
trailsize, false, arena->ind)) {
if (dalloc_node && node != NULL)
arena_node_dalloc(arena, node);
malloc_mutex_unlock(&arena->chunks_mtx);
chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
cache, ret, size + trailsize, zeroed, committed);
arena_node_dalloc(tsdn, arena, node);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
chunks_ad, cache, ret, size + trailsize, *sn,
zeroed, committed);
return (NULL);
}
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
node = arena_node_alloc(arena);
node = arena_node_alloc(tsdn, arena);
if (node == NULL) {
malloc_mutex_unlock(&arena->chunks_mtx);
chunk_record(arena, chunk_hooks, chunks_szad,
chunks_ad, cache, ret, size + trailsize,
zeroed, committed);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks,
chunks_szsnad, chunks_ad, cache, ret, size
+ trailsize, *sn, zeroed, committed);
return (NULL);
}
}
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
trailsize, zeroed, committed);
extent_tree_szad_insert(chunks_szad, node);
trailsize, *sn, zeroed, committed);
extent_tree_szsnad_insert(chunks_szsnad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
}
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
malloc_mutex_unlock(&arena->chunks_mtx);
chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
ret, size, zeroed, committed);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
cache, ret, size, *sn, zeroed, committed);
return (NULL);
}
malloc_mutex_unlock(&arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
assert(dalloc_node || node != NULL);
if (dalloc_node && node != NULL)
arena_node_dalloc(arena, node);
arena_node_dalloc(tsdn, arena, node);
if (*zero) {
if (!zeroed)
memset(ret, 0, size);
@@ -313,10 +318,11 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t i;
size_t *p = (size_t *)(uintptr_t)ret;
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
}
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
}
return (ret);
}
@@ -328,39 +334,29 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
* them if they are returned.
*/
static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit, dss_prec_t dss_prec)
chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
{
void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
/* Retained. */
if ((ret = chunk_recycle(arena, &chunk_hooks,
&arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
new_addr, size, alignment, zero, commit, true)) != NULL)
return (ret);
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
NULL)
return (ret);
/*
* mmap. Requesting an address is not implemented for
* chunk_alloc_mmap(), so only call it if (new_addr == NULL).
*/
if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL)
return (ret);
/* mmap. */
if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
NULL)
return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
NULL)
chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL)
return (ret);
/* All strategies for allocation failed. */
@@ -380,7 +376,7 @@ chunk_alloc_base(size_t size)
*/
zero = true;
commit = true;
ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
if (ret == NULL)
return (NULL);
if (config_valgrind)
@@ -390,37 +386,33 @@ chunk_alloc_base(size_t size)
}
void *
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
size_t size, size_t alignment, bool *zero, bool dalloc_node)
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
bool *commit, bool dalloc_node)
{
void *ret;
bool commit;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
commit = true;
ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
&arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
&commit, dalloc_node);
ret = chunk_recycle(tsdn, arena, chunk_hooks,
&arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
new_addr, size, alignment, sn, zero, commit, dalloc_node);
if (ret == NULL)
return (NULL);
assert(commit);
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}
static arena_t *
chunk_arena_get(unsigned arena_ind)
chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
{
arena_t *arena;
/* Dodge tsd for a0 in order to avoid bootstrapping issues. */
arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
false, true);
arena = arena_get(tsdn, arena_ind, false);
/*
* The arena we're allocating on behalf of must have been initialized
* already.
@@ -430,14 +422,12 @@ chunk_arena_get(unsigned arena_ind)
}
static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit, unsigned arena_ind)
chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
arena_t *arena;
arena = chunk_arena_get(arena_ind);
ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
commit, arena->dss_prec);
if (ret == NULL)
return (NULL);
@@ -447,26 +437,80 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
return (ret);
}
static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit, unsigned arena_ind)
{
tsdn_t *tsdn;
arena_t *arena;
tsdn = tsdn_fetch();
arena = chunk_arena_get(tsdn, arena_ind);
return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
zero, commit));
}
static void *
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
bool *commit)
{
void *ret;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
ret = chunk_recycle(tsdn, arena, chunk_hooks,
&arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
new_addr, size, alignment, sn, zero, commit, true);
if (config_stats && ret != NULL)
arena->stats.retained -= size;
return (ret);
}
void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit)
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
bool *commit)
{
void *ret;
chunk_hooks_assure_initialized(arena, chunk_hooks);
ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
arena->ind);
if (ret == NULL)
return (NULL);
if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
alignment, sn, zero, commit);
if (ret == NULL) {
if (chunk_hooks->alloc == chunk_alloc_default) {
/* Call directly to propagate tsdn. */
ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
size, alignment, zero, commit);
} else {
ret = chunk_hooks->alloc(new_addr, size, alignment,
zero, commit, arena->ind);
}
if (ret == NULL)
return (NULL);
*sn = arena_extent_sn_next(arena);
if (config_valgrind && chunk_hooks->alloc !=
chunk_alloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
}
return (ret);
}
static void
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *chunk, size_t size, bool zeroed, bool committed)
chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{
bool unzeroed;
extent_node_t *node, *prev;
@@ -476,9 +520,9 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
unzeroed = cache || !zeroed;
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
malloc_mutex_lock(&arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
@@ -490,19 +534,21 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
* remove/insert from/into chunks_szsnad.
*/
extent_tree_szad_remove(chunks_szad, node);
extent_tree_szsnad_remove(chunks_szsnad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, chunk);
extent_node_size_set(node, size + extent_node_size_get(node));
if (sn < extent_node_sn_get(node))
extent_node_sn_set(node, sn);
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
!unzeroed);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_szsnad_insert(chunks_szsnad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
node = arena_node_alloc(arena);
node = arena_node_alloc(tsdn, arena);
if (node == NULL) {
/*
* Node allocation failed, which is an exceedingly
@@ -511,15 +557,15 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
* a virtual memory leak.
*/
if (cache) {
chunk_purge_wrapper(arena, chunk_hooks, chunk,
size, 0, size);
chunk_purge_wrapper(tsdn, arena, chunk_hooks,
chunk, size, 0, size);
}
goto label_return;
}
extent_node_init(node, arena, chunk, size, !unzeroed,
extent_node_init(node, arena, chunk, size, sn, !unzeroed,
committed);
extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_szsnad_insert(chunks_szsnad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
}
@@ -533,31 +579,33 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
* remove/insert node from/into chunks_szad.
* remove/insert node from/into chunks_szsnad.
*/
extent_tree_szad_remove(chunks_szad, prev);
extent_tree_szsnad_remove(chunks_szsnad, prev);
extent_tree_ad_remove(chunks_ad, prev);
arena_chunk_cache_maybe_remove(arena, prev, cache);
extent_tree_szad_remove(chunks_szad, node);
extent_tree_szsnad_remove(chunks_szsnad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, extent_node_addr_get(prev));
extent_node_size_set(node, extent_node_size_get(prev) +
extent_node_size_get(node));
if (extent_node_sn_get(prev) < extent_node_sn_get(node))
extent_node_sn_set(node, extent_node_sn_get(prev));
extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
extent_node_zeroed_get(node));
extent_tree_szad_insert(chunks_szad, node);
extent_tree_szsnad_insert(chunks_szsnad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
arena_node_dalloc(arena, prev);
arena_node_dalloc(tsdn, arena, prev);
}
label_return:
malloc_mutex_unlock(&arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
}
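/*
 * A minimal sketch of the coalescing rule implemented above: a merged run
 * keeps the smaller (older) serial number of its two inputs, and stays
 * zeroed only if both inputs were zeroed. Hypothetical fields, for
 * illustration only:
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
typedef struct {
	size_t size;
	size_t sn;
	bool zeroed;
} sketch_run_t;
static sketch_run_t
sketch_coalesce(sketch_run_t a, sketch_run_t b)
{
	sketch_run_t r;

	r.size = a.size + b.size;
	r.sn = (a.sn < b.sn) ? a.sn : b.sn;	/* keep the older serial number */
	r.zeroed = a.zeroed && b.zeroed;	/* zeroed only if both were */
	return (r);
}
int main(void)
{
	sketch_run_t a = {4096, 3, true}, b = {8192, 1, false};
	sketch_run_t r = sketch_coalesce(a, b);

	assert(r.size == 12288 && r.sn == 1 && !r.zeroed);
	return (0);
}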
void
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool committed)
chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, size_t sn, bool committed)
{
assert(chunk != NULL);
@@ -565,24 +613,49 @@ chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
assert(size != 0);
assert((size & chunksize_mask) == 0);
chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
&arena->chunks_ad_cached, true, chunk, size, false, committed);
arena_maybe_purge(arena);
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
&arena->chunks_ad_cached, true, chunk, size, sn, false,
committed);
arena_maybe_purge(tsdn, arena);
}
static bool
chunk_dalloc_default_impl(void *chunk, size_t size)
{
if (!have_dss || !chunk_in_dss(chunk))
return (chunk_dalloc_mmap(chunk, size));
return (true);
}
static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{
return (chunk_dalloc_default_impl(chunk, size));
}
void
chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool zeroed, bool committed)
chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{
bool err;
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
chunk_hooks_assure_initialized(arena, chunk_hooks);
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
/* Try to deallocate. */
if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
if (chunk_hooks->dalloc == chunk_dalloc_default) {
/* Call directly to propagate tsdn. */
err = chunk_dalloc_default_impl(chunk, size);
} else
err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
if (!err)
return;
/* Try to decommit; purge if that fails. */
if (committed) {
@@ -591,29 +664,12 @@ chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
}
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind);
chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
&arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
}
static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
&arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
committed);
if (!have_dss || !chunk_in_dss(chunk))
return (chunk_dalloc_mmap(chunk, size));
return (true);
}
void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool committed)
{
chunk_hooks_assure_initialized(arena, chunk_hooks);
chunk_hooks->dalloc(chunk, size, committed, arena->ind);
if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
if (config_stats)
arena->stats.retained += size;
}
static bool
@@ -634,8 +690,9 @@ chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
length));
}
bool
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
assert(chunk != NULL);
@@ -648,21 +705,12 @@ chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
length));
}
static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
length));
}
bool
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, size_t offset, size_t length)
chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, size_t offset, size_t length)
{
chunk_hooks_assure_initialized(arena, chunk_hooks);
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}
@@ -677,23 +725,30 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
}
static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
bool committed, unsigned arena_ind)
chunk_merge_default_impl(void *chunk_a, void *chunk_b)
{
if (!maps_coalesce)
return (true);
if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b))
return (true);
return (false);
}
static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
bool committed, unsigned arena_ind)
{
return (chunk_merge_default_impl(chunk_a, chunk_b));
}
static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{
return ((rtree_node_elm_t *)base_alloc(nelms *
return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
sizeof(rtree_node_elm_t)));
}
@@ -716,7 +771,7 @@ chunk_boot(void)
* so pages_map will always take fast path.
*/
if (!opt_lg_chunk) {
opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
- 1;
}
#else
@@ -730,32 +785,11 @@ chunk_boot(void)
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> LG_PAGE);
if (have_dss && chunk_dss_boot())
return (true);
if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk, chunks_rtree_node_alloc, NULL))
if (have_dss)
chunk_dss_boot();
if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk), chunks_rtree_node_alloc, NULL))
return (true);
return (false);
}
void
chunk_prefork(void)
{
chunk_dss_prefork();
}
void
chunk_postfork_parent(void)
{
chunk_dss_postfork_parent();
}
void
chunk_postfork_child(void)
{
chunk_dss_postfork_child();
}
@@ -10,20 +10,19 @@ const char *dss_prec_names[] = {
"N/A"
};
/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
/*
* Protects sbrk() calls. This avoids malloc races among threads, though it
* does not protect against races with threads that call sbrk() directly.
* Current dss precedence default, used when creating new arenas. NB: This is
* stored as unsigned rather than dss_prec_t because in principle there's no
* guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
* atomic operations to synchronize the setting.
*/
static malloc_mutex_t dss_mtx;
static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT;
/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
/* Atomic boolean indicating whether the DSS is exhausted. */
static unsigned dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static void *dss_max;
/******************************************************************************/
@@ -47,9 +46,7 @@ chunk_dss_prec_get(void)
if (!have_dss)
return (dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
ret = dss_prec_default;
malloc_mutex_unlock(&dss_mtx);
ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
return (ret);
}
@@ -59,15 +56,46 @@ chunk_dss_prec_set(dss_prec_t dss_prec)
if (!have_dss)
return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
dss_prec_default = dss_prec;
malloc_mutex_unlock(&dss_mtx);
atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
return (false);
}
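/*
 * A minimal sketch of the enum-as-unsigned pattern above, written with C11
 * atomics for illustration; jemalloc uses its own atomic_read_u()/
 * atomic_write_u() wrappers instead:
 */
#include <stdatomic.h>
typedef enum {
	sketch_prec_disabled,
	sketch_prec_primary,
	sketch_prec_secondary
} sketch_prec_t;
static _Atomic unsigned sketch_prec_default = (unsigned)sketch_prec_primary;
static sketch_prec_t
sketch_prec_get(void)
{
	/* Read the unsigned cell atomically, then convert back to the enum. */
	return ((sketch_prec_t)atomic_load(&sketch_prec_default));
}
static void
sketch_prec_set(sketch_prec_t prec)
{
	atomic_store(&sketch_prec_default, (unsigned)prec);
}
int main(void)
{
	sketch_prec_set(sketch_prec_secondary);
	return (sketch_prec_get() == sketch_prec_secondary ? 0 : 1);
}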
static void *
chunk_dss_max_update(void *new_addr)
{
void *max_cur;
spin_t spinner;
/*
* Get the current end of the DSS as max_cur and assure that dss_max is
* up to date.
*/
spin_init(&spinner);
while (true) {
void *max_prev = atomic_read_p(&dss_max);
max_cur = chunk_dss_sbrk(0);
if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
/*
* Another thread optimistically updated dss_max. Wait
* for it to finish.
*/
spin_adaptive(&spinner);
continue;
}
if (!atomic_cas_p(&dss_max, max_prev, max_cur))
break;
}
/* Fixed new_addr can only be supported if it is at the edge of DSS. */
if (new_addr != NULL && max_cur != new_addr)
return (NULL);
return (max_cur);
}
void *
chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit)
chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit)
{
cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0);
@@ -80,28 +108,20 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
if ((intptr_t)size < 0)
return (NULL);
malloc_mutex_lock(&dss_mtx);
if (dss_prev != (void *)-1) {
if (!atomic_read_u(&dss_exhausted)) {
/*
* The loop is necessary to recover from races with other
* threads that are using the DSS for something other than
* malloc.
*/
do {
void *ret, *cpad, *dss_next;
while (true) {
void *ret, *cpad, *max_cur, *dss_next, *dss_prev;
size_t gap_size, cpad_size;
intptr_t incr;
/* Avoid an unnecessary system call. */
if (new_addr != NULL && dss_max != new_addr)
break;
/* Get the current end of the DSS. */
dss_max = chunk_dss_sbrk(0);
/* Make sure the earlier condition still holds. */
if (new_addr != NULL && dss_max != new_addr)
break;
max_cur = chunk_dss_max_update(new_addr);
if (max_cur == NULL)
goto label_oom;
/*
* Calculate how much padding is necessary to
@@ -120,22 +140,29 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)dss_max ||
(uintptr_t)dss_next < (uintptr_t)dss_max) {
/* Wrap-around. */
malloc_mutex_unlock(&dss_mtx);
return (NULL);
}
(uintptr_t)dss_next < (uintptr_t)dss_max)
goto label_oom; /* Wrap-around. */
incr = gap_size + cpad_size + size;
/*
* Optimistically update dss_max, and roll back below if
* sbrk() fails. No other thread will try to extend the
* DSS while dss_max is greater than the current DSS
* max reported by sbrk(0).
*/
if (atomic_cas_p(&dss_max, max_cur, dss_next))
continue;
/* Try to allocate. */
dss_prev = chunk_dss_sbrk(incr);
if (dss_prev == dss_max) {
if (dss_prev == max_cur) {
/* Success. */
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0) {
chunk_hooks_t chunk_hooks =
CHUNK_HOOKS_INITIALIZER;
chunk_dalloc_wrapper(arena,
chunk_dalloc_wrapper(tsdn, arena,
&chunk_hooks, cpad, cpad_size,
arena_extent_sn_next(arena), false,
true);
}
if (*zero) {
@@ -147,68 +174,65 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
*commit = pages_decommit(ret, size);
return (ret);
}
} while (dss_prev != (void *)-1);
}
malloc_mutex_unlock(&dss_mtx);
/*
* Failure, whether due to OOM or a race with a raw
* sbrk() call from outside the allocator. Try to roll
* back optimistic dss_max update; if rollback fails,
* it's due to another caller of this function having
* succeeded since this invocation started, in which
* case rollback is not necessary.
*/
atomic_cas_p(&dss_max, dss_next, max_cur);
if (dss_prev == (void *)-1) {
/* OOM. */
atomic_write_u(&dss_exhausted, (unsigned)true);
goto label_oom;
}
}
}
label_oom:
return (NULL);
}
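/*
 * A worked sketch of the gap/padding arithmetic in the loop above, using a
 * 4 KiB "chunk" for readability (the real chunksize is configure-time).
 * The gap pads the current brk up to a chunk boundary; cpad is any
 * chunk-aligned slack between that boundary and the aligned result, which
 * the real code hands back via chunk_dalloc_wrapper():
 */
#include <assert.h>
#include <stdint.h>
int main(void)
{
	uintptr_t chunksize = 4096, mask = chunksize - 1;
	uintptr_t max_cur = 0x10100;	/* current brk, not chunk-aligned */
	uintptr_t gap_size = (chunksize - (max_cur & mask)) & mask;
	uintptr_t cpad = max_cur + gap_size;
	/* With alignment == chunksize, ret is simply the ceilinged brk. */
	uintptr_t ret = (max_cur + mask) & ~mask;

	assert(gap_size == 0xf00);
	assert(cpad == 0x11000 && ret == cpad);	/* no pad chunk in this case */
	return (0);
}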
bool
chunk_in_dss(void *chunk)
static bool
chunk_in_dss_helper(void *chunk, void *max)
{
bool ret;
cassert(have_dss);
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max)
ret = true;
else
ret = false;
malloc_mutex_unlock(&dss_mtx);
return (ret);
return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk <
(uintptr_t)max);
}
bool
chunk_dss_boot(void)
chunk_in_dss(void *chunk)
{
cassert(have_dss);
if (malloc_mutex_init(&dss_mtx))
return (true);
dss_base = chunk_dss_sbrk(0);
dss_prev = dss_base;
dss_max = dss_base;
return (false);
return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max)));
}
void
chunk_dss_prefork(void)
bool
chunk_dss_mergeable(void *chunk_a, void *chunk_b)
{
void *max;
if (have_dss)
malloc_mutex_prefork(&dss_mtx);
}
void
chunk_dss_postfork_parent(void)
{
cassert(have_dss);
if (have_dss)
malloc_mutex_postfork_parent(&dss_mtx);
max = atomic_read_p(&dss_max);
return (chunk_in_dss_helper(chunk_a, max) ==
chunk_in_dss_helper(chunk_b, max));
}
void
chunk_dss_postfork_child(void)
chunk_dss_boot(void)
{
if (have_dss)
malloc_mutex_postfork_child(&dss_mtx);
cassert(have_dss);
dss_base = chunk_dss_sbrk(0);
dss_exhausted = (unsigned)(dss_base == (void *)-1);
dss_max = dss_base;
}
/******************************************************************************/
......@@ -16,23 +16,22 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
do {
void *pages;
size_t leadsize;
pages = pages_map(NULL, alloc_size);
pages = pages_map(NULL, alloc_size, commit);
if (pages == NULL)
return (NULL);
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
(uintptr_t)pages;
ret = pages_trim(pages, alloc_size, leadsize, size);
ret = pages_trim(pages, alloc_size, leadsize, size, commit);
} while (ret == NULL);
assert(ret != NULL);
*zero = true;
if (!*commit)
*commit = pages_decommit(ret, size);
return (ret);
}
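/*
 * A minimal POSIX sketch of the map-then-trim strategy above:
 * over-allocate by (alignment - page), then unmap the misaligned head and
 * the unused tail. Error handling is elided; the real pages_map()/
 * pages_trim() also handle Windows, where trimming requires remapping.
 */
#define _DEFAULT_SOURCE	/* for MAP_ANONYMOUS on glibc */
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>
static void *
sketch_map_aligned(size_t size, size_t alignment)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t alloc_size = size + alignment - page;
	size_t lead, trail;
	uintptr_t base;
	void *pages;

	pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (pages == MAP_FAILED)
		return (NULL);
	base = (uintptr_t)pages;
	lead = (size_t)(((base + alignment - 1) &
	    ~(uintptr_t)(alignment - 1)) - base);
	if (lead != 0)
		munmap(pages, lead);			/* drop misaligned head */
	trail = alloc_size - lead - size;
	if (trail != 0)
		munmap((char *)pages + lead + size, trail); /* drop spare tail */
	return ((char *)pages + lead);
}
int main(void)
{
	size_t align = (size_t)1 << 21;	/* 2 MiB, a typical chunk size */
	void *p = sketch_map_aligned(align, align);

	assert(p != NULL && ((uintptr_t)p & (align - 1)) == 0);
	return (0);
}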
void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit)
{
void *ret;
size_t offset;
@@ -53,9 +52,10 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
ret = pages_map(NULL, size);
if (ret == NULL)
return (NULL);
ret = pages_map(new_addr, size, commit);
if (ret == NULL || ret == new_addr)
return (ret);
assert(new_addr == NULL);
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
pages_unmap(ret, size);
@@ -64,8 +64,6 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
assert(ret != NULL);
*zero = true;
if (!*commit)
*commit = pages_decommit(ret, size);
return (ret);
}
@@ -99,7 +99,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
LG_CKH_BUCKET_CELLS);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
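/*
 * prng_lg_range_u64() draws a uniformly distributed value in
 * [0, 2^lg_range) by stepping a 64-bit linear congruential generator and
 * keeping only the high-order bits, which are an LCG's strongest. A
 * minimal sketch, assuming Knuth-style multiplier/increment constants (the
 * exact generator lives in the prng.h headers):
 */
#include <stdint.h>
static uint64_t
sketch_prng_lg_range_u64(uint64_t *state, unsigned lg_range)
{
	*state = *state * 6364136223846793005ULL + 1442695040888963407ULL;
	return (*state >> (64 - lg_range));	/* keep the top lg_range bits */
}
int main(void)
{
	uint64_t state = 42;
	unsigned offset = (unsigned)sketch_prng_lg_range_u64(&state, 2);

	return (offset < 4 ? 0 : 1);	/* always lands in [0, 2^2) */
}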
@@ -141,7 +142,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
* were an item for which both hashes indicated the same
* bucket.
*/
prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
LG_CKH_BUCKET_CELLS);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);
@@ -247,8 +249,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
{
bool ret;
ckhc_t *tab, *ttab;
size_t lg_curcells;
unsigned lg_prevbuckets;
unsigned lg_prevbuckets, lg_curcells;
#ifdef CKH_COUNT
ckh->ngrows++;
@@ -266,12 +267,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
lg_curcells++;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0) {
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
ret = true;
goto label_return;
}
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
true, NULL);
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
true, NULL, true, arena_ichoose(tsd, NULL));
if (tab == NULL) {
ret = true;
goto label_return;
......@@ -283,12 +284,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd, tab, tcache_get(tsd, false), true);
idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
@@ -302,8 +303,8 @@ static void
ckh_shrink(tsd_t *tsd, ckh_t *ckh)
{
ckhc_t *tab, *ttab;
size_t lg_curcells, usize;
unsigned lg_prevbuckets;
size_t usize;
unsigned lg_prevbuckets, lg_curcells;
/*
* It is possible (though unlikely, given well behaved hashes) that the
@@ -312,10 +313,10 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0)
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return;
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
NULL);
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
true, arena_ichoose(tsd, NULL));
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
@@ -330,7 +331,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd, tab, tcache_get(tsd, false), true);
idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
@@ -338,7 +339,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
@@ -387,12 +388,12 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh->keycomp = keycomp;
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
if (usize == 0) {
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
ret = true;
goto label_return;
}
ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
NULL);
ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
NULL, true, arena_ichoose(tsd, NULL));
if (ckh->tab == NULL) {
ret = true;
goto label_return;
@@ -421,9 +422,9 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
(unsigned long long)ckh->nrelocs);
#endif
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
if (config_debug)
memset(ckh, 0x5a, sizeof(ckh_t));
memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
}
size_t
@@ -24,7 +24,7 @@ ctl_named_node(const ctl_node_t *node)
}
JEMALLOC_INLINE_C const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, int index)
ctl_named_children(const ctl_named_node_t *node, size_t index)
{
const ctl_named_node_t *children = ctl_named_node(node->children);
@@ -42,25 +42,25 @@ ctl_indexed_node(const ctl_node_t *node)
/* Function prototypes for non-inline static functions. */
#define CTL_PROTO(n) \
static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen);
static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \
static const ctl_named_node_t *n##_index(const size_t *mib, \
size_t miblen, size_t i);
static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
const size_t *mib, size_t miblen, size_t i);
static bool ctl_arena_init(ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats);
static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats);
static void ctl_arena_refresh(arena_t *arena, unsigned i);
static bool ctl_grow(void);
static void ctl_refresh(void);
static bool ctl_init(void);
static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
size_t *mibp, size_t *depthp);
static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i);
static bool ctl_grow(tsdn_t *tsdn);
static void ctl_refresh(tsdn_t *tsdn);
static bool ctl_init(tsdn_t *tsdn);
static int ctl_lookup(tsdn_t *tsdn, const char *name,
ctl_node_t const **nodesp, size_t *mibp, size_t *depthp);
CTL_PROTO(version)
CTL_PROTO(epoch)
@@ -77,6 +77,7 @@ CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
@@ -91,7 +92,9 @@ CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_purge)
CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_decay_time)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
@@ -114,10 +117,13 @@ CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
static void arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all);
CTL_PROTO(arena_i_purge)
static void arena_purge(unsigned arena_ind);
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_lg_dirty_mult)
CTL_PROTO(arena_i_decay_time)
CTL_PROTO(arena_i_chunk_hooks)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
@@ -131,6 +137,7 @@ INDEX_PROTO(arenas_hchunk_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_lg_dirty_mult)
CTL_PROTO(arenas_decay_time)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
@@ -181,9 +188,11 @@ INDEX_PROTO(stats_arenas_i_hchunks_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_lg_dirty_mult)
CTL_PROTO(stats_arenas_i_decay_time)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
@@ -196,6 +205,7 @@ CTL_PROTO(stats_active)
CTL_PROTO(stats_metadata)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
/******************************************************************************/
/* mallctl tree. */
@@ -241,6 +251,7 @@ static const ctl_named_node_t config_node[] = {
{NAME("debug"), CTL(config_debug)},
{NAME("fill"), CTL(config_fill)},
{NAME("lazy_lock"), CTL(config_lazy_lock)},
{NAME("malloc_conf"), CTL(config_malloc_conf)},
{NAME("munmap"), CTL(config_munmap)},
{NAME("prof"), CTL(config_prof)},
{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
@@ -258,7 +269,9 @@ static const ctl_named_node_t opt_node[] = {
{NAME("dss"), CTL(opt_dss)},
{NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("purge"), CTL(opt_purge)},
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
{NAME("decay_time"), CTL(opt_decay_time)},
{NAME("stats_print"), CTL(opt_stats_print)},
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)},
@@ -288,8 +301,11 @@ static const ctl_named_node_t tcache_node[] = {
static const ctl_named_node_t arena_i_node[] = {
{NAME("purge"), CTL(arena_i_purge)},
{NAME("decay"), CTL(arena_i_decay)},
{NAME("reset"), CTL(arena_i_reset)},
{NAME("dss"), CTL(arena_i_dss)},
{NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
{NAME("decay_time"), CTL(arena_i_decay_time)},
{NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)}
};
static const ctl_named_node_t super_arena_i_node[] = {
......@@ -339,6 +355,7 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("narenas"), CTL(arenas_narenas)},
{NAME("initialized"), CTL(arenas_initialized)},
{NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)},
{NAME("decay_time"), CTL(arenas_decay_time)},
{NAME("quantum"), CTL(arenas_quantum)},
{NAME("page"), CTL(arenas_page)},
{NAME("tcache_max"), CTL(arenas_tcache_max)},
@@ -439,9 +456,11 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("dss"), CTL(stats_arenas_i_dss)},
{NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)},
{NAME("decay_time"), CTL(stats_arenas_i_decay_time)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)},
{NAME("retained"), CTL(stats_arenas_i_retained)},
{NAME("npurge"), CTL(stats_arenas_i_npurge)},
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
{NAME("purged"), CTL(stats_arenas_i_purged)},
@@ -468,6 +487,7 @@ static const ctl_named_node_t stats_node[] = {
{NAME("metadata"), CTL(stats_metadata)},
{NAME("resident"), CTL(stats_resident)},
{NAME("mapped"), CTL(stats_mapped)},
{NAME("retained"), CTL(stats_retained)},
{NAME("arenas"), CHILD(indexed, stats_arenas)}
};
@@ -519,8 +539,10 @@ static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{
astats->nthreads = 0;
astats->dss = dss_prec_names[dss_prec_limit];
astats->lg_dirty_mult = -1;
astats->decay_time = -1;
astats->pactive = 0;
astats->pdirty = 0;
if (config_stats) {
@@ -538,20 +560,27 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
}
static void
ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
{
unsigned i;
arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult,
&cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats,
cstats->lstats, cstats->hstats);
for (i = 0; i < NBINS; i++) {
cstats->allocated_small += cstats->bstats[i].curregs *
index2size(i);
cstats->nmalloc_small += cstats->bstats[i].nmalloc;
cstats->ndalloc_small += cstats->bstats[i].ndalloc;
cstats->nrequests_small += cstats->bstats[i].nrequests;
if (config_stats) {
arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
&cstats->lg_dirty_mult, &cstats->decay_time,
&cstats->pactive, &cstats->pdirty, &cstats->astats,
cstats->bstats, cstats->lstats, cstats->hstats);
for (i = 0; i < NBINS; i++) {
cstats->allocated_small += cstats->bstats[i].curregs *
index2size(i);
cstats->nmalloc_small += cstats->bstats[i].nmalloc;
cstats->ndalloc_small += cstats->bstats[i].ndalloc;
cstats->nrequests_small += cstats->bstats[i].nrequests;
}
} else {
arena_basic_stats_merge(tsdn, arena, &cstats->nthreads,
&cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time,
&cstats->pactive, &cstats->pdirty);
}
}
@@ -560,89 +589,91 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{
unsigned i;
sstats->nthreads += astats->nthreads;
sstats->pactive += astats->pactive;
sstats->pdirty += astats->pdirty;
sstats->astats.mapped += astats->astats.mapped;
sstats->astats.npurge += astats->astats.npurge;
sstats->astats.nmadvise += astats->astats.nmadvise;
sstats->astats.purged += astats->astats.purged;
sstats->astats.metadata_mapped += astats->astats.metadata_mapped;
sstats->astats.metadata_allocated += astats->astats.metadata_allocated;
sstats->allocated_small += astats->allocated_small;
sstats->nmalloc_small += astats->nmalloc_small;
sstats->ndalloc_small += astats->ndalloc_small;
sstats->nrequests_small += astats->nrequests_small;
sstats->astats.allocated_large += astats->astats.allocated_large;
sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
sstats->astats.nrequests_large += astats->astats.nrequests_large;
sstats->astats.allocated_huge += astats->astats.allocated_huge;
sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
for (i = 0; i < NBINS; i++) {
sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
sstats->bstats[i].curregs += astats->bstats[i].curregs;
if (config_tcache) {
sstats->bstats[i].nfills += astats->bstats[i].nfills;
sstats->bstats[i].nflushes +=
astats->bstats[i].nflushes;
if (config_stats) {
sstats->astats.mapped += astats->astats.mapped;
sstats->astats.retained += astats->astats.retained;
sstats->astats.npurge += astats->astats.npurge;
sstats->astats.nmadvise += astats->astats.nmadvise;
sstats->astats.purged += astats->astats.purged;
sstats->astats.metadata_mapped +=
astats->astats.metadata_mapped;
sstats->astats.metadata_allocated +=
astats->astats.metadata_allocated;
sstats->allocated_small += astats->allocated_small;
sstats->nmalloc_small += astats->nmalloc_small;
sstats->ndalloc_small += astats->ndalloc_small;
sstats->nrequests_small += astats->nrequests_small;
sstats->astats.allocated_large +=
astats->astats.allocated_large;
sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
sstats->astats.nrequests_large +=
astats->astats.nrequests_large;
sstats->astats.allocated_huge += astats->astats.allocated_huge;
sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
for (i = 0; i < NBINS; i++) {
sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sstats->bstats[i].nrequests +=
astats->bstats[i].nrequests;
sstats->bstats[i].curregs += astats->bstats[i].curregs;
if (config_tcache) {
sstats->bstats[i].nfills +=
astats->bstats[i].nfills;
sstats->bstats[i].nflushes +=
astats->bstats[i].nflushes;
}
sstats->bstats[i].nruns += astats->bstats[i].nruns;
sstats->bstats[i].reruns += astats->bstats[i].reruns;
sstats->bstats[i].curruns += astats->bstats[i].curruns;
}
sstats->bstats[i].nruns += astats->bstats[i].nruns;
sstats->bstats[i].reruns += astats->bstats[i].reruns;
sstats->bstats[i].curruns += astats->bstats[i].curruns;
}
for (i = 0; i < nlclasses; i++) {
sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
sstats->lstats[i].curruns += astats->lstats[i].curruns;
}
for (i = 0; i < nlclasses; i++) {
sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
sstats->lstats[i].nrequests +=
astats->lstats[i].nrequests;
sstats->lstats[i].curruns += astats->lstats[i].curruns;
}
for (i = 0; i < nhclasses; i++) {
sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks;
for (i = 0; i < nhclasses; i++) {
sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
sstats->hstats[i].curhchunks +=
astats->hstats[i].curhchunks;
}
}
}
static void
ctl_arena_refresh(arena_t *arena, unsigned i)
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i)
{
ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
ctl_arena_clear(astats);
sstats->nthreads += astats->nthreads;
if (config_stats) {
ctl_arena_stats_amerge(astats, arena);
/* Merge into sum stats as well. */
ctl_arena_stats_smerge(sstats, astats);
} else {
astats->pactive += arena->nactive;
astats->pdirty += arena->ndirty;
/* Merge into sum stats as well. */
sstats->pactive += arena->nactive;
sstats->pdirty += arena->ndirty;
}
ctl_arena_stats_amerge(tsdn, astats, arena);
/* Merge into sum stats as well. */
ctl_arena_stats_smerge(sstats, astats);
}
static bool
ctl_grow(void)
ctl_grow(tsdn_t *tsdn)
{
ctl_arena_stats_t *astats;
/* Initialize new arena. */
if (arena_init(ctl_stats.narenas) == NULL)
if (arena_init(tsdn, ctl_stats.narenas) == NULL)
return (true);
/* Allocate extended arena stats. */
@@ -677,47 +708,32 @@ ctl_grow(void)
}
static void
ctl_refresh(void)
ctl_refresh(tsdn_t *tsdn)
{
tsd_t *tsd;
unsigned i;
bool refreshed;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
/*
* Clear sum stats, since they will be merged into by
* ctl_arena_refresh().
*/
ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
tsd = tsd_fetch();
for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
tarenas[i] = arena_get(tsd, i, false, false);
if (tarenas[i] == NULL && !refreshed) {
tarenas[i] = arena_get(tsd, i, false, true);
refreshed = true;
}
}
for (i = 0; i < ctl_stats.narenas; i++) {
if (tarenas[i] != NULL)
ctl_stats.arenas[i].nthreads = arena_nbound(i);
else
ctl_stats.arenas[i].nthreads = 0;
}
for (i = 0; i < ctl_stats.narenas; i++)
tarenas[i] = arena_get(tsdn, i, false);
for (i = 0; i < ctl_stats.narenas; i++) {
bool initialized = (tarenas[i] != NULL);
ctl_stats.arenas[i].initialized = initialized;
if (initialized)
ctl_arena_refresh(tarenas[i], i);
ctl_arena_refresh(tsdn, tarenas[i], i);
}
if (config_stats) {
size_t base_allocated, base_resident, base_mapped;
base_stats_get(&base_allocated, &base_resident, &base_mapped);
base_stats_get(tsdn, &base_allocated, &base_resident,
&base_mapped);
ctl_stats.allocated =
ctl_stats.arenas[ctl_stats.narenas].allocated_small +
ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
@@ -734,17 +750,19 @@ ctl_refresh(void)
ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
ctl_stats.mapped = base_mapped +
ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
ctl_stats.retained =
ctl_stats.arenas[ctl_stats.narenas].astats.retained;
}
ctl_epoch++;
}
static bool
ctl_init(void)
ctl_init(tsdn_t *tsdn)
{
bool ret;
malloc_mutex_lock(&ctl_mtx);
malloc_mutex_lock(tsdn, &ctl_mtx);
if (!ctl_initialized) {
/*
* Allocate space for one extra arena stats element, which
@@ -786,19 +804,19 @@ ctl_init(void)
ctl_stats.arenas[ctl_stats.narenas].initialized = true;
ctl_epoch = 0;
ctl_refresh();
ctl_refresh(tsdn);
ctl_initialized = true;
}
ret = false;
label_return:
malloc_mutex_unlock(&ctl_mtx);
malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret);
}
static int
ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
size_t *depthp)
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
size_t *mibp, size_t *depthp)
{
int ret;
const char *elm, *tdot, *dot;
@@ -850,7 +868,7 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
}
inode = ctl_indexed_node(node->children);
node = inode->index(mibp, *depthp, (size_t)index);
node = inode->index(tsdn, mibp, *depthp, (size_t)index);
if (node == NULL) {
ret = ENOENT;
goto label_return;
@@ -894,8 +912,8 @@ label_return:
}
int
ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
size_t depth;
@@ -903,19 +921,19 @@ ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node;
if (!ctl_initialized && ctl_init()) {
if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
ret = EAGAIN;
goto label_return;
}
depth = CTL_MAX_DEPTH;
ret = ctl_lookup(name, nodes, mib, &depth);
ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
if (ret != 0)
goto label_return;
node = ctl_named_node(nodes[depth-1]);
if (node != NULL && node->ctl)
ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
else {
/* The name refers to a partial path through the ctl tree. */
ret = ENOENT;
@@ -926,29 +944,29 @@ label_return:
}
int
ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp)
{
int ret;
if (!ctl_initialized && ctl_init()) {
if (!ctl_initialized && ctl_init(tsdn)) {
ret = EAGAIN;
goto label_return;
}
ret = ctl_lookup(name, NULL, mibp, miblenp);
ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp);
label_return:
return(ret);
}
int
ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
const ctl_named_node_t *node;
size_t i;
if (!ctl_initialized && ctl_init()) {
if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
ret = EAGAIN;
goto label_return;
}
......@@ -960,7 +978,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
assert(node->nchildren > 0);
if (ctl_named_node(node->children) != NULL) {
/* Children are named. */
if (node->nchildren <= mib[i]) {
if (node->nchildren <= (unsigned)mib[i]) {
ret = ENOENT;
goto label_return;
}
......@@ -970,7 +988,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
/* Indexed element. */
inode = ctl_indexed_node(node->children);
node = inode->index(mib, miblen, mib[i]);
node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
if (node == NULL) {
ret = ENOENT;
goto label_return;
......@@ -980,7 +998,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
/* Call the ctl function. */
if (node && node->ctl)
ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
else {
/* Partial MIB. */
ret = ENOENT;
......@@ -994,7 +1012,7 @@ bool
ctl_boot(void)
{
if (malloc_mutex_init(&ctl_mtx))
if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL))
return (true);
ctl_initialized = false;
......@@ -1003,24 +1021,24 @@ ctl_boot(void)
}
void
ctl_prefork(void)
ctl_prefork(tsdn_t *tsdn)
{
malloc_mutex_prefork(&ctl_mtx);
malloc_mutex_prefork(tsdn, &ctl_mtx);
}
void
ctl_postfork_parent(void)
ctl_postfork_parent(tsdn_t *tsdn)
{
malloc_mutex_postfork_parent(&ctl_mtx);
malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}
void
ctl_postfork_child(void)
ctl_postfork_child(tsdn_t *tsdn)
{
malloc_mutex_postfork_child(&ctl_mtx);
malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
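
The prefork/postfork hooks above follow the usual pthread_atfork() discipline:
take ctl_mtx before fork() and release it on both sides afterwards, so the
child never inherits a mutex held by a thread that no longer exists. A generic
sketch of that protocol (hypothetical demo_* names; jemalloc's actual
registration happens elsewhere in the library, not in this file):

#include <pthread.h>

static pthread_mutex_t demo_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
demo_prefork(void)
{
	/* Quiesce: hold the lock across fork() so no thread is mid-update. */
	pthread_mutex_lock(&demo_mtx);
}

static void
demo_postfork(void)
{
	/* Runs in both parent and child; each side releases its copy. */
	pthread_mutex_unlock(&demo_mtx);
}

int
main(void)
{
	pthread_atfork(demo_prefork, demo_postfork, demo_postfork);
	return (0);
}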
/******************************************************************************/
......@@ -1077,8 +1095,8 @@ ctl_postfork_child(void)
*/
#define CTL_RO_CLGEN(c, l, n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
......@@ -1086,7 +1104,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
if (!(c)) \
return (ENOENT); \
if (l) \
malloc_mutex_lock(&ctl_mtx); \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
......@@ -1094,47 +1112,47 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
ret = 0; \
label_return: \
if (l) \
malloc_mutex_unlock(&ctl_mtx); \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \
}
#define CTL_RO_CGEN(c, n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
if (!(c)) \
return (ENOENT); \
malloc_mutex_lock(&ctl_mtx); \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
malloc_mutex_unlock(&ctl_mtx); \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \
}
#define CTL_RO_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
malloc_mutex_lock(&ctl_mtx); \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
malloc_mutex_unlock(&ctl_mtx); \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \
}
......@@ -1144,8 +1162,8 @@ label_return: \
*/
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
......@@ -1163,8 +1181,8 @@ label_return: \
#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
......@@ -1180,17 +1198,15 @@ label_return: \
#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
tsd_t *tsd; \
\
if (!(c)) \
return (ENOENT); \
READONLY(); \
tsd = tsd_fetch(); \
oldval = (m(tsd)); \
READ(oldval, t); \
\
......@@ -1199,17 +1215,17 @@ label_return: \
return (ret); \
}
#define CTL_RO_BOOL_CONFIG_GEN(n) \
#define CTL_RO_CONFIG_GEN(n, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) \
{ \
int ret; \
bool oldval; \
t oldval; \
\
READONLY(); \
oldval = n; \
READ(oldval, bool); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
......@@ -1221,48 +1237,51 @@ label_return: \
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int
epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
UNUSED uint64_t newval;
malloc_mutex_lock(&ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(newval, uint64_t);
if (newp != NULL)
ctl_refresh();
ctl_refresh(tsd_tsdn(tsd));
READ(ctl_epoch, uint64_t);
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
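
Caller-visible behavior of epoch_ctl(): writing any value to "epoch" runs
ctl_refresh(), and subsequent stats reads return the refreshed snapshot. A
minimal usage sketch against the public API (not part of this commit):

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t sz, allocated, active;

	/* Any write to "epoch" triggers ctl_refresh(). */
	sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	sz = sizeof(size_t);
	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
	mallctl("stats.active", &active, &sz, NULL, 0);
	printf("allocated/active: %zu/%zu\n", allocated, active);
	return (0);
}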
/******************************************************************************/
CTL_RO_BOOL_CONFIG_GEN(config_cache_oblivious)
CTL_RO_BOOL_CONFIG_GEN(config_debug)
CTL_RO_BOOL_CONFIG_GEN(config_fill)
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
CTL_RO_BOOL_CONFIG_GEN(config_prof)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
CTL_RO_BOOL_CONFIG_GEN(config_stats)
CTL_RO_BOOL_CONFIG_GEN(config_tcache)
CTL_RO_BOOL_CONFIG_GEN(config_tls)
CTL_RO_BOOL_CONFIG_GEN(config_utrace)
CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
CTL_RO_CONFIG_GEN(config_debug, bool)
CTL_RO_CONFIG_GEN(config_fill, bool)
CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
CTL_RO_CONFIG_GEN(config_munmap, bool)
CTL_RO_CONFIG_GEN(config_prof, bool)
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
CTL_RO_CONFIG_GEN(config_stats, bool)
CTL_RO_CONFIG_GEN(config_tcache, bool)
CTL_RO_CONFIG_GEN(config_tls, bool)
CTL_RO_CONFIG_GEN(config_utrace, bool)
CTL_RO_CONFIG_GEN(config_valgrind, bool)
CTL_RO_CONFIG_GEN(config_xmalloc, bool)
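
The rename from CTL_RO_BOOL_CONFIG_GEN to the typed CTL_RO_CONFIG_GEN exists
so the new config.malloc_conf node, a const char *, can share the macro with
the bool flags. A caller-side sketch reading one of each type (not part of
this commit):

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	bool stats_on;
	const char *conf;
	size_t sz;

	sz = sizeof(stats_on);
	mallctl("config.stats", &stats_on, &sz, NULL, 0);
	sz = sizeof(conf);
	mallctl("config.malloc_conf", &conf, &sz, NULL, 0);
	printf("config.stats: %d, config.malloc_conf: \"%s\"\n",
	    stats_on, conf);
	return (0);
}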
/******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
......@@ -1287,20 +1306,18 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
/******************************************************************************/
static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
tsd_t *tsd;
arena_t *oldarena;
unsigned newind, oldind;
tsd = tsd_fetch();
oldarena = arena_choose(tsd, NULL);
if (oldarena == NULL)
return (EAGAIN);
malloc_mutex_lock(&ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
newind = oldind = oldarena->ind;
WRITE(newind, unsigned);
READ(oldind, unsigned);
......@@ -1314,7 +1331,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
}
/* Initialize arena if necessary. */
newarena = arena_get(tsd, newind, true, true);
newarena = arena_get(tsd_tsdn(tsd), newind, true);
if (newarena == NULL) {
ret = EAGAIN;
goto label_return;
......@@ -1324,15 +1341,15 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
if (config_tcache) {
tcache_t *tcache = tsd_tcache_get(tsd);
if (tcache != NULL) {
tcache_arena_reassociate(tcache, oldarena,
newarena);
tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
oldarena, newarena);
}
}
}
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
......@@ -1346,8 +1363,8 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
tsd_thread_deallocatedp_get, uint64_t *)
static int
thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
......@@ -1371,8 +1388,8 @@ label_return:
}
static int
thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
......@@ -1390,7 +1407,7 @@ label_return:
}
static int
thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
......@@ -1401,20 +1418,16 @@ thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
READ_XOR_WRITE();
if (newp != NULL) {
tsd_t *tsd;
if (newlen != sizeof(const char *)) {
ret = EINVAL;
goto label_return;
}
tsd = tsd_fetch();
if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
0)
goto label_return;
} else {
const char *oldname = prof_thread_name_get();
const char *oldname = prof_thread_name_get(tsd);
READ(oldname, const char *);
}
......@@ -1424,7 +1437,7 @@ label_return:
}
static int
thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp,
thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
......@@ -1433,13 +1446,13 @@ thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp,
if (!config_prof)
return (ENOENT);
oldval = prof_thread_active_get();
oldval = prof_thread_active_get(tsd);
if (newp != NULL) {
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
if (prof_thread_active_set(*(bool *)newp)) {
if (prof_thread_active_set(tsd, *(bool *)newp)) {
ret = EAGAIN;
goto label_return;
}
......@@ -1454,19 +1467,16 @@ label_return:
/******************************************************************************/
static int
tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
tsd_t *tsd;
unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
tsd = tsd_fetch();
malloc_mutex_lock(&ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
if (tcaches_create(tsd, &tcache_ind)) {
ret = EFAULT;
......@@ -1476,23 +1486,20 @@ tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
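
"tcache.create" hands back an index that callers route through
MALLOCX_TCACHE(); "tcache.flush" and "tcache.destroy" take the same index as
write-only input. A minimal usage sketch (not part of this commit):

#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned tci;
	size_t sz = sizeof(tci);
	void *p;

	if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0)
		return (1);
	/* Route an allocation through the explicit tcache. */
	p = mallocx(64, MALLOCX_TCACHE(tci));
	dallocx(p, MALLOCX_TCACHE(tci));
	mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
	return (0);
}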
static int
tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
tsd_t *tsd;
unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
tsd = tsd_fetch();
WRITEONLY();
tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned);
......@@ -1508,18 +1515,15 @@ label_return:
}
static int
tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp,
tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
tsd_t *tsd;
unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
tsd = tsd_fetch();
WRITEONLY();
tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned);
......@@ -1536,48 +1540,56 @@ label_return:
/******************************************************************************/
/* ctl_mutex must be held during execution of this function. */
static void
arena_purge(unsigned arena_ind)
arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
{
tsd_t *tsd;
unsigned i;
bool refreshed;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
tsd = tsd_fetch();
for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
tarenas[i] = arena_get(tsd, i, false, false);
if (tarenas[i] == NULL && !refreshed) {
tarenas[i] = arena_get(tsd, i, false, true);
refreshed = true;
}
}
malloc_mutex_lock(tsdn, &ctl_mtx);
{
unsigned narenas = ctl_stats.narenas;
if (arena_ind == ctl_stats.narenas) {
unsigned i;
for (i = 0; i < ctl_stats.narenas; i++) {
if (tarenas[i] != NULL)
arena_purge_all(tarenas[i]);
if (arena_ind == narenas) {
unsigned i;
VARIABLE_ARRAY(arena_t *, tarenas, narenas);
for (i = 0; i < narenas; i++)
tarenas[i] = arena_get(tsdn, i, false);
/*
* No further need to hold ctl_mtx, since narenas and
* tarenas contain everything needed below.
*/
malloc_mutex_unlock(tsdn, &ctl_mtx);
for (i = 0; i < narenas; i++) {
if (tarenas[i] != NULL)
arena_purge(tsdn, tarenas[i], all);
}
} else {
arena_t *tarena;
assert(arena_ind < narenas);
tarena = arena_get(tsdn, arena_ind, false);
/* No further need to hold ctl_mtx. */
malloc_mutex_unlock(tsdn, &ctl_mtx);
if (tarena != NULL)
arena_purge(tsdn, tarena, all);
}
} else {
assert(arena_ind < ctl_stats.narenas);
if (tarenas[arena_ind] != NULL)
arena_purge_all(tarenas[arena_ind]);
}
}
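
arena_i_purge() treats an index equal to ctl_stats.narenas as "all arenas",
and the new all flag distinguishes full purging ("arena.<i>.purge") from
decay-driven purging (the new "arena.<i>.decay"). A caller-side sketch of the
purge-everything form (not part of this commit):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned narenas;
	size_t sz = sizeof(narenas);
	char cmd[64];

	mallctl("arenas.narenas", &narenas, &sz, NULL, 0);
	/* An index equal to narenas addresses every arena at once;
	 * substituting "decay" for "purge" would do a decay-only pass. */
	snprintf(cmd, sizeof(cmd), "arena.%u.purge", narenas);
	mallctl(cmd, NULL, NULL, NULL, 0);
	return (0);
}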
static int
arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
READONLY();
WRITEONLY();
malloc_mutex_lock(&ctl_mtx);
arena_purge(mib[1]);
malloc_mutex_unlock(&ctl_mtx);
arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true);
ret = 0;
label_return:
......@@ -1585,16 +1597,65 @@ label_return:
}
static int
arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
READONLY();
WRITEONLY();
arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false);
ret = 0;
label_return:
return (ret);
}
static int
arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned arena_ind;
arena_t *arena;
READONLY();
WRITEONLY();
if ((config_valgrind && unlikely(in_valgrind)) || (config_fill &&
unlikely(opt_quarantine))) {
ret = EFAULT;
goto label_return;
}
arena_ind = (unsigned)mib[1];
if (config_debug) {
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
assert(arena_ind < ctl_stats.narenas);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
}
assert(arena_ind >= opt_narenas);
arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
arena_reset(tsd, arena);
ret = 0;
label_return:
return (ret);
}
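
"arena.<i>.reset" discards everything an arena owns, and the
assert(arena_ind >= opt_narenas) above restricts it to manually created
arenas. A hedged usage sketch (not part of this commit; note that any pointer
into the arena is dead after the reset):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned ai;
	size_t sz = sizeof(ai);
	char cmd[64];
	void *p;

	/* Create a fresh arena and allocate from it explicitly. */
	mallctl("arenas.extend", &ai, &sz, NULL, 0);
	p = mallocx(4096, MALLOCX_ARENA(ai));
	(void)p;

	/* Reset discards everything the arena owns; p is now invalid
	 * and must not be touched or freed. */
	snprintf(cmd, sizeof(cmd), "arena.%u.reset", ai);
	mallctl(cmd, NULL, NULL, NULL, 0);
	return (0);
}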
static int
arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
const char *dss = NULL;
unsigned arena_ind = mib[1];
unsigned arena_ind = (unsigned)mib[1];
dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit;
malloc_mutex_lock(&ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(dss, const char *);
if (dss != NULL) {
int i;
......@@ -1615,13 +1676,13 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
}
if (arena_ind < ctl_stats.narenas) {
arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true);
arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL || (dss_prec != dss_prec_limit &&
arena_dss_prec_set(arena, dss_prec))) {
arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) {
ret = EFAULT;
goto label_return;
}
dss_prec_old = arena_dss_prec_get(arena);
dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena);
} else {
if (dss_prec != dss_prec_limit &&
chunk_dss_prec_set(dss_prec)) {
......@@ -1636,26 +1697,26 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
static int
arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned arena_ind = mib[1];
unsigned arena_ind = (unsigned)mib[1];
arena_t *arena;
arena = arena_get(tsd_fetch(), arena_ind, false, true);
arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_lg_dirty_mult_get(arena);
size_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena);
READ(oldval, ssize_t);
}
if (newp != NULL) {
......@@ -1663,7 +1724,8 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) {
if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena,
*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
......@@ -1675,24 +1737,60 @@ label_return:
}
static int
arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned arena_ind = mib[1];
unsigned arena_ind = (unsigned)mib[1];
arena_t *arena;
malloc_mutex_lock(&ctl_mtx);
arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena);
READ(oldval, ssize_t);
}
if (newp != NULL) {
if (newlen != sizeof(ssize_t)) {
ret = EINVAL;
goto label_return;
}
if (arena_decay_time_set(tsd_tsdn(tsd), arena,
*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
}
ret = 0;
label_return:
return (ret);
}
static int
arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned arena_ind = (unsigned)mib[1];
arena_t *arena;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
if (arena_ind < narenas_total_get() && (arena =
arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) {
arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
if (newp != NULL) {
chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
WRITE(new_chunk_hooks, chunk_hooks_t);
old_chunk_hooks = chunk_hooks_set(arena,
old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena,
&new_chunk_hooks);
READ(old_chunk_hooks, chunk_hooks_t);
} else {
chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena);
chunk_hooks_t old_chunk_hooks =
chunk_hooks_get(tsd_tsdn(tsd), arena);
READ(old_chunk_hooks, chunk_hooks_t);
}
} else {
......@@ -1701,16 +1799,16 @@ arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
}
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
static const ctl_named_node_t *
arena_i_index(const size_t *mib, size_t miblen, size_t i)
arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t * ret;
const ctl_named_node_t *ret;
malloc_mutex_lock(&ctl_mtx);
malloc_mutex_lock(tsdn, &ctl_mtx);
if (i > ctl_stats.narenas) {
ret = NULL;
goto label_return;
......@@ -1718,20 +1816,20 @@ arena_i_index(const size_t *mib, size_t miblen, size_t i)
ret = super_arena_i_node;
label_return:
malloc_mutex_unlock(&ctl_mtx);
malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret);
}
/******************************************************************************/
static int
arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned narenas;
malloc_mutex_lock(&ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
if (*oldlenp != sizeof(unsigned)) {
ret = EINVAL;
......@@ -1742,23 +1840,23 @@ arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned nread, i;
malloc_mutex_lock(&ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
ret = EINVAL;
nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
? (unsigned)(*oldlenp / sizeof(bool)) : ctl_stats.narenas;
} else {
ret = 0;
nread = ctl_stats.narenas;
......@@ -1768,13 +1866,13 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
label_return:
malloc_mutex_unlock(&ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
static int
arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
......@@ -1798,6 +1896,32 @@ label_return:
return (ret);
}
static int
arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
if (oldp != NULL && oldlenp != NULL) {
size_t oldval = arena_decay_time_default_get();
READ(oldval, ssize_t);
}
if (newp != NULL) {
if (newlen != sizeof(ssize_t)) {
ret = EINVAL;
goto label_return;
}
if (arena_decay_time_default_set(*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
}
ret = 0;
label_return:
return (ret);
}
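
arenas_decay_time_ctl() exposes the default decay time for newly created
arenas; a negative value disables decay-based purging, and the setting only
matters when opt.purge is "decay". A small caller-side sketch (not part of
this commit):

#include <sys/types.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	ssize_t decay;
	size_t sz = sizeof(decay);

	mallctl("arenas.decay_time", &decay, &sz, NULL, 0);
	decay = 5;	/* Seconds; -1 disables decay-based purging. */
	mallctl("arenas.decay_time", NULL, NULL, &decay, sizeof(decay));
	return (0);
}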
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
......@@ -1807,7 +1931,7 @@ CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
static const ctl_named_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
if (i > NBINS)
......@@ -1816,9 +1940,9 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
}
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t)
CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
if (i > nlclasses)
......@@ -1827,9 +1951,10 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
}
CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t)
CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
size_t)
static const ctl_named_node_t *
arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
if (i > nhclasses)
......@@ -1838,15 +1963,15 @@ arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
}
static int
arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned narenas;
malloc_mutex_lock(&ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
if (ctl_grow()) {
if (ctl_grow(tsd_tsdn(tsd))) {
ret = EAGAIN;
goto label_return;
}
......@@ -1855,15 +1980,15 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
/******************************************************************************/
static int
prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
......@@ -1876,9 +2001,10 @@ prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
oldval = prof_thread_active_init_set(*(bool *)newp);
oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
*(bool *)newp);
} else
oldval = prof_thread_active_init_get();
oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
READ(oldval, bool);
ret = 0;
......@@ -1887,8 +2013,8 @@ label_return:
}
static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
......@@ -1901,9 +2027,9 @@ prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = EINVAL;
goto label_return;
}
oldval = prof_active_set(*(bool *)newp);
oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
} else
oldval = prof_active_get();
oldval = prof_active_get(tsd_tsdn(tsd));
READ(oldval, bool);
ret = 0;
......@@ -1912,8 +2038,8 @@ label_return:
}
static int
prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
const char *filename = NULL;
......@@ -1924,7 +2050,7 @@ prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
WRITEONLY();
WRITE(filename, const char *);
if (prof_mdump(filename)) {
if (prof_mdump(tsd, filename)) {
ret = EFAULT;
goto label_return;
}
......@@ -1935,8 +2061,8 @@ label_return:
}
static int
prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
......@@ -1949,9 +2075,9 @@ prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = EINVAL;
goto label_return;
}
oldval = prof_gdump_set(*(bool *)newp);
oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
} else
oldval = prof_gdump_get();
oldval = prof_gdump_get(tsd_tsdn(tsd));
READ(oldval, bool);
ret = 0;
......@@ -1960,12 +2086,11 @@ label_return:
}
static int
prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
size_t lg_sample = lg_prof_sample;
tsd_t *tsd;
if (!config_prof)
return (ENOENT);
......@@ -1975,8 +2100,6 @@ prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
if (lg_sample >= (sizeof(uint64_t) << 3))
lg_sample = (sizeof(uint64_t) << 3) - 1;
tsd = tsd_fetch();
prof_reset(tsd, lg_sample);
ret = 0;
......@@ -1995,15 +2118,20 @@ CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_retained, ctl_stats.retained, size_t)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
ssize_t)
CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time,
ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
ctl_stats.arenas[mib[2]].astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
ctl_stats.arenas[mib[2]].astats.retained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
......@@ -2060,7 +2188,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j)
{
if (j > NBINS)
......@@ -2078,7 +2207,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
static const ctl_named_node_t *
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j)
{
if (j > nlclasses)
......@@ -2097,7 +2227,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
static const ctl_named_node_t *
stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j)
stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j)
{
if (j > nhclasses)
......@@ -2106,11 +2237,11 @@ stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j)
}
static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t * ret;
malloc_mutex_lock(&ctl_mtx);
malloc_mutex_lock(tsdn, &ctl_mtx);
if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
ret = NULL;
goto label_return;
......@@ -2118,6 +2249,6 @@ stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
ret = super_stats_arenas_i_node;
label_return:
malloc_mutex_unlock(&ctl_mtx);
malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret);
}
......@@ -3,45 +3,48 @@
/******************************************************************************/
/*
* Round down to the nearest chunk size that can actually be requested during
* normal huge allocation.
*/
JEMALLOC_INLINE_C size_t
extent_quantize(size_t size)
{
size_t ret;
szind_t ind;
/*
* Round down to the nearest chunk size that can actually be requested
* during normal huge allocation.
*/
return (index2size(size2index(size + 1) - 1));
assert(size > 0);
ind = size2index(size + 1);
if (ind == 0) {
/* Avoid underflow. */
return (index2size(0));
}
ret = index2size(ind - 1);
assert(ret <= size);
return (ret);
}
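
A toy model of the rewritten extent_quantize() (hypothetical classes[] table;
real jemalloc derives its size classes from the SIZE_CLASSES machinery): it
rounds down to the nearest requestable class, and the new ind == 0 guard keeps
size2index(size + 1) - 1 from underflowing for sizes below the smallest class,
in which case the smallest class is returned before the ret <= size assertion
is reached:

#include <assert.h>
#include <stddef.h>

static const size_t classes[] = {8, 16, 32, 48, 64, 80, 96, 112, 128};
#define NCLASSES (sizeof(classes) / sizeof(classes[0]))

/* Smallest class index whose size is >= size (clamped at the top). */
static size_t
toy_size2index(size_t size)
{
	size_t i;

	for (i = 0; i < NCLASSES - 1 && classes[i] < size; i++)
		;
	return (i);
}

static size_t
toy_extent_quantize(size_t size)
{
	size_t ind = toy_size2index(size + 1);

	if (ind == 0)
		return (classes[0]);	/* The underflow guard. */
	return (classes[ind - 1]);	/* Largest class <= size. */
}

int
main(void)
{
	assert(toy_extent_quantize(100) == 96);	/* Rounded down. */
	assert(toy_extent_quantize(64) == 64);	/* Exact class kept. */
	assert(toy_extent_quantize(4) == 8);	/* Guard path. */
	return (0);
}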
JEMALLOC_INLINE_C int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
extent_sz_comp(const extent_node_t *a, const extent_node_t *b)
{
int ret;
size_t a_qsize = extent_quantize(extent_node_size_get(a));
size_t b_qsize = extent_quantize(extent_node_size_get(b));
/*
* Compare based on quantized size rather than size, in order to sort
* equally useful extents only by address.
*/
ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
if (ret == 0) {
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
}
ret = (a_addr > b_addr) - (a_addr < b_addr);
}
JEMALLOC_INLINE_C int
extent_sn_comp(const extent_node_t *a, const extent_node_t *b)
{
size_t a_sn = extent_node_sn_get(a);
size_t b_sn = extent_node_sn_get(b);
return (ret);
return ((a_sn > b_sn) - (a_sn < b_sn));
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
extent_szad_comp)
JEMALLOC_INLINE_C int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
{
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
......@@ -49,5 +52,26 @@ extent_ad_comp(extent_node_t *a, extent_node_t *b)
return ((a_addr > b_addr) - (a_addr < b_addr));
}
JEMALLOC_INLINE_C int
extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b)
{
int ret;
ret = extent_sz_comp(a, b);
if (ret != 0)
return (ret);
ret = extent_sn_comp(a, b);
if (ret != 0)
return (ret);
ret = extent_ad_comp(a, b);
return (ret);
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link,
extent_szsnad_comp)
/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
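
The new ordering is a plain lexicographic key: quantized size, then serial
number, then address, so among equally useful extents the tree presumably
prefers lower serial numbers, i.e. older extents, before falling back to
address. A standalone illustration with a hypothetical struct and qsort (not
jemalloc code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ext {
	size_t qsize;	/* Quantized size. */
	size_t sn;	/* Serial number (allocation age). */
	uintptr_t addr;
};

static int
ext_szsnad_cmp(const void *ap, const void *bp)
{
	const struct ext *a = ap, *b = bp;
	int ret;

	ret = (a->qsize > b->qsize) - (a->qsize < b->qsize);
	if (ret != 0)
		return (ret);
	ret = (a->sn > b->sn) - (a->sn < b->sn);
	if (ret != 0)
		return (ret);
	return ((a->addr > b->addr) - (a->addr < b->addr));
}

int
main(void)
{
	struct ext v[] = {
		{4096, 7, 0x1000}, {4096, 2, 0x9000}, {8192, 1, 0x2000},
	};

	qsort(v, 3, sizeof(v[0]), ext_szsnad_cmp);
	/* Order: (4096, sn=2), (4096, sn=7), (8192, sn=1). */
	printf("first: qsize=%zu sn=%zu\n", v[0].qsize, v[0].sn);
	return (0);
}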