Unverified commit fb1f4f4e authored by Wander Hillen, committed by GitHub

Merge branch 'unstable' into minor-typos

parents dda8cc18 6e98214f
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\..\src\arena.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\background_thread.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\base.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\bitmap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ckh.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ctl.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\extent.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\extent_dss.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\extent_mmap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hash.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hooks.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\jemalloc.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\large.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\malloc_io.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\mutex.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\mutex_pool.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\nstime.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\pages.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prng.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prof.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\rtree.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\stats.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\sz.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\tcache.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ticker.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\tsd.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\witness.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\log.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\bin.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\div.c">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
</Project>
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug-static|Win32">
<Configuration>Debug-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug-static|x64">
<Configuration>Debug-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|Win32">
<Configuration>Release-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|x64">
<Configuration>Release-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
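<!-- Eight build flavors are declared above: Debug, Debug-static, Release and Release-static,
     each for Win32 and x64. The -static flavors define JEMALLOC_STATIC, use the static CRT
     and link the statically built jemalloc library (see the ItemDefinitionGroups below). -->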
<PropertyGroup Label="Globals">
<ProjectGuid>{09028CFD-4EB7-491D-869C-0708DB97ED44}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>test_threads</RootNamespace>
<WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
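<!-- Link dependencies follow one pattern per flavor: the dynamic configurations pull in
     jemallocd.lib (Debug) or jemalloc.lib (Release), while the -static configurations
     reference the toolset- and configuration-suffixed static library produced by the
     jemalloc project. -->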
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="..\..\..\test_threads\test_threads.cpp" />
<ClCompile Include="..\..\..\test_threads\test_threads_main.cpp" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\jemalloc\jemalloc.vcxproj">
<Project>{8d6bb292-9e1c-413d-9f98-4864bdc1514a}</Project>
</ProjectReference>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\test_threads\test_threads.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\test_threads\test_threads.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\test_threads\test_threads_main.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\test_threads\test_threads.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
</Project>
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug-static|Win32">
<Configuration>Debug-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug-static|x64">
<Configuration>Debug-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|Win32">
<Configuration>Release-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|x64">
<Configuration>Release-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\..\src\arena.c" />
<ClCompile Include="..\..\..\..\src\background_thread.c" />
<ClCompile Include="..\..\..\..\src\base.c" />
<ClCompile Include="..\..\..\..\src\bin.c" />
<ClCompile Include="..\..\..\..\src\bitmap.c" />
<ClCompile Include="..\..\..\..\src\ckh.c" />
<ClCompile Include="..\..\..\..\src\ctl.c" />
<ClCompile Include="..\..\..\..\src\div.c" />
<ClCompile Include="..\..\..\..\src\extent.c" />
<ClCompile Include="..\..\..\..\src\extent_dss.c" />
<ClCompile Include="..\..\..\..\src\extent_mmap.c" />
<ClCompile Include="..\..\..\..\src\hash.c" />
<ClCompile Include="..\..\..\..\src\hooks.c" />
<ClCompile Include="..\..\..\..\src\jemalloc.c" />
<ClCompile Include="..\..\..\..\src\large.c" />
<ClCompile Include="..\..\..\..\src\log.c" />
<ClCompile Include="..\..\..\..\src\malloc_io.c" />
<ClCompile Include="..\..\..\..\src\mutex.c" />
<ClCompile Include="..\..\..\..\src\mutex_pool.c" />
<ClCompile Include="..\..\..\..\src\nstime.c" />
<ClCompile Include="..\..\..\..\src\pages.c" />
<ClCompile Include="..\..\..\..\src\prng.c" />
<ClCompile Include="..\..\..\..\src\prof.c" />
<ClCompile Include="..\..\..\..\src\rtree.c" />
<ClCompile Include="..\..\..\..\src\stats.c" />
<ClCompile Include="..\..\..\..\src\sz.c" />
<ClCompile Include="..\..\..\..\src\tcache.c" />
<ClCompile Include="..\..\..\..\src\ticker.c" />
<ClCompile Include="..\..\..\..\src\tsd.c" />
<ClCompile Include="..\..\..\..\src\witness.c" />
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>jemalloc</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)d</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)d</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
</PropertyGroup>
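<!-- Output naming: the Debug DLL gets a "d" suffix (jemallocd), and the static builds embed
     the toolset and configuration in the target name so several variants can sit side by
     side in the same output tree. -->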
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<DebugInformationFormat>OldStyle</DebugInformationFormat>
<MinimalRebuild>false</MinimalRebuild>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
<DebugInformationFormat>OldStyle</DebugInformationFormat>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\..\src\arena.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\background_thread.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\base.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\bitmap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ckh.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ctl.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\extent.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\extent_dss.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\extent_mmap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hash.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hooks.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\jemalloc.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\large.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\malloc_io.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\mutex.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\mutex_pool.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\nstime.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\pages.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prng.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prof.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\rtree.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\stats.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\sz.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\tcache.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ticker.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\tsd.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\witness.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\log.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\bin.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\div.c">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
</Project>
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug-static|Win32">
<Configuration>Debug-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug-static|x64">
<Configuration>Debug-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|Win32">
<Configuration>Release-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|x64">
<Configuration>Release-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{09028CFD-4EB7-491D-869C-0708DB97ED44}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>test_threads</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
      <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="..\..\..\test_threads\test_threads.cpp" />
<ClCompile Include="..\..\..\test_threads\test_threads_main.cpp" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\jemalloc\jemalloc.vcxproj">
<Project>{8d6bb292-9e1c-413d-9f98-4864bdc1514a}</Project>
</ProjectReference>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\test_threads\test_threads.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\test_threads\test_threads.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\test_threads\test_threads_main.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\test_threads\test_threads.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
</Project>
\ No newline at end of file
// jemalloc C++ threaded test
// Author: Rustam Abdullaev
// Public Domain
#include <atomic>
#include <functional>
#include <future>
#include <random>
#include <thread>
#include <vector>
#include <stdio.h>
#include <jemalloc/jemalloc.h>
using std::vector;
using std::thread;
using std::uniform_int_distribution;
using std::minstd_rand;
int test_threads() {
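  // Request three arenas via the malloc conf string; jemalloc reads je_malloc_conf
  // during its first initialization, so this must be set before any allocation or mallctl call.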
  je_malloc_conf = "narenas:3";
  int narenas = 0;
  size_t sz = sizeof(narenas);
  je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0);
  if (narenas != 3) {
    printf("Error: unexpected number of arenas: %d\n", narenas);
    return 1;
  }
  static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 };
  static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0]));
  vector<thread> workers;
  static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50;
  je_malloc_stats_print(NULL, NULL, NULL);
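  // Snapshot active bytes before the workers run; the leak check at the end compares against this baseline.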
  size_t allocated1;
  size_t sz1 = sizeof(allocated1);
  je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0);
  printf("\nPress Enter to start threads...\n");
  getchar();
  printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2);
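  // Each worker repeatedly spawns a short-lived child thread that allocates buffers of assorted
  // sizes, fills them with a per-thread byte pattern, verifies the pattern, and frees everything.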
  for (int i = 0; i < numThreads; i++) {
    workers.emplace_back([tid=i]() {
      uniform_int_distribution<int> sizeDist(0, numSizes - 1);
      minstd_rand rnd(tid * 17);
      uint8_t* ptrs[numAllocsMax];
      int ptrsz[numAllocsMax];
      for (int i = 0; i < numIter1; ++i) {
        thread t([&]() {
          for (int i = 0; i < numIter2; ++i) {
            const int numAllocs = numAllocsMax - sizeDist(rnd);
            for (int j = 0; j < numAllocs; j++) {
              const int x = sizeDist(rnd);
              const int sz = sizes[x];
              ptrsz[j] = sz;
              ptrs[j] = (uint8_t*)je_malloc(sz);
              if (!ptrs[j]) {
                printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. %d\n", sz, tid, i, j, x);
                exit(1);
              }
              for (int k = 0; k < sz; k++)
                ptrs[j][k] = tid + k;
            }
            for (int j = 0; j < numAllocs; j++) {
              for (int k = 0, sz = ptrsz[j]; k < sz; k++)
                if (ptrs[j][k] != (uint8_t)(tid + k)) {
                  printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k));
                  exit(1);
                }
              je_free(ptrs[j]);
            }
          }
        });
        t.join();
      }
    });
  }
  for (thread& t : workers) {
    t.join();
  }
  je_malloc_stats_print(NULL, NULL, NULL);
  size_t allocated2;
  je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0);
  size_t leaked = allocated2 - allocated1;
  printf("\nDone. Leaked: %zd bytes\n", leaked);
  bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet)
  printf("\nTest %s!\n", (failed ? "FAILED" : "successful"));
  printf("\nPress Enter to continue...\n");
  getchar();
  return failed ? 1 : 0;
}
#pragma once
int test_threads();
#include "test_threads.h"
#include <future>
#include <functional>
#include <chrono>
using namespace std::chrono_literals;
int main(int argc, char** argv) {
  int rc = test_threads();
  return rc;
}
$(dirname "$0")/scripts/gen_run_tests.py | bash
#!/usr/bin/env python
import sys
from itertools import combinations
from os import uname
from multiprocessing import cpu_count
# Later, we want to test extended vaddr support. Apparently, the "real" way of
# checking this is flaky on OS X.
bits_64 = sys.maxsize > 2**32
nparallel = cpu_count() * 2
uname = uname()[0]
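# Return every subset of items (as tuples), from the empty tuple up to the full set.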
def powerset(items):
    result = []
    for i in xrange(len(items) + 1):
        result += combinations(items, i)
    return result
possible_compilers = [('gcc', 'g++'), ('clang', 'clang++')]
possible_compiler_opts = [
    '-m32',
]
possible_config_opts = [
    '--enable-debug',
    '--enable-prof',
    '--disable-stats',
]
if bits_64:
    possible_config_opts.append('--with-lg-vaddr=56')
possible_malloc_conf_opts = [
    'tcache:false',
    'dss:primary',
    'percpu_arena:percpu',
    'background_thread:true',
]
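# Everything printed from here on is a shell script (run_tests.sh pipes this output to
# bash): clean the tree, re-run autoconf, and generate one run_test_<N>.sh wrapper per
# configuration under run_tests.out.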
print 'set -e'
print 'if [ -f Makefile ] ; then make relclean ; fi'
print 'autoconf'
print 'rm -rf run_tests.out'
print 'mkdir run_tests.out'
print 'cd run_tests.out'
ind = 0
for cc, cxx in possible_compilers:
    for compiler_opts in powerset(possible_compiler_opts):
        for config_opts in powerset(possible_config_opts):
            for malloc_conf_opts in powerset(possible_malloc_conf_opts):
                if cc == 'clang' \
                  and '-m32' in compiler_opts \
                  and '--enable-prof' in config_opts:
                    continue
                config_line = (
                    'EXTRA_CFLAGS=-Werror EXTRA_CXXFLAGS=-Werror '
                    + 'CC="{} {}" '.format(cc, " ".join(compiler_opts))
                    + 'CXX="{} {}" '.format(cxx, " ".join(compiler_opts))
                    + '../../configure '
                    + " ".join(config_opts) + (' --with-malloc-conf=' +
                    ",".join(malloc_conf_opts) if len(malloc_conf_opts) > 0
                    else '')
                )
                # We don't want to test large vaddr spaces in 32-bit mode.
                if ('-m32' in compiler_opts and '--with-lg-vaddr=56' in
                  config_opts):
                    continue
                # Per CPU arenas are only supported on Linux.
                linux_supported = ('percpu_arena:percpu' in malloc_conf_opts \
                    or 'background_thread:true' in malloc_conf_opts)
                # Heap profiling and dss are not supported on OS X.
                darwin_unsupported = ('--enable-prof' in config_opts or \
                    'dss:primary' in malloc_conf_opts)
                if (uname == 'Linux' and linux_supported) \
                  or (not linux_supported and (uname != 'Darwin' or \
                  not darwin_unsupported)):
                    print """cat <<EOF > run_test_%(ind)d.sh
#!/bin/sh
set -e
abort() {
  echo "==> Error" >> run_test.log
  echo "Error; see run_tests.out/run_test_%(ind)d.out/run_test.log"
  exit 255 # Special exit code tells xargs to terminate.
}
# Environment variables are not supported.
run_cmd() {
  echo "==> \$@" >> run_test.log
  \$@ >> run_test.log 2>&1 || abort
}
echo "=> run_test_%(ind)d: %(config_line)s"
mkdir run_test_%(ind)d.out
cd run_test_%(ind)d.out
echo "==> %(config_line)s" >> run_test.log
%(config_line)s >> run_test.log 2>&1 || abort
run_cmd make all tests
run_cmd make check
run_cmd make distclean
EOF
chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line}
                    ind += 1
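# Run the generated scripts, nparallel at a time; a 255 exit status from any of them
# tells xargs to stop launching more.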
print 'for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs -P %(nparallel)d -n 1 sh' % {'last_ind': ind-1, 'nparallel': nparallel}
#!/usr/bin/env python
from itertools import combinations
travis_template = """\
language: generic

matrix:
  include:
%s

before_script:
  - autoconf
  - ./configure ${COMPILER_FLAGS:+ \
      CC="$CC $COMPILER_FLAGS" \
      CXX="$CXX $COMPILER_FLAGS" } \
      $CONFIGURE_FLAGS
  - make -j3
  - make -j3 tests

script:
  - make check
"""
# The 'default' configuration is gcc, on linux, with no compiler or configure
# flags. We also test with clang, -m32, --enable-debug, --enable-prof,
# --disable-stats, and --with-malloc-conf=tcache:false. To avoid abusing
# travis though, we don't test all 2**7 = 128 possible combinations of these;
# instead, we only test combinations of up to 2 'unusual' settings, under the
# hope that bugs involving interactions of such settings are rare.
# We only test up to MAX_UNUSUAL_OPTIONS = 2 unusual things at once, for
# C(7, 0) + C(7, 1) + C(7, 2) = 29 configurations.
MAX_UNUSUAL_OPTIONS = 2
os_default = 'linux'
os_unusual = 'osx'
compilers_default = 'CC=gcc CXX=g++'
compilers_unusual = 'CC=clang CXX=clang++'
compiler_flag_unusuals = ['-m32']
configure_flag_unusuals = [
    '--enable-debug',
    '--enable-prof',
    '--disable-stats',
]
malloc_conf_unusuals = [
    'tcache:false',
    'dss:primary',
    'percpu_arena:percpu',
    'background_thread:true',
]
all_unusuals = (
    [os_unusual] + [compilers_unusual] + compiler_flag_unusuals
    + configure_flag_unusuals + malloc_conf_unusuals
)
unusual_combinations_to_test = []
for i in xrange(MAX_UNUSUAL_OPTIONS + 1):
    unusual_combinations_to_test += combinations(all_unusuals, i)
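# With the 10 unusual settings listed above, this yields
# C(10, 0) + C(10, 1) + C(10, 2) = 56 candidate combinations before the OS X filters below.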
include_rows = ""
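# Emit one Travis matrix row per combination, skipping configurations that OS X does not support.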
for unusual_combination in unusual_combinations_to_test:
    os = os_default
    if os_unusual in unusual_combination:
        os = os_unusual
    compilers = compilers_default
    if compilers_unusual in unusual_combination:
        compilers = compilers_unusual
    compiler_flags = [
        x for x in unusual_combination if x in compiler_flag_unusuals]
    configure_flags = [
        x for x in unusual_combination if x in configure_flag_unusuals]
    malloc_conf = [
        x for x in unusual_combination if x in malloc_conf_unusuals]
    # Filter out unsupported configurations on OS X.
    if os == 'osx' and ('dss:primary' in malloc_conf or \
      'percpu_arena:percpu' in malloc_conf or 'background_thread:true' \
      in malloc_conf):
        continue
    if len(malloc_conf) > 0:
        configure_flags.append('--with-malloc-conf=' + ",".join(malloc_conf))
    # Filter out an unsupported configuration - heap profiling on OS X.
    if os == 'osx' and '--enable-prof' in configure_flags:
        continue
    # We get some spurious errors when -Warray-bounds is enabled.
    env_string = ('{} COMPILER_FLAGS="{}" CONFIGURE_FLAGS="{}" '
        'EXTRA_CFLAGS="-Werror -Wno-array-bounds"').format(
        compilers, " ".join(compiler_flags), " ".join(configure_flags))
    include_rows += '    - os: %s\n' % os
    include_rows += '      env: %s\n' % env_string
    if '-m32' in unusual_combination and os == 'linux':
        include_rows += '      addons:\n'
        include_rows += '        apt:\n'
        include_rows += '          packages:\n'
        include_rows += '            - gcc-multilib\n'
print travis_template % include_rows
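# Typical usage (assumed, not enforced by the script): scripts/gen_travis.py > .travis.yml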
#define JEMALLOC_ARENA_C_ #define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h" #include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/ /******************************************************************************/
/* Data. */ /* Data. */
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; /*
static ssize_t lg_dirty_mult_default; * Define names for both unininitialized and initialized phases, so that
arena_bin_info_t arena_bin_info[NBINS]; * options and mallctl processing are straightforward.
*/
size_t map_bias; const char *percpu_arena_mode_names[] = {
size_t map_misc_offset; "percpu",
size_t arena_maxrun; /* Max run size for arenas. */ "phycpu",
size_t large_maxclass; /* Max large size class. */ "disabled",
static size_t small_maxrun; /* Max run size used for small size classes. */ "percpu",
static bool *small_run_tab; /* Valid small run page multiples. */ "phycpu"
unsigned nlclasses; /* Number of large size classes. */ };
unsigned nhclasses; /* Number of huge size classes. */ percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;
const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y) \
h,
SMOOTHSTEP
#undef STEP
};
static div_info_t arena_binind_div_info[NBINS];
/******************************************************************************/ /******************************************************************************/
/* /*
...@@ -23,2008 +48,1244 @@ unsigned nhclasses; /* Number of huge size classes. */ ...@@ -23,2008 +48,1244 @@ unsigned nhclasses; /* Number of huge size classes. */
* definition. * definition.
*/ */
static void arena_purge(arena_t *arena, bool all); static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
bool cleaned, bool decommitted); size_t npages_decay_max, bool is_background_thread);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
arena_run_t *run, arena_bin_t *bin); bool is_background_thread, bool all);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
arena_run_t *run, arena_bin_t *bin); bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
bin_t *bin);
/******************************************************************************/ /******************************************************************************/
#define CHUNK_MAP_KEY ((uintptr_t)0x1U) void
arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
JEMALLOC_INLINE_C arena_chunk_map_misc_t * const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
arena_miscelm_key_create(size_t size) size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
{ *nthreads += arena_nthreads_get(arena, false);
*dss = dss_prec_names[arena_dss_prec_get(arena)];
return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) | *dirty_decay_ms = arena_dirty_decay_ms_get(arena);
CHUNK_MAP_KEY)); *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
*ndirty += extents_npages_get(&arena->extents_dirty);
*nmuzzy += extents_npages_get(&arena->extents_muzzy);
} }
JEMALLOC_INLINE_C bool void
arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm) arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
{ const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
bin_stats_t *bstats, arena_stats_large_t *lstats) {
cassert(config_stats);
return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0); arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
muzzy_decay_ms, nactive, ndirty, nmuzzy);
size_t base_allocated, base_resident, base_mapped, metadata_thp;
base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
&base_mapped, &metadata_thp);
arena_stats_lock(tsdn, &arena->stats);
arena_stats_accum_zu(&astats->mapped, base_mapped
+ arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
arena_stats_accum_zu(&astats->retained,
extents_npages_get(&arena->extents_retained) << LG_PAGE);
arena_stats_accum_u64(&astats->decay_dirty.npurge,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_dirty.npurge));
arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_dirty.nmadvise));
arena_stats_accum_u64(&astats->decay_dirty.purged,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_dirty.purged));
arena_stats_accum_u64(&astats->decay_muzzy.npurge,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_muzzy.npurge));
arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_muzzy.nmadvise));
arena_stats_accum_u64(&astats->decay_muzzy.purged,
arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.decay_muzzy.purged));
arena_stats_accum_zu(&astats->base, base_allocated);
arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
arena_stats_accum_zu(&astats->resident, base_resident +
(((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
extents_npages_get(&arena->extents_dirty) +
extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
for (szind_t i = 0; i < NSIZES - NBINS; i++) {
uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.lstats[i].nmalloc);
arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.lstats[i].ndalloc);
arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.lstats[i].nrequests);
arena_stats_accum_u64(&lstats[i].nrequests,
nmalloc + nrequests);
arena_stats_accum_u64(&astats->nrequests_large,
nmalloc + nrequests);
assert(nmalloc >= ndalloc);
assert(nmalloc - ndalloc <= SIZE_T_MAX);
size_t curlextents = (size_t)(nmalloc - ndalloc);
lstats[i].curlextents += curlextents;
arena_stats_accum_zu(&astats->allocated_large,
curlextents * sz_index2size(NBINS + i));
}
arena_stats_unlock(tsdn, &arena->stats);
/* tcache_bytes counts currently cached bytes. */
atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
cache_bin_array_descriptor_t *descriptor;
ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
szind_t i = 0;
for (; i < NBINS; i++) {
cache_bin_t *tbin = &descriptor->bins_small[i];
arena_stats_accum_zu(&astats->tcache_bytes,
tbin->ncached * sz_index2size(i));
}
for (; i < nhbins; i++) {
cache_bin_t *tbin = &descriptor->bins_large[i];
arena_stats_accum_zu(&astats->tcache_bytes,
tbin->ncached * sz_index2size(i));
}
}
malloc_mutex_prof_read(tsdn,
&astats->mutex_prof_data[arena_prof_mutex_tcache_list],
&arena->tcache_ql_mtx);
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \
malloc_mutex_lock(tsdn, &arena->mtx); \
malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \
&arena->mtx); \
malloc_mutex_unlock(tsdn, &arena->mtx);
/* Gather per arena mutex profiling data. */
READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
arena_prof_mutex_extent_avail)
READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
arena_prof_mutex_extents_dirty)
READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
arena_prof_mutex_extents_muzzy)
READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
arena_prof_mutex_extents_retained)
READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
arena_prof_mutex_decay_dirty)
READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
arena_prof_mutex_decay_muzzy)
READ_ARENA_MUTEX_PROF_DATA(base->mtx,
arena_prof_mutex_base)
#undef READ_ARENA_MUTEX_PROF_DATA
nstime_copy(&astats->uptime, &arena->create_time);
nstime_update(&astats->uptime);
nstime_subtract(&astats->uptime, &arena->create_time);
for (szind_t i = 0; i < NBINS; i++) {
bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
}
} }
#undef CHUNK_MAP_KEY void
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
JEMALLOC_INLINE_C size_t extent_hooks_t **r_extent_hooks, extent_t *extent) {
arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm) witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
{ WITNESS_RANK_CORE, 0);
assert(arena_miscelm_is_key(miscelm)); extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
extent);
return (arena_mapbits_size_decode((uintptr_t)miscelm)); if (arena_dirty_decay_ms_get(arena) == 0) {
arena_decay_dirty(tsdn, arena, false, true);
} else {
arena_background_thread_inactivity_check(tsdn, arena, false);
}
} }
JEMALLOC_INLINE_C size_t static void *
arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm) arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
{ void *ret;
arena_chunk_t *chunk; arena_slab_data_t *slab_data = extent_slab_data_get(slab);
size_t pageind, mapbits; size_t regind;
assert(!arena_miscelm_is_key(miscelm)); assert(extent_nfree_get(slab) > 0);
assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
pageind = arena_miscelm_to_pageind(miscelm); ret = (void *)((uintptr_t)extent_addr_get(slab) +
mapbits = arena_mapbits_get(chunk, pageind); (uintptr_t)(bin_info->reg_size * regind));
return (arena_mapbits_size_decode(mapbits)); extent_nfree_dec(slab);
return ret;
} }
JEMALLOC_INLINE_C int #ifndef JEMALLOC_JET
arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) static
{ #endif
uintptr_t a_miscelm = (uintptr_t)a; size_t
uintptr_t b_miscelm = (uintptr_t)b; arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
size_t diff, regind;
assert(a != NULL);
assert(b != NULL);
return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}
/* Generate red-black tree functions. */ /* Freeing a pointer outside the slab can cause assertion failure. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t, assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
rb_link, arena_run_comp) assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
/* Freeing an interior pointer can cause assertion failure. */
assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
(uintptr_t)bin_infos[binind].reg_size == 0);
static size_t diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
run_quantize(size_t size)
{
size_t qsize;
assert(size != 0); /* Avoid doing division with a variable divisor. */
assert(size == PAGE_CEILING(size)); regind = div_compute(&arena_binind_div_info[binind], diff);
/* Don't change sizes that are valid small run sizes. */ assert(regind < bin_infos[binind].nregs);
if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
return (size);
/* return regind;
* Round down to the nearest run size that can actually be requested
* during normal large allocation. Add large_pad so that cache index
* randomization can offset the allocation from the page boundary.
*/
qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
if (qsize <= SMALL_MAXCLASS + large_pad)
return (run_quantize(size - large_pad));
assert(qsize <= size);
return (qsize);
} }
static size_t static void
run_quantize_next(size_t size) arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
{ szind_t binind = extent_szind_get(slab);
size_t large_run_size_next; const bin_info_t *bin_info = &bin_infos[binind];
size_t regind = arena_slab_regind(slab, binind, ptr);
assert(size != 0);
assert(size == PAGE_CEILING(size));
/* assert(extent_nfree_get(slab) < bin_info->nregs);
* Return the next quantized size greater than the input size. /* Freeing an unallocated pointer can cause assertion failure. */
* Quantized sizes comprise the union of run sizes that back small assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
* region runs, and run sizes that back large regions with no explicit
* alignment constraints.
*/
if (size > SMALL_MAXCLASS) { bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
large_run_size_next = PAGE_CEILING(index2size(size2index(size - extent_nfree_inc(slab);
large_pad) + 1) + large_pad);
} else
large_run_size_next = SIZE_T_MAX;
if (size >= small_maxrun)
return (large_run_size_next);
while (true) {
size += PAGE;
assert(size <= small_maxrun);
if (small_run_tab[size >> LG_PAGE]) {
if (large_run_size_next < size)
return (large_run_size_next);
return (size);
}
}
} }
static size_t static void
run_quantize_first(size_t size) arena_nactive_add(arena_t *arena, size_t add_pages) {
{ atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
size_t qsize = run_quantize(size); }
if (qsize < size) { static void
/* arena_nactive_sub(arena_t *arena, size_t sub_pages) {
* Skip a quantization that may have an adequately large run, assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
* because under-sized runs may be mixed in. This only happens atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
* when an unusual size is requested, i.e. for aligned
* allocation, and is just one of several places where linear
* search would potentially find sufficiently aligned available
* memory somewhere lower.
*/
qsize = run_quantize_next(size);
}
return (qsize);
} }
JEMALLOC_INLINE_C int static void
arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
{ szind_t index, hindex;
int ret;
uintptr_t a_miscelm = (uintptr_t)a;
size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ?
arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a));
size_t b_qsize = run_quantize(arena_miscelm_size_get(b));
/* cassert(config_stats);
* Compare based on quantized size rather than size, in order to sort
* equally useful runs only by address.
*/
ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
if (ret == 0) {
if (!arena_miscelm_is_key(a)) {
uintptr_t b_miscelm = (uintptr_t)b;
ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm); if (usize < LARGE_MINCLASS) {
} else { usize = LARGE_MINCLASS;
/*
* Treat keys as if they are lower than anything else.
*/
ret = -1;
}
} }
index = sz_size2index(usize);
hindex = (index >= NBINS) ? index - NBINS : 0;
return (ret); arena_stats_add_u64(tsdn, &arena->stats,
&arena->stats.lstats[hindex].nmalloc, 1);
} }
/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
arena_chunk_map_misc_t, rb_link, arena_avail_comp)
static void static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
size_t npages) szind_t index, hindex;
{
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> cassert(config_stats);
LG_PAGE));
arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
pageind));
}
static void if (usize < LARGE_MINCLASS) {
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, usize = LARGE_MINCLASS;
size_t npages) }
{ index = sz_size2index(usize);
hindex = (index >= NBINS) ? index - NBINS : 0;
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> arena_stats_add_u64(tsdn, &arena->stats,
LG_PAGE)); &arena->stats.lstats[hindex].ndalloc, 1);
arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
pageind));
} }
static void static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
size_t npages) size_t usize) {
{ arena_large_dalloc_stats_update(tsdn, arena, oldusize);
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); arena_large_malloc_stats_update(tsdn, arena, usize);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
CHUNK_MAP_DIRTY);
qr_new(&miscelm->rd, rd_link);
qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
arena->ndirty += npages;
} }
static void extent_t *
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t npages) size_t alignment, bool *zero) {
{ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
LG_PAGE)); WITNESS_RANK_CORE, 0);
assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
CHUNK_MAP_DIRTY);
qr_remove(&miscelm->rd, rd_link); szind_t szind = sz_size2index(usize);
assert(arena->ndirty >= npages); size_t mapped_add;
arena->ndirty -= npages; bool commit = true;
} extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
szind, zero, &commit);
if (extent == NULL) {
extent = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
false, szind, zero, &commit);
}
size_t size = usize + sz_large_pad;
if (extent == NULL) {
extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
usize, sz_large_pad, alignment, false, szind, zero,
&commit);
if (config_stats) {
/*
* extent may be NULL on OOM, but in that case
* mapped_add isn't used below, so there's no need to
* conditionlly set it to 0 here.
*/
mapped_add = size;
}
} else if (config_stats) {
mapped_add = 0;
}
static size_t if (extent != NULL) {
arena_chunk_dirty_npages(const extent_node_t *node) if (config_stats) {
{ arena_stats_lock(tsdn, &arena->stats);
arena_large_malloc_stats_update(tsdn, arena, usize);
if (mapped_add != 0) {
arena_stats_add_zu(tsdn, &arena->stats,
&arena->stats.mapped, mapped_add);
}
arena_stats_unlock(tsdn, &arena->stats);
}
arena_nactive_add(arena, size >> LG_PAGE);
}
return (extent_node_size_get(node) >> LG_PAGE); return extent;
} }
void void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache) arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
{ if (config_stats) {
arena_stats_lock(tsdn, &arena->stats);
if (cache) { arena_large_dalloc_stats_update(tsdn, arena,
extent_node_dirty_linkage_init(node); extent_usize_get(extent));
extent_node_dirty_insert(node, &arena->runs_dirty, arena_stats_unlock(tsdn, &arena->stats);
&arena->chunks_cache);
arena->ndirty += arena_chunk_dirty_npages(node);
} }
arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
} }
void void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty) arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
{ size_t oldusize) {
size_t usize = extent_usize_get(extent);
size_t udiff = oldusize - usize;
if (dirty) { if (config_stats) {
extent_node_dirty_remove(node); arena_stats_lock(tsdn, &arena->stats);
assert(arena->ndirty >= arena_chunk_dirty_npages(node)); arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
arena->ndirty -= arena_chunk_dirty_npages(node); arena_stats_unlock(tsdn, &arena->stats);
} }
arena_nactive_sub(arena, udiff >> LG_PAGE);
} }
JEMALLOC_INLINE_C void * void
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
{ size_t oldusize) {
void *ret; size_t usize = extent_usize_get(extent);
unsigned regind; size_t udiff = usize - oldusize;
arena_chunk_map_misc_t *miscelm;
void *rpages;
assert(run->nfree > 0);
assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
miscelm = arena_run_to_miscelm(run);
rpages = arena_miscelm_to_rpages(miscelm);
ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
(uintptr_t)(bin_info->reg_interval * regind));
run->nfree--;
return (ret);
}
JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind = arena_run_regind(run, bin_info, ptr);
assert(run->nfree < bin_info->nregs);
/* Freeing an interior pointer can cause assertion failure. */
assert(((uintptr_t)ptr -
((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
(uintptr_t)bin_info->reg0_offset)) %
(uintptr_t)bin_info->reg_interval == 0);
assert((uintptr_t)ptr >=
(uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
(uintptr_t)bin_info->reg0_offset);
/* Freeing an unallocated pointer can cause assertion failure. */
assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind); if (config_stats) {
run->nfree++; arena_stats_lock(tsdn, &arena->stats);
arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
arena_stats_unlock(tsdn, &arena->stats);
}
arena_nactive_add(arena, udiff >> LG_PAGE);
} }
JEMALLOC_INLINE_C void static ssize_t
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) arena_decay_ms_read(arena_decay_t *decay) {
{ return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + static void
(run_ind << LG_PAGE)), (npages << LG_PAGE)); arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
(npages << LG_PAGE));
} }
JEMALLOC_INLINE_C void static void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) arena_decay_deadline_init(arena_decay_t *decay) {
{ /*
* Generate a new deadline that is uniformly random within the next
* epoch after the current one.
*/
nstime_copy(&decay->deadline, &decay->epoch);
nstime_add(&decay->deadline, &decay->interval);
if (arena_decay_ms_read(decay) > 0) {
nstime_t jitter;
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
<< LG_PAGE)), PAGE); nstime_ns(&decay->interval)));
nstime_add(&decay->deadline, &jitter);
}
} }
JEMALLOC_INLINE_C void static bool
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
{ return (nstime_compare(&decay->deadline, time) <= 0);
size_t i;
UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
arena_run_page_mark_zeroed(chunk, run_ind);
for (i = 0; i < PAGE / sizeof(size_t); i++)
assert(p[i] == 0);
} }
static void static size_t
arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
{ uint64_t sum;
size_t npages_limit_backlog;
unsigned i;
if (config_stats) { /*
ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages * For each element of decay_backlog, multiply by the corresponding
- sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << * fixed-point smoothstep decay factor. Sum the products, then divide
LG_PAGE); * to round down to the nearest whole number of pages.
if (cactive_diff != 0) */
stats_cactive_add(cactive_diff); sum = 0;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
sum += decay->backlog[i] * h_steps[i];
} }
npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
return npages_limit_backlog;
} }
static void static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
size_t flag_dirty, size_t flag_decommitted, size_t need_pages) size_t npages_delta = (current_npages > decay->nunpurged) ?
{ current_npages - decay->nunpurged : 0;
size_t total_pages, rem_pages; decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
assert(flag_dirty == 0 || flag_decommitted == 0); if (config_debug) {
if (current_npages > decay->ceil_npages) {
total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> decay->ceil_npages = current_npages;
LG_PAGE; }
assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == size_t npages_limit = arena_decay_backlog_npages_limit(decay);
flag_dirty); assert(decay->ceil_npages >= npages_limit);
assert(need_pages <= total_pages); if (decay->ceil_npages > npages_limit) {
rem_pages = total_pages - need_pages; decay->ceil_npages = npages_limit;
arena_avail_remove(arena, chunk, run_ind, total_pages);
if (flag_dirty != 0)
arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
arena_cactive_update(arena, need_pages, 0);
arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) {
size_t flags = flag_dirty | flag_decommitted;
size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
0;
arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
(rem_pages << LG_PAGE), flags |
(arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
flag_unzeroed_mask));
arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
(rem_pages << LG_PAGE), flags |
(arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
flag_unzeroed_mask));
if (flag_dirty != 0) {
arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
rem_pages);
} }
arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
} }
} }
static bool static void
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
bool remove, bool zero) size_t current_npages) {
{ if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
arena_chunk_t *chunk; memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
arena_chunk_map_misc_t *miscelm; sizeof(size_t));
size_t flag_dirty, flag_decommitted, run_ind, need_pages;
size_t flag_unzeroed_mask;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
miscelm = arena_run_to_miscelm(run);
run_ind = arena_miscelm_to_pageind(miscelm);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
need_pages = (size >> LG_PAGE);
assert(need_pages > 0);
if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
run_ind << LG_PAGE, size, arena->ind))
return (true);
if (remove) {
arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
flag_decommitted, need_pages);
}
if (zero) {
if (flag_decommitted != 0) {
/* The run is untouched, and therefore zeroed. */
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
*)((uintptr_t)chunk + (run_ind << LG_PAGE)),
(need_pages << LG_PAGE));
} else if (flag_dirty != 0) {
/* The run is dirty, so all pages must be zeroed. */
arena_run_zero(chunk, run_ind, need_pages);
} else {
/*
* The run is clean, so some pages may be zeroed (i.e.
* never before touched).
*/
size_t i;
for (i = 0; i < need_pages; i++) {
if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
!= 0)
arena_run_zero(chunk, run_ind+i, 1);
else if (config_debug) {
arena_run_page_validate_zeroed(chunk,
run_ind+i);
} else {
arena_run_page_mark_zeroed(chunk,
run_ind+i);
}
}
}
} else { } else {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + size_t nadvance_z = (size_t)nadvance_u64;
(run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
/* assert((uint64_t)nadvance_z == nadvance_u64);
* Set the last element first, in case the run only contains one page
* (i.e. both statements set the same element).
*/
flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
CHUNK_MAP_UNZEROED : 0;
arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
(flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
run_ind+need_pages-1)));
arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
(flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
return (false);
}
static bool memmove(decay->backlog, &decay->backlog[nadvance_z],
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
{ if (nadvance_z > 1) {
memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
}
}
return (arena_run_split_large_helper(arena, run, size, true, zero)); arena_decay_backlog_update_last(decay, current_npages);
} }
static bool static void
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
{ extents_t *extents, size_t current_npages, size_t npages_limit,
bool is_background_thread) {
return (arena_run_split_large_helper(arena, run, size, false, zero)); if (current_npages > npages_limit) {
arena_decay_to_limit(tsdn, arena, decay, extents, false,
npages_limit, current_npages - npages_limit,
is_background_thread);
}
} }
static bool static void
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
szind_t binind) size_t current_npages) {
{ assert(arena_decay_deadline_reached(decay, time));
arena_chunk_t *chunk;
arena_chunk_map_misc_t *miscelm;
size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
assert(binind != BININD_INVALID); nstime_t delta;
nstime_copy(&delta, time);
nstime_subtract(&delta, &decay->epoch);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
miscelm = arena_run_to_miscelm(run); assert(nadvance_u64 > 0);
run_ind = arena_miscelm_to_pageind(miscelm);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
need_pages = (size >> LG_PAGE);
assert(need_pages > 0);
if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, /* Add nadvance_u64 decay intervals to epoch. */
run_ind << LG_PAGE, size, arena->ind)) nstime_copy(&delta, &decay->interval);
return (true); nstime_imultiply(&delta, nadvance_u64);
nstime_add(&decay->epoch, &delta);
arena_run_split_remove(arena, chunk, run_ind, flag_dirty, /* Set a new deadline. */
flag_decommitted, need_pages); arena_decay_deadline_init(decay);
for (i = 0; i < need_pages; i++) { /* Update the backlog. */
size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk, arena_decay_backlog_update(decay, nadvance_u64, current_npages);
run_ind+i);
arena_mapbits_small_set(chunk, run_ind+i, i, binind,
flag_unzeroed);
if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
arena_run_page_validate_zeroed(chunk, run_ind+i);
}
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
(run_ind << LG_PAGE)), (need_pages << LG_PAGE));
return (false);
} }
static arena_chunk_t * static void
arena_chunk_init_spare(arena_t *arena) arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
{ extents_t *extents, const nstime_t *time, bool is_background_thread) {
arena_chunk_t *chunk; size_t current_npages = extents_npages_get(extents);
arena_decay_epoch_advance_helper(decay, time, current_npages);
assert(arena->spare != NULL); size_t npages_limit = arena_decay_backlog_npages_limit(decay);
/* We may unlock decay->mtx when try_purge(). Finish logging first. */
decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
current_npages;
chunk = arena->spare; if (!background_thread_enabled() || is_background_thread) {
arena->spare = NULL; arena_decay_try_purge(tsdn, arena, decay, extents,
current_npages, npages_limit, is_background_thread);
}
}
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); static void
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == arena_decay_ms_write(decay, decay_ms);
arena_maxrun); if (decay_ms > 0) {
assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == nstime_init(&decay->interval, (uint64_t)decay_ms *
arena_maxrun); KQU(1000000));
assert(arena_mapbits_dirty_get(chunk, map_bias) == nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
arena_mapbits_dirty_get(chunk, chunk_npages-1)); }
return (chunk); nstime_init(&decay->epoch, 0);
nstime_update(&decay->epoch);
decay->jitter_state = (uint64_t)(uintptr_t)decay;
arena_decay_deadline_init(decay);
decay->nunpurged = 0;
memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
} }
static bool static bool
arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero) arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
{ arena_stats_decay_t *stats) {
if (config_debug) {
/* for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
* The extent node notion of "committed" doesn't directly apply to assert(((char *)decay)[i] == 0);
* arena chunks. Arbitrarily mark them as committed. The commit state
* of runs is tracked individually, and upon chunk deallocation the
* entire chunk is in a consistent commit state.
*/
extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
extent_node_achunk_set(&chunk->node, true);
return (chunk_register(chunk, &chunk->node));
}
static arena_chunk_t *
arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
bool *zero, bool *commit)
{
arena_chunk_t *chunk;
malloc_mutex_unlock(&arena->lock);
chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
chunksize, chunksize, zero, commit);
if (chunk != NULL && !*commit) {
/* Commit header. */
if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind)) {
chunk_dalloc_wrapper(arena, chunk_hooks,
(void *)chunk, chunksize, *commit);
chunk = NULL;
} }
decay->ceil_npages = 0;
} }
if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) { if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
if (!*commit) { malloc_mutex_rank_exclusive)) {
/* Undo commit of header. */ return true;
chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind);
}
chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
chunksize, *commit);
chunk = NULL;
} }
decay->purging = false;
malloc_mutex_lock(&arena->lock); arena_decay_reinit(decay, decay_ms);
return (chunk); /* Memory is zeroed, so there is no need to clear stats. */
if (config_stats) {
decay->stats = stats;
}
return false;
} }
static arena_chunk_t * static bool
arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit) arena_decay_ms_valid(ssize_t decay_ms) {
{ if (decay_ms < -1) {
arena_chunk_t *chunk; return false;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; }
if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
KQU(1000)) {
return true;
}
return false;
}
chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize, static bool
chunksize, zero, true); arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
if (chunk != NULL) { extents_t *extents, bool is_background_thread) {
if (arena_chunk_register(arena, chunk, *zero)) { malloc_mutex_assert_owner(tsdn, &decay->mtx);
chunk_dalloc_cache(arena, &chunk_hooks, chunk,
chunksize, true); /* Purge all or nothing if the option is disabled. */
return (NULL); ssize_t decay_ms = arena_decay_ms_read(decay);
if (decay_ms <= 0) {
if (decay_ms == 0) {
arena_decay_to_limit(tsdn, arena, decay, extents, false,
0, extents_npages_get(extents),
is_background_thread);
} }
*commit = true; return false;
} }
if (chunk == NULL) {
chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks, nstime_t time;
zero, commit); nstime_init(&time, 0);
nstime_update(&time);
if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
> 0)) {
/*
* Time went backwards. Move the epoch back in time and
* generate a new deadline, with the expectation that time
* typically flows forward for long enough periods of time that
* epochs complete. Unfortunately, this strategy is susceptible
* to clock jitter triggering premature epoch advances, but
* clock jitter estimation and compensation isn't feasible here
* because calls into this code are event-driven.
*/
nstime_copy(&decay->epoch, &time);
arena_decay_deadline_init(decay);
} else {
/* Verify that time does not go backwards. */
assert(nstime_compare(&decay->epoch, &time) <= 0);
} }
if (config_stats && chunk != NULL) { /*
arena->stats.mapped += chunksize; * If the deadline has been reached, advance to the current epoch and
arena->stats.metadata_mapped += (map_bias << LG_PAGE); * purge to the new limit if necessary. Note that dirty pages created
* during the current epoch are not subject to purge until a future
* epoch, so as a result purging only happens during epoch advances, or
* being triggered by background threads (scheduled event).
*/
bool advance_epoch = arena_decay_deadline_reached(decay, &time);
if (advance_epoch) {
arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
is_background_thread);
} else if (is_background_thread) {
arena_decay_try_purge(tsdn, arena, decay, extents,
extents_npages_get(extents),
arena_decay_backlog_npages_limit(decay),
is_background_thread);
} }
return (chunk); return advance_epoch;
} }
static arena_chunk_t * static ssize_t
arena_chunk_init_hard(arena_t *arena) arena_decay_ms_get(arena_decay_t *decay) {
{ return arena_decay_ms_read(decay);
arena_chunk_t *chunk; }
bool zero, commit;
size_t flag_unzeroed, flag_decommitted, i;
assert(arena->spare == NULL); ssize_t
arena_dirty_decay_ms_get(arena_t *arena) {
return arena_decay_ms_get(&arena->decay_dirty);
}
zero = false; ssize_t
commit = false; arena_muzzy_decay_ms_get(arena_t *arena) {
chunk = arena_chunk_alloc_internal(arena, &zero, &commit); return arena_decay_ms_get(&arena->decay_muzzy);
if (chunk == NULL) }
return (NULL);
static bool
arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
extents_t *extents, ssize_t decay_ms) {
if (!arena_decay_ms_valid(decay_ms)) {
return true;
}
malloc_mutex_lock(tsdn, &decay->mtx);
/* /*
* Initialize the map to contain one maximal free untouched run. Mark * Restart decay backlog from scratch, which may cause many dirty pages
* the pages as zeroed if chunk_alloc() returned a zeroed or decommitted * to be immediately purged. It would conceptually be possible to map
* chunk. * the old backlog onto the new backlog, but there is no justification
*/ * for such complexity since decay_ms changes are intended to be
flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED; * infrequent, either between the {-1, 0, >0} states, or a one-time
flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED; * arbitrary change during initial arena configuration.
arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
flag_unzeroed | flag_decommitted);
/*
* There is no need to initialize the internal page map entries unless
* the chunk is not zeroed.
*/ */
if (!zero) { arena_decay_reinit(decay, decay_ms);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( arena_maybe_decay(tsdn, arena, decay, extents, false);
(void *)arena_bitselm_get(chunk, map_bias+1), malloc_mutex_unlock(tsdn, &decay->mtx);
(size_t)((uintptr_t) arena_bitselm_get(chunk,
chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_internal_set(chunk, i, flag_unzeroed);
} else {
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
*)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
arena_bitselm_get(chunk, chunk_npages-1) -
(uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
flag_unzeroed);
}
}
}
arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
flag_unzeroed);
return (chunk); return false;
} }
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
    arena_chunk_t *chunk;

    if (arena->spare != NULL)
        chunk = arena_chunk_init_spare(arena);
    else {
        chunk = arena_chunk_init_hard(arena);
        if (chunk == NULL)
            return (NULL);
    }

    /* Insert the run into the runs_avail tree. */
    arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

    return (chunk);
}

static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{

    assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
    assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
    assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
        arena_maxrun);
    assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
        arena_maxrun);
    assert(arena_mapbits_dirty_get(chunk, map_bias) ==
        arena_mapbits_dirty_get(chunk, chunk_npages-1));
    assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
        arena_mapbits_decommitted_get(chunk, chunk_npages-1));

    /*
     * Remove run from the runs_avail tree, so that the arena does not use
     * it.
     */
    arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

    if (arena->spare != NULL) {
        arena_chunk_t *spare = arena->spare;
        chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
        bool committed;

        arena->spare = chunk;
        if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
            arena_run_dirty_remove(arena, spare, map_bias,
                chunk_npages-map_bias);
        }

        chunk_deregister(spare, &spare->node);

        committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
            0);
        if (!committed) {
            /*
             * Decommit the header.  Mark the chunk as decommitted
             * even if header decommit fails, since treating a
             * partially committed chunk as committed has a high
             * potential for causing later access of decommitted
             * memory.
             */
            chunk_hooks = chunk_hooks_get(arena);
            chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
                LG_PAGE, arena->ind);
        }

        chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
            chunksize, committed);

        if (config_stats) {
            arena->stats.mapped -= chunksize;
            arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
        }
    } else
        arena->spare = chunk;
}

bool
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
    return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
        &arena->extents_dirty, decay_ms);
}

bool
arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
    return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
        &arena->extents_muzzy, decay_ms);
}

static size_t
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
    size_t npages_decay_max, extent_list_t *decay_extents) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    /* Stash extents according to npages_limit. */
    size_t nstashed = 0;
    extent_t *extent;
    while (nstashed < npages_decay_max &&
        (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
        npages_limit)) != NULL) {
        extent_list_append(decay_extents, extent);
        nstashed += extent_size_get(extent) >> LG_PAGE;
    }
    return nstashed;
}

static size_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
    bool all, extent_list_t *decay_extents, bool is_background_thread) {
    UNUSED size_t nmadvise, nunmapped;
    size_t npurged;

    if (config_stats) {
        nmadvise = 0;
        nunmapped = 0;
    }
    npurged = 0;

    ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
    for (extent_t *extent = extent_list_first(decay_extents); extent !=
        NULL; extent = extent_list_first(decay_extents)) {
        if (config_stats) {
            nmadvise++;
        }
        size_t npages = extent_size_get(extent) >> LG_PAGE;
        npurged += npages;
        extent_list_remove(decay_extents, extent);
        switch (extents_state_get(extents)) {
        case extent_state_active:
            not_reached();
        case extent_state_dirty:
            if (!all && muzzy_decay_ms != 0 &&
                !extent_purge_lazy_wrapper(tsdn, arena,
                r_extent_hooks, extent, 0,
                extent_size_get(extent))) {
                extents_dalloc(tsdn, arena, r_extent_hooks,
                    &arena->extents_muzzy, extent);
                arena_background_thread_inactivity_check(tsdn,
                    arena, is_background_thread);
                break;
            }
            /* Fall through. */
        case extent_state_muzzy:
            extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
                extent);
            if (config_stats) {
                nunmapped += npages;
            }
            break;
        case extent_state_retained:
        default:
            not_reached();
        }
    }

    if (config_stats) {
        arena_stats_lock(tsdn, &arena->stats);
        arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
            1);
        arena_stats_add_u64(tsdn, &arena->stats,
            &decay->stats->nmadvise, nmadvise);
        arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
            npurged);
        arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
            nunmapped << LG_PAGE);
        arena_stats_unlock(tsdn, &arena->stats);
    }

    return npurged;
}
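/*
 * Added note (not from the original change): purging is split into two
 * phases.  arena_stash_decayed() evicts extents from the source extents_t
 * into a private list while respecting npages_limit, and
 * arena_decay_stashed() then either advances dirty extents to the muzzy
 * state (lazy purge) or unmaps them, updating the decay stats.  Splitting
 * the work this way lets the actual purge/unmap calls run without holding
 * decay->mtx.
 */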
/*
* npages_limit: Decay at most npages_decay_max pages without violating the
* invariant: (extents_npages_get(extents) >= npages_limit). We need an upper
* bound on number of pages in order to prevent unbounded growth (namely in
* stashed), otherwise unbounded new pages could be added to extents during the
* current decay run, so that the purging thread never finishes.
*/
static void
arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
    bool is_background_thread) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 1);
    malloc_mutex_assert_owner(tsdn, &decay->mtx);

    if (decay->purging) {
        return;
    }
    decay->purging = true;
    malloc_mutex_unlock(tsdn, &decay->mtx);

    extent_hooks_t *extent_hooks = extent_hooks_get(arena);

    extent_list_t decay_extents;
    extent_list_init(&decay_extents);

    size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
        npages_limit, npages_decay_max, &decay_extents);
    if (npurge != 0) {
        UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
            &extent_hooks, decay, extents, all, &decay_extents,
            is_background_thread);
        assert(npurged == npurge);
    }

    malloc_mutex_lock(tsdn, &decay->mtx);
    decay->purging = false;
}

static bool
arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread, bool all) {
    if (all) {
        malloc_mutex_lock(tsdn, &decay->mtx);
        arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
            extents_npages_get(extents), is_background_thread);
        malloc_mutex_unlock(tsdn, &decay->mtx);

        return false;
    }

    if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
        /* No need to wait if another thread is in progress. */
        return true;
    }

    bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
        is_background_thread);
    UNUSED size_t npages_new;
    if (epoch_advanced) {
        /* Backlog is updated on epoch advance. */
        npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
    }
    malloc_mutex_unlock(tsdn, &decay->mtx);

    if (have_background_thread && background_thread_enabled() &&
        epoch_advanced && !is_background_thread) {
        background_thread_interval_check(tsdn, arena, decay,
            npages_new);
    }

    return false;
}

static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
    return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
        &arena->extents_dirty, is_background_thread, all);
}

static bool
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
    return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
        &arena->extents_muzzy, is_background_thread, all);
}

void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
    if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
        return;
    }
    arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
    szind_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.nmalloc_huge++;
    arena->stats.allocated_huge += usize;
    arena->stats.hstats[index].nmalloc++;
    arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
    szind_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.nmalloc_huge--;
    arena->stats.allocated_huge -= usize;
    arena->stats.hstats[index].nmalloc--;
    arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
    szind_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.ndalloc_huge++;
    arena->stats.allocated_huge -= usize;
    arena->stats.hstats[index].ndalloc++;
    arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
    szind_t index = size2index(usize) - nlclasses - NBINS;

    cassert(config_stats);

    arena->stats.ndalloc_huge--;
    arena->stats.allocated_huge += usize;
    arena->stats.hstats[index].ndalloc--;
    arena->stats.hstats[index].curhchunks++;
}
static void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
    arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);

    extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
    arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
}

static void
arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
    assert(extent_nfree_get(slab) > 0);
    extent_heap_insert(&bin->slabs_nonfull, slab);
}

static void
arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
    extent_heap_remove(&bin->slabs_nonfull, slab);
}

static extent_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
    extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
    if (slab == NULL) {
        return NULL;
    }
    if (config_stats) {
        bin->stats.reslabs++;
    }
    return slab;
}

static void
arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
    assert(extent_nfree_get(slab) == 0);
    /*
     * Tracking extents is required by arena_reset, which is not allowed
     * for auto arenas.  Bypass this step to avoid touching the extent
     * linkage (often results in cache misses) for auto arenas.
     */
    if (arena_is_auto(arena)) {
        return;
    }
    extent_list_append(&bin->slabs_full, slab);
}

static void
arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
    if (arena_is_auto(arena)) {
        return;
    }
    extent_list_remove(&bin->slabs_full, slab);
}

void
arena_reset(tsd_t *tsd, arena_t *arena) {
    /*
     * Locking in this function is unintuitive.  The caller guarantees that
     * no concurrent operations are happening in this arena, but there are
     * still reasons that some locking is necessary:
     *
     * - Some of the functions in the transitive closure of calls assume
     *   appropriate locks are held, and in some cases these locks are
     *   temporarily dropped to avoid lock order reversal or deadlock due to
     *   reentry.
     * - mallctl("epoch", ...) may concurrently refresh stats.  While
     *   strictly speaking this is a "concurrent operation", disallowing
     *   stats refreshes would impose an inconvenient burden.
     */

    /* Large allocations. */
    malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);

    for (extent_t *extent = extent_list_first(&arena->large); extent !=
        NULL; extent = extent_list_first(&arena->large)) {
        void *ptr = extent_base_get(extent);
        size_t usize;

        malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
        alloc_ctx_t alloc_ctx;
        rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
        rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
            (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
        assert(alloc_ctx.szind != NSIZES);

        if (config_stats || (config_prof && opt_prof)) {
            usize = sz_index2size(alloc_ctx.szind);
            assert(usize == isalloc(tsd_tsdn(tsd), ptr));
        }
        /* Remove large allocation from prof sample set. */
        if (config_prof && opt_prof) {
            prof_free(tsd, ptr, usize, &alloc_ctx);
        }
        large_dalloc(tsd_tsdn(tsd), extent);
        malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
    }
    malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);

    /* Bins. */
    for (unsigned i = 0; i < NBINS; i++) {
        extent_t *slab;
        bin_t *bin = &arena->bins[i];
        malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
        if (bin->slabcur != NULL) {
            slab = bin->slabcur;
            bin->slabcur = NULL;
            malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
            arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
            malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
        }
        while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
            NULL) {
            malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
            arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
            malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
        }
        for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
            slab = extent_list_first(&bin->slabs_full)) {
            arena_bin_slabs_full_remove(arena, bin, slab);
            malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
            arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
            malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
        }
        if (config_stats) {
            bin->stats.curregs = 0;
            bin->stats.curslabs = 0;
        }
        malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
    }

    atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

    arena_huge_dalloc_stats_update(arena, oldsize);
    arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

    arena_huge_dalloc_stats_update_undo(arena, oldsize);
    arena_huge_malloc_stats_update_undo(arena, usize);
}

extent_node_t *
arena_node_alloc(arena_t *arena)
{
    extent_node_t *node;

    malloc_mutex_lock(&arena->node_cache_mtx);
    node = ql_last(&arena->node_cache, ql_link);
    if (node == NULL) {
        malloc_mutex_unlock(&arena->node_cache_mtx);
        return (base_alloc(sizeof(extent_node_t)));
    }
    ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
    malloc_mutex_unlock(&arena->node_cache_mtx);
    return (node);
}

void
arena_node_dalloc(arena_t *arena, extent_node_t *node)
{

    malloc_mutex_lock(&arena->node_cache_mtx);
    ql_elm_new(node, ql_link);
    ql_tail_insert(&arena->node_cache, node, ql_link);
    malloc_mutex_unlock(&arena->node_cache_mtx);
}

static void *
arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t usize, size_t alignment, bool *zero, size_t csize)
{
    void *ret;
    bool commit = true;

    ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
        zero, &commit);
    if (ret == NULL) {
        /* Revert optimistic stats updates. */
        malloc_mutex_lock(&arena->lock);
        if (config_stats) {
            arena_huge_malloc_stats_update_undo(arena, usize);
            arena->stats.mapped -= usize;
        }
        arena->nactive -= (usize >> LG_PAGE);
        malloc_mutex_unlock(&arena->lock);
    }

    return (ret);
}

void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
bool *zero)
{
void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize = CHUNK_CEILING(usize);
malloc_mutex_lock(&arena->lock);
/* Optimistically update stats. */
if (config_stats) {
arena_huge_malloc_stats_update(arena, usize);
arena->stats.mapped += usize;
}
arena->nactive += (usize >> LG_PAGE);
ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
zero, true);
malloc_mutex_unlock(&arena->lock);
if (ret == NULL) {
ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
alignment, zero, csize);
}
if (config_stats && ret != NULL)
stats_cactive_add(usize);
return (ret);
}
void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize;
csize = CHUNK_CEILING(usize);
malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_dalloc_stats_update(arena, usize);
arena->stats.mapped -= usize;
stats_cactive_sub(usize);
}
arena->nactive -= (usize >> LG_PAGE);
chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
malloc_mutex_unlock(&arena->lock);
}
void
arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
size_t usize)
{
assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
assert(oldsize != usize);
malloc_mutex_lock(&arena->lock);
if (config_stats)
arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (oldsize < usize) {
size_t udiff = usize - oldsize;
arena->nactive += udiff >> LG_PAGE;
if (config_stats)
stats_cactive_add(udiff);
} else {
size_t udiff = oldsize - usize;
arena->nactive -= udiff >> LG_PAGE;
if (config_stats)
stats_cactive_sub(udiff);
}
malloc_mutex_unlock(&arena->lock);
}
void
arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
size_t usize)
{
size_t udiff = oldsize - usize;
size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (cdiff != 0) {
arena->stats.mapped -= cdiff;
stats_cactive_sub(udiff);
}
}
arena->nactive -= udiff >> LG_PAGE;
if (cdiff != 0) {
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
void *nchunk = (void *)((uintptr_t)chunk +
CHUNK_CEILING(usize));
chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
}
malloc_mutex_unlock(&arena->lock);
}
static bool
arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
size_t udiff, size_t cdiff)
{
bool err;
bool commit = true;
err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
zero, &commit) == NULL);
if (err) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update_undo(arena, oldsize,
usize);
arena->stats.mapped -= cdiff;
}
arena->nactive -= (udiff >> LG_PAGE);
malloc_mutex_unlock(&arena->lock);
} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
true);
err = true;
}
return (err);
}
bool
arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
size_t usize, bool *zero)
{
bool err;
chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
size_t udiff = usize - oldsize;
size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
malloc_mutex_lock(&arena->lock);
/* Optimistically update stats. */
if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize);
arena->stats.mapped += cdiff;
}
arena->nactive += (udiff >> LG_PAGE);
err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
chunksize, zero, true) == NULL);
malloc_mutex_unlock(&arena->lock);
if (err) {
err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
chunk, oldsize, usize, zero, nchunk, udiff,
cdiff);
} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
true);
err = true;
}
if (config_stats && !err)
stats_cactive_add(udiff);
return (err);
}
/*
* Do first-best-fit run selection, i.e. select the lowest run that best fits.
* Run sizes are quantized, so not all candidate runs are necessarily exactly
* the same size.
*/
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
size_t search_size = run_quantize_first(size);
arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size);
arena_chunk_map_misc_t *miscelm =
arena_avail_tree_nsearch(&arena->runs_avail, key);
if (miscelm == NULL)
return (NULL);
return (&miscelm->run);
}
static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
if (run != NULL) {
if (arena_run_split_large(arena, run, size, zero))
run = NULL;
}
return (run);
}
static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
arena_chunk_t *chunk;
arena_run_t *run;
assert(size <= arena_maxrun);
assert(size == PAGE_CEILING(size));
/* Search the arena's chunks for the lowest best fit. */
run = arena_run_alloc_large_helper(arena, size, zero);
if (run != NULL)
return (run);
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
run = &arena_miscelm_get(chunk, map_bias)->run;
if (arena_run_split_large(arena, run, size, zero))
run = NULL;
return (run);
}
/*
* arena_chunk_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped arena->lock in
* arena_chunk_alloc(), so search one more time.
*/
return (arena_run_alloc_large_helper(arena, size, zero));
}
static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
arena_run_t *run = arena_run_first_best_fit(arena, size);
if (run != NULL) {
if (arena_run_split_small(arena, run, size, binind))
run = NULL;
}
return (run);
}
static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
{
arena_chunk_t *chunk;
arena_run_t *run;
assert(size <= arena_maxrun);
assert(size == PAGE_CEILING(size));
assert(binind != BININD_INVALID);
/* Search the arena's chunks for the lowest best fit. */
run = arena_run_alloc_small_helper(arena, size, binind);
if (run != NULL)
return (run);
    /*
     * No usable runs.  Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = &arena_miscelm_get(chunk, map_bias)->run;
        if (arena_run_split_small(arena, run, size, binind))
            run = NULL;
        return (run);
    }

static void
arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
    /*
     * Iterate over the retained extents and destroy them.  This gives the
     * extent allocator underlying the extent hooks an opportunity to unmap
     * all retained memory without having to keep its own metadata
     * structures.  In practice, virtual memory for dss-allocated extents is
     * leaked here, so best practice is to avoid dss for arenas to be
     * destroyed, or provide custom extent hooks that track retained
     * dss-based extents for later reuse.
     */
    extent_hooks_t *extent_hooks = extent_hooks_get(arena);
    extent_t *extent;
    while ((extent = extents_evict(tsdn, arena, &extent_hooks,
        &arena->extents_retained, 0)) != NULL) {
        extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
    }
}

void
arena_destroy(tsd_t *tsd, arena_t *arena) {
    assert(base_ind_get(arena->base) >= narenas_auto);
    assert(arena_nthreads_get(arena, false) == 0);
    assert(arena_nthreads_get(arena, true) == 0);

    /*
     * No allocations have occurred since arena_reset() was called.
     * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
     * extents, so only retained extents may remain.
     */
    assert(extents_npages_get(&arena->extents_dirty) == 0);
    assert(extents_npages_get(&arena->extents_muzzy) == 0);

    /* Deallocate retained memory. */
    arena_destroy_retained(tsd_tsdn(tsd), arena);

    /*
     * Remove the arena pointer from the arenas array.  We rely on the fact
     * that there is no way for the application to get a dirty read from the
     * arenas array unless there is an inherent race in the application
     * involving access of an arena being concurrently destroyed.  The
     * application must synchronize knowledge of the arena's validity, so as
     * long as we use an atomic write to update the arenas array, the
     * application will get a clean read any time after it synchronizes
     * knowledge that the arena is no longer valid.
     */
    arena_set(base_ind_get(arena->base), NULL);

    /*
     * Destroy the base allocator, which manages all metadata ever mapped by
     * this arena.
     */
    base_delete(tsd_tsdn(tsd), arena->base);
}
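/*
 * Illustrative usage sketch (added note, not part of this change):
 * arena_reset() and arena_destroy() back the "arena.<i>.reset" and
 * "arena.<i>.destroy" mallctl names.  A typical lifecycle for an explicitly
 * created arena looks roughly like this (error handling omitted):
 *
 *     unsigned ind;
 *     size_t sz = sizeof(ind);
 *     char cmd[64];
 *     mallctl("arenas.create", &ind, &sz, NULL, 0);
 *     void *p = mallocx(4096, MALLOCX_ARENA(ind) | MALLOCX_TCACHE_NONE);
 *     ...
 *     dallocx(p, MALLOCX_TCACHE_NONE);
 *     snprintf(cmd, sizeof(cmd), "arena.%u.destroy", ind);
 *     mallctl(cmd, NULL, NULL, NULL, 0);
 */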
/*
* arena_chunk_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped arena->lock in
* arena_chunk_alloc(), so search one more time.
*/
return (arena_run_alloc_small_helper(arena, size, binind));
}
static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{
return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
<< 3));
}
ssize_t
arena_lg_dirty_mult_get(arena_t *arena)
{
ssize_t lg_dirty_mult;
malloc_mutex_lock(&arena->lock);
lg_dirty_mult = arena->lg_dirty_mult;
malloc_mutex_unlock(&arena->lock);
return (lg_dirty_mult);
}
bool
arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
{
if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
return (true);
malloc_mutex_lock(&arena->lock);
arena->lg_dirty_mult = lg_dirty_mult;
arena_maybe_purge(arena);
malloc_mutex_unlock(&arena->lock);
return (false);
}

void
arena_maybe_purge(arena_t *arena)
{

    /* Don't purge if the option is disabled. */
    if (arena->lg_dirty_mult < 0)
return;
/* Don't recursively purge. */
if (arena->purging)
return;
/*
* Iterate, since preventing recursive purging could otherwise leave too
* many dirty pages.
*/
while (true) {
size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
if (threshold < chunk_npages)
threshold = chunk_npages;
/*
* Don't purge unless the number of purgeable pages exceeds the
* threshold.
*/
if (arena->ndirty <= threshold)
return;
arena_purge(arena, false);
}
}
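/*
 * Added note: the lg_dirty_mult threshold purging above belongs to the
 * legacy run/chunk arena that this change removes; its replacement is the
 * time-based dirty/muzzy decay machinery (arena_decay_*) earlier in this
 * file.
 */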
static size_t
arena_dirty_count(arena_t *arena)
{
size_t ndirty = 0;
arena_runs_dirty_link_t *rdelm;
extent_node_t *chunkselm;
for (rdelm = qr_next(&arena->runs_dirty, rd_link),
chunkselm = qr_next(&arena->chunks_cache, cc_link);
rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
size_t npages;
if (rdelm == &chunkselm->rd) {
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
chunkselm = qr_next(chunkselm, cc_link);
} else {
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
rdelm);
arena_chunk_map_misc_t *miscelm =
arena_rd_to_miscelm(rdelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
assert(arena_mapbits_allocated_get(chunk, pageind) ==
0);
assert(arena_mapbits_large_get(chunk, pageind) == 0);
assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
npages = arena_mapbits_unallocated_size_get(chunk,
pageind) >> LG_PAGE;
}
ndirty += npages;
}
return (ndirty);
}
static size_t
arena_compute_npurge(arena_t *arena, bool all)
{
size_t npurge;
    /*
     * Compute the minimum number of pages that this thread should try to
     * purge.
     */
    if (!all) {
        size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
threshold = threshold < chunk_npages ? chunk_npages : threshold;
npurge = arena->ndirty - threshold;
} else
npurge = arena->ndirty;
return (npurge);
}
static size_t
arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
arena_runs_dirty_link_t *rdelm, *rdelm_next;
extent_node_t *chunkselm;
size_t nstashed = 0;
/* Stash at least npurge pages. */
for (rdelm = qr_next(&arena->runs_dirty, rd_link),
chunkselm = qr_next(&arena->chunks_cache, cc_link);
rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
size_t npages;
rdelm_next = qr_next(rdelm, rd_link);
if (rdelm == &chunkselm->rd) {
extent_node_t *chunkselm_next;
bool zero;
UNUSED void *chunk;
chunkselm_next = qr_next(chunkselm, cc_link);
/*
* Allocate. chunkselm remains valid due to the
* dalloc_node=false argument to chunk_alloc_cache().
*/
zero = false;
chunk = chunk_alloc_cache(arena, chunk_hooks,
extent_node_addr_get(chunkselm),
extent_node_size_get(chunkselm), chunksize, &zero,
false);
assert(chunk == extent_node_addr_get(chunkselm));
assert(zero == extent_node_zeroed_get(chunkselm));
extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
purge_chunks_sentinel);
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
chunkselm = chunkselm_next;
} else {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
arena_chunk_map_misc_t *miscelm =
arena_rd_to_miscelm(rdelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
arena_run_t *run = &miscelm->run;
size_t run_size =
arena_mapbits_unallocated_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
assert(pageind + npages <= chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) ==
arena_mapbits_dirty_get(chunk, pageind+npages-1));
/*
* If purging the spare chunk's run, make it available
* prior to allocation.
*/
if (chunk == arena->spare)
arena_chunk_alloc(arena);
/* Temporarily allocate the free dirty run. */
arena_run_split_large(arena, run, run_size, false);
/* Stash. */
if (false)
qr_new(rdelm, rd_link); /* Redundant. */
else {
assert(qr_next(rdelm, rd_link) == rdelm);
assert(qr_prev(rdelm, rd_link) == rdelm);
}
qr_meld(purge_runs_sentinel, rdelm, rd_link);
}
nstashed += npages;
if (!all && nstashed >= npurge)
break;
}
return (nstashed);
}
static size_t
arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
size_t npurged, nmadvise;
arena_runs_dirty_link_t *rdelm;
extent_node_t *chunkselm;
if (config_stats)
nmadvise = 0;
npurged = 0;
malloc_mutex_unlock(&arena->lock);
for (rdelm = qr_next(purge_runs_sentinel, rd_link),
chunkselm = qr_next(purge_chunks_sentinel, cc_link);
rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
size_t npages;
if (rdelm == &chunkselm->rd) {
/*
* Don't actually purge the chunk here because 1)
* chunkselm is embedded in the chunk and must remain
* valid, and 2) we deallocate the chunk in
* arena_unstash_purged(), where it is destroyed,
* decommitted, or purged, depending on chunk
* deallocation policy.
*/
size_t size = extent_node_size_get(chunkselm);
npages = size >> LG_PAGE;
chunkselm = qr_next(chunkselm, cc_link);
} else {
size_t pageind, run_size, flag_unzeroed, flags, i;
bool decommitted;
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
arena_chunk_map_misc_t *miscelm =
arena_rd_to_miscelm(rdelm);
pageind = arena_miscelm_to_pageind(miscelm);
run_size = arena_mapbits_large_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
assert(pageind + npages <= chunk_npages);
assert(!arena_mapbits_decommitted_get(chunk, pageind));
assert(!arena_mapbits_decommitted_get(chunk,
pageind+npages-1));
decommitted = !chunk_hooks->decommit(chunk, chunksize,
pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
if (decommitted) {
flag_unzeroed = 0;
flags = CHUNK_MAP_DECOMMITTED;
} else {
flag_unzeroed = chunk_purge_wrapper(arena,
chunk_hooks, chunk, chunksize, pageind <<
LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
flags = flag_unzeroed;
}
arena_mapbits_large_set(chunk, pageind+npages-1, 0,
flags);
arena_mapbits_large_set(chunk, pageind, run_size,
flags);
/*
* Set the unzeroed flag for internal pages, now that
* chunk_purge_wrapper() has returned whether the pages
* were zeroed as a side effect of purging. This chunk
* map modification is safe even though the arena mutex
* isn't currently owned by this thread, because the run
* is marked as allocated, thus protecting it from being
* modified by any other thread. As long as these
* writes don't perturb the first and last elements'
* CHUNK_MAP_ALLOCATED bits, behavior is well defined.
*/
for (i = 1; i < npages-1; i++) {
arena_mapbits_internal_set(chunk, pageind+i,
flag_unzeroed);
}
}
npurged += npages;
if (config_stats)
nmadvise++;
}
malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena->stats.nmadvise += nmadvise;
arena->stats.purged += npurged;
}
return (npurged);
}
static void
arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
arena_runs_dirty_link_t *rdelm, *rdelm_next;
extent_node_t *chunkselm;
/* Deallocate chunks/runs. */
for (rdelm = qr_next(purge_runs_sentinel, rd_link),
chunkselm = qr_next(purge_chunks_sentinel, cc_link);
rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
rdelm_next = qr_next(rdelm, rd_link);
if (rdelm == &chunkselm->rd) {
extent_node_t *chunkselm_next = qr_next(chunkselm,
cc_link);
void *addr = extent_node_addr_get(chunkselm);
size_t size = extent_node_size_get(chunkselm);
bool zeroed = extent_node_zeroed_get(chunkselm);
bool committed = extent_node_committed_get(chunkselm);
extent_node_dirty_remove(chunkselm);
arena_node_dalloc(arena, chunkselm);
chunkselm = chunkselm_next;
chunk_dalloc_arena(arena, chunk_hooks, addr, size,
zeroed, committed);
} else {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
arena_chunk_map_misc_t *miscelm =
arena_rd_to_miscelm(rdelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
bool decommitted = (arena_mapbits_decommitted_get(chunk,
pageind) != 0);
arena_run_t *run = &miscelm->run;
qr_remove(rdelm, rd_link);
arena_run_dalloc(arena, run, false, true, decommitted);
}
}
}
static void
arena_purge(arena_t *arena, bool all)
{
chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
size_t npurge, npurgeable, npurged;
arena_runs_dirty_link_t purge_runs_sentinel;
extent_node_t purge_chunks_sentinel;
arena->purging = true;
/*
* Calls to arena_dirty_count() are disabled even for debug builds
* because overhead grows nonlinearly as memory usage increases.
*/
if (false && config_debug) {
size_t ndirty = arena_dirty_count(arena);
assert(ndirty == arena->ndirty);
}
assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all);
if (config_stats)
arena->stats.npurge++;
npurge = arena_compute_npurge(arena, all);
qr_new(&purge_runs_sentinel, rd_link);
extent_node_dirty_linkage_init(&purge_chunks_sentinel);
npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge,
&purge_runs_sentinel, &purge_chunks_sentinel);
assert(npurgeable >= npurge);
npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
&purge_chunks_sentinel);
assert(npurged == npurgeable);
arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
&purge_chunks_sentinel);
arena->purging = false;
}
void
arena_purge_all(arena_t *arena)
{
malloc_mutex_lock(&arena->lock);
arena_purge(arena, true);
malloc_mutex_unlock(&arena->lock);
}
static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
size_t flag_decommitted)
{
size_t size = *p_size;
size_t run_ind = *p_run_ind;
size_t run_pages = *p_run_pages;
/* Try to coalesce forward. */
if (run_ind + run_pages < chunk_npages &&
arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
flag_decommitted) {
size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
run_ind+run_pages);
size_t nrun_pages = nrun_size >> LG_PAGE;
/*
* Remove successor from runs_avail; the coalesced run is
* inserted later.
*/
assert(arena_mapbits_unallocated_size_get(chunk,
run_ind+run_pages+nrun_pages-1) == nrun_size);
assert(arena_mapbits_dirty_get(chunk,
run_ind+run_pages+nrun_pages-1) == flag_dirty);
assert(arena_mapbits_decommitted_get(chunk,
run_ind+run_pages+nrun_pages-1) == flag_decommitted);
arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
/*
* If the successor is dirty, remove it from the set of dirty
* pages.
*/
if (flag_dirty != 0) {
arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
nrun_pages);
}
        size += nrun_size;
        run_pages += nrun_pages;
arena_mapbits_unallocated_size_set(chunk, run_ind, size);
arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
size);
}
/* Try to coalesce backward. */
if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
flag_decommitted) {
size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
run_ind-1);
size_t prun_pages = prun_size >> LG_PAGE;
run_ind -= prun_pages;
/*
* Remove predecessor from runs_avail; the coalesced run is
* inserted later.
*/
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
prun_size);
assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
flag_decommitted);
arena_avail_remove(arena, chunk, run_ind, prun_pages);
/*
* If the predecessor is dirty, remove it from the set of dirty
* pages.
*/
if (flag_dirty != 0) {
arena_run_dirty_remove(arena, chunk, run_ind,
prun_pages);
}
size += prun_size;
run_pages += prun_pages;
arena_mapbits_unallocated_size_set(chunk, run_ind, size);
arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
size);
}
*p_size = size;
*p_run_ind = run_ind;
*p_run_pages = run_pages;
}
static size_t
arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t run_ind)
{
size_t size;
assert(run_ind >= map_bias);
assert(run_ind < chunk_npages);
if (arena_mapbits_large_get(chunk, run_ind) != 0) {
size = arena_mapbits_large_size_get(chunk, run_ind);
assert(size == PAGE || arena_mapbits_large_size_get(chunk,
run_ind+(size>>LG_PAGE)-1) == 0);
} else {
arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
size = bin_info->run_size;
}
return (size);
}
static bool
arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t run_ind = arena_miscelm_to_pageind(miscelm);
size_t offset = run_ind << LG_PAGE;
size_t length = arena_run_size_get(arena, chunk, run, run_ind);
return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length,
arena->ind));
}
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
bool decommitted)
{
arena_chunk_t *chunk;
arena_chunk_map_misc_t *miscelm;
size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
miscelm = arena_run_to_miscelm(run);
run_ind = arena_miscelm_to_pageind(miscelm);
assert(run_ind >= map_bias);
assert(run_ind < chunk_npages);
size = arena_run_size_get(arena, chunk, run, run_ind);
run_pages = (size >> LG_PAGE);
arena_cactive_update(arena, 0, run_pages);
arena->nactive -= run_pages;
    /*
     * The run is dirty if the caller claims to have dirtied it, as well as
     * if it was already dirty before being allocated and the caller
     * doesn't claim to have cleaned it.
     */
    assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
!= 0)
dirty = true;
flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
/* Mark pages as unallocated in the chunk map. */
if (dirty || decommitted) {
size_t flags = flag_dirty | flag_decommitted;
arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
flags);
} else {
arena_mapbits_unallocated_set(chunk, run_ind, size,
arena_mapbits_unzeroed_get(chunk, run_ind));
arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
}
arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
flag_dirty, flag_decommitted);
/* Insert into runs_avail, now that coalescing is complete. */
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
arena_avail_insert(arena, chunk, run_ind, run_pages);
if (dirty)
arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
/* Deallocate chunk if it is now completely unused. */
if (size == arena_maxrun) {
assert(run_ind == map_bias);
assert(run_pages == (arena_maxrun >> LG_PAGE));
arena_chunk_dalloc(arena, chunk);
}
    /*
     * It is okay to do dirty page processing here even if the chunk was
     * deallocated above, since in that case it is the spare.  Waiting
     * until after possible chunk deallocation to do dirty processing
     * allows for an old spare to be fully deallocated, thus decreasing the
     * chances of spuriously crossing the dirty page purging threshold.
     */
    if (dirty)
        arena_maybe_purge(arena);
}
static void
arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run)
{
    bool committed = arena_run_decommit(arena, chunk, run);

    arena_run_dalloc(arena, run, committed, false, !committed);
}

static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
    arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
    size_t pageind = arena_miscelm_to_pageind(miscelm);
    size_t head_npages = (oldsize - newsize) >> LG_PAGE;
    size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
    size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
    size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
        CHUNK_MAP_UNZEROED : 0;

    assert(oldsize > newsize);

    /*
     * Update the chunk map so that arena_run_dalloc() can treat the
     * leading run as separately allocated.  Set the last element of each
     * run first, in case of single-page runs.
     */
    assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
    arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
        (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
        pageind+head_npages-1)));
    arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
        (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));

    if (config_debug) {
        UNUSED size_t tail_npages = newsize >> LG_PAGE;
        assert(arena_mapbits_large_size_get(chunk,
            pageind+head_npages+tail_npages-1) == 0);
        assert(arena_mapbits_dirty_get(chunk,
            pageind+head_npages+tail_npages-1) == flag_dirty);
    }
    arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
        flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
        pageind+head_npages)));

    arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
}

static extent_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
    szind_t szind) {
    extent_t *slab;
    bool zero, commit;

    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    zero = false;
    commit = true;
    slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
        bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);

    if (config_stats && slab != NULL) {
        arena_stats_mapped_add(tsdn, &arena->stats,
            bin_info->slab_size);
    }

    return slab;
}
static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
    arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
    size_t pageind = arena_miscelm_to_pageind(miscelm);
    size_t head_npages = newsize >> LG_PAGE;
    size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
    size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
    size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
        CHUNK_MAP_UNZEROED : 0;
    arena_chunk_map_misc_t *tail_miscelm;
    arena_run_t *tail_run;

    assert(oldsize > newsize);

    /*
     * Update the chunk map so that arena_run_dalloc() can treat the
     * trailing run as separately allocated.  Set the last element of each
     * run first, in case of single-page runs.
     */
    assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
    arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
        (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
        pageind+head_npages-1)));
    arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
        (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));

    if (config_debug) {
        UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
        assert(arena_mapbits_large_size_get(chunk,
            pageind+head_npages+tail_npages-1) == 0);
        assert(arena_mapbits_dirty_get(chunk,
            pageind+head_npages+tail_npages-1) == flag_dirty);
    }
    arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
        flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
        pageind+head_npages)));

    tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
    tail_run = &tail_miscelm->run;
    arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
        0));
}

static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
    arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
    if (miscelm != NULL)
        return (&miscelm->run);

    return (NULL);
}

static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
    arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);

    assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);

    arena_run_tree_insert(&bin->runs, miscelm);
}

static extent_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    const bin_info_t *bin_info) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
    szind_t szind = sz_size2index(bin_info->reg_size);
    bool zero = false;
    bool commit = true;
    extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
        &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
        binind, &zero, &commit);
    if (slab == NULL) {
        slab = extents_alloc(tsdn, arena, &extent_hooks,
            &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
            true, binind, &zero, &commit);
    }
    if (slab == NULL) {
        slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
            bin_info, szind);
        if (slab == NULL) {
            return NULL;
        }
    }
    assert(extent_slab_get(slab));

    /* Initialize slab internals. */
    arena_slab_data_t *slab_data = extent_slab_data_get(slab);
    extent_nfree_set(slab, bin_info->nregs);
    bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);

    arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);

    return slab;
}
static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
    arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);

    assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);

    arena_run_tree_remove(&bin->runs, miscelm);
}

static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
    arena_run_t *run = arena_bin_runs_first(bin);
    if (run != NULL) {
        arena_bin_runs_remove(bin, run);
        if (config_stats)
            bin->stats.reruns++;
    }
    return (run);
}

static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
    arena_run_t *run;
    szind_t binind;
    arena_bin_info_t *bin_info;

    /* Look for a usable run. */
    run = arena_bin_nonfull_run_tryget(bin);
    if (run != NULL)
        return (run);
    /* No existing runs have any space available. */

    binind = arena_bin_index(arena, bin);
    bin_info = &arena_bin_info[binind];

    /* Allocate a new run. */
    malloc_mutex_unlock(&bin->lock);
    /******************************/
    malloc_mutex_lock(&arena->lock);
    run = arena_run_alloc_small(arena, bin_info->run_size, binind);
    if (run != NULL) {
        /* Initialize run internals. */
        run->binind = binind;
        run->nfree = bin_info->nregs;
        bitmap_init(run->bitmap, &bin_info->bitmap_info);
    }
    malloc_mutex_unlock(&arena->lock);
    /********************************/
    malloc_mutex_lock(&bin->lock);
    if (run != NULL) {
        if (config_stats) {
            bin->stats.nruns++;
            bin->stats.curruns++;
        }
        return (run);
    }

    /*
     * arena_run_alloc_small() failed, but another thread may have made
     * sufficient memory available while this one dropped bin->lock above,
     * so search one more time.
     */
    run = arena_bin_nonfull_run_tryget(bin);
    if (run != NULL)
        return (run);

    return (NULL);
}

static extent_t *
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind) {
    extent_t *slab;
    const bin_info_t *bin_info;

    /* Look for a usable slab. */
    slab = arena_bin_slabs_nonfull_tryget(bin);
    if (slab != NULL) {
        return slab;
    }
    /* No existing slabs have any space available. */

    bin_info = &bin_infos[binind];

    /* Allocate a new slab. */
    malloc_mutex_unlock(tsdn, &bin->lock);
    /******************************/
    slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
    /********************************/
    malloc_mutex_lock(tsdn, &bin->lock);
    if (slab != NULL) {
        if (config_stats) {
            bin->stats.nslabs++;
            bin->stats.curslabs++;
        }
        return slab;
    }

    /*
     * arena_slab_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped bin->lock above,
     * so search one more time.
     */
    slab = arena_bin_slabs_nonfull_tryget(bin);
    if (slab != NULL) {
        return slab;
    }

    return NULL;
}
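/*
 * Added note: both the legacy run variant above and the slab variant
 * re-check the nonfull heap/tree after reacquiring bin->lock, because
 * another thread may have returned a run/slab to the bin while the lock was
 * dropped for the allocation.
 */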
/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
    szind_t binind;
    arena_bin_info_t *bin_info;
    arena_run_t *run;

    binind = arena_bin_index(arena, bin);
    bin_info = &arena_bin_info[binind];
    bin->runcur = NULL;
    run = arena_bin_nonfull_run_get(arena, bin);
    if (bin->runcur != NULL && bin->runcur->nfree > 0) {
        /*
         * Another thread updated runcur while this one ran without the
         * bin lock in arena_bin_nonfull_run_get().
         */
        void *ret;

        assert(bin->runcur->nfree > 0);
        ret = arena_run_reg_alloc(bin->runcur, bin_info);
        if (run != NULL) {
            arena_chunk_t *chunk;

            /*
             * arena_run_alloc_small() may have allocated run, or
             * it may have pulled run from the bin's run tree.
             * Therefore it is unsafe to make any assumptions about
             * how run has previously been used, and
             * arena_bin_lower_run() must be called, as if a region
             * were just deallocated from the run.
             */
            chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
            if (run->nfree == bin_info->nregs)
                arena_dalloc_bin_run(arena, chunk, run, bin);
            else
                arena_bin_lower_run(arena, chunk, run, bin);
        }
        return (ret);
    }

    if (run == NULL)
        return (NULL);

    bin->runcur = run;

    assert(bin->runcur->nfree > 0);

    return (arena_run_reg_alloc(bin->runcur, bin_info));
}

/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind) {
    const bin_info_t *bin_info;
    extent_t *slab;

    bin_info = &bin_infos[binind];
    if (!arena_is_auto(arena) && bin->slabcur != NULL) {
        arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
        bin->slabcur = NULL;
    }
    slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
    if (bin->slabcur != NULL) {
        /*
         * Another thread updated slabcur while this one ran without the
         * bin lock in arena_bin_nonfull_slab_get().
         */
        if (extent_nfree_get(bin->slabcur) > 0) {
            void *ret = arena_slab_reg_alloc(bin->slabcur,
                bin_info);
            if (slab != NULL) {
                /*
                 * arena_slab_alloc() may have allocated slab,
                 * or it may have been pulled from
                 * slabs_nonfull.  Therefore it is unsafe to
                 * make any assumptions about how slab has
                 * previously been used, and
                 * arena_bin_lower_slab() must be called, as if
                 * a region were just deallocated from the slab.
                 */
                if (extent_nfree_get(slab) == bin_info->nregs) {
                    arena_dalloc_bin_slab(tsdn, arena, slab,
                        bin);
                } else {
                    arena_bin_lower_slab(tsdn, arena, slab,
                        bin);
                }
            }
            return ret;
        }

        arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
        bin->slabcur = NULL;
    }

    if (slab == NULL) {
        return NULL;
    }
    bin->slabcur = slab;

    assert(extent_nfree_get(bin->slabcur) > 0);

    return arena_slab_reg_alloc(slab, bin_info);
}
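/*
 * Added note: bin->slabcur is the slab that satisfies the next allocation
 * request for this bin.  arena_bin_malloc_hard() runs only when slabcur is
 * missing or full, and it must tolerate slabcur having been refilled
 * concurrently while bin->lock was dropped in arena_bin_nonfull_slab_get().
 */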
void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
    uint64_t prof_accumbytes)
{
    unsigned i, nfill;
    arena_bin_t *bin;

    assert(tbin->ncached == 0);

    if (config_prof && arena_prof_accum(arena, prof_accumbytes))
        prof_idump();
    bin = &arena->bins[binind];
    malloc_mutex_lock(&bin->lock);
    for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
        tbin->lg_fill_div); i < nfill; i++) {
        arena_run_t *run;
        void *ptr;
        if ((run = bin->runcur) != NULL && run->nfree > 0)
            ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
        else
            ptr = arena_bin_malloc_hard(arena, bin);
        if (ptr == NULL) {
            /*
             * OOM.  tbin->avail isn't yet filled down to its first
             * element, so the successful allocations (if any) must
             * be moved to the base of tbin->avail before bailing
             * out.
             */
            if (i > 0) {
                memmove(tbin->avail, &tbin->avail[nfill - i],
                    i * sizeof(void *));
            }
            break;
        }
        if (config_fill && unlikely(opt_junk_alloc)) {
            arena_alloc_junk_small(ptr, &arena_bin_info[binind],
                true);
        }
        /* Insert such that low regions get used first. */
        tbin->avail[nfill - 1 - i] = ptr;
    }
    if (config_stats) {
        bin->stats.nmalloc += i;
        bin->stats.nfills++;
        tbin->tstats.nrequests = 0;
    }
    malloc_mutex_unlock(&bin->lock);
    tbin->ncached = i;
}

void
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
    unsigned i, nfill;
    bin_t *bin;

    assert(tbin->ncached == 0);

    if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
        prof_idump(tsdn);
    }
    bin = &arena->bins[binind];
    malloc_mutex_lock(tsdn, &bin->lock);
    for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
        tcache->lg_fill_div[binind]); i < nfill; i++) {
        extent_t *slab;
        void *ptr;
        if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
            0) {
            ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]);
        } else {
            ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
        }
        if (ptr == NULL) {
            /*
             * OOM.  tbin->avail isn't yet filled down to its first
             * element, so the successful allocations (if any) must
             * be moved just before tbin->avail before bailing out.
             */
            if (i > 0) {
                memmove(tbin->avail - i, tbin->avail - nfill,
                    i * sizeof(void *));
            }
            break;
        }
        if (config_fill && unlikely(opt_junk_alloc)) {
            arena_alloc_junk_small(ptr, &bin_infos[binind], true);
        }
        /* Insert such that low regions get used first. */
        *(tbin->avail - nfill + i) = ptr;
    }
    if (config_stats) {
        bin->stats.nmalloc += i;
        bin->stats.nfills++;
        tbin->tstats.nrequests = 0;
    }
    malloc_mutex_unlock(tsdn, &bin->lock);
    tbin->ncached = i;
    arena_decay_tick(tsdn, arena);
}
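/*
 * Added note with a worked example (illustrative numbers, not taken from
 * this diff): the number of regions stashed per fill is
 * ncached_max >> lg_fill_div.  For instance, if a bin's ncached_max is 200
 * and the fill divisor exponent is 1, a cache miss refills the thread cache
 * with 100 regions; the tcache code adjusts the divisor over time to balance
 * fill cost against hit rate.
 */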
void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{

    if (zero) {
        size_t redzone_size = bin_info->redzone_size;
        memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
            redzone_size);
        memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
            redzone_size);
    } else {
        memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
            bin_info->reg_interval);
    }
}

void
arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
    if (!zero) {
        memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
    }
}
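/*
 * Added note: with the redzone machinery gone, allocation junking simply
 * fills the region with JEMALLOC_ALLOC_JUNK and deallocation junking with
 * JEMALLOC_FREE_JUNK (0xa5 and 0x5a respectively in the default build),
 * which keeps uninitialized-read and use-after-free bugs easy to spot in
 * core dumps.
 */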
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
#endif
static void
arena_redzone_corruption(void *ptr, size_t usize, bool after,
size_t offset, uint8_t byte)
{
malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
"(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
after ? "after" : "before", ptr, usize, byte);
}
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
JEMALLOC_N(arena_redzone_corruption_impl);
#endif
static void
arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
    memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}

static void
arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
{
    bool error = false;
if (opt_junk_alloc) {
size_t size = bin_info->reg_size;
size_t redzone_size = bin_info->redzone_size;
size_t i;
for (i = 1; i <= redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
if (*byte != 0xa5) {
error = true;
arena_redzone_corruption(ptr, size, false, i,
*byte);
if (reset)
*byte = 0xa5;
}
}
for (i = 0; i < redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
if (*byte != 0xa5) {
error = true;
arena_redzone_corruption(ptr, size, true, i,
*byte);
if (reset)
*byte = 0xa5;
}
}
}
if (opt_abort && error)
abort();
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
#endif
void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
{
size_t redzone_size = bin_info->redzone_size;
arena_redzones_validate(ptr, bin_info, false);
memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
bin_info->reg_interval);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
JEMALLOC_N(arena_dalloc_junk_small_impl);
#endif
void
arena_quarantine_junk_small(void *ptr, size_t usize)
{
szind_t binind;
arena_bin_info_t *bin_info;
cassert(config_fill);
assert(opt_junk_free);
assert(opt_quarantine);
assert(usize <= SMALL_MAXCLASS);
binind = size2index(usize);
bin_info = &arena_bin_info[binind];
arena_redzones_validate(ptr, bin_info, true);
} }
arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
arena_dalloc_junk_small_impl;
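/*
 * Added note: JET_MUTABLE expands to const in normal builds and to nothing
 * in JEMALLOC_JET test builds, so the hook above is immutable in production
 * but can be interposed by tests without the #undef/#define renaming dance
 * used by the removed code.
 */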
static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
    void *ret;
    bin_t *bin;
    size_t usize;
    extent_t *slab;

    assert(binind < NBINS);
    bin = &arena->bins[binind];
    usize = sz_index2size(binind);

    malloc_mutex_lock(tsdn, &bin->lock);
    if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
        ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
    } else {
        ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
    }

    if (ret == NULL) {
        malloc_mutex_unlock(tsdn, &bin->lock);
        return NULL;
    }

    if (config_stats) {
        bin->stats.nmalloc++;
        bin->stats.nrequests++;
        bin->stats.curregs++;
    }
    malloc_mutex_unlock(tsdn, &bin->lock);
    if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
        prof_idump(tsdn);
    }

    if (!zero) {
        if (config_fill) {
            if (unlikely(opt_junk_alloc)) {
                arena_alloc_junk_small(ret,
                    &bin_infos[binind], false);
            } else if (unlikely(opt_zero)) {
                memset(ret, 0, usize);
            }
        }
    } else {
        if (config_fill && unlikely(opt_junk_alloc)) {
            arena_alloc_junk_small(ret, &bin_infos[binind],
                true);
        }
        memset(ret, 0, usize);
    }

void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
    void *ret;
    arena_bin_t *bin;
    arena_run_t *run;
    szind_t binind;

    binind = size2index(size);
    assert(binind < NBINS);
    bin = &arena->bins[binind];
    size = index2size(binind);

    malloc_mutex_lock(&bin->lock);
    if ((run = bin->runcur) != NULL && run->nfree > 0)
        ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
    else
        ret = arena_bin_malloc_hard(arena, bin);

    if (ret == NULL) {
        malloc_mutex_unlock(&bin->lock);
        return (NULL);
    }

    if (config_stats) {
        bin->stats.nmalloc++;
        bin->stats.nrequests++;
        bin->stats.curregs++;
    }
    malloc_mutex_unlock(&bin->lock);
    if (config_prof && !isthreaded && arena_prof_accum(arena, size))
        prof_idump();

    if (!zero) {
        if (config_fill) {
            if (unlikely(opt_junk_alloc)) {
                arena_alloc_junk_small(ret,
                    &arena_bin_info[binind], false);
            } else if (unlikely(opt_zero))
memset(ret, 0, size);
}
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
return (ret);
}
void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
void *ret;
size_t usize;
uintptr_t random_offset;
arena_run_t *run;
arena_chunk_map_misc_t *miscelm;
UNUSED bool idump;
/* Large allocation. */
usize = s2u(size);
malloc_mutex_lock(&arena->lock);
if (config_cache_oblivious) {
uint64_t r;
/*
* Compute a uniformly distributed offset within the first page
* that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
* for 4 KiB pages and 64-byte cachelines.
*/
prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state,
UINT64_C(6364136223846793009),
UINT64_C(1442695040888963409));
random_offset = ((uintptr_t)r) << LG_CACHELINE;
} else
random_offset = 0;
run = arena_run_alloc_large(arena, usize + large_pad, zero);
if (run == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
}
miscelm = arena_run_to_miscelm(run);
ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
random_offset);
if (config_stats) {
szind_t index = size2index(usize) - NBINS;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += usize;
arena->stats.lstats[index].nmalloc++;
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
if (config_prof)
idump = arena_prof_accum_locked(arena, usize);
malloc_mutex_unlock(&arena->lock);
if (config_prof && idump)
prof_idump();
if (!zero) {
if (config_fill) {
if (unlikely(opt_junk_alloc))
memset(ret, 0xa5, usize);
else if (unlikely(opt_zero))
                memset(ret, 0, usize);
        }
    }
return (ret);
}
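/*
 * Editorial note (not part of the original source): the cache-oblivious
 * offset computed above takes the low (LG_PAGE - LG_CACHELINE) bits of a
 * PRNG draw and shifts them left by LG_CACHELINE.  Assuming 4 KiB pages
 * (LG_PAGE == 12) and 64-byte cachelines (LG_CACHELINE == 6), the effect is:
 *
 *	r             = <low 6 bits of the prng64() draw>;   // r in [0, 64)
 *	random_offset = (uintptr_t)r << 6;                    // 0, 64, ..., 4032
 *
 * i.e. one of the 64 cacheline-aligned offsets within the first page, so
 * consecutive large allocations do not all start at the same cache index.
 */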
/* Only handles large allocations that require more than page alignment. */
static void *
arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
bool zero)
{
void *ret;
size_t alloc_size, leadsize, trailsize;
arena_run_t *run;
arena_chunk_t *chunk;
arena_chunk_map_misc_t *miscelm;
void *rpages;
assert(usize == PAGE_CEILING(usize));
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL))
return (NULL);
alignment = PAGE_CEILING(alignment);
alloc_size = usize + large_pad + alignment - PAGE;
malloc_mutex_lock(&arena->lock);
run = arena_run_alloc_large(arena, alloc_size, false);
if (run == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
miscelm = arena_run_to_miscelm(run);
rpages = arena_miscelm_to_rpages(miscelm);
leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
(uintptr_t)rpages;
assert(alloc_size >= leadsize + usize);
trailsize = alloc_size - leadsize - usize - large_pad;
if (leadsize != 0) {
arena_chunk_map_misc_t *head_miscelm = miscelm;
arena_run_t *head_run = run;
miscelm = arena_miscelm_get(chunk,
arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
LG_PAGE));
run = &miscelm->run;
arena_run_trim_head(arena, chunk, head_run, alloc_size,
alloc_size - leadsize);
}
if (trailsize != 0) {
arena_run_trim_tail(arena, chunk, run, usize + large_pad +
trailsize, usize + large_pad, false);
}
if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
size_t run_ind =
arena_miscelm_to_pageind(arena_run_to_miscelm(run));
bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
bool decommitted = (arena_mapbits_decommitted_get(chunk,
run_ind) != 0);
assert(decommitted); /* Cause of OOM. */
arena_run_dalloc(arena, run, dirty, false, decommitted);
malloc_mutex_unlock(&arena->lock);
return (NULL);
}
ret = arena_miscelm_to_rpages(miscelm);
if (config_stats) {
szind_t index = size2index(usize) - NBINS;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += usize;
arena->stats.lstats[index].nmalloc++;
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
malloc_mutex_unlock(&arena->lock);
if (config_fill && !zero) {
if (unlikely(opt_junk_alloc))
memset(ret, 0xa5, usize);
else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
return (ret);
}
void *
arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
bool zero, tcache_t *tcache)
{
void *ret;
if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
&& (usize & PAGE_MASK) == 0))) {
/* Small; alignment doesn't require special run placement. */
ret = arena_malloc(tsd, arena, usize, zero, tcache);
} else if (usize <= large_maxclass && alignment <= PAGE) {
/*
* Large; alignment doesn't require special run placement.
* However, the cached pointer may be at a random offset from
* the base of the run, so do some bit manipulation to retrieve
* the base.
*/
ret = arena_malloc(tsd, arena, usize, zero, tcache);
if (config_cache_oblivious)
ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
} else { } else {
if (likely(usize <= large_maxclass)) { if (config_fill && unlikely(opt_junk_alloc)) {
ret = arena_palloc_large(tsd, arena, usize, alignment, arena_alloc_junk_small(ret, &bin_infos[binind],
zero); true);
} else if (likely(alignment <= chunksize)) }
ret = huge_malloc(tsd, arena, usize, zero, tcache); memset(ret, 0, usize);
else { }
ret = huge_palloc(tsd, arena, usize, alignment, zero,
tcache);
}
}
return (ret);
}
void
arena_prof_promoted(const void *ptr, size_t size)
{
arena_chunk_t *chunk;
size_t pageind;
szind_t binind;
cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
assert(isalloc(ptr, false) == LARGE_MINCLASS);
assert(isalloc(ptr, true) == LARGE_MINCLASS);
assert(size <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
binind = size2index(size);
assert(binind < NBINS);
arena_mapbits_large_binind_set(chunk, pageind, binind);
assert(isalloc(ptr, false) == LARGE_MINCLASS);
assert(isalloc(ptr, true) == size);
}
static void
arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
{
/* Dissociate run from bin. */
if (run == bin->runcur)
bin->runcur = NULL;
else {
szind_t binind = arena_bin_index(extent_node_arena_get(
&chunk->node), bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
if (bin_info->nregs != 1) {
/*
* This block's conditional is necessary because if the
* run only contains one region, then it never gets
* inserted into the non-full runs tree.
*/
arena_bin_runs_remove(bin, run);
}
}
}
static void
arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
{
assert(run != bin->runcur);
assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
NULL);
malloc_mutex_unlock(&bin->lock);
/******************************/
malloc_mutex_lock(&arena->lock);
arena_run_dalloc_decommit(arena, chunk, run);
malloc_mutex_unlock(&arena->lock);
/****************************/
malloc_mutex_lock(&bin->lock);
if (config_stats)
bin->stats.curruns--;
}
static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
{
/* arena_decay_tick(tsdn, arena);
* Make sure that if bin->runcur is non-NULL, it refers to the lowest return ret;
* non-full run. It is okay to NULL runcur out rather than proactively
* keeping it pointing at the lowest non-full run.
*/
if ((uintptr_t)run < (uintptr_t)bin->runcur) {
/* Switch runcur. */
if (bin->runcur->nfree > 0)
arena_bin_runs_insert(bin, bin->runcur);
bin->runcur = run;
if (config_stats)
bin->stats.reruns++;
} else
arena_bin_runs_insert(bin, run);
} }
static void void *
arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
arena_chunk_map_bits_t *bitselm, bool junked) bool zero) {
{ assert(!tsdn_null(tsdn) || arena != NULL);
size_t pageind, rpages_ind;
arena_run_t *run;
arena_bin_t *bin;
arena_bin_info_t *bin_info;
szind_t binind;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
run = &arena_miscelm_get(chunk, rpages_ind)->run;
binind = run->binind;
bin = &arena->bins[binind];
bin_info = &arena_bin_info[binind];
if (!junked && config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_small(ptr, bin_info);
arena_run_reg_dalloc(run, ptr); if (likely(!tsdn_null(tsdn))) {
if (run->nfree == bin_info->nregs) { arena = arena_choose(tsdn_tsd(tsdn), arena);
arena_dissociate_bin_run(chunk, run, bin); }
arena_dalloc_bin_run(arena, chunk, run, bin); if (unlikely(arena == NULL)) {
} else if (run->nfree == 1 && run != bin->runcur) return NULL;
arena_bin_lower_run(arena, chunk, run, bin); }
if (config_stats) { if (likely(size <= SMALL_MAXCLASS)) {
bin->stats.ndalloc++; return arena_malloc_small(tsdn, arena, ind, zero);
bin->stats.curregs--;
} }
return large_malloc(tsdn, arena, sz_index2size(ind), zero);
} }
void void *
arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
arena_chunk_map_bits_t *bitselm) bool zero, tcache_t *tcache) {
{ void *ret;
arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true); if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
&& (usize & PAGE_MASK) == 0))) {
/* Small; alignment doesn't require special slab placement. */
ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
zero, tcache, true);
} else {
if (likely(alignment <= CACHELINE)) {
ret = large_malloc(tsdn, arena, usize, zero);
} else {
ret = large_palloc(tsdn, arena, usize, alignment, zero);
}
}
return ret;
} }
void void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
size_t pageind, arena_chunk_map_bits_t *bitselm) cassert(config_prof);
{ assert(ptr != NULL);
arena_run_t *run; assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
arena_bin_t *bin; assert(usize <= SMALL_MAXCLASS);
size_t rpages_ind;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); rtree_ctx_t rtree_ctx_fallback;
run = &arena_miscelm_get(chunk, rpages_ind)->run; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
bin = &arena->bins[run->binind];
malloc_mutex_lock(&bin->lock);
arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
malloc_mutex_unlock(&bin->lock);
}
void extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, (uintptr_t)ptr, true);
size_t pageind) arena_t *arena = extent_arena_get(extent);
{
arena_chunk_map_bits_t *bitselm;
if (config_debug) { szind_t szind = sz_size2index(usize);
/* arena_ptr_small_binind_get() does extra sanity checking. */ extent_szind_set(extent, szind);
assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
pageind)) != BININD_INVALID); szind, false);
}
bitselm = arena_bitselm_get(chunk, pageind);
arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
}
#ifdef JEMALLOC_JET prof_accum_cancel(tsdn, &arena->prof_accum, usize);
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
#endif
void
arena_dalloc_junk_large(void *ptr, size_t usize)
{
if (config_fill && unlikely(opt_junk_free)) assert(isalloc(tsdn, ptr) == usize);
memset(ptr, 0x5a, usize);
} }
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
JEMALLOC_N(arena_dalloc_junk_large_impl);
#endif
static void static size_t
arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
void *ptr, bool junked) cassert(config_prof);
{ assert(ptr != NULL);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
arena_run_t *run = &miscelm->run;
if (config_fill || config_stats) {
size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
large_pad;
if (!junked)
arena_dalloc_junk_large(ptr, usize);
if (config_stats) {
szind_t index = size2index(usize) - NBINS;
arena->stats.ndalloc_large++; extent_szind_set(extent, NBINS);
arena->stats.allocated_large -= usize; rtree_ctx_t rtree_ctx_fallback;
arena->stats.lstats[index].ndalloc++; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
arena->stats.lstats[index].curruns--; rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
} NBINS, false);
}
arena_run_dalloc_decommit(arena, chunk, run); assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
return LARGE_MINCLASS;
} }
void void
arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
void *ptr) bool slow_path) {
{ cassert(config_prof);
assert(opt_prof);
arena_dalloc_large_locked_impl(arena, chunk, ptr, true); extent_t *extent = iealloc(tsdn, ptr);
size_t usize = arena_prof_demote(tsdn, extent, ptr);
if (usize <= tcache_maxclass) {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
sz_size2index(usize), slow_path);
} else {
large_dalloc(tsdn, extent);
}
} }
void static void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
{ /* Dissociate slab from bin. */
if (slab == bin->slabcur) {
bin->slabcur = NULL;
} else {
szind_t binind = extent_szind_get(slab);
const bin_info_t *bin_info = &bin_infos[binind];
malloc_mutex_lock(&arena->lock); /*
arena_dalloc_large_locked_impl(arena, chunk, ptr, false); * The following block's conditional is necessary because if the
malloc_mutex_unlock(&arena->lock); * slab only contains one region, then it never gets inserted
* into the non-full slabs heap.
*/
if (bin_info->nregs == 1) {
arena_bin_slabs_full_remove(arena, bin, slab);
} else {
arena_bin_slabs_nonfull_remove(bin, slab);
}
}
} }
static void static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
size_t oldsize, size_t size) bin_t *bin) {
{ assert(slab != bin->slabcur);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
arena_run_t *run = &miscelm->run;
assert(size < oldsize); malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
/* arena_slab_dalloc(tsdn, arena, slab);
* Shrink the run, and make trailing pages available for other /****************************/
* allocations. malloc_mutex_lock(tsdn, &bin->lock);
*/
malloc_mutex_lock(&arena->lock);
arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
large_pad, true);
if (config_stats) { if (config_stats) {
szind_t oldindex = size2index(oldsize) - NBINS; bin->stats.curslabs--;
szind_t index = size2index(size) - NBINS;
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= oldsize;
arena->stats.lstats[oldindex].ndalloc++;
arena->stats.lstats[oldindex].curruns--;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
arena->stats.lstats[index].nmalloc++;
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
} }
malloc_mutex_unlock(&arena->lock);
} }
static bool static void
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab,
size_t oldsize, size_t usize_min, size_t usize_max, bool zero) bin_t *bin) {
{ assert(extent_nfree_get(slab) > 0);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t npages = (oldsize + large_pad) >> LG_PAGE;
size_t followsize;
assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
large_pad);
/* Try to extend the run. */
malloc_mutex_lock(&arena->lock);
if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
pageind+npages) != 0)
goto label_fail;
followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
if (oldsize + followsize >= usize_min) {
/*
* The next run is available and sufficiently large. Split the
* following run, then merge the first part with the existing
* allocation.
*/
arena_run_t *run;
size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
usize = usize_max;
while (oldsize + followsize < usize)
usize = index2size(size2index(usize)-1);
assert(usize >= usize_min);
assert(usize >= oldsize);
splitsize = usize - oldsize;
if (splitsize == 0)
goto label_fail;
run = &arena_miscelm_get(chunk, pageind+npages)->run;
if (arena_run_split_large(arena, run, splitsize, zero))
goto label_fail;
if (config_cache_oblivious && zero) {
/*
* Zero the trailing bytes of the original allocation's
* last page, since they are in an indeterminate state.
*/
assert(PAGE_CEILING(oldsize) == oldsize);
memset((void *)((uintptr_t)ptr + oldsize), 0,
PAGE_CEILING((uintptr_t)ptr) - (uintptr_t)ptr);
}
size = oldsize + splitsize;
npages = (size + large_pad) >> LG_PAGE;
/*
* Mark the extended run as dirty if either portion of the run
* was dirty before allocation. This is rather pedantic,
* because there's not actually any sequence of events that
* could cause the resulting run to be passed to
* arena_run_dalloc() with the dirty argument set to false
* (which is when dirty flag consistency would really matter).
*/
flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
arena_mapbits_dirty_get(chunk, pageind+npages-1);
flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
arena_mapbits_large_set(chunk, pageind, size + large_pad,
flag_dirty | (flag_unzeroed_mask &
arena_mapbits_unzeroed_get(chunk, pageind)));
arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
(flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
pageind+npages-1)));
/*
* Make sure that if bin->slabcur is non-NULL, it refers to the
* oldest/lowest non-full slab. It is okay to NULL slabcur out rather
* than proactively keeping it pointing at the oldest/lowest non-full
* slab.
*/
if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
/* Switch slabcur. */
if (extent_nfree_get(bin->slabcur) > 0) {
arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
} else {
arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
}
bin->slabcur = slab;
if (config_stats) { if (config_stats) {
szind_t oldindex = size2index(oldsize) - NBINS; bin->stats.reslabs++;
szind_t index = size2index(size) - NBINS;
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= oldsize;
arena->stats.lstats[oldindex].ndalloc++;
arena->stats.lstats[oldindex].curruns--;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
arena->stats.lstats[index].nmalloc++;
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
} }
malloc_mutex_unlock(&arena->lock); } else {
return (false); arena_bin_slabs_nonfull_insert(bin, slab);
} }
label_fail:
malloc_mutex_unlock(&arena->lock);
return (true);
} }
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
#endif
static void static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
{ void *ptr, bool junked) {
arena_slab_data_t *slab_data = extent_slab_data_get(slab);
szind_t binind = extent_szind_get(slab);
bin_t *bin = &arena->bins[binind];
const bin_info_t *bin_info = &bin_infos[binind];
if (!junked && config_fill && unlikely(opt_junk_free)) {
arena_dalloc_junk_small(ptr, bin_info);
}
if (config_fill && unlikely(opt_junk_free)) { arena_slab_reg_dalloc(slab, slab_data, ptr);
memset((void *)((uintptr_t)ptr + usize), 0x5a, unsigned nfree = extent_nfree_get(slab);
old_usize - usize); if (nfree == bin_info->nregs) {
arena_dissociate_bin_slab(arena, slab, bin);
arena_dalloc_bin_slab(tsdn, arena, slab, bin);
} else if (nfree == 1 && slab != bin->slabcur) {
arena_bin_slabs_full_remove(arena, bin, slab);
arena_bin_lower_slab(tsdn, arena, slab, bin);
}
if (config_stats) {
bin->stats.ndalloc++;
bin->stats.curregs--;
} }
} }
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
JEMALLOC_N(arena_ralloc_junk_large_impl);
#endif
/* void
* Try to resize a large allocation, in order to avoid copying. This will arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
* always fail if growing an object, and the following run is already in use. void *ptr) {
*/ arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
static bool }
arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
arena_chunk_t *chunk;
arena_t *arena;
if (oldsize == usize_max) { static void
/* Current size class is compatible and maximal. */ arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
return (false); szind_t binind = extent_szind_get(extent);
} bin_t *bin = &arena->bins[binind];
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); malloc_mutex_lock(tsdn, &bin->lock);
arena = extent_node_arena_get(&chunk->node); arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
malloc_mutex_unlock(tsdn, &bin->lock);
}
if (oldsize < usize_max) { void
bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize, arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
usize_min, usize_max, zero); extent_t *extent = iealloc(tsdn, ptr);
if (config_fill && !ret && !zero) { arena_t *arena = extent_arena_get(extent);
if (unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
isalloc(ptr, config_prof) - oldsize);
} else if (unlikely(opt_zero)) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
isalloc(ptr, config_prof) - oldsize);
}
}
return (ret);
}
assert(oldsize > usize_max); arena_dalloc_bin(tsdn, arena, extent, ptr);
/* Fill before shrinking in order avoid a race. */ arena_decay_tick(tsdn, arena);
arena_ralloc_junk_large(ptr, oldsize, usize_max);
arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
return (false);
} }
bool bool
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
bool zero) size_t extra, bool zero) {
{ /* Calls with non-zero extra had to clamp extra. */
size_t usize_min, usize_max; assert(extra == 0 || size + extra <= LARGE_MAXCLASS);
usize_min = s2u(size); if (unlikely(size > LARGE_MAXCLASS)) {
usize_max = s2u(size + extra); return true;
if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { }
extent_t *extent = iealloc(tsdn, ptr);
size_t usize_min = sz_s2u(size);
size_t usize_max = sz_s2u(size + extra);
if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
/* /*
* Avoid moving the allocation if the size class can be left the * Avoid moving the allocation if the size class can be left the
* same. * same.
*/ */
if (oldsize <= SMALL_MAXCLASS) { assert(bin_infos[sz_size2index(oldsize)].reg_size ==
assert(arena_bin_info[size2index(oldsize)].reg_size == oldsize);
oldsize); if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
if ((usize_max <= SMALL_MAXCLASS && sz_size2index(oldsize)) && (size > oldsize || usize_max <
size2index(usize_max) == size2index(oldsize)) || oldsize)) {
(size <= oldsize && usize_max >= oldsize)) return true;
return (false);
} else {
if (usize_max > SMALL_MAXCLASS) {
if (!arena_ralloc_large(ptr, oldsize, usize_min,
usize_max, zero))
return (false);
}
} }
/* Reallocation would require a move. */ arena_decay_tick(tsdn, extent_arena_get(extent));
return (true); return false;
} else { } else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max, return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
zero)); zero);
} }
return true;
} }
static void * static void *
arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache) size_t alignment, bool zero, tcache_t *tcache) {
{ if (alignment == 0) {
return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
if (alignment == 0) zero, tcache, true);
return (arena_malloc(tsd, arena, usize, zero, tcache)); }
usize = sa2u(usize, alignment); usize = sz_sa2u(usize, alignment);
if (usize == 0) if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return (NULL); return NULL;
return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); }
return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
} }
void * void *
arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t alignment, bool zero, tcache_t *tcache) size_t size, size_t alignment, bool zero, tcache_t *tcache) {
{ size_t usize = sz_s2u(size);
void *ret; if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
size_t usize; return NULL;
}
usize = s2u(size);
if (usize == 0)
return (NULL);
if (likely(usize <= large_maxclass)) {
size_t copysize;
if (likely(usize <= SMALL_MAXCLASS)) {
/* Try to avoid moving the allocation. */ /* Try to avoid moving the allocation. */
if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero)) if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
return (ptr); return ptr;
}
/* }
* size and oldsize are different enough that we need to move
* the object. In that case, fall back to allocating new space
* and copying.
*/
ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
zero, tcache);
if (ret == NULL)
return (NULL);
/* if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
* Junk/zero-filling were already done by return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
* ipalloc()/arena_malloc(). alignment, zero, tcache);
*/ }
copysize = (usize < oldsize) ? usize : oldsize; /*
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); * size and oldsize are different enough that we need to move the
memcpy(ret, ptr, copysize); * object. In that case, fall back to allocating new space and copying.
isqalloc(tsd, ptr, oldsize, tcache); */
} else { void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, zero, tcache);
zero, tcache); if (ret == NULL) {
return NULL;
} }
return (ret);
/*
* Junk/zero-filling were already done by
* ipalloc()/arena_malloc().
*/
size_t copysize = (usize < oldsize) ? usize : oldsize;
memcpy(ret, ptr, copysize);
isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
return ret;
} }
dss_prec_t dss_prec_t
arena_dss_prec_get(arena_t *arena) arena_dss_prec_get(arena_t *arena) {
{ return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
dss_prec_t ret;
malloc_mutex_lock(&arena->lock);
ret = arena->dss_prec;
malloc_mutex_unlock(&arena->lock);
return (ret);
} }
bool bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
{ if (!have_dss) {
if (!have_dss)
return (dss_prec != dss_prec_disabled); return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&arena->lock); }
arena->dss_prec = dss_prec; atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
malloc_mutex_unlock(&arena->lock); return false;
return (false); }
ssize_t
arena_dirty_decay_ms_default_get(void) {
return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
}
bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
if (!arena_decay_ms_valid(decay_ms)) {
return true;
}
atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
return false;
} }
ssize_t ssize_t
arena_lg_dirty_mult_default_get(void) arena_muzzy_decay_ms_default_get(void) {
{ return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
}
return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); bool
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
if (!arena_decay_ms_valid(decay_ms)) {
return true;
}
atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
return false;
} }
bool bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
{ size_t *new_limit) {
assert(opt_retain);
pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
if (new_limit != NULL) {
size_t limit = *new_limit;
/* Grow no more than the new limit. */
if ((new_ind = sz_psz2ind(limit + 1) - 1) >
EXTENT_GROW_MAX_PIND) {
return true;
}
}
malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
if (old_limit != NULL) {
*old_limit = sz_pind2sz(arena->retain_grow_limit);
}
if (new_limit != NULL) {
arena->retain_grow_limit = new_ind;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
return false;
}
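/*
 * Editorial note (not part of the original source): in
 * arena_retain_grow_limit_get_set() above, the requested limit is stored as
 * a page-size-class index: sz_psz2ind(limit + 1) finds the smallest class
 * strictly greater than limit, and subtracting 1 yields the largest class
 * that does not exceed it, i.e. the limit is rounded down to a page size
 * class (and reported back via sz_pind2sz() on read).
 */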
if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) unsigned
return (true); arena_nthreads_get(arena_t *arena, bool internal) {
atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
return (false);
} }
void void
arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult, arena_nthreads_inc(arena_t *arena, bool internal) {
size_t *nactive, size_t *ndirty, arena_stats_t *astats, atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, }
malloc_huge_stats_t *hstats)
{
unsigned i;
malloc_mutex_lock(&arena->lock); void
*dss = dss_prec_names[arena->dss_prec]; arena_nthreads_dec(arena_t *arena, bool internal) {
*lg_dirty_mult = arena->lg_dirty_mult; atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
*nactive += arena->nactive; }
*ndirty += arena->ndirty;
astats->mapped += arena->stats.mapped;
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
astats->metadata_mapped += arena->stats.metadata_mapped;
astats->metadata_allocated += arena_metadata_allocated_get(arena);
astats->allocated_large += arena->stats.allocated_large;
astats->nmalloc_large += arena->stats.nmalloc_large;
astats->ndalloc_large += arena->stats.ndalloc_large;
astats->nrequests_large += arena->stats.nrequests_large;
astats->allocated_huge += arena->stats.allocated_huge;
astats->nmalloc_huge += arena->stats.nmalloc_huge;
astats->ndalloc_huge += arena->stats.ndalloc_huge;
for (i = 0; i < nlclasses; i++) {
lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
lstats[i].nrequests += arena->stats.lstats[i].nrequests;
lstats[i].curruns += arena->stats.lstats[i].curruns;
}
for (i = 0; i < nhclasses; i++) {
hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
}
malloc_mutex_unlock(&arena->lock);
for (i = 0; i < NBINS; i++) { size_t
arena_bin_t *bin = &arena->bins[i]; arena_extent_sn_next(arena_t *arena) {
return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
malloc_mutex_lock(&bin->lock);
bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests;
bstats[i].curregs += bin->stats.curregs;
if (config_tcache) {
bstats[i].nfills += bin->stats.nfills;
bstats[i].nflushes += bin->stats.nflushes;
}
bstats[i].nruns += bin->stats.nruns;
bstats[i].reruns += bin->stats.reruns;
bstats[i].curruns += bin->stats.curruns;
malloc_mutex_unlock(&bin->lock);
}
} }
arena_t * arena_t *
arena_new(unsigned ind) arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
{
arena_t *arena; arena_t *arena;
base_t *base;
unsigned i; unsigned i;
arena_bin_t *bin;
/* if (ind == 0) {
* Allocate arena, arena->lstats, and arena->hstats contiguously, mainly base = b0get();
* because there is no way to clean up if base_alloc() OOMs. } else {
*/ base = base_new(tsdn, ind, extent_hooks);
if (config_stats) { if (base == NULL) {
arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t)) return NULL;
+ QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + }
nhclasses) * sizeof(malloc_huge_stats_t)); }
} else
arena = (arena_t *)base_alloc(sizeof(arena_t)); arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
if (arena == NULL) if (arena == NULL) {
return (NULL); goto label_error;
}
arena->ind = ind;
arena->nthreads = 0; atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
if (malloc_mutex_init(&arena->lock)) atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
return (NULL); arena->last_thd = NULL;
if (config_stats) { if (config_stats) {
memset(&arena->stats, 0, sizeof(arena_stats_t)); if (arena_stats_init(tsdn, &arena->stats)) {
arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena goto label_error;
+ CACHELINE_CEILING(sizeof(arena_t))); }
memset(arena->stats.lstats, 0, nlclasses *
sizeof(malloc_large_stats_t)); ql_new(&arena->tcache_ql);
arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena ql_new(&arena->cache_bin_array_descriptor_ql);
+ CACHELINE_CEILING(sizeof(arena_t)) + if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
memset(arena->stats.hstats, 0, nhclasses * goto label_error;
sizeof(malloc_huge_stats_t)); }
if (config_tcache) }
ql_new(&arena->tcache_ql);
} if (config_prof) {
if (prof_accum_init(tsdn, &arena->prof_accum)) {
if (config_prof) goto label_error;
arena->prof_accumbytes = 0; }
}
if (config_cache_oblivious) { if (config_cache_oblivious) {
/* /*
@@ -3040,279 +1809,235 @@ arena_new(unsigned ind)
* cost of test repeatability. For debug builds, instead use a * cost of test repeatability. For debug builds, instead use a
* deterministic seed. * deterministic seed.
*/ */
arena->offset_state = config_debug ? ind : atomic_store_zu(&arena->offset_state, config_debug ? ind :
(uint64_t)(uintptr_t)arena; (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
} }
arena->dss_prec = chunk_dss_prec_get(); atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
arena->spare = NULL;
arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
arena->purging = false; ATOMIC_RELAXED);
arena->nactive = 0;
arena->ndirty = 0;
arena_avail_tree_new(&arena->runs_avail); atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
qr_new(&arena->runs_dirty, rd_link);
qr_new(&arena->chunks_cache, cc_link);
ql_new(&arena->huge); extent_list_init(&arena->large);
if (malloc_mutex_init(&arena->huge_mtx)) if (malloc_mutex_init(&arena->large_mtx, "arena_large",
return (NULL); WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
goto label_error;
extent_tree_szad_new(&arena->chunks_szad_cached);
extent_tree_ad_new(&arena->chunks_ad_cached);
extent_tree_szad_new(&arena->chunks_szad_retained);
extent_tree_ad_new(&arena->chunks_ad_retained);
if (malloc_mutex_init(&arena->chunks_mtx))
return (NULL);
ql_new(&arena->node_cache);
if (malloc_mutex_init(&arena->node_cache_mtx))
return (NULL);
arena->chunk_hooks = chunk_hooks_default;
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock))
return (NULL);
bin->runcur = NULL;
arena_run_tree_new(&bin->runs);
if (config_stats)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
} }
return (arena);
}
/*
* Calculate bin_info->run_size such that it meets the following constraints:
*
* *) bin_info->run_size <= arena_maxrun
* *) bin_info->nregs <= RUN_MAXREGS
*
* bin_info->nregs and bin_info->reg0_offset are also calculated here, since
* these settings are all interdependent.
*/
static void
bin_info_run_size_calc(arena_bin_info_t *bin_info)
{
size_t pad_size;
size_t try_run_size, perfect_run_size, actual_run_size;
uint32_t try_nregs, perfect_nregs, actual_nregs;
/* /*
* Determine redzone size based on minimum alignment and minimum * Delay coalescing for dirty extents despite the disruptive effect on
* redzone size. Add padding to the end of the run if it is needed to * memory layout for best-fit extent allocation, since cached extents
* align the regions. The padding allows each redzone to be half the * are likely to be reused soon after deallocation, and the cost of
* minimum alignment; without the padding, each redzone would have to * merging/splitting extents is non-trivial.
* be twice as large in order to maintain alignment.
*/ */
if (config_fill && unlikely(opt_redzone)) { if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - true)) {
1); goto label_error;
if (align_min <= REDZONE_MINSIZE) {
bin_info->redzone_size = REDZONE_MINSIZE;
pad_size = 0;
} else {
bin_info->redzone_size = align_min >> 1;
pad_size = bin_info->redzone_size;
}
} else {
bin_info->redzone_size = 0;
pad_size = 0;
} }
bin_info->reg_interval = bin_info->reg_size +
(bin_info->redzone_size << 1);
/* /*
* Compute run size under ideal conditions (no redzones, no limit on run * Coalesce muzzy extents immediately, because operations on them are in
* size). * the critical path much less often than for dirty extents.
*/ */
try_run_size = PAGE; if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
try_nregs = try_run_size / bin_info->reg_size; false)) {
do { goto label_error;
perfect_run_size = try_run_size; }
perfect_nregs = try_nregs;
try_run_size += PAGE;
try_nregs = try_run_size / bin_info->reg_size;
} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
assert(perfect_nregs <= RUN_MAXREGS);
actual_run_size = perfect_run_size;
actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
/* /*
* Redzones can require enough padding that not even a single region can * Coalesce retained extents immediately, in part because they will
* fit within the number of pages that would normally be dedicated to a * never be evicted (and therefore there's no opportunity for delayed
* run for this size class. Increase the run size until at least one * coalescing), but also because operations on retained extents are not
* region fits. * in the critical path.
*/ */
while (actual_nregs == 0) { if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
assert(config_fill && unlikely(opt_redzone)); false)) {
goto label_error;
}
actual_run_size += PAGE; if (arena_decay_init(&arena->decay_dirty,
actual_nregs = (actual_run_size - pad_size) / arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
bin_info->reg_interval; goto label_error;
}
if (arena_decay_init(&arena->decay_muzzy,
arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
goto label_error;
} }
/* arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
* Make sure that the run will fit within an arena chunk. arena->retain_grow_limit = EXTENT_GROW_MAX_PIND;
*/ if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
while (actual_run_size > arena_maxrun) { WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
actual_run_size -= PAGE; goto label_error;
actual_nregs = (actual_run_size - pad_size) /
bin_info->reg_interval;
} }
assert(actual_nregs > 0);
assert(actual_run_size == s2u(actual_run_size));
/* Copy final settings. */ extent_avail_new(&arena->extent_avail);
bin_info->run_size = actual_run_size; if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
bin_info->nregs = actual_nregs; WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
bin_info->reg0_offset = actual_run_size - (actual_nregs * goto label_error;
bin_info->reg_interval) - pad_size + bin_info->redzone_size; }
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
bool err = bin_init(&arena->bins[i]);
if (err) {
goto label_error;
}
}
arena->base = base;
/* Set arena before creating background threads. */
arena_set(ind, arena);
nstime_init(&arena->create_time, 0);
nstime_update(&arena->create_time);
if (actual_run_size > small_maxrun) /* We don't support reentrancy for arena 0 bootstrapping. */
small_maxrun = actual_run_size; if (ind != 0) {
/*
* If we're here, then arena 0 already exists, so bootstrapping
* is done enough that we should have tsd.
*/
assert(!tsdn_null(tsdn));
pre_reentrancy(tsdn_tsd(tsdn), arena);
if (hooks_arena_new_hook) {
hooks_arena_new_hook();
}
post_reentrancy(tsdn_tsd(tsdn));
}
assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs return arena;
* bin_info->reg_interval) + pad_size == bin_info->run_size); label_error:
if (ind != 0) {
base_delete(tsdn, base);
}
return NULL;
} }
static void void
bin_info_init(void) arena_boot(void) {
{ arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
arena_bin_info_t *bin_info; arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
#define REGIND_bin_yes(index, reg_size) \
#define BIN_INFO_INIT_bin_yes(index, size) \ div_init(&arena_binind_div_info[(index)], (reg_size));
bin_info = &arena_bin_info[index]; \ #define REGIND_bin_no(index, reg_size)
bin_info->reg_size = size; \ #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
bin_info_run_size_calc(bin_info); \ lg_delta_lookup) \
bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta))
#define BIN_INFO_INIT_bin_no(index, size)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
SIZE_CLASSES SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes #undef REGIND_bin_yes
#undef BIN_INFO_INIT_bin_no #undef REGIND_bin_no
#undef SC #undef SC
} }
static bool void
small_run_size_init(void) arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
{ malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
assert(small_maxrun != 0); }
small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
LG_PAGE));
if (small_run_tab == NULL)
return (true);
#define TAB_INIT_bin_yes(index, size) { \ void
arena_bin_info_t *bin_info = &arena_bin_info[index]; \ arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
small_run_tab[bin_info->run_size >> LG_PAGE] = true; \ if (config_stats) {
malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
} }
#define TAB_INIT_bin_no(index, size)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
SIZE_CLASSES
#undef TAB_INIT_bin_yes
#undef TAB_INIT_bin_no
#undef SC
return (false);
} }
bool void
arena_boot(void) arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
{ malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
unsigned i; }
arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
extents_prefork(tsdn, &arena->extents_dirty);
extents_prefork(tsdn, &arena->extents_muzzy);
extents_prefork(tsdn, &arena->extents_retained);
}
/* void
* Compute the header size such that it is large enough to contain the arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
* page map. The page map is biased to omit entries for the header malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
* itself, so some iteration is necessary to compute the map bias. }
*
* 1) Compute safe header_size and map_bias values that include enough
* space for an unbiased page map.
* 2) Refine map_bias based on (1) to omit the header pages in the page
* map. The resulting map_bias may be one too small.
* 3) Refine map_bias based on (2). The result will be >= the result
* from (2), and will always be correct.
*/
map_bias = 0;
for (i = 0; i < 3; i++) {
size_t header_size = offsetof(arena_chunk_t, map_bits) +
((sizeof(arena_chunk_map_bits_t) +
sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
}
assert(map_bias > 0);
map_misc_offset = offsetof(arena_chunk_t, map_bits) +
sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
arena_maxrun = chunksize - (map_bias << LG_PAGE);
assert(arena_maxrun > 0);
large_maxclass = index2size(size2index(chunksize)-1);
if (large_maxclass > arena_maxrun) {
/*
* For small chunk sizes it's possible for there to be fewer
* non-header pages available than are necessary to serve the
* size classes just below chunksize.
*/
large_maxclass = arena_maxrun;
}
assert(large_maxclass > 0);
nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
nhclasses = NSIZES - nlclasses - NBINS;
bin_info_init(); void
return (small_run_size_init()); arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
base_prefork(tsdn, arena->base);
} }
void void
arena_prefork(arena_t *arena) arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
{ malloc_mutex_prefork(tsdn, &arena->large_mtx);
unsigned i; }
malloc_mutex_prefork(&arena->lock); void
malloc_mutex_prefork(&arena->huge_mtx); arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_prefork(&arena->chunks_mtx); for (unsigned i = 0; i < NBINS; i++) {
malloc_mutex_prefork(&arena->node_cache_mtx); bin_prefork(tsdn, &arena->bins[i]);
for (i = 0; i < NBINS; i++) }
malloc_mutex_prefork(&arena->bins[i].lock);
} }
void void
arena_postfork_parent(arena_t *arena) arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
{
unsigned i; unsigned i;
for (i = 0; i < NBINS; i++) for (i = 0; i < NBINS; i++) {
malloc_mutex_postfork_parent(&arena->bins[i].lock); bin_postfork_parent(tsdn, &arena->bins[i]);
malloc_mutex_postfork_parent(&arena->node_cache_mtx); }
malloc_mutex_postfork_parent(&arena->chunks_mtx); malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
malloc_mutex_postfork_parent(&arena->huge_mtx); base_postfork_parent(tsdn, arena->base);
malloc_mutex_postfork_parent(&arena->lock); malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
extents_postfork_parent(tsdn, &arena->extents_dirty);
extents_postfork_parent(tsdn, &arena->extents_muzzy);
extents_postfork_parent(tsdn, &arena->extents_retained);
malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
if (config_stats) {
malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
}
} }
void void
arena_postfork_child(arena_t *arena) arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
{
unsigned i; unsigned i;
for (i = 0; i < NBINS; i++) atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
malloc_mutex_postfork_child(&arena->bins[i].lock); atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
malloc_mutex_postfork_child(&arena->node_cache_mtx); if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
malloc_mutex_postfork_child(&arena->chunks_mtx); arena_nthreads_inc(arena, false);
malloc_mutex_postfork_child(&arena->huge_mtx); }
malloc_mutex_postfork_child(&arena->lock); if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
arena_nthreads_inc(arena, true);
}
if (config_stats) {
ql_new(&arena->tcache_ql);
ql_new(&arena->cache_bin_array_descriptor_ql);
tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
if (tcache != NULL && tcache->arena == arena) {
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
cache_bin_array_descriptor_init(
&tcache->cache_bin_array_descriptor,
tcache->bins_small, tcache->bins_large);
ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
&tcache->cache_bin_array_descriptor, link);
}
}
for (i = 0; i < NBINS; i++) {
bin_postfork_child(tsdn, &arena->bins[i]);
}
malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
base_postfork_child(tsdn, arena->base);
malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
extents_postfork_child(tsdn, &arena->extents_dirty);
extents_postfork_child(tsdn, &arena->extents_muzzy);
extents_postfork_child(tsdn, &arena->extents_retained);
malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
if (config_stats) {
malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
}
} }
#define JEMALLOC_ATOMIC_C_
#include "jemalloc/internal/jemalloc_internal.h"
#define JEMALLOC_BACKGROUND_THREAD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
/******************************************************************************/
/* Data. */
/* This option should be opt-in only. */
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT;
/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state. Atomic because decay reads this w/o locking. */
atomic_b_t background_thread_enabled_state;
size_t n_background_threads;
size_t max_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;
/* False if the necessary runtime support is unavailable. */
bool can_enable_background_thread;
/******************************************************************************/
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
#include <dlfcn.h>
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
static void
pthread_create_wrapper_init(void) {
#ifdef JEMALLOC_LAZY_LOCK
if (!isthreaded) {
isthreaded = true;
}
#endif
}
int
pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
void *(*start_routine)(void *), void *__restrict arg) {
pthread_create_wrapper_init();
return pthread_create_fptr(thread, attr, start_routine, arg);
}
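/*
 * Editorial sketch (not part of the original source): pthread_create_fptr
 * is only declared in this excerpt; its initialization is not shown here.
 * One common way for an interposing wrapper like this to resolve the real
 * pthread_create is via dlsym(RTLD_NEXT, ...), roughly:
 */
#if 0	/* illustrative only; not jemalloc's actual bootstrap code */
static void
pthread_create_fptr_resolve_example(void) {
	if (pthread_create_fptr == NULL) {
		pthread_create_fptr = (int (*)(pthread_t *__restrict,
		    const pthread_attr_t *, void *(*)(void *),
		    void *__restrict))dlsym(RTLD_NEXT, "pthread_create");
	}
}
#endif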
#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */
#ifndef JEMALLOC_BACKGROUND_THREAD
#define NOT_REACHED { not_reached(); }
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, size_t npages_new) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
bool background_thread_stats_read(tsdn_t *tsdn,
background_thread_stats_t *stats) NOT_REACHED
void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
#undef NOT_REACHED
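/*
 * Editorial note (not part of the original source): each stub above expands
 * to a definition whose body is just "{ not_reached(); }".  For example, the
 * first one becomes
 *
 *	bool background_thread_create(tsd_t *tsd, unsigned arena_ind) {
 *		not_reached();
 *	}
 *
 * so builds without JEMALLOC_BACKGROUND_THREAD still provide the symbols,
 * and any call into the background-thread API is treated as unreachable.
 */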
#else
static bool background_thread_enabled_at_fork;
static void
background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
background_thread_wakeup_time_set(tsdn, info, 0);
info->npages_to_purge_new = 0;
if (config_stats) {
info->tot_n_runs = 0;
nstime_init(&info->tot_sleep_time, 0);
}
}
static inline bool
set_current_thread_affinity(UNUSED int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
return (ret != 0);
#else
return false;
#endif
}
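/*
 * Editorial sketch (not part of the original source): a caller could spread
 * background threads over the online CPUs with a simple modulo policy, e.g.
 * (sysconf() assumed available; the policy is illustrative, not necessarily
 * what jemalloc itself does):
 */
#if 0	/* illustrative only */
static void
pin_background_thread_example(unsigned ind) {
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (ncpus > 0) {
		(void)set_current_thread_affinity((int)(ind % (unsigned)ncpus));
	}
}
#endif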
/* Threshold for determining when to wake up the background thread. */
#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
#define BILLION UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
static inline size_t
decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
size_t i;
uint64_t sum = 0;
for (i = 0; i < interval; i++) {
sum += decay->backlog[i] * h_steps[i];
}
for (; i < SMOOTHSTEP_NSTEPS; i++) {
sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
}
return (size_t)(sum >> SMOOTHSTEP_BFP);
}
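/*
 * Editorial note (not part of the original source): the two loops above
 * compute, in smoothstep fixed point,
 *
 *	npurge(d) ~= ( sum_{i <  d} backlog[i] *  h_steps[i]
 *	             + sum_{i >= d} backlog[i] * (h_steps[i] - h_steps[i - d])
 *	             ) >> SMOOTHSTEP_BFP
 *
 * with d == interval, i.e. an estimate of how many pages would become
 * purgeable if the background thread slept for another `interval` decay
 * epochs.  The callers below compare this estimate against
 * BACKGROUND_THREAD_NPAGES_THRESHOLD when choosing a sleep interval.
 */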
static uint64_t
arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
extents_t *extents) {
if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
/* Use minimal interval if decay is contended. */
return BACKGROUND_THREAD_MIN_INTERVAL_NS;
}
uint64_t interval;
ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
if (decay_time <= 0) {
		/* Purging is currently either done eagerly or disabled. */
interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
goto label_done;
}
uint64_t decay_interval_ns = nstime_ns(&decay->interval);
assert(decay_interval_ns > 0);
size_t npages = extents_npages_get(extents);
if (npages == 0) {
unsigned i;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
if (decay->backlog[i] > 0) {
break;
}
}
if (i == SMOOTHSTEP_NSTEPS) {
/* No dirty pages recorded. Sleep indefinitely. */
interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
goto label_done;
}
}
if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
/* Use max interval. */
interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
goto label_done;
}
size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
size_t ub = SMOOTHSTEP_NSTEPS;
	/* Use at least 2 intervals, to ensure reaching the next epoch deadline. */
lb = (lb < 2) ? 2 : lb;
if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
(lb + 2 > ub)) {
interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
goto label_done;
}
assert(lb + 2 <= ub);
size_t npurge_lb, npurge_ub;
npurge_lb = decay_npurge_after_interval(decay, lb);
if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
interval = decay_interval_ns * lb;
goto label_done;
}
npurge_ub = decay_npurge_after_interval(decay, ub);
if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
interval = decay_interval_ns * ub;
goto label_done;
}
unsigned n_search = 0;
size_t target, npurge;
while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
&& (lb + 2 < ub)) {
target = (lb + ub) / 2;
npurge = decay_npurge_after_interval(decay, target);
if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
ub = target;
npurge_ub = npurge;
} else {
lb = target;
npurge_lb = npurge;
}
assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
}
interval = decay_interval_ns * (ub + lb) / 2;
label_done:
interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
malloc_mutex_unlock(tsdn, &decay->mtx);
return interval;
}
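/*
 * Editorial note (not part of the original source): the bisection above
 * works in units of decay epochs.  BACKGROUND_THREAD_MIN_INTERVAL_NS is
 * 100 ms (BILLION / 10), so for a hypothetical decay_interval_ns of 5 ms the
 * bounds would start at lb = max(2, 100 ms / 5 ms) = 20 epochs and
 * ub = SMOOTHSTEP_NSTEPS.  Each step halves [lb, ub] until the estimated
 * purge counts at the two bounds differ by at most
 * BACKGROUND_THREAD_NPAGES_THRESHOLD pages or the bounds nearly meet
 * (lb + 2 >= ub); the returned sleep time is decay_interval_ns * (lb + ub) / 2,
 * clamped to at least the 100 ms minimum.
 */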
/* Compute purge interval for background threads. */
static uint64_t
arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
uint64_t i1, i2;
i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
&arena->extents_dirty);
if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
return i1;
}
i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
&arena->extents_muzzy);
return i1 < i2 ? i1 : i2;
}
static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
uint64_t interval) {
if (config_stats) {
info->tot_n_runs++;
}
info->npages_to_purge_new = 0;
struct timeval tv;
	/* pthread_cond_timedwait() requires an absolute time on a specific clock. */
gettimeofday(&tv, NULL);
nstime_t before_sleep;
nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);
int ret;
if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
assert(background_thread_indefinite_sleep(info));
ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
assert(ret == 0);
} else {
assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
		/* We need the malloc clock here (it can differ from tv's clock). */
nstime_t next_wakeup;
nstime_init(&next_wakeup, 0);
nstime_update(&next_wakeup);
nstime_iadd(&next_wakeup, interval);
assert(nstime_ns(&next_wakeup) <
BACKGROUND_THREAD_INDEFINITE_SLEEP);
background_thread_wakeup_time_set(tsdn, info,
nstime_ns(&next_wakeup));
nstime_t ts_wakeup;
nstime_copy(&ts_wakeup, &before_sleep);
nstime_iadd(&ts_wakeup, interval);
struct timespec ts;
ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);
assert(!background_thread_indefinite_sleep(info));
ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
assert(ret == ETIMEDOUT || ret == 0);
background_thread_wakeup_time_set(tsdn, info,
BACKGROUND_THREAD_INDEFINITE_SLEEP);
}
if (config_stats) {
gettimeofday(&tv, NULL);
nstime_t after_sleep;
nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
if (nstime_compare(&after_sleep, &before_sleep) > 0) {
nstime_subtract(&after_sleep, &before_sleep);
nstime_add(&info->tot_sleep_time, &after_sleep);
}
}
}
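/*
 * Editorial sketch (not part of the original source): the absolute deadline
 * handed to pthread_cond_timedwait() above is gettimeofday() plus the
 * relative interval.  A self-contained version of that conversion (names are
 * illustrative):
 */
#if 0	/* illustrative only */
static void
abs_deadline_example(struct timespec *ts, uint64_t interval_ns) {
	struct timeval tv;
	gettimeofday(&tv, NULL);
	uint64_t now_ns = (uint64_t)tv.tv_sec * 1000000000ULL +
	    (uint64_t)tv.tv_usec * 1000ULL;
	uint64_t deadline_ns = now_ns + interval_ns;
	ts->tv_sec = (time_t)(deadline_ns / 1000000000ULL);
	ts->tv_nsec = (long)(deadline_ns % 1000000000ULL);
}
#endif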
static bool
background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
if (unlikely(info->state == background_thread_paused)) {
malloc_mutex_unlock(tsdn, &info->mtx);
/* Wait on global lock to update status. */
malloc_mutex_lock(tsdn, &background_thread_lock);
malloc_mutex_unlock(tsdn, &background_thread_lock);
malloc_mutex_lock(tsdn, &info->mtx);
return true;
}
return false;
}
static inline void
background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
    unsigned ind) {
uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
unsigned narenas = narenas_total_get();
for (unsigned i = ind; i < narenas; i += max_background_threads) {
arena_t *arena = arena_get(tsdn, i, false);
if (!arena) {
continue;
}
arena_decay(tsdn, arena, true, false);
if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
/* Min interval will be used. */
continue;
}
uint64_t interval = arena_decay_compute_purge_interval(tsdn,
arena);
assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
if (min_interval > interval) {
min_interval = interval;
}
}
background_thread_sleep(tsdn, info, min_interval);
}
static bool
background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
if (info == &background_thread_info[0]) {
malloc_mutex_assert_owner(tsd_tsdn(tsd),
&background_thread_lock);
} else {
malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
&background_thread_lock);
}
pre_reentrancy(tsd, NULL);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
bool has_thread;
assert(info->state != background_thread_paused);
if (info->state == background_thread_started) {
has_thread = true;
info->state = background_thread_stopped;
pthread_cond_signal(&info->cond);
} else {
has_thread = false;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
if (!has_thread) {
post_reentrancy(tsd);
return false;
}
void *ret;
if (pthread_join(info->thread, &ret)) {
post_reentrancy(tsd);
return true;
}
assert(ret == NULL);
n_background_threads--;
post_reentrancy(tsd);
return false;
}
static void *background_thread_entry(void *ind_arg);
static int
background_thread_create_signals_masked(pthread_t *thread,
const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
/*
* Mask signals during thread creation so that the thread inherits
* an empty signal set.
*/
sigset_t set;
sigfillset(&set);
sigset_t oldset;
int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
if (mask_err != 0) {
return mask_err;
}
int create_err = pthread_create_wrapper(thread, attr, start_routine,
arg);
/*
* Restore the signal mask. Failure to restore the signal mask here
* changes program behavior.
*/
int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
if (restore_err != 0) {
malloc_printf("<jemalloc>: background thread creation "
"failed (%d), and signal mask restoration failed "
"(%d)\n", create_err, restore_err);
if (opt_abort) {
abort();
}
}
return create_err;
}
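A self-contained sketch of the mask/create/restore pattern used above, with placeholder names; the real code additionally routes creation through pthread_create_wrapper:

#include <pthread.h>
#include <signal.h>

static void *
example_thread_main(void *arg) {
	return arg;
}

/* Block every signal around pthread_create() so the new thread inherits a
 * mask with all signals blocked, then restore the caller's mask. */
static int
example_spawn_with_signals_blocked(pthread_t *thd) {
	sigset_t all, old;
	sigfillset(&all);
	int err = pthread_sigmask(SIG_SETMASK, &all, &old);
	if (err != 0) {
		return err;
	}
	err = pthread_create(thd, NULL, example_thread_main, NULL);
	/* Restore the mask even if creation failed. */
	pthread_sigmask(SIG_SETMASK, &old, NULL);
	return err;
}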
static bool
check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
bool *created_threads) {
bool ret = false;
if (likely(*n_created == n_background_threads)) {
return ret;
}
tsdn_t *tsdn = tsd_tsdn(tsd);
malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
for (unsigned i = 1; i < max_background_threads; i++) {
if (created_threads[i]) {
continue;
}
background_thread_info_t *info = &background_thread_info[i];
malloc_mutex_lock(tsdn, &info->mtx);
		/*
		 * If the thread is in the background_thread_paused state
		 * because of an arena reset, delay the creation.
		 */
bool create = (info->state == background_thread_started);
malloc_mutex_unlock(tsdn, &info->mtx);
if (!create) {
continue;
}
pre_reentrancy(tsd, NULL);
int err = background_thread_create_signals_masked(&info->thread,
NULL, background_thread_entry, (void *)(uintptr_t)i);
post_reentrancy(tsd);
if (err == 0) {
(*n_created)++;
created_threads[i] = true;
} else {
malloc_printf("<jemalloc>: background thread "
"creation failed (%d)\n", err);
if (opt_abort) {
abort();
}
}
/* Return to restart the loop since we unlocked. */
ret = true;
break;
}
malloc_mutex_lock(tsdn, &background_thread_info[0].mtx);
return ret;
}
static void
background_thread0_work(tsd_t *tsd) {
/* Thread0 is also responsible for launching / terminating threads. */
VARIABLE_ARRAY(bool, created_threads, max_background_threads);
unsigned i;
for (i = 1; i < max_background_threads; i++) {
created_threads[i] = false;
}
/* Start working, and create more threads when asked. */
unsigned n_created = 1;
while (background_thread_info[0].state != background_thread_stopped) {
if (background_thread_pause_check(tsd_tsdn(tsd),
&background_thread_info[0])) {
continue;
}
if (check_background_thread_creation(tsd, &n_created,
(bool *)&created_threads)) {
continue;
}
background_work_sleep_once(tsd_tsdn(tsd),
&background_thread_info[0], 0);
}
	/*
	 * Shut down other threads at exit. Note that the ctl thread is
	 * holding the global background_thread mutex and is waiting for us.
	 */
assert(!background_thread_enabled());
for (i = 1; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
assert(info->state != background_thread_paused);
if (created_threads[i]) {
background_threads_disable_single(tsd, info);
} else {
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
if (info->state != background_thread_stopped) {
/* The thread was not created. */
assert(info->state ==
background_thread_started);
n_background_threads--;
info->state = background_thread_stopped;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
}
background_thread_info[0].state = background_thread_stopped;
assert(n_background_threads == 1);
}
static void
background_work(tsd_t *tsd, unsigned ind) {
background_thread_info_t *info = &background_thread_info[ind];
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
BACKGROUND_THREAD_INDEFINITE_SLEEP);
if (ind == 0) {
background_thread0_work(tsd);
} else {
while (info->state != background_thread_stopped) {
if (background_thread_pause_check(tsd_tsdn(tsd),
info)) {
continue;
}
background_work_sleep_once(tsd_tsdn(tsd), info, ind);
}
}
assert(info->state == background_thread_stopped);
background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
static void *
background_thread_entry(void *ind_arg) {
unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
assert(thread_ind < max_background_threads);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#endif
if (opt_percpu_arena != percpu_arena_disabled) {
set_current_thread_affinity((int)thread_ind);
}
/*
* Start periodic background work. We use internal tsd which avoids
* side effects, for example triggering new arena creation (which in
* turn triggers another background thread creation).
*/
background_work(tsd_internal_fetch(), thread_ind);
assert(pthread_equal(pthread_self(),
background_thread_info[thread_ind].thread));
return NULL;
}
static void
background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
info->state = background_thread_started;
background_thread_info_init(tsd_tsdn(tsd), info);
n_background_threads++;
}
/* Create a new background thread if needed. */
bool
background_thread_create(tsd_t *tsd, unsigned arena_ind) {
assert(have_background_thread);
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
/* We create at most NCPUs threads. */
size_t thread_ind = arena_ind % max_background_threads;
background_thread_info_t *info = &background_thread_info[thread_ind];
bool need_new_thread;
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
need_new_thread = background_thread_enabled() &&
(info->state == background_thread_stopped);
if (need_new_thread) {
background_thread_init(tsd, info);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
if (!need_new_thread) {
return false;
}
if (arena_ind != 0) {
/* Threads are created asynchronously by Thread 0. */
background_thread_info_t *t0 = &background_thread_info[0];
malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
assert(t0->state == background_thread_started);
pthread_cond_signal(&t0->cond);
malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);
return false;
}
pre_reentrancy(tsd, NULL);
/*
* To avoid complications (besides reentrancy), create internal
* background threads with the underlying pthread_create.
*/
int err = background_thread_create_signals_masked(&info->thread, NULL,
background_thread_entry, (void *)thread_ind);
post_reentrancy(tsd);
if (err != 0) {
malloc_printf("<jemalloc>: arena 0 background thread creation "
"failed (%d)\n", err);
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
info->state = background_thread_stopped;
n_background_threads--;
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
return true;
}
return false;
}
bool
background_threads_enable(tsd_t *tsd) {
assert(n_background_threads == 0);
assert(background_thread_enabled());
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
VARIABLE_ARRAY(bool, marked, max_background_threads);
unsigned i, nmarked;
for (i = 0; i < max_background_threads; i++) {
marked[i] = false;
}
nmarked = 0;
/* Thread 0 is required and created at the end. */
marked[0] = true;
/* Mark the threads we need to create for thread 0. */
unsigned n = narenas_total_get();
for (i = 1; i < n; i++) {
if (marked[i % max_background_threads] ||
arena_get(tsd_tsdn(tsd), i, false) == NULL) {
continue;
}
background_thread_info_t *info = &background_thread_info[
i % max_background_threads];
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
assert(info->state == background_thread_stopped);
background_thread_init(tsd, info);
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
marked[i % max_background_threads] = true;
if (++nmarked == max_background_threads) {
break;
}
}
return background_thread_create(tsd, 0);
}
bool
background_threads_disable(tsd_t *tsd) {
assert(!background_thread_enabled());
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
/* Thread 0 will be responsible for terminating other threads. */
if (background_threads_disable_single(tsd,
&background_thread_info[0])) {
return true;
}
assert(n_background_threads == 0);
return false;
}
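From the application side this machinery is normally driven through the mallctl interface rather than by calling these functions directly. A usage sketch, assuming the usual <jemalloc/jemalloc.h> header and the "background_thread" control:

#include <jemalloc/jemalloc.h>
#include <stdbool.h>
#include <stdio.h>

/* Ask jemalloc to start its background threads at runtime. */
static int
example_enable_background_threads(void) {
	bool enable = true;
	int err = mallctl("background_thread", NULL, NULL, &enable,
	    sizeof(enable));
	if (err != 0) {
		fprintf(stderr, "mallctl(\"background_thread\"): %d\n", err);
	}
	return err;
}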
/* Check if we need to signal the background thread early. */
void
background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, size_t npages_new) {
background_thread_info_t *info = arena_background_thread_info_get(
arena);
if (malloc_mutex_trylock(tsdn, &info->mtx)) {
		/*
		 * The background thread may hold the mutex for a long period
		 * of time. We'd like to avoid imposing that variance on
		 * application threads, so keep this non-blocking and leave
		 * the work to a future epoch.
		 */
return;
}
if (info->state != background_thread_started) {
goto label_done;
}
if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
goto label_done;
}
ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
if (decay_time <= 0) {
/* Purging is eagerly done or disabled currently. */
goto label_done_unlock2;
}
uint64_t decay_interval_ns = nstime_ns(&decay->interval);
assert(decay_interval_ns > 0);
nstime_t diff;
nstime_init(&diff, background_thread_wakeup_time_get(info));
if (nstime_compare(&diff, &decay->epoch) <= 0) {
goto label_done_unlock2;
}
nstime_subtract(&diff, &decay->epoch);
if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
goto label_done_unlock2;
}
if (npages_new > 0) {
size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
/*
* Compute how many new pages we would need to purge by the next
* wakeup, which is used to determine if we should signal the
* background thread.
*/
uint64_t npurge_new;
if (n_epoch >= SMOOTHSTEP_NSTEPS) {
npurge_new = npages_new;
} else {
uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
assert(h_steps_max >=
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npurge_new = npages_new * (h_steps_max -
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npurge_new >>= SMOOTHSTEP_BFP;
}
info->npages_to_purge_new += npurge_new;
}
bool should_signal;
if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
should_signal = true;
} else if (unlikely(background_thread_indefinite_sleep(info)) &&
(extents_npages_get(&arena->extents_dirty) > 0 ||
extents_npages_get(&arena->extents_muzzy) > 0 ||
info->npages_to_purge_new > 0)) {
should_signal = true;
} else {
should_signal = false;
}
if (should_signal) {
info->npages_to_purge_new = 0;
pthread_cond_signal(&info->cond);
}
label_done_unlock2:
malloc_mutex_unlock(tsdn, &decay->mtx);
label_done:
malloc_mutex_unlock(tsdn, &info->mtx);
}
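The npurge_new computation above is fixed-point arithmetic: as I read it, h_steps[] entries are smoothstep fractions scaled by 2^SMOOTHSTEP_BFP, so a multiply followed by a right shift yields "pages times fraction". A reduced sketch with placeholder constants:

#include <stdint.h>

#define EXAMPLE_BFP 24	/* stand-in for SMOOTHSTEP_BFP */

/* npages scaled by a fraction expressed in 2^EXAMPLE_BFP fixed point. */
static uint64_t
example_scale_pages(uint64_t npages, uint64_t frac_fixed) {
	return (npages * frac_fixed) >> EXAMPLE_BFP;
}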
void
background_thread_prefork0(tsdn_t *tsdn) {
malloc_mutex_prefork(tsdn, &background_thread_lock);
background_thread_enabled_at_fork = background_thread_enabled();
}
void
background_thread_prefork1(tsdn_t *tsdn) {
for (unsigned i = 0; i < max_background_threads; i++) {
malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
}
}
void
background_thread_postfork_parent(tsdn_t *tsdn) {
for (unsigned i = 0; i < max_background_threads; i++) {
malloc_mutex_postfork_parent(tsdn,
&background_thread_info[i].mtx);
}
malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
}
void
background_thread_postfork_child(tsdn_t *tsdn) {
for (unsigned i = 0; i < max_background_threads; i++) {
malloc_mutex_postfork_child(tsdn,
&background_thread_info[i].mtx);
}
malloc_mutex_postfork_child(tsdn, &background_thread_lock);
if (!background_thread_enabled_at_fork) {
return;
}
/* Clear background_thread state (reset to disabled for child). */
malloc_mutex_lock(tsdn, &background_thread_lock);
n_background_threads = 0;
background_thread_enabled_set(tsdn, false);
for (unsigned i = 0; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
malloc_mutex_lock(tsdn, &info->mtx);
info->state = background_thread_stopped;
int ret = pthread_cond_init(&info->cond, NULL);
assert(ret == 0);
background_thread_info_init(tsdn, info);
malloc_mutex_unlock(tsdn, &info->mtx);
}
malloc_mutex_unlock(tsdn, &background_thread_lock);
}
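These prefork/postfork hooks only take effect once registered as fork handlers; jemalloc wires them up elsewhere via pthread_atfork(). A bare-bones illustration of that registration (handler names are placeholders):

#include <pthread.h>

static void example_prepare(void) { /* acquire locks in a fixed order */ }
static void example_parent(void) { /* release locks in the parent */ }
static void example_child(void) { /* reset thread-related state in the child */ }

static void
example_install_fork_handlers(void) {
	pthread_atfork(example_prepare, example_parent, example_child);
}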
bool
background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
assert(config_stats);
malloc_mutex_lock(tsdn, &background_thread_lock);
if (!background_thread_enabled()) {
malloc_mutex_unlock(tsdn, &background_thread_lock);
return true;
}
stats->num_threads = n_background_threads;
uint64_t num_runs = 0;
nstime_init(&stats->run_interval, 0);
for (unsigned i = 0; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
if (malloc_mutex_trylock(tsdn, &info->mtx)) {
/*
* Each background thread run may take a long time;
* avoid waiting on the stats if the thread is active.
*/
continue;
}
if (info->state != background_thread_stopped) {
num_runs += info->tot_n_runs;
nstime_add(&stats->run_interval, &info->tot_sleep_time);
}
malloc_mutex_unlock(tsdn, &info->mtx);
}
stats->num_runs = num_runs;
if (num_runs > 0) {
nstime_idivide(&stats->run_interval, num_runs);
}
malloc_mutex_unlock(tsdn, &background_thread_lock);
return false;
}
#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
#undef BILLION
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
static bool
pthread_create_fptr_init(void) {
if (pthread_create_fptr != NULL) {
return false;
}
pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
if (pthread_create_fptr == NULL) {
can_enable_background_thread = false;
if (config_lazy_lock || opt_background_thread) {
malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
"\"pthread_create\")\n");
abort();
}
} else {
can_enable_background_thread = true;
}
return false;
}
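The dlsym(RTLD_NEXT, ...) lookup above is the usual way for a library that interposes pthread_create to find the real implementation. A standalone sketch (requires _GNU_SOURCE for RTLD_NEXT on glibc):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <pthread.h>

typedef int (*example_pthread_create_t)(pthread_t *, const pthread_attr_t *,
    void *(*)(void *), void *);

/* Resolve the next definition of pthread_create after this library. */
static example_pthread_create_t
example_resolve_real_pthread_create(void) {
	return (example_pthread_create_t)dlsym(RTLD_NEXT, "pthread_create");
}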
/*
 * When lazy lock is enabled, we need to make sure isthreaded is set before
 * taking any background_thread locks. This is called early in ctl (instead of
 * waiting for the pthread_create calls to trigger) because the mutex is
 * required before creating background threads.
 */
void
background_thread_ctl_init(tsdn_t *tsdn) {
malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
pthread_create_fptr_init();
pthread_create_wrapper_init();
#endif
}
#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */
bool
background_thread_boot0(void) {
if (!have_background_thread && opt_background_thread) {
malloc_printf("<jemalloc>: option background_thread currently "
"supports pthread only\n");
return true;
}
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
if ((config_lazy_lock || opt_background_thread) &&
pthread_create_fptr_init()) {
return true;
}
#endif
return false;
}
bool
background_thread_boot1(tsdn_t *tsdn) {
#ifdef JEMALLOC_BACKGROUND_THREAD
assert(have_background_thread);
assert(narenas_total_get() > 0);
if (opt_max_background_threads == MAX_BACKGROUND_THREAD_LIMIT &&
ncpus < MAX_BACKGROUND_THREAD_LIMIT) {
opt_max_background_threads = ncpus;
}
max_background_threads = opt_max_background_threads;
background_thread_enabled_set(tsdn, opt_background_thread);
if (malloc_mutex_init(&background_thread_lock,
"background_thread_global",
WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
malloc_mutex_rank_exclusive)) {
return true;
}
background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
b0get(), opt_max_background_threads *
sizeof(background_thread_info_t), CACHELINE);
if (background_thread_info == NULL) {
return true;
}
for (unsigned i = 0; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
/* Thread mutex is rank_inclusive because of thread0. */
if (malloc_mutex_init(&info->mtx, "background_thread",
WITNESS_RANK_BACKGROUND_THREAD,
malloc_mutex_address_ordered)) {
return true;
}
if (pthread_cond_init(&info->cond, NULL)) {
return true;
}
malloc_mutex_lock(tsdn, &info->mtx);
info->state = background_thread_stopped;
background_thread_info_init(tsdn, info);
malloc_mutex_unlock(tsdn, &info->mtx);
}
#endif
return false;
}
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"
/******************************************************************************/
/* Data. */
static base_t *b0;
metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;
const char *metadata_thp_mode_names[] = {
	"disabled",
	"auto",
	"always"
};
/******************************************************************************/
static inline bool
metadata_thp_madvise(void) {
	return (metadata_thp_enabled() &&
	    (init_system_thp_mode == thp_mode_default));
}
static void *
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
	void *addr;
	bool zero = true;
	bool commit = true;
	/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
	assert(size == HUGEPAGE_CEILING(size));
	size_t alignment = HUGEPAGE;
	if (extent_hooks == &extent_hooks_default) {
		addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
	} else {
		/* No arena context as we are creating new arenas. */
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
		    &zero, &commit, ind);
		post_reentrancy(tsd);
	}
	return addr;
}
static void
base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
    size_t size) {
/*
* Cascade through dalloc, decommit, purge_forced, and purge_lazy,
* stopping at first success. This cascade is performed for consistency
* with the cascade in extent_dalloc_wrapper() because an application's
* custom hooks may not support e.g. dalloc. This function is only ever
* called as a side effect of arena destruction, so although it might
* seem pointless to do anything besides dalloc here, the application
* may in fact want the end state of all associated virtual memory to be
* in some consistent-but-allocated state.
*/
if (extent_hooks == &extent_hooks_default) {
if (!extent_dalloc_mmap(addr, size)) {
goto label_done;
}
if (!pages_decommit(addr, size)) {
goto label_done;
}
if (!pages_purge_forced(addr, size)) {
goto label_done;
}
if (!pages_purge_lazy(addr, size)) {
goto label_done;
}
/* Nothing worked. This should never happen. */
not_reached();
} else {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
pre_reentrancy(tsd, NULL);
if (extent_hooks->dalloc != NULL &&
!extent_hooks->dalloc(extent_hooks, addr, size, true,
ind)) {
goto label_post_reentrancy;
}
if (extent_hooks->decommit != NULL &&
!extent_hooks->decommit(extent_hooks, addr, size, 0, size,
ind)) {
goto label_post_reentrancy;
}
if (extent_hooks->purge_forced != NULL &&
!extent_hooks->purge_forced(extent_hooks, addr, size, 0,
size, ind)) {
goto label_post_reentrancy;
}
if (extent_hooks->purge_lazy != NULL &&
!extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
ind)) {
goto label_post_reentrancy;
}
/* Nothing worked. That's the application's problem. */
label_post_reentrancy:
post_reentrancy(tsd);
}
label_done:
if (metadata_thp_madvise()) {
/* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
(size & HUGEPAGE_MASK) == 0);
pages_nohuge(addr, size);
}
}
static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
size_t size) {
size_t sn;
sn = *extent_sn_next;
(*extent_sn_next)++;
	extent_binit(extent, addr, size, sn);
}
static size_t
base_get_num_blocks(base_t *base, bool with_new_block) {
	base_block_t *b = base->blocks;
	assert(b != NULL);
	size_t n_blocks = with_new_block ? 2 : 1;
	while (b->next != NULL) {
		n_blocks++;
		b = b->next;
	}
	return n_blocks;
}
static void
base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
	assert(opt_metadata_thp == metadata_thp_auto);
	malloc_mutex_assert_owner(tsdn, &base->mtx);
	if (base->auto_thp_switched) {
		return;
	}
	/* Called when adding a new block. */
	bool should_switch;
	if (base_ind_get(base) != 0) {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD);
	} else {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD_A0);
	}
	if (!should_switch) {
		return;
	}
	base->auto_thp_switched = true;
	assert(!config_stats || base->n_thp == 0);
	/* Make the initial blocks THP lazily. */
	base_block_t *block = base->blocks;
	while (block != NULL) {
		assert((block->size & HUGEPAGE_MASK) == 0);
		pages_huge(block, block->size);
		if (config_stats) {
			base->n_thp += HUGEPAGE_CEILING(block->size -
			    extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
}
block = block->next;
assert(block == NULL || (base_ind_get(base) == 0));
}
}
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
size_t alignment) {
void *ret;
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
assert(size == ALIGNMENT_CEILING(size, alignment));
*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
alignment) - (uintptr_t)extent_addr_get(extent);
ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
assert(extent_bsize_get(extent) >= *gap_size + size);
extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
*gap_size + size), extent_bsize_get(extent) - *gap_size - size,
extent_sn_get(extent));
return ret;
}
static void
base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
void *addr, size_t size) {
if (extent_bsize_get(extent) > 0) {
/*
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
szind_t index_floor =
sz_size2index(extent_bsize_get(extent) + 1) - 1;
extent_heap_insert(&base->avail[index_floor], extent);
}
if (config_stats) {
base->allocated += size;
/*
* Add one PAGE to base_resident for every page boundary that is
* crossed by the new allocation. Adjust n_thp similarly when
* metadata_thp is enabled.
*/
base->resident += PAGE_CEILING((uintptr_t)addr + size) -
PAGE_CEILING((uintptr_t)addr - gap_size);
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
if (metadata_thp_madvise() && (opt_metadata_thp ==
metadata_thp_always || base->auto_thp_switched)) {
base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
- HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
LG_HUGEPAGE;
assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
}
}
}
static void *
base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
size_t alignment) {
void *ret;
size_t gap_size;
ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
return ret;
}
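base_extent_bump_alloc() is a bump allocator over a pre-mapped range: align the cursor, hand out the requested bytes, and keep the shrunken remainder for later. A generic sketch of the same idea (assumes a power-of-two alignment; names are illustrative):

#include <stddef.h>
#include <stdint.h>

typedef struct {
	uintptr_t cur;	/* first free byte */
	uintptr_t end;	/* one past the last usable byte */
} example_bump_t;

static void *
example_bump_alloc(example_bump_t *b, size_t size, size_t alignment) {
	uintptr_t aligned = (b->cur + (alignment - 1)) &
	    ~(uintptr_t)(alignment - 1);
	if (aligned + size > b->end || aligned + size < aligned) {
		return NULL;	/* out of space (or overflow) */
	}
	b->cur = aligned + size;
	return (void *)aligned;
}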
/*
* Allocate a block of virtual memory that is large enough to start with a
* base_block_t header, followed by an object of specified size and alignment.
* On success a pointer to the initialized base_block_t header is returned.
*/
static base_block_t *
base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
size_t alignment) {
alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t header_size = sizeof(base_block_t);
size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
header_size;
/*
* Create increasingly larger blocks in order to limit the total number
* of disjoint virtual memory ranges. Choose the next size in the page
* size class series (skipping size classes that are not a multiple of
* HUGEPAGE), or a size large enough to satisfy the requested size and
* alignment, whichever is larger.
*/
size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
+ usize));
pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
*pind_last;
size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
size_t block_size = (min_block_size > next_block_size) ? min_block_size
: next_block_size;
base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
block_size);
if (block == NULL) {
return NULL;
}
if (metadata_thp_madvise()) {
void *addr = (void *)block;
assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
(block_size & HUGEPAGE_MASK) == 0);
if (opt_metadata_thp == metadata_thp_always) {
pages_huge(addr, block_size);
} else if (opt_metadata_thp == metadata_thp_auto &&
base != NULL) {
/* base != NULL indicates this is not a new base. */
malloc_mutex_lock(tsdn, &base->mtx);
base_auto_thp_switch(tsdn, base);
if (base->auto_thp_switched) {
pages_huge(addr, block_size);
}
malloc_mutex_unlock(tsdn, &base->mtx);
		}
	}
	*pind_last = sz_psz2ind(block_size);
block->size = block_size;
block->next = NULL;
assert(block_size >= header_size);
base_extent_init(extent_sn_next, &block->extent,
(void *)((uintptr_t)block + header_size), block_size - header_size);
return block;
}
/*
 * Allocate an extent that is at least as large as specified size, with
 * specified alignment.
 */
static extent_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &base->mtx);
	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	/*
	 * Drop mutex during base_block_alloc(), because an extent hook will be
	 * called.
	 */
	malloc_mutex_unlock(tsdn, &base->mtx);
	base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
	    base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
	    alignment);
	malloc_mutex_lock(tsdn, &base->mtx);
	if (block == NULL) {
		return NULL;
	}
	block->next = base->blocks;
	base->blocks = block;
	if (config_stats) {
base->allocated += sizeof(base_block_t);
base->resident += PAGE_CEILING(sizeof(base_block_t));
base->mapped += block->size;
if (metadata_thp_madvise() &&
!(opt_metadata_thp == metadata_thp_auto
&& !base->auto_thp_switched)) {
assert(base->n_thp > 0);
base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
LG_HUGEPAGE;
}
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
return &block->extent;
}
base_t *
b0get(void) {
return b0;
}
base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
pszind_t pind_last = 0;
size_t extent_sn_next = 0;
base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
&pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
if (block == NULL) {
return NULL;
} }
	size_t gap_size;
	size_t base_alignment = CACHELINE;
	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
	    &gap_size, base_size, base_alignment);
	base->ind = ind;
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
malloc_mutex_rank_exclusive)) {
base_unmap(tsdn, extent_hooks, ind, block, block->size);
return NULL;
}
base->pind_last = pind_last;
base->extent_sn_next = extent_sn_next;
base->blocks = block;
base->auto_thp_switched = false;
for (szind_t i = 0; i < NSIZES; i++) {
extent_heap_new(&base->avail[i]);
}
	if (config_stats) {
		base->allocated = sizeof(base_block_t);
		base->resident = PAGE_CEILING(sizeof(base_block_t));
		base->mapped = block->size;
		base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
		    metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
		    >> LG_HUGEPAGE : 0;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
	    base_size);
	return base;
}
void
base_delete(tsdn_t *tsdn, base_t *base) {
	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
base_block_t *next = base->blocks;
do {
base_block_t *block = next;
next = block->next;
base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
block->size);
} while (next != NULL);
}
extent_hooks_t *
base_extent_hooks_get(base_t *base) {
	return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
	    ATOMIC_ACQUIRE);
}
extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
	extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
	return old_extent_hooks;
}
static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
    size_t *esn) {
	alignment = QUANTUM_CEILING(alignment);
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t asize = usize + alignment - QUANTUM;
extent_t *extent = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
extent = extent_heap_remove_first(&base->avail[i]);
if (extent != NULL) {
/* Use existing space. */
break;
}
}
if (extent == NULL) {
/* Try to allocate more space. */
extent = base_extent_alloc(tsdn, base, usize, alignment);
}
void *ret;
if (extent == NULL) {
ret = NULL;
goto label_return;
}
	ret = base_extent_bump_alloc(base, extent, usize, alignment);
if (esn != NULL) {
*esn = extent_sn_get(extent);
}
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
return ret;
}
/*
* base_alloc() returns zeroed memory, which is always demand-zeroed for the
* auto arenas, in order to make multi-page sparse data structures such as radix
* tree nodes efficient with respect to physical memory usage. Upon success a
* pointer to at least size bytes with specified alignment is returned. Note
* that size is rounded up to the nearest multiple of alignment to avoid false
* sharing.
*/
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
size_t esn;
extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
CACHELINE, &esn);
if (extent == NULL) {
return NULL;
}
extent_esn_set(extent, esn);
return extent;
} }
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
    size_t *mapped, size_t *n_thp) {
	cassert(config_stats);
	malloc_mutex_lock(tsdn, &base->mtx);
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
*allocated = base->allocated;
*resident = base->resident;
*mapped = base->mapped;
*n_thp = base->n_thp;
malloc_mutex_unlock(tsdn, &base->mtx);
}
void
base_prefork(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_prefork(tsdn, &base->mtx);
}
void
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_parent(tsdn, &base->mtx);
}
void
base_postfork_child(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_child(tsdn, &base->mtx);
}
bool
base_boot(tsdn_t *tsdn) {
	b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
	return (b0 == NULL);
}
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/witness.h"
const bin_info_t bin_infos[NBINS] = {
#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
{reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
lg_delta_lookup) \
BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \
(pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \
(ndelta<<lg_delta)))
SIZE_CLASSES
#undef BIN_INFO_bin_yes
#undef BIN_INFO_bin_no
#undef SC
};
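bin_infos[] is generated with an X-macro: SIZE_CLASSES expands SC(...) once per size class, and the BIN_INFO_bin_yes/BIN_INFO_bin_no pair keeps only the classes that are actually backed by bins. A much-reduced sketch of the pattern (all names illustrative):

#include <stddef.h>

#define EXAMPLE_SIZE_CLASSES \
	EXAMPLE_SC(8, yes) \
	EXAMPLE_SC(16, yes) \
	EXAMPLE_SC(4096, no)

struct example_bin_info { size_t reg_size; };

#define EXAMPLE_ENTRY_yes(sz) { sz },
#define EXAMPLE_ENTRY_no(sz)
#define EXAMPLE_SC(sz, bin) EXAMPLE_ENTRY_##bin(sz)
static const struct example_bin_info example_bin_infos[] = {
	EXAMPLE_SIZE_CLASSES
};
#undef EXAMPLE_SC
#undef EXAMPLE_ENTRY_no
#undef EXAMPLE_ENTRY_yes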
bool
bin_init(bin_t *bin) {
if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
malloc_mutex_rank_exclusive)) {
return true;
}
bin->slabcur = NULL;
extent_heap_new(&bin->slabs_nonfull);
extent_list_init(&bin->slabs_full);
if (config_stats) {
memset(&bin->stats, 0, sizeof(bin_stats_t));
}
return false;
}
void
bin_prefork(tsdn_t *tsdn, bin_t *bin) {
malloc_mutex_prefork(tsdn, &bin->lock);
}
void
bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) {
malloc_mutex_postfork_parent(tsdn, &bin->lock);
}
void
bin_postfork_child(tsdn_t *tsdn, bin_t *bin) {
malloc_mutex_postfork_child(tsdn, &bin->lock);
}
#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
/******************************************************************************/
#ifdef BITMAP_USE_TREE
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
	unsigned i;
	size_t group_count;
@@ -32,47 +36,86 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
	binfo->nbits = nbits;
}
static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
	return binfo->levels[binfo->nlevels].group_offset;
}
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
	size_t extra;
	unsigned i;
	/*
	 * Bits are actually inverted with regard to the external bitmap
	 * interface.
	 */
	if (fill) {
		/* The "filled" bitmap starts out with all 0 bits. */
		memset(bitmap, 0, bitmap_size(binfo));
		return;
	}
	/*
	 * The "empty" bitmap starts out with all 1 bits, except for trailing
	 * unused bits (if any). Note that each group uses bit 0 to correspond
	 * to the first logical bit in the group, so extra bits are the most
	 * significant bits of the last group.
	 */
	memset(bitmap, 0xffU, bitmap_size(binfo));
	extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
	    & BITMAP_GROUP_NBITS_MASK;
	if (extra != 0) {
		bitmap[binfo->levels[1].group_offset - 1] >>= extra;
	}
	for (i = 1; i < binfo->nlevels; i++) {
		size_t group_count = binfo->levels[i].group_offset -
		    binfo->levels[i-1].group_offset;
		extra = (BITMAP_GROUP_NBITS - (group_count &
		    BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
		if (extra != 0) {
			bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
		}
	}
}
#else /* BITMAP_USE_TREE */
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
binfo->nbits = nbits;
}
static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
return binfo->ngroups;
}
void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
size_t extra;
if (fill) {
memset(bitmap, 0, bitmap_size(binfo));
return;
}
memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0) {
bitmap[binfo->ngroups - 1] >>= extra;
} }
} }
#endif /* BITMAP_USE_TREE */
size_t
bitmap_size(const bitmap_info_t *binfo) {
return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
}
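As the comments above note, bits are inverted relative to the external interface: a 1 bit means the corresponding slot is still free. A tiny sketch of a query against one group under that convention (assumes 64-bit groups; illustrative only):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
example_slot_is_free(const uint64_t *groups, size_t bit) {
	return (groups[bit / 64] >> (bit % 64)) & 1;
}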
#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = 0;
/* Used exclusively for gdump triggering. */
static size_t curchunks;
static size_t highchunks;
rtree_t chunks_rtree;
/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
static void *chunk_alloc_default(void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind);
static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
size_t length, unsigned arena_ind);
static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
size_t length, unsigned arena_ind);
static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
size_t length, unsigned arena_ind);
static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
size_t size_b, bool committed, unsigned arena_ind);
static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
size_t size_b, bool committed, unsigned arena_ind);
const chunk_hooks_t chunk_hooks_default = {
chunk_alloc_default,
chunk_dalloc_default,
chunk_commit_default,
chunk_decommit_default,
chunk_purge_default,
chunk_split_default,
chunk_merge_default
};
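Applications override this default table through the mallctl namespace rather than by touching it directly. A hedged sketch, assuming the jemalloc 4.x "arena.<i>.chunk_hooks" control and the public chunk_hooks_t type:

#include <jemalloc/jemalloc.h>

/* Read arena 0's current hooks, then write them back unchanged. */
static int
example_roundtrip_chunk_hooks(void) {
	chunk_hooks_t hooks;
	size_t len = sizeof(hooks);
	int err = mallctl("arena.0.chunk_hooks", &hooks, &len, NULL, 0);
	if (err != 0) {
		return err;
	}
	return mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks,
	    sizeof(hooks));
}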
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *chunk, size_t size, bool zeroed, bool committed);
/******************************************************************************/
static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{
return (arena->chunk_hooks);
}
chunk_hooks_t
chunk_hooks_get(arena_t *arena)
{
chunk_hooks_t chunk_hooks;
malloc_mutex_lock(&arena->chunks_mtx);
chunk_hooks = chunk_hooks_get_locked(arena);
malloc_mutex_unlock(&arena->chunks_mtx);
return (chunk_hooks);
}
chunk_hooks_t
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
chunk_hooks_t old_chunk_hooks;
malloc_mutex_lock(&arena->chunks_mtx);
old_chunk_hooks = arena->chunk_hooks;
/*
* Copy each field atomically so that it is impossible for readers to
* see partially updated pointers. There are places where readers only
* need one hook function pointer (therefore no need to copy the
* entirety of arena->chunk_hooks), and stale reads do not affect
* correctness, so they perform unlocked reads.
*/
#define ATOMIC_COPY_HOOK(n) do { \
union { \
chunk_##n##_t **n; \
void **v; \
} u; \
u.n = &arena->chunk_hooks.n; \
atomic_write_p(u.v, chunk_hooks->n); \
} while (0)
ATOMIC_COPY_HOOK(alloc);
ATOMIC_COPY_HOOK(dalloc);
ATOMIC_COPY_HOOK(commit);
ATOMIC_COPY_HOOK(decommit);
ATOMIC_COPY_HOOK(purge);
ATOMIC_COPY_HOOK(split);
ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
malloc_mutex_unlock(&arena->chunks_mtx);
return (old_chunk_hooks);
}
static void
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
bool locked)
{
static const chunk_hooks_t uninitialized_hooks =
CHUNK_HOOKS_INITIALIZER;
if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
0) {
*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
chunk_hooks_get(arena);
}
}
static void
chunk_hooks_assure_initialized_locked(arena_t *arena,
chunk_hooks_t *chunk_hooks)
{
chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
}
static void
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
{
chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
}
bool
chunk_register(const void *chunk, const extent_node_t *node)
{
assert(extent_node_addr_get(node) == chunk);
if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
return (true);
if (config_prof && opt_prof) {
size_t size = extent_node_size_get(node);
size_t nadd = (size == 0) ? 1 : size / chunksize;
size_t cur = atomic_add_z(&curchunks, nadd);
size_t high = atomic_read_z(&highchunks);
while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
/*
* Don't refresh cur, because it may have decreased
* since this thread lost the highchunks update race.
*/
high = atomic_read_z(&highchunks);
}
if (cur > high && prof_gdump_get_unlocked())
prof_gdump();
}
return (false);
}
void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
bool err;
err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
assert(!err);
if (config_prof && opt_prof) {
size_t size = extent_node_size_get(node);
size_t nsub = (size == 0) ? 1 : size / chunksize;
assert(atomic_read_z(&curchunks) >= nsub);
atomic_sub_z(&curchunks, nsub);
}
}
/*
* Do first-best-fit chunk selection, i.e. select the lowest chunk that best
* fits.
*/
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, size_t size)
{
extent_node_t key;
assert(size == CHUNK_CEILING(size));
extent_node_init(&key, arena, NULL, size, false, false);
return (extent_tree_szad_nsearch(chunks_szad, &key));
}
static void *
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
bool dalloc_node)
{
void *ret;
extent_node_t *node;
size_t alloc_size, leadsize, trailsize;
bool zeroed, committed;
assert(new_addr == NULL || alignment == chunksize);
/*
* Cached chunks use the node linkage embedded in their headers, in
* which case dalloc_node is true, and new_addr is non-NULL because
* we're operating on a specific chunk.
*/
assert(dalloc_node || new_addr != NULL);
alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
malloc_mutex_lock(&arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
if (new_addr != NULL) {
extent_node_t key;
extent_node_init(&key, arena, new_addr, alloc_size, false,
false);
node = extent_tree_ad_search(chunks_ad, &key);
} else {
node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
alloc_size);
}
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
size)) {
malloc_mutex_unlock(&arena->chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
alignment) - (uintptr_t)extent_node_addr_get(node);
assert(new_addr == NULL || leadsize == 0);
assert(extent_node_size_get(node) >= leadsize + size);
trailsize = extent_node_size_get(node) - leadsize - size;
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
zeroed = extent_node_zeroed_get(node);
if (zeroed)
*zero = true;
committed = extent_node_committed_get(node);
if (committed)
*commit = true;
/* Split the lead. */
if (leadsize != 0 &&
chunk_hooks->split(extent_node_addr_get(node),
extent_node_size_get(node), leadsize, size, false, arena->ind)) {
malloc_mutex_unlock(&arena->chunks_mtx);
return (NULL);
}
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
extent_node_size_set(node, leadsize);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
}
if (trailsize != 0) {
/* Split the trail. */
if (chunk_hooks->split(ret, size + trailsize, size,
trailsize, false, arena->ind)) {
if (dalloc_node && node != NULL)
arena_node_dalloc(arena, node);
malloc_mutex_unlock(&arena->chunks_mtx);
chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
cache, ret, size + trailsize, zeroed, committed);
return (NULL);
}
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
node = arena_node_alloc(arena);
if (node == NULL) {
malloc_mutex_unlock(&arena->chunks_mtx);
chunk_record(arena, chunk_hooks, chunks_szad,
chunks_ad, cache, ret, size + trailsize,
zeroed, committed);
return (NULL);
}
}
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
trailsize, zeroed, committed);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
}
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
malloc_mutex_unlock(&arena->chunks_mtx);
chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
ret, size, zeroed, committed);
return (NULL);
}
malloc_mutex_unlock(&arena->chunks_mtx);
assert(dalloc_node || node != NULL);
if (dalloc_node && node != NULL)
arena_node_dalloc(arena, node);
if (*zero) {
if (!zeroed)
memset(ret, 0, size);
else if (config_debug) {
size_t i;
size_t *p = (size_t *)(uintptr_t)ret;
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
}
}
return (ret);
}
/*
* If the caller specifies (!*zero), it is still possible to receive zeroed
* memory, in which case *zero is toggled to true. arena_chunk_alloc() takes
* advantage of this to avoid demanding zeroed chunks, but taking advantage of
* them if they are returned.
*/
static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit, dss_prec_t dss_prec)
{
void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
/* Retained. */
if ((ret = chunk_recycle(arena, &chunk_hooks,
&arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
new_addr, size, alignment, zero, commit, true)) != NULL)
return (ret);
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
NULL)
return (ret);
/*
* mmap. Requesting an address is not implemented for
* chunk_alloc_mmap(), so only call it if (new_addr == NULL).
*/
if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
commit)) != NULL)
return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
NULL)
return (ret);
/* All strategies for allocation failed. */
return (NULL);
}
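chunk_alloc_core() is a straight fallback cascade: retained chunks, then the primary dss, then mmap, then the secondary dss. A condensed sketch of that control flow with stand-in callbacks (names are illustrative):

#include <stddef.h>

typedef void *(*example_try_alloc_t)(size_t size);

/* Try each allocation source in order; the first success wins. */
static void *
example_alloc_cascade(example_try_alloc_t *tries, unsigned ntries,
    size_t size) {
	for (unsigned i = 0; i < ntries; i++) {
		void *p = tries[i](size);
		if (p != NULL) {
			return p;
		}
	}
	return NULL;
}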
void *
chunk_alloc_base(size_t size)
{
void *ret;
bool zero, commit;
/*
* Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
* because it's critical that chunk_alloc_base() return untouched
* demand-zeroed virtual memory.
*/
zero = true;
commit = true;
ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
if (ret == NULL)
return (NULL);
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}
void *
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
void *ret;
bool commit;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
commit = true;
ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
&arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
&commit, dalloc_node);
if (ret == NULL)
return (NULL);
assert(commit);
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}
static arena_t *
chunk_arena_get(unsigned arena_ind)
{
arena_t *arena;
/* Dodge tsd for a0 in order to avoid bootstrapping issues. */
arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
false, true);
/*
* The arena we're allocating on behalf of must have been initialized
* already.
*/
assert(arena != NULL);
return (arena);
}
static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit, unsigned arena_ind)
{
void *ret;
arena_t *arena;
arena = chunk_arena_get(arena_ind);
ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
commit, arena->dss_prec);
if (ret == NULL)
return (NULL);
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}
void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
chunk_hooks_assure_initialized(arena, chunk_hooks);
ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
arena->ind);
if (ret == NULL)
return (NULL);
if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
return (ret);
}
static void
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *chunk, size_t size, bool zeroed, bool committed)
{
bool unzeroed;
extent_node_t *node, *prev;
extent_node_t key;
assert(!cache || !zeroed);
unzeroed = cache || !zeroed;
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
malloc_mutex_lock(&arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && extent_node_addr_get(node) ==
extent_node_addr_get(&key) && extent_node_committed_get(node) ==
committed && !chunk_hooks->merge(chunk, size,
extent_node_addr_get(node), extent_node_size_get(node), false,
arena->ind)) {
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, chunk);
extent_node_size_set(node, size + extent_node_size_get(node));
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
!unzeroed);
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
node = arena_node_alloc(arena);
if (node == NULL) {
/*
* Node allocation failed, which is an exceedingly
* unlikely failure. Leak chunk after making sure its
* pages have already been purged, so that this is only
* a virtual memory leak.
*/
if (cache) {
chunk_purge_wrapper(arena, chunk_hooks, chunk,
size, 0, size);
}
goto label_return;
}
extent_node_init(node, arena, chunk, size, !unzeroed,
committed);
extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
}
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
extent_node_size_get(prev)) == chunk &&
extent_node_committed_get(prev) == committed &&
!chunk_hooks->merge(extent_node_addr_get(prev),
extent_node_size_get(prev), chunk, size, false, arena->ind)) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
* remove/insert node from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, prev);
extent_tree_ad_remove(chunks_ad, prev);
arena_chunk_cache_maybe_remove(arena, prev, cache);
extent_tree_szad_remove(chunks_szad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, extent_node_addr_get(prev));
extent_node_size_set(node, extent_node_size_get(prev) +
extent_node_size_get(node));
extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
extent_node_zeroed_get(node));
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
arena_node_dalloc(arena, prev);
}
label_return:
malloc_mutex_unlock(&arena->chunks_mtx);
}
void
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool committed)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
&arena->chunks_ad_cached, true, chunk, size, false, committed);
arena_maybe_purge(arena);
}
void
chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool zeroed, bool committed)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
chunk_hooks_assure_initialized(arena, chunk_hooks);
/* Try to deallocate. */
if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
return;
/* Try to decommit; purge if that fails. */
if (committed) {
committed = chunk_hooks->decommit(chunk, size, 0, size,
arena->ind);
}
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind);
chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
&arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
}
static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{
if (!have_dss || !chunk_in_dss(chunk))
return (chunk_dalloc_mmap(chunk, size));
return (true);
}
void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool committed)
{
chunk_hooks_assure_initialized(arena, chunk_hooks);
chunk_hooks->dalloc(chunk, size, committed, arena->ind);
if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
}
static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
length));
}
static bool
chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
length));
}
bool
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert((offset & PAGE_MASK) == 0);
assert(length != 0);
assert((length & PAGE_MASK) == 0);
return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
length));
}
static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
length));
}
bool
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, size_t offset, size_t length)
{
chunk_hooks_assure_initialized(arena, chunk_hooks);
return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}
static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
bool committed, unsigned arena_ind)
{
if (!maps_coalesce)
return (true);
return (false);
}
static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
bool committed, unsigned arena_ind)
{
if (!maps_coalesce)
return (true);
if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
return (true);
return (false);
}
static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{
return ((rtree_node_elm_t *)base_alloc(nelms *
sizeof(rtree_node_elm_t)));
}
bool
chunk_boot(void)
{
#ifdef _WIN32
SYSTEM_INFO info;
GetSystemInfo(&info);
/*
* Verify actual page size is equal to or an integral multiple of
* configured page size.
*/
if (info.dwPageSize & ((1U << LG_PAGE) - 1))
return (true);
/*
* Configure chunksize (if not set) to match granularity (usually 64K),
* so pages_map will always take fast path.
*/
if (!opt_lg_chunk) {
opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
- 1;
}
#else
if (!opt_lg_chunk)
opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif
/* Set variables according to the value of opt_lg_chunk. */
chunksize = (ZU(1) << opt_lg_chunk);
assert(chunksize >= PAGE);
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> LG_PAGE);
if (have_dss && chunk_dss_boot())
return (true);
if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk, chunks_rtree_node_alloc, NULL))
return (true);
return (false);
}
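A worked example of the values chunk_boot() derives, assuming opt_lg_chunk == 21 and LG_PAGE == 12 (4 KiB pages); the numbers are illustrative defaults, not a statement about every build:

#include <assert.h>
#include <stddef.h>

static void
example_chunk_constants(void) {
	size_t chunksize = (size_t)1 << 21;	/* 2 MiB */
	size_t chunksize_mask = chunksize - 1;	/* 0x1fffff */
	size_t chunk_npages = chunksize >> 12;	/* 512 pages per chunk */
	assert(chunk_npages == 512);
	(void)chunksize_mask;
}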
void
chunk_prefork(void)
{
chunk_dss_prefork();
}
void
chunk_postfork_parent(void)
{
chunk_dss_postfork_parent();
}
void
chunk_postfork_child(void)
{
chunk_dss_postfork_child();
}