'\" t
.\" Title: JEMALLOC
.\" Author: Jason Evans
.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
.\" Date: 09/24/2015
.\" Manual: User Manual
.\" Source: jemalloc 4.0.3-0-ge9192eacf8935e29fc62fddc2701f7942b1cc02c
.\" Language: English
.\"
.TH "JEMALLOC" "3" "09/24/2015" "jemalloc 4.0.3-0-ge9192eacf893" "User Manual"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.\" http://bugs.debian.org/507673
.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.ie \n(.g .ds Aq \(aq
.el .ds Aq '
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
.\" disable hyphenation
.nh
.\" disable justification (adjust text to left margin only)
.ad l
.\" -----------------------------------------------------------------
.\" * MAIN CONTENT STARTS HERE *
.\" -----------------------------------------------------------------
.SH "NAME"
jemalloc \- general purpose memory allocation functions
.SH "LIBRARY"
.PP
This manual describes jemalloc 4\&.0\&.3\-0\-ge9192eacf8935e29fc62fddc2701f7942b1cc02c\&. More information can be found at the
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
.SH "SYNOPSIS"
.sp
.ft B
.nf
#include <jemalloc/jemalloc\&.h>
.fi
.ft
.SS "Standard API"
.HP \w'void\ *malloc('u
.BI "void *malloc(size_t\ " "size" ");"
.HP \w'void\ *calloc('u
.BI "void *calloc(size_t\ " "number" ", size_t\ " "size" ");"
.HP \w'int\ posix_memalign('u
.BI "int posix_memalign(void\ **" "ptr" ", size_t\ " "alignment" ", size_t\ " "size" ");"
.HP \w'void\ *aligned_alloc('u
.BI "void *aligned_alloc(size_t\ " "alignment" ", size_t\ " "size" ");"
.HP \w'void\ *realloc('u
.BI "void *realloc(void\ *" "ptr" ", size_t\ " "size" ");"
.HP \w'void\ free('u
.BI "void free(void\ *" "ptr" ");"
.SS "Non\-standard API"
.HP \w'void\ *mallocx('u
.BI "void *mallocx(size_t\ " "size" ", int\ " "flags" ");"
.HP \w'void\ *rallocx('u
.BI "void *rallocx(void\ *" "ptr" ", size_t\ " "size" ", int\ " "flags" ");"
.HP \w'size_t\ xallocx('u
.BI "size_t xallocx(void\ *" "ptr" ", size_t\ " "size" ", size_t\ " "extra" ", int\ " "flags" ");"
.HP \w'size_t\ sallocx('u
.BI "size_t sallocx(void\ *" "ptr" ", int\ " "flags" ");"
.HP \w'void\ dallocx('u
.BI "void dallocx(void\ *" "ptr" ", int\ " "flags" ");"
.HP \w'void\ sdallocx('u
.BI "void sdallocx(void\ *" "ptr" ", size_t\ " "size" ", int\ " "flags" ");"
.HP \w'size_t\ nallocx('u
.BI "size_t nallocx(size_t\ " "size" ", int\ " "flags" ");"
.HP \w'int\ mallctl('u
.BI "int mallctl(const\ char\ *" "name" ", void\ *" "oldp" ", size_t\ *" "oldlenp" ", void\ *" "newp" ", size_t\ " "newlen" ");"
.HP \w'int\ mallctlnametomib('u
.BI "int mallctlnametomib(const\ char\ *" "name" ", size_t\ *" "mibp" ", size_t\ *" "miblenp" ");"
.HP \w'int\ mallctlbymib('u
.BI "int mallctlbymib(const\ size_t\ *" "mib" ", size_t\ " "miblen" ", void\ *" "oldp" ", size_t\ *" "oldlenp" ", void\ *" "newp" ", size_t\ " "newlen" ");"
.HP \w'void\ malloc_stats_print('u
.BI "void malloc_stats_print(void\ " "(*write_cb)" "\ (void\ *,\ const\ char\ *), void\ *" "cbopaque" ", const\ char\ *" "opts" ");"
.HP \w'size_t\ malloc_usable_size('u
.BI "size_t malloc_usable_size(const\ void\ *" "ptr" ");"
.HP \w'void\ (*malloc_message)('u
.BI "void (*malloc_message)(void\ *" "cbopaque" ", const\ char\ *" "s" ");"
.PP
const char *\fImalloc_conf\fR;
.SH "DESCRIPTION"
.SS "Standard API"
.PP
The
\fBmalloc\fR\fB\fR
function allocates
\fIsize\fR
bytes of uninitialized memory\&. The allocated space is suitably aligned (after possible pointer coercion) for storage of any type of object\&.
.PP
The
\fBcalloc\fR\fB\fR
function allocates space for
\fInumber\fR
objects, each
\fIsize\fR
bytes in length\&. The result is identical to calling
\fBmalloc\fR\fB\fR
with an argument of
\fInumber\fR
*
\fIsize\fR, with the exception that the allocated memory is explicitly initialized to zero bytes\&.
.PP
The
\fBposix_memalign\fR\fB\fR
function allocates
\fIsize\fR
bytes of memory such that the allocation\*(Aqs base address is a multiple of
\fIalignment\fR, and returns the allocation in the value pointed to by
\fIptr\fR\&. The requested
\fIalignment\fR
must be a power of 2 at least as large as
sizeof(\fBvoid *\fR)\&.
.PP
The
\fBaligned_alloc\fR\fB\fR
function allocates
\fIsize\fR
bytes of memory such that the allocation\*(Aqs base address is a multiple of
\fIalignment\fR\&. The requested
\fIalignment\fR
must be a power of 2\&. Behavior is undefined if
\fIsize\fR
is not an integral multiple of
\fIalignment\fR\&.
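.PP
For example, a 4096\-byte buffer aligned to a 64\-byte boundary could be obtained along the following lines (an illustrative sketch; error handling is abbreviated):
.sp
.if n \{\
.RS 4
.\}
.nf
void *buf;

if (posix_memalign(&buf, 64, 4096) != 0)
    buf = NULL; /* An errno value (e\&.g\&. ENOMEM) was returned\&. */

/* Equivalent request; 4096 is an integral multiple of 64\&. */
void *buf2 = aligned_alloc(64, 4096);
.fi
.if n \{\
.RE
.\}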
.PP
The
\fBrealloc\fR\fB\fR
function changes the size of the previously allocated memory referenced by
\fIptr\fR
to
\fIsize\fR
bytes\&. The contents of the memory are unchanged up to the lesser of the new and old sizes\&. If the new size is larger, the contents of the newly allocated portion of the memory are undefined\&. Upon success, the memory referenced by
\fIptr\fR
is freed and a pointer to the newly allocated memory is returned\&. Note that
\fBrealloc\fR\fB\fR
may move the memory allocation, resulting in a different return value than
\fIptr\fR\&. If
\fIptr\fR
is
\fBNULL\fR, the
\fBrealloc\fR\fB\fR
function behaves identically to
\fBmalloc\fR\fB\fR
for the specified size\&.
.PP
The
\fBfree\fR\fB\fR
function causes the allocated memory referenced by
\fIptr\fR
to be made available for future allocations\&. If
\fIptr\fR
is
\fBNULL\fR, no action occurs\&.
.SS "Non\-standard API"
.PP
The
\fBmallocx\fR\fB\fR,
\fBrallocx\fR\fB\fR,
\fBxallocx\fR\fB\fR,
\fBsallocx\fR\fB\fR,
\fBdallocx\fR\fB\fR,
\fBsdallocx\fR\fB\fR, and
\fBnallocx\fR\fB\fR
functions all have a
\fIflags\fR
argument that can be used to specify options\&. The functions only check the options that are contextually relevant\&. Use bitwise or (|) operations to specify one or more of the following:
.PP
\fBMALLOCX_LG_ALIGN(\fR\fB\fIla\fR\fR\fB) \fR
.RS 4
Align the memory allocation to start at an address that is a multiple of
(1 << \fIla\fR)\&. This macro does not validate that
\fIla\fR
is within the valid range\&.
.RE
.PP
\fBMALLOCX_ALIGN(\fR\fB\fIa\fR\fR\fB) \fR
.RS 4
Align the memory allocation to start at an address that is a multiple of
\fIa\fR, where
\fIa\fR
is a power of two\&. This macro does not validate that
\fIa\fR
is a power of 2\&.
.RE
.PP
\fBMALLOCX_ZERO\fR
.RS 4
Initialize newly allocated memory to contain zero bytes\&. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes\&. If this macro is absent, newly allocated memory is uninitialized\&.
.RE
.PP
\fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB) \fR
.RS 4
Use the thread\-specific cache (tcache) specified by the identifier
\fItc\fR, which must have been acquired via the
"tcache\&.create"
mallctl\&. This macro does not validate that
\fItc\fR
specifies a valid identifier\&.
.RE
.PP
\fBMALLOCX_TCACHE_NONE\fR
.RS 4
Do not use a thread\-specific cache (tcache)\&. Unless
\fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB)\fR
or
\fBMALLOCX_TCACHE_NONE\fR
is specified, an automatically managed tcache will be used under many circumstances\&. This macro cannot be used in the same
\fIflags\fR
argument as
\fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB)\fR\&.
.RE
.PP
\fBMALLOCX_ARENA(\fR\fB\fIa\fR\fR\fB) \fR
.RS 4
Use the arena specified by the index
\fIa\fR\&. This macro has no effect for regions that were allocated via an arena other than the one specified\&. This macro does not validate that
\fIa\fR
specifies an arena index in the valid range\&.
.RE
.PP
The
\fBmallocx\fR\fB\fR
function allocates at least
\fIsize\fR
bytes of memory, and returns a pointer to the base address of the allocation\&. Behavior is undefined if
\fIsize\fR
is
\fB0\fR, or if request size overflows due to size class and/or alignment constraints\&.
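.PP
For example, a zeroed allocation of at least 4096 bytes, aligned to a 64\-byte boundary, could be requested along the following lines (an illustrative sketch):
.sp
.if n \{\
.RS 4
.\}
.nf
void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
if (p == NULL) {
    /* Allocation failed\&. */
}
.fi
.if n \{\
.RE
.\}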
.PP
The
\fBrallocx\fR\fB\fR
function resizes the allocation at
\fIptr\fR
to be at least
\fIsize\fR
bytes, and returns a pointer to the base address of the resulting allocation, which may or may not have moved from its original location\&. Behavior is undefined if
\fIsize\fR
is
\fB0\fR, or if request size overflows due to size class and/or alignment constraints\&.
.PP
The
\fBxallocx\fR\fB\fR
function resizes the allocation at
\fIptr\fR
in place to be at least
\fIsize\fR
bytes, and returns the real size of the allocation\&. If
\fIextra\fR
is non\-zero, an attempt is made to resize the allocation to be at least
(\fIsize\fR + \fIextra\fR)
bytes, though inability to allocate the extra byte(s) will not by itself result in failure to resize\&. Behavior is undefined if
\fIsize\fR
is
\fB0\fR, or if
(\fIsize\fR + \fIextra\fR > \fBSIZE_T_MAX\fR)\&.
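.PP
For example, an in\-place resize attempt could be checked along the following lines (an illustrative sketch; p is assumed to be an existing allocation):
.sp
.if n \{\
.RS 4
.\}
.nf
size_t new_size = xallocx(p, 4096, 0, 0);
if (new_size < 4096) {
    /* The allocation could not be grown in place; p is unchanged\&.
       Fall back to rallocx(), which may move the allocation\&. */
    void *q = rallocx(p, 4096, 0);
    if (q != NULL)
        p = q;
}
.fi
.if n \{\
.RE
.\}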
.PP
The
\fBsallocx\fR\fB\fR
function returns the real size of the allocation at
\fIptr\fR\&.
.PP
The
\fBdallocx\fR\fB\fR
function causes the memory referenced by
\fIptr\fR
to be made available for future allocations\&.
.PP
The
\fBsdallocx\fR\fB\fR
function is an extension of
\fBdallocx\fR\fB\fR
with a
\fIsize\fR
parameter to allow the caller to pass in the allocation size as an optimization\&. The minimum valid input size is the original requested size of the allocation, and the maximum valid input size is the corresponding value returned by
\fBnallocx\fR\fB\fR
or
\fBsallocx\fR\fB\fR\&.
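.PP
For example, sized deallocation could be paired with an earlier request along the following lines (an illustrative sketch):
.sp
.if n \{\
.RS 4
.\}
.nf
void *p = mallocx(4096, 0);
/* \&.\&.\&. use p \&.\&.\&. */
/* Any size in [4096, sallocx(p, 0)] is a valid size argument here\&. */
sdallocx(p, 4096, 0);
.fi
.if n \{\
.RE
.\}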
.PP
The
\fBnallocx\fR\fB\fR
function allocates no memory, but it performs the same size computation as the
\fBmallocx\fR\fB\fR
function, and returns the real size of the allocation that would result from the equivalent
\fBmallocx\fR\fB\fR
function call\&. Behavior is undefined if
\fIsize\fR
is
\fB0\fR, or if request size overflows due to size class and/or alignment constraints\&.
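.PP
For example, the real size that a request would consume can be computed without allocating (an illustrative sketch; the reported value depends on the configured size classes):
.sp
.if n \{\
.RS 4
.\}
.nf
/* Real size of a 9\-byte request with 16\-byte alignment\&. */
size_t real_size = nallocx(9, MALLOCX_ALIGN(16));
.fi
.if n \{\
.RE
.\}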
.PP
The
\fBmallctl\fR\fB\fR
function provides a general interface for introspecting the memory allocator, as well as setting modifiable parameters and triggering actions\&. The period\-separated
\fIname\fR
argument specifies a location in a tree\-structured namespace; see the
MALLCTL NAMESPACE
section for documentation on the tree contents\&. To read a value, pass a pointer via
\fIoldp\fR
to adequate space to contain the value, and a pointer to its length via
\fIoldlenp\fR; otherwise pass
\fBNULL\fR
and
\fBNULL\fR\&. Similarly, to write a value, pass a pointer to the value via
\fInewp\fR, and its length via
\fInewlen\fR; otherwise pass
\fBNULL\fR
and
\fB0\fR\&.
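.PP
For example, a value can be read or written along the following lines (an illustrative sketch; the write assumes a build with thread caching enabled):
.sp
.if n \{\
.RS 4
.\}
.nf
unsigned narenas;
size_t sz = sizeof(narenas);

/* Read the current arena limit\&. */
if (mallctl("arenas\&.narenas", &narenas, &sz, NULL, 0) != 0) {
    /* Name lookup or read failure\&. */
}

/* Write\-only usage: disable the tcache of the calling thread\&. */
bool enabled = false;
mallctl("thread\&.tcache\&.enabled", NULL, NULL, &enabled, sizeof(enabled));
.fi
.if n \{\
.RE
.\}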
.PP
The
\fBmallctlnametomib\fR\fB\fR
function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name to a \(lqManagement Information Base\(rq (MIB) that can be passed repeatedly to
\fBmallctlbymib\fR\fB\fR\&. Upon successful return from
\fBmallctlnametomib\fR\fB\fR,
\fImibp\fR
contains an array of
\fI*miblenp\fR
integers, where
\fI*miblenp\fR
is the lesser of the number of components in
\fIname\fR
and the input value of
\fI*miblenp\fR\&. Thus it is possible to pass a
\fI*miblenp\fR
that is smaller than the number of period\-separated name components, which results in a partial MIB that can be used as the basis for constructing a complete MIB\&. For name components that are integers (e\&.g\&. the 2 in
"arenas\&.bin\&.2\&.size"), the corresponding MIB component will always be that integer\&. Therefore, it is legitimate to construct code like the following:
.sp
.if n \{\
.RS 4
.\}
.nf
unsigned nbins, i;
size_t mib[4];
size_t len, miblen;
len = sizeof(nbins);
mallctl("arenas\&.nbins", &nbins, &len, NULL, 0);
miblen = 4;
mallctlnametomib("arenas\&.bin\&.0\&.size", mib, &miblen);
for (i = 0; i < nbins; i++) {
    size_t bin_size;

    mib[2] = i;
    len = sizeof(bin_size);
    mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
    /* Do something with bin_size\&.\&.\&. */
}
.fi
.if n \{\
.RE
.\}
.PP
The
\fBmalloc_stats_print\fR\fB\fR
function writes human\-readable summary statistics via the
\fIwrite_cb\fR
callback function pointer and
\fIcbopaque\fR
data passed to
\fIwrite_cb\fR, or
\fBmalloc_message\fR\fB\fR
if
\fIwrite_cb\fR
is
\fBNULL\fR\&. This function can be called repeatedly\&. General information that never changes during execution can be omitted by specifying \(lqg\(rq as a character within the
\fIopts\fR
string\&. Note that
\fBmalloc_stats_print\fR\fB\fR
uses the
\fBmallctl*\fR\fB\fR
functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously\&. If
\fB\-\-enable\-stats\fR
is specified during configuration, \(lqm\(rq and \(lqa\(rq can be specified to omit merged arena and per arena statistics, respectively; \(lqb\(rq, \(lql\(rq, and \(lqh\(rq can be specified to omit per size class statistics for bins, large objects, and huge objects, respectively\&. Unrecognized characters are silently ignored\&. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations\&.
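.PP
For example (an illustrative sketch):
.sp
.if n \{\
.RS 4
.\}
.nf
/* Write all statistics using the default malloc_message()\&. */
malloc_stats_print(NULL, NULL, NULL);

/* Omit general information and per size class statistics\&. */
malloc_stats_print(NULL, NULL, "gblh");
.fi
.if n \{\
.RE
.\}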
.PP
The
\fBmalloc_usable_size\fR\fB\fR
function returns the usable size of the allocation pointed to by
\fIptr\fR\&. The return value may be larger than the size that was requested during allocation\&. The
\fBmalloc_usable_size\fR\fB\fR
function is not a mechanism for in\-place
\fBrealloc\fR\fB\fR; rather it is provided solely as a tool for introspection purposes\&. Any discrepancy between the requested allocation size and the size reported by
\fBmalloc_usable_size\fR\fB\fR
should not be depended on, since such behavior is entirely implementation\-dependent\&.
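.PP
For example (an illustrative sketch; the reported size depends on the size classes in use):
.sp
.if n \{\
.RS 4
.\}
.nf
void *p = malloc(9);
if (p != NULL) {
    /* Typically the size class backing the allocation, e\&.g\&. 16\&. */
    size_t usable = malloc_usable_size(p);
}
.fi
.if n \{\
.RE
.\}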
.SH "TUNING"
.PP
The first time that a call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile\- or run\-time\&.
.PP
The string pointed to by the global variable
\fImalloc_conf\fR, the \(lqname\(rq of the file referenced by the symbolic link named
/etc/malloc\&.conf, and the value of the environment variable
\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&. Note that
\fImalloc_conf\fR
may be read before
\fBmain\fR\fB\fR
is entered, so the declaration of
\fImalloc_conf\fR
should specify an initializer that contains the final value to be read by jemalloc\&.
\fImalloc_conf\fR
is a compile\-time setting, whereas
/etc/malloc\&.conf
and
\fBMALLOC_CONF\fR
can be safely set any time prior to program invocation\&.
.PP
An options string is a comma\-separated list of option:value pairs\&. There is one key corresponding to each
"opt\&.*"
mallctl (see the
MALLCTL NAMESPACE
section for options documentation)\&. For example,
abort:true,narenas:1
sets the
"opt\&.abort"
and
"opt\&.narenas"
options\&. Some options have boolean values (true/false), others have integer values (base 8, 10, or 16, depending on prefix), and yet others have raw string values\&.
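.PP
For example, the options string shown above could be supplied at compile time, in the same form used under the
"opt\&.xmalloc"
entry below, or at run time via the
\fBMALLOC_CONF\fR
environment variable (an illustrative sketch):
.sp
.if n \{\
.RS 4
.\}
.nf
/* Read by jemalloc before main() is entered\&. */
malloc_conf = "abort:true,narenas:1";

/* Equivalently, in the environment: MALLOC_CONF="abort:true,narenas:1" */
.fi
.if n \{\
.RE
.\}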
.SH "IMPLEMENTATION NOTES"
.PP
Traditionally, allocators have used
\fBsbrk\fR(2)
to obtain memory, which is suboptimal for several reasons, including race conditions, increased fragmentation, and artificial limitations on maximum usable memory\&. If
\fBsbrk\fR(2)
is supported by the operating system, this allocator uses both
\fBmmap\fR(2)
and
\fBsbrk\fR(2), in that order of preference; otherwise only
\fBmmap\fR(2)
is used\&.
.PP
This allocator uses multiple arenas in order to reduce lock contention for threaded programs on multi\-processor systems\&. This works well with regard to threading scalability, but incurs some costs\&. There is a small fixed per\-arena overhead, and additionally, arenas manage memory completely independently of each other, which means a small fixed increase in overall memory fragmentation\&. These overheads are not generally an issue, given the number of arenas normally used\&. Note that using substantially more arenas than the default is not likely to improve performance, mainly due to reduced cache performance\&. However, it may make sense to reduce the number of arenas if an application does not make much use of the allocation functions\&.
.PP
In addition to multiple arenas, unless
\fB\-\-disable\-tcache\fR
is specified during configuration, this allocator supports thread\-specific caching for small and large objects, in order to make it possible to completely avoid synchronization for most allocation requests\&. Such caching allows very fast allocation in the common case, but it increases memory usage and fragmentation, since a bounded number of objects can remain allocated in each thread cache\&.
.PP
Memory is conceptually broken into equal\-sized chunks, where the chunk size is a power of two that is greater than the page size\&. Chunks are always aligned to multiples of the chunk size\&. This alignment makes it possible to find metadata for user objects very quickly\&.
.PP
User objects are broken into three categories according to size: small, large, and huge\&. Small and large objects are managed entirely by arenas; huge objects are additionally aggregated in a single data structure that is shared by all threads\&. Huge objects are typically used by applications infrequently enough that this single data structure is not a scalability issue\&.
.PP
Each chunk that is managed by an arena tracks its contents as runs of contiguous pages (unused, backing a set of small objects, or backing one large object)\&. The combination of chunk alignment and chunk page maps makes it possible to determine all metadata regarding small and large allocations in constant time\&.
.PP
Small objects are managed in groups by page runs\&. Each run maintains a bitmap to track which regions are in use\&. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least
sizeof(\fBdouble\fR)\&. All other object size classes are multiples of the quantum, spaced such that there are four size classes for each doubling in size, which limits internal fragmentation to approximately 20% for all but the smallest size classes\&. Small size classes are smaller than four times the page size, large size classes are smaller than the chunk size (see the
"opt\&.lg_chunk"
option), and huge size classes extend from the chunk size up to one size class less than the full address space size\&.
.PP
Allocations are packed tightly together, which can be an issue for multi\-threaded applications\&. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating\&.
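.PP
For example, assuming 64\-byte cachelines (an illustrative sketch):
.sp
.if n \{\
.RS 4
.\}
.nf
size_t len = 100; /* Hypothetical request size\&. */

/* Round the request up to a multiple of the cacheline size\&. */
void *p = malloc((len + 63) & ~(size_t)63);

/* Or request cacheline alignment via the non\-standard API\&. */
void *q = mallocx(len, MALLOCX_LG_ALIGN(6)); /* 1 << 6 == 64 */
.fi
.if n \{\
.RE
.\}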
.PP
The
\fBrealloc\fR\fB\fR,
\fBrallocx\fR\fB\fR, and
\fBxallocx\fR\fB\fR
functions may resize allocations without moving them under limited circumstances\&. Unlike the
\fB*allocx\fR\fB\fR
API, the standard API does not officially round up the usable size of an allocation to the nearest size class, so technically it is necessary to call
\fBrealloc\fR\fB\fR
to grow e\&.g\&. a 9\-byte allocation to 16 bytes, or shrink a 16\-byte allocation to 9 bytes\&. Growth and shrinkage trivially succeed in place as long as the pre\-size and post\-size both round up to the same size class\&. No other API guarantees are made regarding in\-place resizing, but the current implementation also tries to resize large and huge allocations in place, as long as the pre\-size and post\-size are both large or both huge\&. In such cases shrinkage always succeeds for large size classes, but for huge size classes the chunk allocator must support splitting (see
"arena\&.<i>\&.chunk_hooks")\&. Growth only succeeds if the trailing memory is currently available, and additionally for huge size classes the chunk allocator must support merging\&.
.PP
Assuming 2 MiB chunks, 4 KiB pages, and a 16\-byte quantum on a 64\-bit system, the size classes in each category are as shown in
Table 1\&.
.sp
.it 1 an-trap
.nr an-no-space-flag 1
.nr an-break-flag 1
.br
.B Table\ \&1.\ \&Size classes
.TS
allbox tab(:);
lB rB lB.
T{
Category
T}:T{
Spacing
T}:T{
Size
T}
.T&
l r l
^ r l
^ r l
^ r l
^ r l
^ r l
^ r l
^ r l
^ r l
l r l
^ r l
^ r l
^ r l
^ r l
^ r l
^ r l
^ r l
l r l
^ r l
^ r l
^ r l
^ r l
^ r l
^ r l.
T{
Small
T}:T{
lg
T}:T{
[8]
T}
:T{
16
T}:T{
[16, 32, 48, 64, 80, 96, 112, 128]
T}
:T{
32
T}:T{
[160, 192, 224, 256]
T}
:T{
64
T}:T{
[320, 384, 448, 512]
T}
:T{
128
T}:T{
[640, 768, 896, 1024]
T}
:T{
256
T}:T{
[1280, 1536, 1792, 2048]
T}
:T{
512
T}:T{
[2560, 3072, 3584, 4096]
T}
:T{
1 KiB
T}:T{
[5 KiB, 6 KiB, 7 KiB, 8 KiB]
T}
:T{
2 KiB
T}:T{
[10 KiB, 12 KiB, 14 KiB]
T}
T{
Large
T}:T{
2 KiB
T}:T{
[16 KiB]
T}
:T{
4 KiB
T}:T{
[20 KiB, 24 KiB, 28 KiB, 32 KiB]
T}
:T{
8 KiB
T}:T{
[40 KiB, 48 KiB, 56 KiB, 64 KiB]
T}
:T{
16 KiB
T}:T{
[80 KiB, 96 KiB, 112 KiB, 128 KiB]
T}
:T{
32 KiB
T}:T{
[160 KiB, 192 KiB, 224 KiB, 256 KiB]
T}
:T{
64 KiB
T}:T{
[320 KiB, 384 KiB, 448 KiB, 512 KiB]
T}
:T{
128 KiB
T}:T{
[640 KiB, 768 KiB, 896 KiB, 1 MiB]
T}
:T{
256 KiB
T}:T{
[1280 KiB, 1536 KiB, 1792 KiB]
T}
T{
Huge
T}:T{
256 KiB
T}:T{
[2 MiB]
T}
:T{
512 KiB
T}:T{
[2560 KiB, 3 MiB, 3584 KiB, 4 MiB]
T}
:T{
1 MiB
T}:T{
[5 MiB, 6 MiB, 7 MiB, 8 MiB]
T}
:T{
2 MiB
T}:T{
[10 MiB, 12 MiB, 14 MiB, 16 MiB]
T}
:T{
4 MiB
T}:T{
[20 MiB, 24 MiB, 28 MiB, 32 MiB]
T}
:T{
8 MiB
T}:T{
[40 MiB, 48 MiB, 56 MiB, 64 MiB]
T}
:T{
\&.\&.\&.
T}:T{
\&.\&.\&.
T}
.TE
.sp 1
.SH "MALLCTL NAMESPACE"
.PP
The following names are defined in the namespace accessible via the
\fBmallctl*\fR\fB\fR
functions\&. Value types are specified in parentheses, their readable/writable statuses are encoded as
rw,
r\-,
\-w, or
\-\-, and required build configuration flags follow, if any\&. A name element encoded as
<i>
or
<j>
indicates an integer component, where the integer varies from 0 to some upper value that must be determined via introspection\&. In the case of
"stats\&.arenas\&.<i>\&.*",
<i>
equal to
"arenas\&.narenas"
can be used to access the summation of statistics from all arenas\&. Take special note of the
"epoch"
mallctl, which controls refreshing of cached dynamic statistics\&.
.PP
"version" (\fBconst char *\fR) r\-
.RS 4
Return the jemalloc version string\&.
.RE
.PP
"epoch" (\fBuint64_t\fR) rw
.RS 4
If a value is passed in, refresh the data from which the
\fBmallctl*\fR\fB\fR
functions report values, and increment the epoch\&. Return the current epoch\&. This is useful for detecting whether another thread caused a refresh\&.
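.sp
For example (an illustrative sketch):
.sp
.if n \{\
.RS 4
.\}
.nf
uint64_t epoch = 1;
size_t sz = sizeof(epoch);

/* Advance the epoch so that subsequent statistics reads are refreshed\&. */
mallctl("epoch", &epoch, &sz, &epoch, sz);
.fi
.if n \{\
.RE
.\}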
.RE
.PP
"config\&.cache_oblivious" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-cache\-oblivious\fR
was specified during build configuration\&.
.RE
.PP
"config\&.debug" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-debug\fR
was specified during build configuration\&.
.RE
.PP
"config\&.fill" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-fill\fR
was specified during build configuration\&.
.RE
.PP
"config\&.lazy_lock" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-lazy\-lock\fR
was specified during build configuration\&.
.RE
.PP
"config\&.munmap" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-munmap\fR
was specified during build configuration\&.
.RE
.PP
"config\&.prof" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-prof\fR
was specified during build configuration\&.
.RE
.PP
"config\&.prof_libgcc" (\fBbool\fR) r\-
.RS 4
\fB\-\-disable\-prof\-libgcc\fR
was not specified during build configuration\&.
.RE
.PP
"config\&.prof_libunwind" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-prof\-libunwind\fR
was specified during build configuration\&.
.RE
.PP
"config\&.stats" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-stats\fR
was specified during build configuration\&.
.RE
.PP
"config\&.tcache" (\fBbool\fR) r\-
.RS 4
\fB\-\-disable\-tcache\fR
was not specified during build configuration\&.
.RE
.PP
"config\&.tls" (\fBbool\fR) r\-
.RS 4
\fB\-\-disable\-tls\fR
was not specified during build configuration\&.
.RE
.PP
"config\&.utrace" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-utrace\fR
was specified during build configuration\&.
.RE
.PP
"config\&.valgrind" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-valgrind\fR
was specified during build configuration\&.
.RE
.PP
"config\&.xmalloc" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-xmalloc\fR
was specified during build configuration\&.
.RE
.PP
"opt\&.abort" (\fBbool\fR) r\-
.RS 4
Abort\-on\-warning enabled/disabled\&. If true, most warnings are fatal\&. The process will call
\fBabort\fR(3)
in these cases\&. This option is disabled by default unless
\fB\-\-enable\-debug\fR
is specified during configuration, in which case it is enabled by default\&.
.RE
.PP
"opt\&.dss" (\fBconst char *\fR) r\-
.RS 4
dss (\fBsbrk\fR(2)) allocation precedence as related to
\fBmmap\fR(2)
allocation\&. The following settings are supported if
\fBsbrk\fR(2)
is supported by the operating system: \(lqdisabled\(rq, \(lqprimary\(rq, and \(lqsecondary\(rq; otherwise only \(lqdisabled\(rq is supported\&. The default is \(lqsecondary\(rq if
\fBsbrk\fR(2)
is supported by the operating system; \(lqdisabled\(rq otherwise\&.
.RE
.PP
"opt\&.lg_chunk" (\fBsize_t\fR) r\-
.RS 4
Virtual memory chunk size (log base 2)\&. If a chunk size outside the supported size range is specified, the size is silently clipped to the minimum/maximum supported size\&. The default chunk size is 2 MiB (2^21)\&.
.RE
.PP
"opt\&.narenas" (\fBsize_t\fR) r\-
.RS 4
Maximum number of arenas to use for automatic multiplexing of threads and arenas\&. The default is four times the number of CPUs, or one if there is a single CPU\&.
.RE
.PP
"opt\&.lg_dirty_mult" (\fBssize_t\fR) r\-
.RS 4
Per\-arena minimum ratio (log base 2) of active to dirty pages\&. Some dirty unused pages may be allowed to accumulate, within the limit set by the ratio (or one chunk worth of dirty pages, whichever is greater), before informing the kernel about some of those pages via
\fBmadvise\fR(2)
or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 8:1 (2^3:1); an option value of \-1 will disable dirty page purging\&. See
"arenas\&.lg_dirty_mult"
and
"arena\&.<i>\&.lg_dirty_mult"
for related dynamic control options\&.
.RE
.PP
"opt\&.stats_print" (\fBbool\fR) r\-
.RS 4
Enable/disable statistics printing at exit\&. If enabled, the
\fBmalloc_stats_print\fR\fB\fR
function is called at program exit via an
\fBatexit\fR(3)
function\&. If
\fB\-\-enable\-stats\fR
is specified during configuration, this has the potential to cause deadlock for a multi\-threaded process that exits while one or more threads are executing in the memory allocation functions\&. Furthermore,
\fBatexit\fR\fB\fR
may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls
\fBatexit\fR\fB\fR, so this option is not universally usable (though the application can register its own
\fBatexit\fR\fB\fR
function with equivalent functionality)\&. Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development\&. This option is disabled by default\&.
.RE
.PP
"opt\&.junk" (\fBconst char *\fR) r\- [\fB\-\-enable\-fill\fR]
.RS 4
Junk filling\&. If set to "alloc", each byte of uninitialized allocated memory will be initialized to
0xa5\&. If set to "free", all deallocated memory will be initialized to
0x5a\&. If set to "true", both allocated and deallocated memory will be initialized, and if set to "false", junk filling will be disabled entirely\&. This is intended for debugging and will impact performance negatively\&. This option is "false" by default unless
\fB\-\-enable\-debug\fR
is specified during configuration, in which case it is "true" by default unless running inside
\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2\&.
.RE
.PP
"opt\&.quarantine" (\fBsize_t\fR) r\- [\fB\-\-enable\-fill\fR]
.RS 4
Per thread quarantine size in bytes\&. If non\-zero, each thread maintains a FIFO object quarantine that stores up to the specified number of bytes of memory\&. The quarantined memory is not freed until it is released from quarantine, though it is immediately junk\-filled if the
"opt\&.junk"
option is enabled\&. This feature is of particular use in combination with
\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0 unless running inside Valgrind, in which case the default is 16 MiB\&.
.RE
.PP
"opt\&.redzone" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
.RS 4
Redzones enabled/disabled\&. If enabled, small allocations have redzones before and after them\&. Furthermore, if the
"opt\&.junk"
option is enabled, the redzones are checked for corruption during deallocation\&. However, the primary intended purpose of this feature is to be used in combination with
\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default unless running inside Valgrind\&.
.RE
.PP
"opt\&.zero" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
.RS 4
Zero filling enabled/disabled\&. If enabled, each byte of uninitialized allocated memory will be initialized to 0\&. Note that this initialization only happens once for each byte, so
\fBrealloc\fR\fB\fR
and
\fBrallocx\fR\fB\fR
calls do not zero memory that was previously allocated\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default\&.
.RE
.PP
"opt\&.utrace" (\fBbool\fR) r\- [\fB\-\-enable\-utrace\fR]
.RS 4
Allocation tracing based on
\fButrace\fR(2)
enabled/disabled\&. This option is disabled by default\&.
.RE
.PP
"opt\&.xmalloc" (\fBbool\fR) r\- [\fB\-\-enable\-xmalloc\fR]
.RS 4
Abort\-on\-out\-of\-memory enabled/disabled\&. If enabled, rather than returning failure for any allocation function, display a diagnostic message on
\fBSTDERR_FILENO\fR
and cause the program to drop core (using
\fBabort\fR(3))\&. If an application is designed to depend on this behavior, set the option at compile time by including the following in the source code:
.sp
.if n \{\
.RS 4
.\}
.nf
malloc_conf = "xmalloc:true";
.fi
.if n \{\
.RE
.\}
.sp
This option is disabled by default\&.
.RE
.PP
"opt\&.tcache" (\fBbool\fR) r\- [\fB\-\-enable\-tcache\fR]
.RS 4
Thread\-specific caching (tcache) enabled/disabled\&. When there are multiple threads, each thread uses a tcache for objects up to a certain size\&. Thread\-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use\&. See the
"opt\&.lg_tcache_max"
option for related tuning information\&. This option is enabled by default unless running inside
\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, in which case it is forcefully disabled\&.
.RE
.PP
"opt\&.lg_tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR]
.RS 4
Maximum size class (log base 2) to cache in the thread\-specific cache (tcache)\&. At a minimum, all small size classes are cached, and at a maximum all large size classes are cached\&. The default maximum is 32 KiB (2^15)\&.
.RE
.PP
"opt\&.prof" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Memory profiling enabled/disabled\&. If enabled, profile memory allocation activity\&. See the
"opt\&.prof_active"
option for on\-the\-fly activation/deactivation\&. See the
"opt\&.lg_prof_sample"
option for probabilistic sampling control\&. See the
"opt\&.prof_accum"
option for control of cumulative sample reporting\&. See the
"opt\&.lg_prof_interval"
option for information on interval\-triggered profile dumping, the
"opt\&.prof_gdump"
option for information on high\-water\-triggered profile dumping, and the
"opt\&.prof_final"
option for final profile dumping\&. Profile output is compatible with the
\fBjeprof\fR
command, which is based on the
\fBpprof\fR
that is developed as part of the
\m[blue]\fBgperftools package\fR\m[]\&\s-2\u[3]\d\s+2\&.
.RE
.PP
"opt\&.prof_prefix" (\fBconst char *\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Filename prefix for profile dumps\&. If the prefix is set to the empty string, no automatic dumps will occur; this is primarily useful for disabling the automatic final heap dump (which also disables leak reporting, if enabled)\&. The default prefix is
jeprof\&.
.RE
.PP
"opt\&.prof_active" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Profiling activated/deactivated\&. This is a secondary control mechanism that makes it possible to start the application with profiling enabled (see the
"opt\&.prof"
option) but inactive, then toggle profiling at any time during program execution with the
"prof\&.active"
mallctl\&. This option is enabled by default\&.
.RE
.PP
"opt\&.prof_thread_active_init" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Initial setting for
"thread\&.prof\&.active"
in newly created threads\&. The initial setting for newly created threads can also be changed during execution via the
"prof\&.thread_active_init"
mallctl\&. This option is enabled by default\&.
.RE
.PP
"opt\&.lg_prof_sample" (\fBsize_t\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity\&. Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead\&. The default sample interval is 512 KiB (2^19 B)\&.
.RE
.PP
"opt\&.prof_accum" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Reporting of cumulative object/byte counts in profile dumps enabled/disabled\&. If this option is enabled, every unique backtrace must be stored for the duration of execution\&. Depending on the application, this can impose a large memory overhead, and the cumulative counts are not always of interest\&. This option is disabled by default\&.
.RE
.PP
"opt\&.lg_prof_interval" (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Average interval (log base 2) between memory profile dumps, as measured in bytes of allocation activity\&. The actual interval between dumps may be sporadic because decentralized allocation counters are used to avoid synchronization bottlenecks\&. Profiles are dumped to files named according to the pattern
<prefix>\&.<pid>\&.<seq>\&.i<iseq>\&.heap, where
<prefix>
is controlled by the
"opt\&.prof_prefix"
option\&. By default, interval\-triggered profile dumping is disabled (encoded as \-1)\&.
.RE
.PP
"opt\&.prof_gdump" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Set the initial state of
"prof\&.gdump", which when enabled triggers a memory profile dump every time the total virtual memory exceeds the previous maximum\&. This option is disabled by default\&.
.RE
.PP
"opt\&.prof_final" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Use an
\fBatexit\fR(3)
function to dump final memory usage to a file named according to the pattern
<prefix>\&.<pid>\&.<seq>\&.f\&.heap, where
<prefix>
is controlled by the
"opt\&.prof_prefix"
option\&. Note that
\fBatexit\fR\fB\fR
may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls
\fBatexit\fR\fB\fR, so this option is not universally usable (though the application can register its own
\fBatexit\fR\fB\fR
function with equivalent functionality)\&. This option is disabled by default\&.
.RE
.PP
"opt\&.prof_leak" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Leak reporting enabled/disabled\&. If enabled, use an
\fBatexit\fR(3)
function to report memory leaks detected by allocation sampling\&. See the
"opt\&.prof"
option for information on analyzing heap profile output\&. This option is disabled by default\&.
.RE
.PP
"thread\&.arena" (\fBunsigned\fR) rw
.RS 4
Get or set the arena associated with the calling thread\&. If the specified arena was not initialized beforehand (see the
"arenas\&.initialized"
mallctl), it will be automatically initialized as a side effect of calling this interface\&.
.RE
.PP
"thread\&.allocated" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Get the total number of bytes ever allocated by the calling thread\&. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases\&.
.RE
.PP
"thread\&.allocatedp" (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Get a pointer to the value that is returned by the
"thread\&.allocated"
mallctl\&. This is useful for avoiding the overhead of repeated
\fBmallctl*\fR\fB\fR
calls\&.
.RE
.PP
"thread\&.deallocated" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Get the total number of bytes ever deallocated by the calling thread\&. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases\&.
.RE
.PP
"thread\&.deallocatedp" (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Get a pointer to the value that is returned by the
"thread\&.deallocated"
mallctl\&. This is useful for avoiding the overhead of repeated
\fBmallctl*\fR\fB\fR
calls\&.
.RE
.PP
"thread\&.tcache\&.enabled" (\fBbool\fR) rw [\fB\-\-enable\-tcache\fR]
.RS 4
Enable/disable calling thread\*(Aqs tcache\&. The tcache is implicitly flushed as a side effect of becoming disabled (see
"thread\&.tcache\&.flush")\&.
.RE
.PP
"thread\&.tcache\&.flush" (\fBvoid\fR) \-\- [\fB\-\-enable\-tcache\fR]
.RS 4
Flush calling thread\*(Aqs thread\-specific cache (tcache)\&. This interface releases all cached objects and internal data structures associated with the calling thread\*(Aqs tcache\&. Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits\&. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful\&.
.RE
.PP
"thread\&.prof\&.name" (\fBconst char *\fR) r\- or \-w [\fB\-\-enable\-prof\fR]
.RS 4
Get/set the descriptive name associated with the calling thread in memory profile dumps\&. An internal copy of the name string is created, so the input string need not be maintained after this interface completes execution\&. The output string of this interface should be copied for non\-ephemeral uses, because multiple implementation details can cause asynchronous string deallocation\&. Furthermore, each invocation of this interface can only read or write; simultaneous read/write is not supported due to string lifetime limitations\&. The name string must be nil\-terminated and comprised only of characters in the sets recognized by
\fBisgraph\fR(3)
and
\fBisblank\fR(3)\&.
.RE
.PP
"thread\&.prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
.RS 4
Control whether sampling is currently active for the calling thread\&. This is an activation mechanism in addition to
"prof\&.active"; both must be active for the calling thread to sample\&. This flag is enabled by default\&.
.RE
.PP
"tcache\&.create" (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR]
.RS 4
Create an explicit thread\-specific cache (tcache) and return an identifier that can be passed to the
\fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB)\fR
macro to explicitly use the specified cache rather than the automatically managed one that is used by default\&. Each explicit cache can be used by only one thread at a time; the application must assure that this constraint holds\&.
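.sp
For example (an illustrative sketch; error checking omitted):
.sp
.if n \{\
.RS 4
.\}
.nf
unsigned tc;
size_t sz = sizeof(tc);

mallctl("tcache\&.create", &tc, &sz, NULL, 0);
void *p = mallocx(4096, MALLOCX_TCACHE(tc));
dallocx(p, MALLOCX_TCACHE(tc));
mallctl("tcache\&.destroy", NULL, NULL, &tc, sizeof(tc));
.fi
.if n \{\
.RE
.\}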
.RE
.PP
"tcache\&.flush" (\fBunsigned\fR) \-w [\fB\-\-enable\-tcache\fR]
.RS 4
Flush the specified thread\-specific cache (tcache)\&. The same considerations apply to this interface as to
"thread\&.tcache\&.flush", except that the tcache will never be automatically be discarded\&.
.RE
.PP
"tcache\&.destroy" (\fBunsigned\fR) \-w [\fB\-\-enable\-tcache\fR]
.RS 4
Flush the specified thread\-specific cache (tcache) and make the identifier available for use during a future tcache creation\&.
.RE
.PP
"arena\&.<i>\&.purge" (\fBvoid\fR) \-\-
.RS 4
Purge unused dirty pages for arena <i>, or for all arenas if <i> equals
"arenas\&.narenas"\&.
.RE
.PP
"arena\&.<i>\&.dss" (\fBconst char *\fR) rw
.RS 4
Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals
"arenas\&.narenas"\&. See
"opt\&.dss"
for supported settings\&.
.RE
.PP
"arena\&.<i>\&.lg_dirty_mult" (\fBssize_t\fR) rw
.RS 4
Current per\-arena minimum ratio (log base 2) of active to dirty pages for arena <i>\&. Each time this interface is set and the ratio is increased, pages are synchronously purged as necessary to impose the new ratio\&. See
"opt\&.lg_dirty_mult"
for additional information\&.
.RE
.PP
"arena\&.<i>\&.chunk_hooks" (\fBchunk_hooks_t\fR) rw
.RS 4
Get or set the chunk management hook functions for arena <i>\&. The functions must be capable of operating on all extant chunks associated with arena <i>, usually by passing unknown chunks to the replaced functions\&. In practice, it is feasible to control allocation for arenas created via
"arenas\&.extend"
such that all chunks originate from an application\-supplied chunk allocator (by setting custom chunk hook functions just after arena creation), but the automatically created arenas may have already created chunks prior to the application having an opportunity to take over chunk allocation\&.
.sp
.if n \{\
.RS 4
.\}
.nf
typedef struct {
    chunk_alloc_t *alloc;
    chunk_dalloc_t *dalloc;
    chunk_commit_t *commit;
    chunk_decommit_t *decommit;
    chunk_purge_t *purge;
    chunk_split_t *split;
    chunk_merge_t *merge;
} chunk_hooks_t;
.fi
.if n \{\
.RE
.\}
.sp
The
\fBchunk_hooks_t\fR
structure comprises function pointers which are described individually below\&. jemalloc uses these functions to manage chunk lifetime, which starts off with allocation of mapped committed memory, in the simplest case followed by deallocation\&. However, there are performance and platform reasons to retain chunks for later reuse\&. Cleanup attempts cascade from deallocation to decommit to purging, which gives the chunk management functions opportunities to reject the most permanent cleanup operations in favor of less permanent (and often less costly) operations\&. The chunk splitting and merging operations can also be opted out of, but this is mainly intended to support platforms on which virtual memory mappings provided by the operating system kernel do not automatically coalesce and split, e\&.g\&. Windows\&.
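.sp
For example, the hooks for arena 0 could be read, selectively overridden, and written back along the following lines (an illustrative sketch;
my_chunk_alloc
is a hypothetical application\-supplied function conforming to
\fBchunk_alloc_t\fR):
.sp
.if n \{\
.RS 4
.\}
.nf
chunk_hooks_t hooks;
size_t sz = sizeof(hooks);

/* Read the currently installed hooks for arena 0\&. */
mallctl("arena\&.0\&.chunk_hooks", &hooks, &sz, NULL, 0);

/* Replace only the allocation hook (my_chunk_alloc is hypothetical)\&. */
hooks\&.alloc = my_chunk_alloc;
mallctl("arena\&.0\&.chunk_hooks", NULL, NULL, &hooks, sizeof(hooks));
.fi
.if n \{\
.RE
.\}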
.HP \w'typedef\ void\ *(chunk_alloc_t)('u
.BI "typedef void *(chunk_alloc_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "alignment" ", bool\ *" "zero" ", bool\ *" "commit" ", unsigned\ " "arena_ind" ");"
.sp
.if n \{\
.RS 4
.\}
.nf
.fi
.if n \{\
.RE
.\}
.sp
A chunk allocation function conforms to the
\fBchunk_alloc_t\fR
type and upon success returns a pointer to
\fIsize\fR
bytes of mapped memory on behalf of arena
\fIarena_ind\fR
such that the chunk\*(Aqs base address is a multiple of
\fIalignment\fR, as well as setting
\fI*zero\fR
to indicate whether the chunk is zeroed and
\fI*commit\fR
to indicate whether the chunk is committed\&. Upon error the function returns
\fBNULL\fR
and leaves
\fI*zero\fR
and
\fI*commit\fR
unmodified\&. The
\fIsize\fR
parameter is always a multiple of the chunk size\&. The
\fIalignment\fR
parameter is always a power of two at least as large as the chunk size\&. Zeroing is mandatory if
\fI*zero\fR
is true upon function entry\&. Committing is mandatory if
\fI*commit\fR
is true upon function entry\&. If
\fIchunk\fR
is not
\fBNULL\fR, the returned pointer must be
\fIchunk\fR
on success or
\fBNULL\fR
on error\&. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults\&. Note that replacing the default chunk allocation function makes the arena\*(Aqs
"arena\&.<i>\&.dss"
setting irrelevant\&.
.HP \w'typedef\ bool\ (chunk_dalloc_t)('u
.BI "typedef bool (chunk_dalloc_t)(void\ *" "chunk" ", size_t\ " "size" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");"
.sp
.if n \{\
.RS 4
.\}
.nf
.fi
.if n \{\
.RE
.\}
.sp
A chunk deallocation function conforms to the
\fBchunk_dalloc_t\fR
type and deallocates a
\fIchunk\fR
of given
\fIsize\fR
with
\fIcommitted\fR/decommitted memory as indicated, on behalf of arena
\fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates opt\-out from deallocation; the virtual memory mapping associated with the chunk remains mapped, in the same commit state, and available for future use, in which case it will be automatically retained for later reuse\&.
.HP \w'typedef\ bool\ (chunk_commit_t)('u
.BI "typedef bool (chunk_commit_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");"
.sp
.if n \{\
.RS 4
.\}
.nf
.fi
.if n \{\
.RE
.\}
.sp
A chunk commit function conforms to the
\fBchunk_commit_t\fR
type and commits zeroed physical memory to back pages within a
\fIchunk\fR
of given
\fIsize\fR
at
\fIoffset\fR
bytes, extending for
\fIlength\fR
on behalf of arena
\fIarena_ind\fR, returning false upon success\&. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults\&. If the function returns true, this indicates insufficient physical memory to satisfy the request\&.
.HP \w'typedef\ bool\ (chunk_decommit_t)('u
.BI "typedef bool (chunk_decommit_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");"
.sp
.if n \{\
.RS 4
.\}
.nf
.fi
.if n \{\
.RE
.\}
.sp
A chunk decommit function conforms to the
\fBchunk_decommit_t\fR
type and decommits any physical memory that is backing pages within a
\fIchunk\fR
of given
\fIsize\fR
at
\fIoffset\fR
bytes, extending for
\fIlength\fR
on behalf of arena
\fIarena_ind\fR, returning false upon success, in which case the pages will be committed via the chunk commit function before being reused\&. If the function returns true, this indicates opt\-out from decommit; the memory remains committed and available for future use, in which case it will be automatically retained for later reuse\&.
.HP \w'typedef\ bool\ (chunk_purge_t)('u
.BI "typedef bool (chunk_purge_t)(void\ *" "chunk" ", size_t" "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");"
.sp
.if n \{\
.RS 4
.\}
.nf
.fi
.if n \{\
.RE
.\}
.sp
A chunk purge function conforms to the
\fBchunk_purge_t\fR
type and optionally discards physical pages within the virtual memory mapping associated with
\fIchunk\fR
of given
\fIsize\fR
at
\fIoffset\fR
bytes, extending for
\fIlength\fR
on behalf of arena
\fIarena_ind\fR, returning false if pages within the purged virtual memory range will be zero\-filled the next time they are accessed\&.
.HP \w'typedef\ bool\ (chunk_split_t)('u
.BI "typedef bool (chunk_split_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "size_a" ", size_t\ " "size_b" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");"
.sp
.if n \{\
.RS 4
.\}
.nf
.fi
.if n \{\
.RE
.\}
.sp
A chunk split function conforms to the
\fBchunk_split_t\fR
type and optionally splits
\fIchunk\fR
of given
\fIsize\fR
into two adjacent chunks, the first of
\fIsize_a\fR
bytes, and the second of
\fIsize_b\fR
bytes, operating on
\fIcommitted\fR/decommitted memory as indicated, on behalf of arena
\fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates that the chunk remains unsplit and therefore should continue to be operated on as a whole\&.
.HP \w'typedef\ bool\ (chunk_merge_t)('u
.BI "typedef bool (chunk_merge_t)(void\ *" "chunk_a" ", size_t\ " "size_a" ", void\ *" "chunk_b" ", size_t\ " "size_b" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");"
.sp
.if n \{\
.RS 4
.\}
.nf
.fi
.if n \{\
.RE
.\}
.sp
A chunk merge function conforms to the
\fBchunk_merge_t\fR
type and optionally merges adjacent chunks,
\fIchunk_a\fR
of given
\fIsize_a\fR
and
\fIchunk_b\fR
of given
\fIsize_b\fR
into one contiguous chunk, operating on
\fIcommitted\fR/decommitted memory as indicated, on behalf of arena
\fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates that the chunks remain distinct mappings and therefore should continue to be operated on independently\&.
.RE
.PP
"arenas\&.narenas" (\fBunsigned\fR) r\-
.RS 4
Current limit on number of arenas\&.
.RE
.PP
"arenas\&.initialized" (\fBbool *\fR) r\-
.RS 4
An array of
"arenas\&.narenas"
booleans\&. Each boolean indicates whether the corresponding arena is initialized\&.
.RE
.PP
"arenas\&.lg_dirty_mult" (\fBssize_t\fR) rw
.RS 4
Current default per\-arena minimum ratio (log base 2) of active to dirty pages, used to initialize
"arena\&.<i>\&.lg_dirty_mult"
during arena creation\&. See
"opt\&.lg_dirty_mult"
for additional information\&.
.RE
.PP
"arenas\&.quantum" (\fBsize_t\fR) r\-
.RS 4
Quantum size\&.
.RE
.PP
"arenas\&.page" (\fBsize_t\fR) r\-
.RS 4
Page size\&.
.RE
.PP
"arenas\&.tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR]
.RS 4
Maximum thread\-cached size class\&.
.RE
.PP
"arenas\&.nbins" (\fBunsigned\fR) r\-
.RS 4
Number of bin size classes\&.
.RE
.PP
"arenas\&.nhbins" (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR]
.RS 4
Total number of thread cache bin size classes\&.
.RE
.PP
"arenas\&.bin\&.<i>\&.size" (\fBsize_t\fR) r\-
.RS 4
Maximum size supported by size class\&.
.RE
.PP
"arenas\&.bin\&.<i>\&.nregs" (\fBuint32_t\fR) r\-
.RS 4
Number of regions per page run\&.
.RE
.PP
"arenas\&.bin\&.<i>\&.run_size" (\fBsize_t\fR) r\-
.RS 4
Number of bytes per page run\&.
.RE
.PP
"arenas\&.nlruns" (\fBunsigned\fR) r\-
.RS 4
Total number of large size classes\&.
.RE
.PP
"arenas\&.lrun\&.<i>\&.size" (\fBsize_t\fR) r\-
.RS 4
Maximum size supported by this large size class\&.
.RE
.PP
"arenas\&.nhchunks" (\fBunsigned\fR) r\-
.RS 4
Total number of huge size classes\&.
.RE
.PP
"arenas\&.hchunk\&.<i>\&.size" (\fBsize_t\fR) r\-
.RS 4
Maximum size supported by this huge size class\&.
.RE
.PP
"arenas\&.extend" (\fBunsigned\fR) r\-
.RS 4
Extend the array of arenas by appending a new arena, and return the new arena index\&.
.RE
.PP
"prof\&.thread_active_init" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
.RS 4
Control the initial setting for
"thread\&.prof\&.active"
in newly created threads\&. See the
"opt\&.prof_thread_active_init"
option for additional information\&.
.RE
.PP
"prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
.RS 4
Control whether sampling is currently active\&. See the
"opt\&.prof_active"
option for additional information, as well as the interrelated
"thread\&.prof\&.active"
mallctl\&.
.RE
.PP
"prof\&.dump" (\fBconst char *\fR) \-w [\fB\-\-enable\-prof\fR]
.RS 4
Dump a memory profile to the specified file, or if NULL is specified, to a file according to the pattern
<prefix>\&.<pid>\&.<seq>\&.m<mseq>\&.heap, where
<prefix>
is controlled by the
"opt\&.prof_prefix"
option\&.
.RE
.PP
"prof\&.gdump" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
.RS 4
When enabled, trigger a memory profile dump every time the total virtual memory exceeds the previous maximum\&. Profiles are dumped to files named according to the pattern
<prefix>\&.<pid>\&.<seq>\&.u<useq>\&.heap, where
<prefix>
is controlled by the
"opt\&.prof_prefix"
option\&.
.RE
.PP
"prof\&.reset" (\fBsize_t\fR) \-w [\fB\-\-enable\-prof\fR]
.RS 4
Reset all memory profile statistics, and optionally update the sample rate (see
"opt\&.lg_prof_sample"
and
"prof\&.lg_sample")\&.
.RE
.PP
"prof\&.lg_sample" (\fBsize_t\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Get the current sample rate (see
"opt\&.lg_prof_sample")\&.
.RE
.PP
"prof\&.interval" (\fBuint64_t\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Average number of bytes allocated between interval\-based profile dumps\&. See the
"opt\&.lg_prof_interval"
option for additional information\&.
.RE
.PP
"stats\&.cactive" (\fBsize_t *\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Pointer to a counter that contains an approximate count of the current number of bytes in active pages\&. The estimate may be high, but never low, because each arena rounds up when computing its contribution to the counter\&. Note that the
"epoch"
mallctl has no bearing on this counter\&. Furthermore, counter consistency is maintained via atomic operations, so it is necessary to use an atomic operation in order to guarantee a consistent read when dereferencing the pointer\&.
.RE
.PP
"stats\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Total number of bytes allocated by the application\&.
.RE
.PP
"stats\&.active" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Total number of bytes in active pages allocated by the application\&. This is a multiple of the page size, and greater than or equal to
"stats\&.allocated"\&. This does not include
"stats\&.arenas\&.<i>\&.pdirty", nor pages entirely devoted to allocator metadata\&.
.RE
.PP
"stats\&.metadata" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Total number of bytes dedicated to metadata, which comprise base allocations used for bootstrap\-sensitive internal allocator data structures, arena chunk headers (see
"stats\&.arenas\&.<i>\&.metadata\&.mapped"), and internal allocations (see
"stats\&.arenas\&.<i>\&.metadata\&.allocated")\&.
.RE
.PP
"stats\&.resident" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Maximum number of bytes in physically resident data pages mapped by the allocator, comprising all pages dedicated to allocator metadata, pages backing active allocations, and unused dirty pages\&. This is a maximum rather than precise because pages may not actually be physically resident if they correspond to demand\-zeroed virtual memory that has not yet been touched\&. This is a multiple of the page size, and is larger than
"stats\&.active"\&.
.RE
.PP
"stats\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Total number of bytes in active chunks mapped by the allocator\&. This is a multiple of the chunk size, and is larger than
"stats\&.active"\&. This does not include inactive chunks, even those that contain unused dirty pages, which means that there is no strict ordering between this and
"stats\&.resident"\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.dss" (\fBconst char *\fR) r\-
.RS 4
dss (\fBsbrk\fR(2)) allocation precedence as related to
\fBmmap\fR(2)
allocation\&. See
"opt\&.dss"
for details\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.lg_dirty_mult" (\fBssize_t\fR) r\-
.RS 4
Minimum ratio (log base 2) of active to dirty pages\&. See
"opt\&.lg_dirty_mult"
for details\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.nthreads" (\fBunsigned\fR) r\-
.RS 4
Number of threads currently assigned to arena\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.pactive" (\fBsize_t\fR) r\-
.RS 4
Number of pages in active runs\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.pdirty" (\fBsize_t\fR) r\-
.RS 4
Number of pages within unused runs that are potentially dirty, and for which
\fBmadvise\fR\fB\fI\&.\&.\&.\fR\fR\fB \fR\fB\fI\fBMADV_DONTNEED\fR\fR\fR
or similar has not been called\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of mapped bytes\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.metadata\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of mapped bytes in arena chunk headers, which track the states of the non\-metadata pages\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.metadata\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of bytes dedicated to internal allocations\&. Internal allocations differ from application\-originated allocations in that they are for internal use, and that they are omitted from heap profiles\&. This statistic is reported separately from
"stats\&.metadata"
and
"stats\&.arenas\&.<i>\&.metadata\&.mapped"
because it overlaps with e\&.g\&. the
"stats\&.allocated"
and
"stats\&.active"
statistics, whereas the other metadata statistics do not\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.npurge" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of dirty page purge sweeps performed\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.nmadvise" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of
\fBmadvise\fR\fB\fI\&.\&.\&.\fR\fR\fB \fR\fB\fI\fBMADV_DONTNEED\fR\fR\fR
or similar calls made to purge dirty pages\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.purged" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of pages purged\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.small\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of bytes currently allocated by small objects\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.small\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocation requests served by small bins\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.small\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of small objects returned to bins\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.small\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of small allocation requests\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.large\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of bytes currently allocated by large objects\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.large\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of large allocation requests served directly by the arena\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.large\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of large deallocation requests served directly by the arena\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.large\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of large allocation requests\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.huge\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of bytes currently allocated by huge objects\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.huge\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of huge allocation requests served directly by the arena\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.huge\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of huge deallocation requests served directly by the arena\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.huge\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of huge allocation requests\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocations served by bin\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.bins\&.<j>\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocations returned to bin\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocation requests\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.bins\&.<j>\&.curregs" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Current number of regions for this size class\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nfills" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR]
.RS 4
Cumulative number of tcache fills\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nflushes" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR]
.RS 4
Cumulative number of tcache flushes\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nruns" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of runs created\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nreruns" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of times the current run from which to allocate changed\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.bins\&.<j>\&.curruns" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Current number of runs\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocation requests for this size class served directly by the arena\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of deallocation requests for this size class served directly by the arena\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocation requests for this size class\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.curruns" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Current number of runs for this size class\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocation requests for this size class served directly by the arena\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of deallocation requests for this size class served directly by the arena\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocation requests for this size class\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.curhchunks" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Current number of huge allocations for this size class\&.
.RE
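.PP
The per\-arena and per\-size\-class statistics above are read by substituting concrete indices for <i> and <j>\&. For example, the following sketch (illustrative only) reads
"stats\&.arenas\&.<i>\&.pactive"
for arena 0 by patching the MIB returned by
\fBmallctlnametomib\fR\fB\fR:
.sp
.if n \{\
.RS 4
.\}
.nf
size_t mib[5];
size_t miblen = sizeof(mib) / sizeof(mib[0]);
size_t pactive, len;

mallctlnametomib("stats\&.arenas\&.0\&.pactive", mib, &miblen);
mib[2] = 0;	/* Arena index; change to inspect another arena\&. */
len = sizeof(pactive);
mallctlbymib(mib, miblen, &pactive, &len, NULL, 0);
.fi
.if n \{\
.RE
.\}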
.SH "DEBUGGING MALLOC PROBLEMS"
.PP
When debugging, it is a good idea to configure/build jemalloc with the
\fB\-\-enable\-debug\fR
and
\fB\-\-enable\-fill\fR
options, and recompile the program with suitable options and symbols for debugger support\&. When so configured, jemalloc incorporates a wide variety of run\-time assertions that catch application errors such as double\-free, write\-after\-free, etc\&.
.PP
Programs often accidentally depend on \(lquninitialized\(rq memory actually being filled with zero bytes\&. Junk filling (see the
"opt\&.junk"
option) tends to expose such bugs in the form of obviously incorrect results and/or coredumps\&. Conversely, zero filling (see the
"opt\&.zero"
option) eliminates the symptoms of such bugs\&. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs\&.
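.PP
For example, a build used for debugging might set these options at compile time via the
\fImalloc_conf\fR
variable (an illustrative setting that assumes jemalloc was configured with
\fB\-\-enable\-fill\fR):
.sp
.if n \{\
.RS 4
.\}
.nf
/* Read before main() is entered; see the TUNING section\&. */
const char *malloc_conf = "abort:true,junk:true";
.fi
.if n \{\
.RE
.\}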
.PP
This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information would be prohibitive\&. However, jemalloc does integrate with the most excellent
\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2
tool if the
\fB\-\-enable\-valgrind\fR
configuration option is enabled\&.
.SH "DIAGNOSTIC MESSAGES"
.PP
If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor
\fBSTDERR_FILENO\fR\&. Errors will result in the process dumping core\&. If the
"opt\&.abort"
option is set, most warnings are treated as errors\&.
.PP
The
\fImalloc_message\fR
variable allows the programmer to override the function which emits the text strings forming the errors and warnings if for some reason the
\fBSTDERR_FILENO\fR
file descriptor is not suitable for this\&.
\fBmalloc_message\fR\fB\fR
takes the
\fIcbopaque\fR
pointer argument that is
\fBNULL\fR
unless overridden by the arguments in a call to
\fBmalloc_stats_print\fR\fB\fR, followed by a string pointer\&. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock\&.
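.PP
For example, an application for which
\fBSTDERR_FILENO\fR
is unsuitable might install a replacement writer such as the following sketch (illustrative;
\fIlog_fd\fR
is assumed to be a descriptor opened elsewhere, the usual <unistd\&.h> and <string\&.h> declarations are assumed, and the callback allocates no memory):
.sp
.if n \{\
.RS 4
.\}
.nf
static int log_fd;

static void
log_malloc_message(void *cbopaque, const char *s)
{

	(void)cbopaque;
	(void)write(log_fd, s, strlen(s));
}

static void
install_malloc_logger(int fd)
{

	log_fd = fd;
	malloc_message = log_malloc_message;
}
.fi
.if n \{\
.RE
.\}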
.PP
All messages are prefixed by \(lq<jemalloc>:\(rq\&.
.SH "RETURN VALUES"
.SS "Standard API"
.PP
The
\fBmalloc\fR\fB\fR
and
\fBcalloc\fR\fB\fR
functions return a pointer to the allocated memory if successful; otherwise a
\fBNULL\fR
pointer is returned and
\fIerrno\fR
is set to
ENOMEM\&.
.PP
The
\fBposix_memalign\fR\fB\fR
function returns the value 0 if successful; otherwise it returns an error value\&. The
\fBposix_memalign\fR\fB\fR
function will fail if:
.PP
EINVAL
.RS 4
The
\fIalignment\fR
parameter is not a power of 2 at least as large as
sizeof(\fBvoid *\fR)\&.
.RE
.PP
ENOMEM
.RS 4
Memory allocation error\&.
.RE
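.PP
For example (an illustrative sketch), requesting a 4096\-byte\-aligned allocation and distinguishing the two error cases:
.sp
.if n \{\
.RS 4
.\}
.nf
void *p;
int err = posix_memalign(&p, 4096, 1024);
if (err == EINVAL) {
	/* The requested alignment was invalid\&. */
} else if (err == ENOMEM) {
	/* Insufficient memory\&. */
}
.fi
.if n \{\
.RE
.\}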
.PP
The
\fBaligned_alloc\fR\fB\fR
function returns a pointer to the allocated memory if successful; otherwise a
\fBNULL\fR
pointer is returned and
\fIerrno\fR
is set\&. The
\fBaligned_alloc\fR\fB\fR
function will fail if:
.PP
EINVAL
.RS 4
The
\fIalignment\fR
parameter is not a power of 2\&.
.RE
.PP
ENOMEM
.RS 4
Memory allocation error\&.
.RE
.PP
The
\fBrealloc\fR\fB\fR
function returns a pointer, possibly identical to
\fIptr\fR, to the allocated memory if successful; otherwise a
\fBNULL\fR
pointer is returned, and
\fIerrno\fR
is set to
ENOMEM
if the error was the result of an allocation failure\&. The
\fBrealloc\fR\fB\fR
function always leaves the original buffer intact when an error occurs\&.
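.PP
Because the original buffer is left intact on failure, callers should assign the return value to a temporary rather than overwriting the original pointer (illustrative sketch;
\fIp\fR
and
\fInew_size\fR
are assumed):
.sp
.if n \{\
.RS 4
.\}
.nf
void *tmp = realloc(p, new_size);
if (tmp == NULL) {
	/* p is unchanged and must eventually be freed\&. */
} else
	p = tmp;
.fi
.if n \{\
.RE
.\}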
.PP
The
\fBfree\fR\fB\fR
function returns no value\&.
.SS "Non\-standard API"
.PP
The
\fBmallocx\fR\fB\fR
and
\fBrallocx\fR\fB\fR
functions return a pointer to the allocated memory if successful; otherwise a
\fBNULL\fR
pointer is returned to indicate insufficient contiguous memory was available to service the allocation request\&.
.PP
The
\fBxallocx\fR\fB\fR
function returns the real size of the resulting resized allocation pointed to by
\fIptr\fR, which is a value less than
\fIsize\fR
if the allocation could not be adequately grown in place\&.
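.PP
Because
\fBxallocx\fR\fB\fR
never moves an allocation, a common pattern (shown here as an illustrative sketch;
\fIptr\fR
and a non\-zero
\fInew_size\fR
are assumed) is to attempt in\-place growth and fall back to
\fBrallocx\fR\fB\fR
on failure:
.sp
.if n \{\
.RS 4
.\}
.nf
if (xallocx(ptr, new_size, 0, 0) < new_size) {
	/* Could not grow in place; allocate and copy instead\&. */
	void *tmp = rallocx(ptr, new_size, 0);
	if (tmp != NULL)
		ptr = tmp;
}
.fi
.if n \{\
.RE
.\}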
.PP
The
\fBsallocx\fR\fB\fR
function returns the real size of the allocation pointed to by
\fIptr\fR\&.
.PP
The
\fBnallocx\fR\fB\fR
function returns the real size that would result from a successful equivalent
\fBmallocx\fR\fB\fR
function call, or zero if insufficient memory is available to perform the size computation\&.
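.PP
For example (an illustrative sketch; a non\-zero
\fIsize\fR
is assumed), the size reported by
\fBnallocx\fR\fB\fR
can later be passed to
\fBsdallocx\fR\fB\fR
as a deallocation hint:
.sp
.if n \{\
.RS 4
.\}
.nf
size_t usize = nallocx(size, 0);
void *p = (usize != 0) ? mallocx(size, 0) : NULL;
/* \&.\&.\&. */
if (p != NULL)
	sdallocx(p, usize, 0);
.fi
.if n \{\
.RE
.\}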
.PP
The
\fBmallctl\fR\fB\fR,
\fBmallctlnametomib\fR\fB\fR, and
\fBmallctlbymib\fR\fB\fR
functions return 0 on success; otherwise they return an error value\&. The functions will fail if:
.PP
EINVAL
.RS 4
\fInewp\fR
is not
\fBNULL\fR, and
\fInewlen\fR
is too large or too small\&. Alternatively,
\fI*oldlenp\fR
is too large or too small; in this case as much data as possible are read despite the error\&.
.RE
.PP
ENOENT
.RS 4
\fIname\fR
or
\fImib\fR
specifies an unknown/invalid value\&.
.RE
.PP
EPERM
.RS 4
Attempt to read or write void value, or attempt to write read\-only value\&.
.RE
.PP
EAGAIN
.RS 4
A memory allocation failure occurred\&.
.RE
.PP
EFAULT
.RS 4
An interface with side effects failed in some way not directly related to
\fBmallctl*\fR\fB\fR
read/write processing\&.
.RE
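.PP
For example (an illustrative sketch), the return value of a
\fBmallctl\fR\fB\fR
read can be checked against the error values listed above:
.sp
.if n \{\
.RS 4
.\}
.nf
unsigned narenas;
size_t len = sizeof(narenas);
int ret = mallctl("arenas\&.narenas", &narenas, &len, NULL, 0);
if (ret != 0) {
	/* ret is one of the error values listed above\&. */
}
.fi
.if n \{\
.RE
.\}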
.PP
The
\fBmalloc_usable_size\fR\fB\fR
function returns the usable size of the allocation pointed to by
\fIptr\fR\&.
.SH "ENVIRONMENT"
.PP
The following environment variable affects the execution of the allocation functions:
.PP
\fBMALLOC_CONF\fR
.RS 4
If the environment variable
\fBMALLOC_CONF\fR
is set, the characters it contains will be interpreted as options\&.
.RE
.SH "EXAMPLES"
.PP
To dump core whenever a problem occurs:
.sp
.if n \{\
.RS 4
.\}
.nf
ln \-s \*(Aqabort:true\*(Aq /etc/malloc\&.conf
.fi
.if n \{\
.RE
.\}
.PP
To specify in the source a chunk size that is 16 MiB:
.sp
.if n \{\
.RS 4
.\}
.nf
malloc_conf = "lg_chunk:24";
.fi
.if n \{\
.RE
.\}
.SH "SEE ALSO"
.PP
\fBmadvise\fR(2),
\fBmmap\fR(2),
\fBsbrk\fR(2),
\fButrace\fR(2),
\fBalloca\fR(3),
\fBatexit\fR(3),
\fBgetpagesize\fR(3)
.SH "STANDARDS"
.PP
The
\fBmalloc\fR\fB\fR,
\fBcalloc\fR\fB\fR,
\fBrealloc\fR\fB\fR, and
\fBfree\fR\fB\fR
functions conform to ISO/IEC 9899:1990 (\(lqISO C90\(rq)\&.
.PP
The
\fBposix_memalign\fR\fB\fR
function conforms to IEEE Std 1003\&.1\-2001 (\(lqPOSIX\&.1\(rq)\&.
.SH "AUTHOR"
.PP
\fBJason Evans\fR
.RS 4
.RE
.SH "NOTES"
.IP " 1." 4
jemalloc website
.RS 4
\%http://www.canonware.com/jemalloc/
.RE
.IP " 2." 4
Valgrind
.RS 4
\%http://valgrind.org/
.RE
.IP " 3." 4
gperftools package
.RS 4
\%http://code.google.com/p/gperftools/
.RE
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>JEMALLOC</title><meta name="generator" content="DocBook XSL Stylesheets V1.78.1"></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="refentry"><a name="idp45223136"></a><div class="titlepage"></div><div class="refnamediv"><h2>Name</h2><p>jemalloc &#8212; general purpose memory allocation functions</p></div><div class="refsect1"><a name="library"></a><h2>LIBRARY</h2><p>This manual describes jemalloc 4.0.3-0-ge9192eacf8935e29fc62fddc2701f7942b1cc02c. More information
can be found at the <a class="ulink" href="http://www.canonware.com/jemalloc/" target="_top">jemalloc website</a>.</p></div><div class="refsynopsisdiv"><h2>SYNOPSIS</h2><div class="funcsynopsis"><pre class="funcsynopsisinfo">#include &lt;<code class="filename">jemalloc/jemalloc.h</code>&gt;</pre><div class="refsect2"><a name="idp44244480"></a><h3>Standard API</h3><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">malloc</b>(</code></td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">calloc</b>(</code></td><td>size_t <var class="pdparam">number</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">posix_memalign</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">aligned_alloc</b>(</code></td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">realloc</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">free</b>(</code></td><td>void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div></div><div class="refsect2"><a name="idp46062768"></a><h3>Non-standard API</h3><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">mallocx</b>(</code></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td></td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">rallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td></td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 
0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">xallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">extra</var>, </td></tr><tr><td></td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">sallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td></td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">dallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td></td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">sdallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td></td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">nallocx</b>(</code></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td></td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctl</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td></td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td></td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td></td><td>void *<var class="pdparam">newp</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlnametomib</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td></td><td>size_t *<var class="pdparam">mibp</var>, </td></tr><tr><td></td><td>size_t *<var class="pdparam">miblenp</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlbymib</b>(</code></td><td>const size_t *<var class="pdparam">mib</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">miblen</var>, </td></tr><tr><td></td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td></td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td></td><td>void *<var class="pdparam">newp</var>, </td></tr><tr><td></td><td>size_t <var 
class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">malloc_stats_print</b>(</code></td><td>void <var class="pdparam">(*write_cb)</var>
<code>(</code>void *, const char *<code>)</code>
, </td></tr><tr><td></td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td></td><td>const char *<var class="pdparam">opts</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">malloc_usable_size</b>(</code></td><td>const void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">(*malloc_message)</b>(</code></td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td></td><td>const char *<var class="pdparam">s</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div><p><span class="type">const char *</span><code class="varname">malloc_conf</code>;</p></div></div></div><div class="refsect1"><a name="description"></a><h2>DESCRIPTION</h2><div class="refsect2"><a name="idp46115952"></a><h3>Standard API</h3><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>) function allocates
<em class="parameter"><code>size</code></em> bytes of uninitialized memory. The allocated
space is suitably aligned (after possible pointer coercion) for storage
of any type of object.</p><p>The <code class="function">calloc</code>(<em class="parameter"><code></code></em>) function allocates
space for <em class="parameter"><code>number</code></em> objects, each
<em class="parameter"><code>size</code></em> bytes in length. The result is identical to
calling <code class="function">malloc</code>(<em class="parameter"><code></code></em>) with an argument of
<em class="parameter"><code>number</code></em> * <em class="parameter"><code>size</code></em>, with the
exception that the allocated memory is explicitly initialized to zero
bytes.</p><p>The <code class="function">posix_memalign</code>(<em class="parameter"><code></code></em>) function
allocates <em class="parameter"><code>size</code></em> bytes of memory such that the
allocation's base address is a multiple of
<em class="parameter"><code>alignment</code></em>, and returns the allocation in the value
pointed to by <em class="parameter"><code>ptr</code></em>. The requested
<em class="parameter"><code>alignment</code></em> must be a power of 2 at least as large as
<code class="code">sizeof(<span class="type">void *</span>)</code>.</p><p>The <code class="function">aligned_alloc</code>(<em class="parameter"><code></code></em>) function
allocates <em class="parameter"><code>size</code></em> bytes of memory such that the
allocation's base address is a multiple of
<em class="parameter"><code>alignment</code></em>. The requested
<em class="parameter"><code>alignment</code></em> must be a power of 2. Behavior is
undefined if <em class="parameter"><code>size</code></em> is not an integral multiple of
<em class="parameter"><code>alignment</code></em>.</p><p>The <code class="function">realloc</code>(<em class="parameter"><code></code></em>) function changes the
size of the previously allocated memory referenced by
<em class="parameter"><code>ptr</code></em> to <em class="parameter"><code>size</code></em> bytes. The
contents of the memory are unchanged up to the lesser of the new and old
sizes. If the new size is larger, the contents of the newly allocated
portion of the memory are undefined. Upon success, the memory referenced
by <em class="parameter"><code>ptr</code></em> is freed and a pointer to the newly
allocated memory is returned. Note that
<code class="function">realloc</code>(<em class="parameter"><code></code></em>) may move the memory allocation,
resulting in a different return value than <em class="parameter"><code>ptr</code></em>.
If <em class="parameter"><code>ptr</code></em> is <code class="constant">NULL</code>, the
<code class="function">realloc</code>(<em class="parameter"><code></code></em>) function behaves identically to
<code class="function">malloc</code>(<em class="parameter"><code></code></em>) for the specified size.</p><p>The <code class="function">free</code>(<em class="parameter"><code></code></em>) function causes the
allocated memory referenced by <em class="parameter"><code>ptr</code></em> to be made
available for future allocations. If <em class="parameter"><code>ptr</code></em> is
<code class="constant">NULL</code>, no action occurs.</p></div><div class="refsect2"><a name="idp46144704"></a><h3>Non-standard API</h3><p>The <code class="function">mallocx</code>(<em class="parameter"><code></code></em>),
<code class="function">rallocx</code>(<em class="parameter"><code></code></em>),
<code class="function">xallocx</code>(<em class="parameter"><code></code></em>),
<code class="function">sallocx</code>(<em class="parameter"><code></code></em>),
<code class="function">dallocx</code>(<em class="parameter"><code></code></em>),
<code class="function">sdallocx</code>(<em class="parameter"><code></code></em>), and
<code class="function">nallocx</code>(<em class="parameter"><code></code></em>) functions all have a
<em class="parameter"><code>flags</code></em> argument that can be used to specify
options. The functions only check the options that are contextually
relevant. Use bitwise or (<code class="code">|</code>) operations to
specify one or more of the following:
</p><div class="variablelist"><dl class="variablelist"><dt><a name="MALLOCX_LG_ALIGN"></a><span class="term"><code class="constant">MALLOCX_LG_ALIGN(<em class="parameter"><code>la</code></em>)
</code></span></dt><dd><p>Align the memory allocation to start at an address
that is a multiple of <code class="code">(1 &lt;&lt;
<em class="parameter"><code>la</code></em>)</code>. This macro does not validate
that <em class="parameter"><code>la</code></em> is within the valid
range.</p></dd><dt><a name="MALLOCX_ALIGN"></a><span class="term"><code class="constant">MALLOCX_ALIGN(<em class="parameter"><code>a</code></em>)
</code></span></dt><dd><p>Align the memory allocation to start at an address
that is a multiple of <em class="parameter"><code>a</code></em>, where
<em class="parameter"><code>a</code></em> is a power of two. This macro does not
validate that <em class="parameter"><code>a</code></em> is a power of 2.
</p></dd><dt><a name="MALLOCX_ZERO"></a><span class="term"><code class="constant">MALLOCX_ZERO</code></span></dt><dd><p>Initialize newly allocated memory to contain zero
bytes. In the growing reallocation case, the real size prior to
reallocation defines the boundary between untouched bytes and those
that are initialized to contain zero bytes. If this macro is
absent, newly allocated memory is uninitialized.</p></dd><dt><a name="MALLOCX_TCACHE"></a><span class="term"><code class="constant">MALLOCX_TCACHE(<em class="parameter"><code>tc</code></em>)
</code></span></dt><dd><p>Use the thread-specific cache (tcache) specified by
the identifier <em class="parameter"><code>tc</code></em>, which must have been
acquired via the <a class="link" href="#tcache.create">
"<code class="mallctl">tcache.create</code>"
</a>
mallctl. This macro does not validate that
<em class="parameter"><code>tc</code></em> specifies a valid
identifier.</p></dd><dt><a name="MALLOC_TCACHE_NONE"></a><span class="term"><code class="constant">MALLOCX_TCACHE_NONE</code></span></dt><dd><p>Do not use a thread-specific cache (tcache). Unless
<code class="constant">MALLOCX_TCACHE(<em class="parameter"><code>tc</code></em>)</code> or
<code class="constant">MALLOCX_TCACHE_NONE</code> is specified, an
automatically managed tcache will be used under many circumstances.
This macro cannot be used in the same <em class="parameter"><code>flags</code></em>
argument as
<code class="constant">MALLOCX_TCACHE(<em class="parameter"><code>tc</code></em>)</code>.</p></dd><dt><a name="MALLOCX_ARENA"></a><span class="term"><code class="constant">MALLOCX_ARENA(<em class="parameter"><code>a</code></em>)
</code></span></dt><dd><p>Use the arena specified by the index
<em class="parameter"><code>a</code></em>. This macro has no effect for regions that
were allocated via an arena other than the one specified. This
macro does not validate that <em class="parameter"><code>a</code></em> specifies an
arena index in the valid range.</p></dd></dl></div><p>
</p><p>The <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) function allocates at
least <em class="parameter"><code>size</code></em> bytes of memory, and returns a pointer
to the base address of the allocation. Behavior is undefined if
<em class="parameter"><code>size</code></em> is <code class="constant">0</code>, or if request size
overflows due to size class and/or alignment constraints.</p><p>The <code class="function">rallocx</code>(<em class="parameter"><code></code></em>) function resizes the
allocation at <em class="parameter"><code>ptr</code></em> to be at least
<em class="parameter"><code>size</code></em> bytes, and returns a pointer to the base
address of the resulting allocation, which may or may not have moved from
its original location. Behavior is undefined if
<em class="parameter"><code>size</code></em> is <code class="constant">0</code>, or if request size
overflows due to size class and/or alignment constraints.</p><p>The <code class="function">xallocx</code>(<em class="parameter"><code></code></em>) function resizes the
allocation at <em class="parameter"><code>ptr</code></em> in place to be at least
<em class="parameter"><code>size</code></em> bytes, and returns the real size of the
allocation. If <em class="parameter"><code>extra</code></em> is non-zero, an attempt is
made to resize the allocation to be at least <code class="code">(<em class="parameter"><code>size</code></em> +
<em class="parameter"><code>extra</code></em>)</code> bytes, though inability to allocate
the extra byte(s) will not by itself result in failure to resize.
Behavior is undefined if <em class="parameter"><code>size</code></em> is
<code class="constant">0</code>, or if <code class="code">(<em class="parameter"><code>size</code></em> + <em class="parameter"><code>extra</code></em>
&gt; <code class="constant">SIZE_T_MAX</code>)</code>.</p><p>The <code class="function">sallocx</code>(<em class="parameter"><code></code></em>) function returns the
real size of the allocation at <em class="parameter"><code>ptr</code></em>.</p><p>The <code class="function">dallocx</code>(<em class="parameter"><code></code></em>) function causes the
memory referenced by <em class="parameter"><code>ptr</code></em> to be made available for
future allocations.</p><p>The <code class="function">sdallocx</code>(<em class="parameter"><code></code></em>) function is an
extension of <code class="function">dallocx</code>(<em class="parameter"><code></code></em>) with a
<em class="parameter"><code>size</code></em> parameter to allow the caller to pass in the
allocation size as an optimization. The minimum valid input size is the
original requested size of the allocation, and the maximum valid input
size is the corresponding value returned by
<code class="function">nallocx</code>(<em class="parameter"><code></code></em>) or
<code class="function">sallocx</code>(<em class="parameter"><code></code></em>).</p><p>The <code class="function">nallocx</code>(<em class="parameter"><code></code></em>) function allocates no
memory, but it performs the same size computation as the
<code class="function">mallocx</code>(<em class="parameter"><code></code></em>) function, and returns the real
size of the allocation that would result from the equivalent
<code class="function">mallocx</code>(<em class="parameter"><code></code></em>) function call. Behavior is
undefined if <em class="parameter"><code>size</code></em> is <code class="constant">0</code>, or if
request size overflows due to size class and/or alignment
constraints.</p><p>The <code class="function">mallctl</code>(<em class="parameter"><code></code></em>) function provides a
general interface for introspecting the memory allocator, as well as
setting modifiable parameters and triggering actions. The
period-separated <em class="parameter"><code>name</code></em> argument specifies a
location in a tree-structured namespace; see the <a class="xref" href="#mallctl_namespace" title="MALLCTL NAMESPACE">MALLCTL NAMESPACE</a> section for
documentation on the tree contents. To read a value, pass a pointer via
<em class="parameter"><code>oldp</code></em> to adequate space to contain the value, and a
pointer to its length via <em class="parameter"><code>oldlenp</code></em>; otherwise pass
<code class="constant">NULL</code> and <code class="constant">NULL</code>. Similarly, to
write a value, pass a pointer to the value via
<em class="parameter"><code>newp</code></em>, and its length via
<em class="parameter"><code>newlen</code></em>; otherwise pass <code class="constant">NULL</code>
and <code class="constant">0</code>.</p><p>The <code class="function">mallctlnametomib</code>(<em class="parameter"><code></code></em>) function
provides a way to avoid repeated name lookups for applications that
repeatedly query the same portion of the namespace, by translating a name
to a &#8220;Management Information Base&#8221; (MIB) that can be passed
repeatedly to <code class="function">mallctlbymib</code>(<em class="parameter"><code></code></em>). Upon
successful return from <code class="function">mallctlnametomib</code>(<em class="parameter"><code></code></em>),
<em class="parameter"><code>mibp</code></em> contains an array of
<em class="parameter"><code>*miblenp</code></em> integers, where
<em class="parameter"><code>*miblenp</code></em> is the lesser of the number of components
in <em class="parameter"><code>name</code></em> and the input value of
<em class="parameter"><code>*miblenp</code></em>. Thus it is possible to pass a
<em class="parameter"><code>*miblenp</code></em> that is smaller than the number of
period-separated name components, which results in a partial MIB that can
be used as the basis for constructing a complete MIB. For name
components that are integers (e.g. the 2 in
<a class="link" href="#arenas.bin.i.size">
"<code class="mallctl">arenas.bin.2.size</code>"
</a>),
the corresponding MIB component will always be that integer. Therefore,
it is legitimate to construct code like the following: </p><pre class="programlisting">
unsigned nbins, i;
size_t mib[4];
size_t len, miblen;
len = sizeof(nbins);
mallctl("arenas.nbins", &amp;nbins, &amp;len, NULL, 0);
miblen = 4;
mallctlnametomib("arenas.bin.0.size", mib, &amp;miblen);
for (i = 0; i &lt; nbins; i++) {
size_t bin_size;
mib[2] = i;
len = sizeof(bin_size);
mallctlbymib(mib, miblen, &amp;bin_size, &amp;len, NULL, 0);
/* Do something with bin_size... */
}</pre><p>The <code class="function">malloc_stats_print</code>(<em class="parameter"><code></code></em>) function
writes human-readable summary statistics via the
<em class="parameter"><code>write_cb</code></em> callback function pointer and
<em class="parameter"><code>cbopaque</code></em> data passed to
<em class="parameter"><code>write_cb</code></em>, or
<code class="function">malloc_message</code>(<em class="parameter"><code></code></em>) if
<em class="parameter"><code>write_cb</code></em> is <code class="constant">NULL</code>. This
function can be called repeatedly. General information that never
changes during execution can be omitted by specifying "g" as a character
within the <em class="parameter"><code>opts</code></em> string. Note that
<code class="function">malloc_message</code>(<em class="parameter"><code></code></em>) uses the
<code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions internally, so
inconsistent statistics can be reported if multiple threads use these
functions simultaneously. If <code class="option">--enable-stats</code> is
specified during configuration, &#8220;m&#8221; and &#8220;a&#8221; can
be specified to omit merged arena and per arena statistics, respectively;
&#8220;b&#8221;, &#8220;l&#8221;, and &#8220;h&#8221; can be specified to
omit per size class statistics for bins, large objects, and huge objects,
respectively. Unrecognized characters are silently ignored. Note that
thread caching may prevent some statistics from being completely up to
date, since extra locking would be required to merge counters that track
thread cache operations.
</p><p>The <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function
returns the usable size of the allocation pointed to by
<em class="parameter"><code>ptr</code></em>. The return value may be larger than the size
that was requested during allocation. The
<code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function is not a
mechanism for in-place <code class="function">realloc</code>(<em class="parameter"><code></code></em>); rather
it is provided solely as a tool for introspection purposes. Any
discrepancy between the requested allocation size and the size reported
by <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) should not be
depended on, since such behavior is entirely implementation-dependent.
</p></div></div><div class="refsect1"><a name="tuning"></a><h2>TUNING</h2><p>Once, when the first call is made to one of the memory allocation
routines, the allocator initializes its internals based in part on various
options that can be specified at compile- or run-time.</p><p>The string pointed to by the global variable
<code class="varname">malloc_conf</code>, the &#8220;name&#8221; of the file
referenced by the symbolic link named <code class="filename">/etc/malloc.conf</code>, and the value of the
environment variable <code class="envar">MALLOC_CONF</code>, will be interpreted, in
that order, from left to right as options. Note that
<code class="varname">malloc_conf</code> may be read before
<code class="function">main</code>(<em class="parameter"><code></code></em>) is entered, so the declaration of
<code class="varname">malloc_conf</code> should specify an initializer that contains
the final value to be read by jemalloc. <code class="varname">malloc_conf</code> is
a compile-time setting, whereas <code class="filename">/etc/malloc.conf</code> and <code class="envar">MALLOC_CONF</code>
can be safely set any time prior to program invocation.</p><p>An options string is a comma-separated list of option:value pairs.
There is one key corresponding to each <a class="link" href="#opt.abort">
"<code class="mallctl">opt.*</code>"
</a> mallctl (see the <a class="xref" href="#mallctl_namespace" title="MALLCTL NAMESPACE">MALLCTL NAMESPACE</a> section for options
documentation). For example, <code class="literal">abort:true,narenas:1</code> sets
the <a class="link" href="#opt.abort">
"<code class="mallctl">opt.abort</code>"
</a> and <a class="link" href="#opt.narenas">
"<code class="mallctl">opt.narenas</code>"
</a> options. Some
options have boolean values (true/false), others have integer values (base
8, 10, or 16, depending on prefix), and yet others have raw string
values.</p></div><div class="refsect1"><a name="implementation_notes"></a><h2>IMPLEMENTATION NOTES</h2><p>Traditionally, allocators have used
<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span> to obtain memory, which is
suboptimal for several reasons, including race conditions, increased
fragmentation, and artificial limitations on maximum usable memory. If
<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span> is supported by the operating
system, this allocator uses both
<span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> and
<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>, in that order of preference;
otherwise only <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> is used.</p><p>This allocator uses multiple arenas in order to reduce lock
contention for threaded programs on multi-processor systems. This works
well with regard to threading scalability, but incurs some costs. There is
a small fixed per-arena overhead, and additionally, arenas manage memory
completely independently of each other, which means a small fixed increase
in overall memory fragmentation. These overheads are not generally an
issue, given the number of arenas normally used. Note that using
substantially more arenas than the default is not likely to improve
performance, mainly due to reduced cache performance. However, it may make
sense to reduce the number of arenas if an application does not make much
use of the allocation functions.</p><p>In addition to multiple arenas, unless
<code class="option">--disable-tcache</code> is specified during configuration, this
allocator supports thread-specific caching for small and large objects, in
order to make it possible to completely avoid synchronization for most
allocation requests. Such caching allows very fast allocation in the
common case, but it increases memory usage and fragmentation, since a
bounded number of objects can remain allocated in each thread cache.</p><p>Memory is conceptually broken into equal-sized chunks, where the
chunk size is a power of two that is greater than the page size. Chunks
are always aligned to multiples of the chunk size. This alignment makes it
possible to find metadata for user objects very quickly.</p><p>User objects are broken into three categories according to size:
small, large, and huge. Small and large objects are managed entirely by
arenas; huge objects are additionally aggregated in a single data structure
that is shared by all threads. Huge objects are typically used by
applications infrequently enough that this single data structure is not a
scalability issue.</p><p>Each chunk that is managed by an arena tracks its contents as runs of
contiguous pages (unused, backing a set of small objects, or backing one
large object). The combination of chunk alignment and chunk page maps
makes it possible to determine all metadata regarding small and large
allocations in constant time.</p><p>Small objects are managed in groups by page runs. Each run maintains
a bitmap to track which regions are in use. Allocation requests that are no
more than half the quantum (8 or 16, depending on architecture) are rounded
up to the nearest power of two that is at least <code class="code">sizeof(<span class="type">double</span>)</code>. All other object size
classes are multiples of the quantum, spaced such that there are four size
classes for each doubling in size, which limits internal fragmentation to
approximately 20% for all but the smallest size classes. Small size classes
are smaller than four times the page size, large size classes are smaller
than the chunk size (see the <a class="link" href="#opt.lg_chunk">
"<code class="mallctl">opt.lg_chunk</code>"
</a> option), and
huge size classes extend from the chunk size up to one size class less than
the full address space size.</p><p>Allocations are packed tightly together, which can be an issue for
multi-threaded applications. If you need to assure that allocations do not
suffer from cacheline sharing, round your allocation requests up to the
nearest multiple of the cacheline size, or specify cacheline alignment when
allocating.</p><p>The <code class="function">realloc</code>(<em class="parameter"><code></code></em>),
<code class="function">rallocx</code>(<em class="parameter"><code></code></em>), and
<code class="function">xallocx</code>(<em class="parameter"><code></code></em>) functions may resize allocations
without moving them under limited circumstances. Unlike the
<code class="function">*allocx</code>(<em class="parameter"><code></code></em>) API, the standard API does not
officially round up the usable size of an allocation to the nearest size
class, so technically it is necessary to call
<code class="function">realloc</code>(<em class="parameter"><code></code></em>) to grow e.g. a 9-byte allocation to
16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage
trivially succeeds in place as long as the pre-size and post-size both round
up to the same size class. No other API guarantees are made regarding
in-place resizing, but the current implementation also tries to resize large
and huge allocations in place, as long as the pre-size and post-size are
both large or both huge. In such cases shrinkage always succeeds for large
size classes, but for huge size classes the chunk allocator must support
splitting (see <a class="link" href="#arena.i.chunk_hooks">
"<code class="mallctl">arena.&lt;i&gt;.chunk_hooks</code>"
</a>).
Growth only succeeds if the trailing memory is currently available, and
additionally for huge size classes the chunk allocator must support
merging.</p><p>Assuming 2 MiB chunks, 4 KiB pages, and a 16-byte quantum on a
64-bit system, the size classes in each category are as shown in <a class="xref" href="#size_classes" title="Table1.Size classes">Table 1</a>.</p><div class="table"><a name="size_classes"></a><p class="title"><b>Table1.Size classes</b></p><div class="table-contents"><table summary="Size classes" border="1"><colgroup><col align="left" class="c1"><col align="right" class="c2"><col align="left" class="c3"></colgroup><thead><tr><th align="left">Category</th><th align="right">Spacing</th><th align="left">Size</th></tr></thead><tbody><tr><td rowspan="9" align="left">Small</td><td align="right">lg</td><td align="left">[8]</td></tr><tr><td align="right">16</td><td align="left">[16, 32, 48, 64, 80, 96, 112, 128]</td></tr><tr><td align="right">32</td><td align="left">[160, 192, 224, 256]</td></tr><tr><td align="right">64</td><td align="left">[320, 384, 448, 512]</td></tr><tr><td align="right">128</td><td align="left">[640, 768, 896, 1024]</td></tr><tr><td align="right">256</td><td align="left">[1280, 1536, 1792, 2048]</td></tr><tr><td align="right">512</td><td align="left">[2560, 3072, 3584, 4096]</td></tr><tr><td align="right">1 KiB</td><td align="left">[5 KiB, 6 KiB, 7 KiB, 8 KiB]</td></tr><tr><td align="right">2 KiB</td><td align="left">[10 KiB, 12 KiB, 14 KiB]</td></tr><tr><td rowspan="8" align="left">Large</td><td align="right">2 KiB</td><td align="left">[16 KiB]</td></tr><tr><td align="right">4 KiB</td><td align="left">[20 KiB, 24 KiB, 28 KiB, 32 KiB]</td></tr><tr><td align="right">8 KiB</td><td align="left">[40 KiB, 48 KiB, 54 KiB, 64 KiB]</td></tr><tr><td align="right">16 KiB</td><td align="left">[80 KiB, 96 KiB, 112 KiB, 128 KiB]</td></tr><tr><td align="right">32 KiB</td><td align="left">[160 KiB, 192 KiB, 224 KiB, 256 KiB]</td></tr><tr><td align="right">64 KiB</td><td align="left">[320 KiB, 384 KiB, 448 KiB, 512 KiB]</td></tr><tr><td align="right">128 KiB</td><td align="left">[640 KiB, 768 KiB, 896 KiB, 1 MiB]</td></tr><tr><td align="right">256 KiB</td><td align="left">[1280 KiB, 1536 KiB, 1792 KiB]</td></tr><tr><td rowspan="7" align="left">Huge</td><td align="right">256 KiB</td><td align="left">[2 MiB]</td></tr><tr><td align="right">512 KiB</td><td align="left">[2560 KiB, 3 MiB, 3584 KiB, 4 MiB]</td></tr><tr><td align="right">1 MiB</td><td align="left">[5 MiB, 6 MiB, 7 MiB, 8 MiB]</td></tr><tr><td align="right">2 MiB</td><td align="left">[10 MiB, 12 MiB, 14 MiB, 16 MiB]</td></tr><tr><td align="right">4 MiB</td><td align="left">[20 MiB, 24 MiB, 28 MiB, 32 MiB]</td></tr><tr><td align="right">8 MiB</td><td align="left">[40 MiB, 48 MiB, 56 MiB, 64 MiB]</td></tr><tr><td align="right">...</td><td align="left">...</td></tr></tbody></table></div></div><br class="table-break"></div><div class="refsect1"><a name="mallctl_namespace"></a><h2>MALLCTL NAMESPACE</h2><p>The following names are defined in the namespace accessible via the
<code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions. Value types are
specified in parentheses, their readable/writable statuses are encoded as
<code class="literal">rw</code>, <code class="literal">r-</code>, <code class="literal">-w</code>, or
<code class="literal">--</code>, and required build configuration flags follow, if
any. A name element encoded as <code class="literal">&lt;i&gt;</code> or
<code class="literal">&lt;j&gt;</code> indicates an integer component, where the
integer varies from 0 to some upper value that must be determined via
introspection. In the case of
"<code class="mallctl">stats.arenas.&lt;i&gt;.*</code>"
,
<code class="literal">&lt;i&gt;</code> equal to <a class="link" href="#arenas.narenas">
"<code class="mallctl">arenas.narenas</code>"
</a> can be
used to access the summation of statistics from all arenas. Take special
note of the <a class="link" href="#epoch">
"<code class="mallctl">epoch</code>"
</a> mallctl,
which controls refreshing of cached dynamic statistics.</p><div class="variablelist"><dl class="variablelist"><dt><a name="version"></a><span class="term">
"<code class="mallctl">version</code>"
(<span class="type">const char *</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Return the jemalloc version string.</p></dd><dt><a name="epoch"></a><span class="term">
"<code class="mallctl">epoch</code>"
(<span class="type">uint64_t</span>)
<code class="literal">rw</code>
</span></dt><dd><p>If a value is passed in, refresh the data from which
the <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions report values,
and increment the epoch. Return the current epoch. This is useful for
detecting whether another thread caused a refresh.</p></dd><dt><a name="config.cache_oblivious"></a><span class="term">
"<code class="mallctl">config.cache_oblivious</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-cache-oblivious</code> was specified
during build configuration.</p></dd><dt><a name="config.debug"></a><span class="term">
"<code class="mallctl">config.debug</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-debug</code> was specified during
build configuration.</p></dd><dt><a name="config.fill"></a><span class="term">
"<code class="mallctl">config.fill</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-fill</code> was specified during
build configuration.</p></dd><dt><a name="config.lazy_lock"></a><span class="term">
"<code class="mallctl">config.lazy_lock</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-lazy-lock</code> was specified
during build configuration.</p></dd><dt><a name="config.munmap"></a><span class="term">
"<code class="mallctl">config.munmap</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-munmap</code> was specified during
build configuration.</p></dd><dt><a name="config.prof"></a><span class="term">
"<code class="mallctl">config.prof</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-prof</code> was specified during
build configuration.</p></dd><dt><a name="config.prof_libgcc"></a><span class="term">
"<code class="mallctl">config.prof_libgcc</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--disable-prof-libgcc</code> was not
specified during build configuration.</p></dd><dt><a name="config.prof_libunwind"></a><span class="term">
"<code class="mallctl">config.prof_libunwind</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-prof-libunwind</code> was specified
during build configuration.</p></dd><dt><a name="config.stats"></a><span class="term">
"<code class="mallctl">config.stats</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-stats</code> was specified during
build configuration.</p></dd><dt><a name="config.tcache"></a><span class="term">
"<code class="mallctl">config.tcache</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--disable-tcache</code> was not specified
during build configuration.</p></dd><dt><a name="config.tls"></a><span class="term">
"<code class="mallctl">config.tls</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--disable-tls</code> was not specified during
build configuration.</p></dd><dt><a name="config.utrace"></a><span class="term">
"<code class="mallctl">config.utrace</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-utrace</code> was specified during
build configuration.</p></dd><dt><a name="config.valgrind"></a><span class="term">
"<code class="mallctl">config.valgrind</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-valgrind</code> was specified during
build configuration.</p></dd><dt><a name="config.xmalloc"></a><span class="term">
"<code class="mallctl">config.xmalloc</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-xmalloc</code> was specified during
build configuration.</p></dd><dt><a name="opt.abort"></a><span class="term">
"<code class="mallctl">opt.abort</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Abort-on-warning enabled/disabled. If true, most
warnings are fatal. The process will call
<span class="citerefentry"><span class="refentrytitle">abort</span>(3)</span> in these cases. This option is
disabled by default unless <code class="option">--enable-debug</code> is
specified during configuration, in which case it is enabled by default.
</p></dd><dt><a name="opt.dss"></a><span class="term">
"<code class="mallctl">opt.dss</code>"
(<span class="type">const char *</span>)
<code class="literal">r-</code>
</span></dt><dd><p>dss (<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>) allocation precedence as
related to <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> allocation. The following
settings are supported if
<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span> is supported by the operating
system: &#8220;disabled&#8221;, &#8220;primary&#8221;, and
&#8220;secondary&#8221;; otherwise only &#8220;disabled&#8221; is
supported. The default is &#8220;secondary&#8221; if
<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span> is supported by the operating
system; &#8220;disabled&#8221; otherwise.
</p></dd><dt><a name="opt.lg_chunk"></a><span class="term">
"<code class="mallctl">opt.lg_chunk</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Virtual memory chunk size (log base 2). If a chunk
size outside the supported size range is specified, the size is
silently clipped to the minimum/maximum supported size. The default
chunk size is 2 MiB (2^21).
</p></dd><dt><a name="opt.narenas"></a><span class="term">
"<code class="mallctl">opt.narenas</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Maximum number of arenas to use for automatic
multiplexing of threads and arenas. The default is four times the
number of CPUs, or one if there is a single CPU.</p></dd><dt><a name="opt.lg_dirty_mult"></a><span class="term">
"<code class="mallctl">opt.lg_dirty_mult</code>"
(<span class="type">ssize_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Per-arena minimum ratio (log base 2) of active to dirty
pages. Some dirty unused pages may be allowed to accumulate, within
the limit set by the ratio (or one chunk worth of dirty pages,
whichever is greater), before informing the kernel about some of those
pages via <span class="citerefentry"><span class="refentrytitle">madvise</span>(2)</span> or a similar system call. This
provides the kernel with sufficient information to recycle dirty pages
if physical memory becomes scarce and the pages remain unused. The
default minimum ratio is 8:1 (2^3:1); an option value of -1 will
disable dirty page purging. See <a class="link" href="#arenas.lg_dirty_mult">
"<code class="mallctl">arenas.lg_dirty_mult</code>"
</a>
and <a class="link" href="#arena.i.lg_dirty_mult">
"<code class="mallctl">arena.&lt;i&gt;.lg_dirty_mult</code>"
</a>
for related dynamic control options.</p></dd><dt><a name="opt.stats_print"></a><span class="term">
"<code class="mallctl">opt.stats_print</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Enable/disable statistics printing at exit. If
enabled, the <code class="function">malloc_stats_print</code>(<em class="parameter"><code></code></em>)
function is called at program exit via an
<span class="citerefentry"><span class="refentrytitle">atexit</span>(3)</span> function. If
<code class="option">--enable-stats</code> is specified during configuration, this
has the potential to cause deadlock for a multi-threaded process that
exits while one or more threads are executing in the memory allocation
functions. Furthermore, <code class="function">atexit</code>(<em class="parameter"><code></code></em>) may
allocate memory during application initialization and then deadlock
internally when jemalloc in turn calls
<code class="function">atexit</code>(<em class="parameter"><code></code></em>), so this option is not
universally usable (though the application can register its own
<code class="function">atexit</code>(<em class="parameter"><code></code></em>) function with equivalent
functionality). Therefore, this option should only be used with care;
it is primarily intended as a performance tuning aid during application
development. This option is disabled by default.</p></dd><dt><a name="opt.junk"></a><span class="term">
"<code class="mallctl">opt.junk</code>"
(<span class="type">const char *</span>)
<code class="literal">r-</code>
[<code class="option">--enable-fill</code>]
</span></dt><dd><p>Junk filling. If set to "alloc", each byte of
uninitialized allocated memory will be initialized to
<code class="literal">0xa5</code>. If set to "free", all deallocated memory will
be initialized to <code class="literal">0x5a</code>. If set to "true", both
allocated and deallocated memory will be initialized, and if set to
"false", junk filling be disabled entirely. This is intended for
debugging and will impact performance negatively. This option is
"false" by default unless <code class="option">--enable-debug</code> is specified
during configuration, in which case it is "true" by default unless
running inside <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>.</p></dd><dt><a name="opt.quarantine"></a><span class="term">
"<code class="mallctl">opt.quarantine</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-fill</code>]
</span></dt><dd><p>Per thread quarantine size in bytes. If non-zero, each
thread maintains a FIFO object quarantine that stores up to the
specified number of bytes of memory. The quarantined memory is not
freed until it is released from quarantine, though it is immediately
junk-filled if the <a class="link" href="#opt.junk">
"<code class="mallctl">opt.junk</code>"
</a> option is
enabled. This feature is of particular use in combination with <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>, which can detect attempts
to access quarantined objects. This is intended for debugging and will
impact performance negatively. The default quarantine size is 0 unless
running inside Valgrind, in which case the default is 16
MiB.</p></dd><dt><a name="opt.redzone"></a><span class="term">
"<code class="mallctl">opt.redzone</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-fill</code>]
</span></dt><dd><p>Redzones enabled/disabled. If enabled, small
allocations have redzones before and after them. Furthermore, if the
<a class="link" href="#opt.junk">
"<code class="mallctl">opt.junk</code>"
</a> option is
enabled, the redzones are checked for corruption during deallocation.
However, the primary intended purpose of this feature is to be used in
combination with <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>,
which needs redzones in order to do effective buffer overflow/underflow
detection. This option is intended for debugging and will impact
performance negatively. This option is disabled by
default unless running inside Valgrind.</p></dd><dt><a name="opt.zero"></a><span class="term">
"<code class="mallctl">opt.zero</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-fill</code>]
</span></dt><dd><p>Zero filling enabled/disabled. If enabled, each byte
of uninitialized allocated memory will be initialized to 0. Note that
this initialization only happens once for each byte, so
<code class="function">realloc</code>(<em class="parameter"><code></code></em>) and
<code class="function">rallocx</code>(<em class="parameter"><code></code></em>) calls do not zero memory that
was previously allocated. This is intended for debugging and will
impact performance negatively. This option is disabled by default.
</p></dd><dt><a name="opt.utrace"></a><span class="term">
"<code class="mallctl">opt.utrace</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-utrace</code>]
</span></dt><dd><p>Allocation tracing based on
<span class="citerefentry"><span class="refentrytitle">utrace</span>(2)</span> enabled/disabled. This option
is disabled by default.</p></dd><dt><a name="opt.xmalloc"></a><span class="term">
"<code class="mallctl">opt.xmalloc</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-xmalloc</code>]
</span></dt><dd><p>Abort-on-out-of-memory enabled/disabled. If enabled,
rather than returning failure for any allocation function, display a
diagnostic message on <code class="constant">STDERR_FILENO</code> and cause the
program to drop core (using
<span class="citerefentry"><span class="refentrytitle">abort</span>(3)</span>). If an application is
designed to depend on this behavior, set the option at compile time by
including the following in the source code:
</p><pre class="programlisting">
malloc_conf = "xmalloc:true";</pre><p>
This option is disabled by default.</p></dd><dt><a name="opt.tcache"></a><span class="term">
"<code class="mallctl">opt.tcache</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Thread-specific caching (tcache) enabled/disabled. When
there are multiple threads, each thread uses a tcache for objects up to
a certain size. Thread-specific caching allows many allocations to be
satisfied without performing any thread synchronization, at the cost of
increased memory use. See the <a class="link" href="#opt.lg_tcache_max">
"<code class="mallctl">opt.lg_tcache_max</code>"
</a>
option for related tuning information. This option is enabled by
default unless running inside <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>, in which case it is
forcefully disabled.</p></dd><dt><a name="opt.lg_tcache_max"></a><span class="term">
"<code class="mallctl">opt.lg_tcache_max</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Maximum size class (log base 2) to cache in the
thread-specific cache (tcache). At a minimum, all small size classes
are cached, and at a maximum all large size classes are cached. The
default maximum is 32 KiB (2^15).</p></dd><dt><a name="opt.prof"></a><span class="term">
"<code class="mallctl">opt.prof</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Memory profiling enabled/disabled. If enabled, profile
memory allocation activity. See the <a class="link" href="#opt.prof_active">
"<code class="mallctl">opt.prof_active</code>"
</a>
option for on-the-fly activation/deactivation. See the <a class="link" href="#opt.lg_prof_sample">
"<code class="mallctl">opt.lg_prof_sample</code>"
</a>
option for probabilistic sampling control. See the <a class="link" href="#opt.prof_accum">
"<code class="mallctl">opt.prof_accum</code>"
</a>
option for control of cumulative sample reporting. See the <a class="link" href="#opt.lg_prof_interval">
"<code class="mallctl">opt.lg_prof_interval</code>"
</a>
option for information on interval-triggered profile dumping, the <a class="link" href="#opt.prof_gdump">
"<code class="mallctl">opt.prof_gdump</code>"
</a>
option for information on high-water-triggered profile dumping, and the
<a class="link" href="#opt.prof_final">
"<code class="mallctl">opt.prof_final</code>"
</a>
option for final profile dumping. Profile output is compatible with
the <span class="command"><strong>jeprof</strong></span> command, which is based on the
<span class="command"><strong>pprof</strong></span> that is developed as part of the <a class="ulink" href="http://code.google.com/p/gperftools/" target="_top">gperftools
package</a>.</p></dd><dt><a name="opt.prof_prefix"></a><span class="term">
"<code class="mallctl">opt.prof_prefix</code>"
(<span class="type">const char *</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Filename prefix for profile dumps. If the prefix is
set to the empty string, no automatic dumps will occur; this is
primarily useful for disabling the automatic final heap dump (which
also disables leak reporting, if enabled). The default prefix is
<code class="filename">jeprof</code>.</p></dd><dt><a name="opt.prof_active"></a><span class="term">
"<code class="mallctl">opt.prof_active</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Profiling activated/deactivated. This is a secondary
control mechanism that makes it possible to start the application with
profiling enabled (see the <a class="link" href="#opt.prof">
"<code class="mallctl">opt.prof</code>"
</a> option) but
inactive, then toggle profiling at any time during program execution
with the <a class="link" href="#prof.active">
"<code class="mallctl">prof.active</code>"
</a> mallctl.
This option is enabled by default.</p></dd><dt><a name="opt.prof_thread_active_init"></a><span class="term">
"<code class="mallctl">opt.prof_thread_active_init</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Initial setting for <a class="link" href="#thread.prof.active">
"<code class="mallctl">thread.prof.active</code>"
</a>
in newly created threads. The initial setting for newly created threads
can also be changed during execution via the <a class="link" href="#prof.thread_active_init">
"<code class="mallctl">prof.thread_active_init</code>"
</a>
mallctl. This option is enabled by default.</p></dd><dt><a name="opt.lg_prof_sample"></a><span class="term">
"<code class="mallctl">opt.lg_prof_sample</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Average interval (log base 2) between allocation
samples, as measured in bytes of allocation activity. Increasing the
sampling interval decreases profile fidelity, but also decreases the
computational overhead. The default sample interval is 512 KiB (2^19
B).</p></dd><dt><a name="opt.prof_accum"></a><span class="term">
"<code class="mallctl">opt.prof_accum</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Reporting of cumulative object/byte counts in profile
dumps enabled/disabled. If this option is enabled, every unique
backtrace must be stored for the duration of execution. Depending on
the application, this can impose a large memory overhead, and the
cumulative counts are not always of interest. This option is disabled
by default.</p></dd><dt><a name="opt.lg_prof_interval"></a><span class="term">
"<code class="mallctl">opt.lg_prof_interval</code>"
(<span class="type">ssize_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Average interval (log base 2) between memory profile
dumps, as measured in bytes of allocation activity. The actual
interval between dumps may be sporadic because decentralized allocation
counters are used to avoid synchronization bottlenecks. Profiles are
dumped to files named according to the pattern
<code class="filename">&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.i&lt;iseq&gt;.heap</code>,
where <code class="literal">&lt;prefix&gt;</code> is controlled by the
<a class="link" href="#opt.prof_prefix">
"<code class="mallctl">opt.prof_prefix</code>"
</a>
option. By default, interval-triggered profile dumping is disabled
(encoded as -1).
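As a minimal sketch, interval-triggered dumping roughly every 1 GiB of
allocation activity could be requested at compile time (this requires
<code class="option">--enable-prof</code>):
</p><pre class="programlisting">
/* Sketch: enable profiling and dump a profile roughly every 2^30 bytes
 * of allocation activity. */
malloc_conf = "prof:true,lg_prof_interval:30";</pre><p>
The resulting byte interval can be read back at run time via the
<a class="link" href="#prof.interval">
"<code class="mallctl">prof.interval</code>"
</a> mallctl.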
</p></dd><dt><a name="opt.prof_gdump"></a><span class="term">
"<code class="mallctl">opt.prof_gdump</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Set the initial state of <a class="link" href="#prof.gdump">
"<code class="mallctl">prof.gdump</code>"
</a>, which when
enabled triggers a memory profile dump every time the total virtual
memory exceeds the previous maximum. This option is disabled by
default.</p></dd><dt><a name="opt.prof_final"></a><span class="term">
"<code class="mallctl">opt.prof_final</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Use an
<span class="citerefentry"><span class="refentrytitle">atexit</span>(3)</span> function to dump final memory
usage to a file named according to the pattern
<code class="filename">&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.f.heap</code>,
where <code class="literal">&lt;prefix&gt;</code> is controlled by the <a class="link" href="#opt.prof_prefix">
"<code class="mallctl">opt.prof_prefix</code>"
</a>
option. Note that <code class="function">atexit</code>(<em class="parameter"><code></code></em>) may allocate
memory during application initialization and then deadlock internally
when jemalloc in turn calls <code class="function">atexit</code>(<em class="parameter"><code></code></em>), so
this option is not universally usable (though the application can
register its own <code class="function">atexit</code>(<em class="parameter"><code></code></em>) function with
equivalent functionality). This option is disabled by
default.</p></dd><dt><a name="opt.prof_leak"></a><span class="term">
"<code class="mallctl">opt.prof_leak</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Leak reporting enabled/disabled. If enabled, use an
<span class="citerefentry"><span class="refentrytitle">atexit</span>(3)</span> function to report memory leaks
detected by allocation sampling. See the
<a class="link" href="#opt.prof">
"<code class="mallctl">opt.prof</code>"
</a> option for
information on analyzing heap profile output. This option is disabled
by default.</p></dd><dt><a name="thread.arena"></a><span class="term">
"<code class="mallctl">thread.arena</code>"
(<span class="type">unsigned</span>)
<code class="literal">rw</code>
</span></dt><dd><p>Get or set the arena associated with the calling
thread. If the specified arena was not initialized beforehand (see the
<a class="link" href="#arenas.initialized">
"<code class="mallctl">arenas.initialized</code>"
</a>
mallctl), it will be automatically initialized as a side effect of
calling this interface.</p></dd><dt><a name="thread.allocated"></a><span class="term">
"<code class="mallctl">thread.allocated</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Get the total number of bytes ever allocated by the
calling thread. This counter has the potential to wrap around; it is
up to the application to appropriately interpret the counter in such
cases.</p></dd><dt><a name="thread.allocatedp"></a><span class="term">
"<code class="mallctl">thread.allocatedp</code>"
(<span class="type">uint64_t *</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Get a pointer to the value that is returned by the
<a class="link" href="#thread.allocated">
"<code class="mallctl">thread.allocated</code>"
</a>
mallctl. This is useful for avoiding the overhead of repeated
<code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) calls.</p></dd><dt><a name="thread.deallocated"></a><span class="term">
"<code class="mallctl">thread.deallocated</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Get the total number of bytes ever deallocated by the
calling thread. This counter has the potential to wrap around; it is
up to the application to appropriately interpret the counter in such
cases.</p></dd><dt><a name="thread.deallocatedp"></a><span class="term">
"<code class="mallctl">thread.deallocatedp</code>"
(<span class="type">uint64_t *</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Get a pointer to the value that is returned by the
<a class="link" href="#thread.deallocated">
"<code class="mallctl">thread.deallocated</code>"
</a>
mallctl. This is useful for avoiding the overhead of repeated
<code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) calls.</p></dd><dt><a name="thread.tcache.enabled"></a><span class="term">
"<code class="mallctl">thread.tcache.enabled</code>"
(<span class="type">bool</span>)
<code class="literal">rw</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Enable/disable calling thread's tcache. The tcache is
implicitly flushed as a side effect of becoming
disabled (see <a class="link" href="#thread.tcache.flush">
"<code class="mallctl">thread.tcache.flush</code>"
</a>).
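A minimal sketch that disables (and thereby implicitly flushes) the
calling thread's tcache:
</p><pre class="programlisting">
#include &lt;stdbool.h&gt;
#include &lt;jemalloc/jemalloc.h&gt;

void
disable_my_tcache(void)
{
    bool enabled = false;

    /* Writing false disables the calling thread's tcache; the cached
     * objects are returned to their arenas as a side effect. */
    mallctl("thread.tcache.enabled", NULL, NULL, &amp;enabled,
        sizeof(enabled));
}</pre><p>
Passing non-<code class="constant">NULL</code>
<em class="parameter"><code>oldp</code></em>/<em class="parameter"><code>oldlenp</code></em>
arguments instead reads the current setting.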
</p></dd><dt><a name="thread.tcache.flush"></a><span class="term">
"<code class="mallctl">thread.tcache.flush</code>"
(<span class="type">void</span>)
<code class="literal">--</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Flush calling thread's thread-specific cache (tcache).
This interface releases all cached objects and internal data structures
associated with the calling thread's tcache. Ordinarily, this interface
need not be called, since automatic periodic incremental garbage
collection occurs, and the thread cache is automatically discarded when
a thread exits. However, garbage collection is triggered by allocation
activity, so it is possible for a thread that stops
allocating/deallocating to retain its cache indefinitely, in which case
the developer may find manual flushing useful.</p></dd><dt><a name="thread.prof.name"></a><span class="term">
"<code class="mallctl">thread.prof.name</code>"
(<span class="type">const char *</span>)
<code class="literal">r-</code> or
<code class="literal">-w</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Get/set the descriptive name associated with the calling
thread in memory profile dumps. An internal copy of the name string is
created, so the input string need not be maintained after this interface
completes execution. The output string of this interface should be
copied for non-ephemeral uses, because multiple implementation details
can cause asynchronous string deallocation. Furthermore, each
invocation of this interface can only read or write; simultaneous
read/write is not supported due to string lifetime limitations. The
name string must be nil-terminated and comprised only of characters in the
sets recognized
by <span class="citerefentry"><span class="refentrytitle">isgraph</span>(3)</span> and
<span class="citerefentry"><span class="refentrytitle">isblank</span>(3)</span>.</p></dd><dt><a name="thread.prof.active"></a><span class="term">
"<code class="mallctl">thread.prof.active</code>"
(<span class="type">bool</span>)
<code class="literal">rw</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Control whether sampling is currently active for the
calling thread. This is an activation mechanism in addition to <a class="link" href="#prof.active">
"<code class="mallctl">prof.active</code>"
</a>; both must
be active for the calling thread to sample. This flag is enabled by
default.</p></dd><dt><a name="tcache.create"></a><span class="term">
"<code class="mallctl">tcache.create</code>"
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Create an explicit thread-specific cache (tcache) and
return an identifier that can be passed to the <a class="link" href="#MALLOCX_TCACHE"><code class="constant">MALLOCX_TCACHE(<em class="parameter"><code>tc</code></em>)</code></a>
macro to explicitly use the specified cache rather than the
automatically managed one that is used by default. Each explicit cache
can be used by only one thread at a time; the application must assure
that this constraint holds.
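A minimal sketch that creates an explicit tcache, allocates through it,
and finally destroys it (the function name is illustrative only):
</p><pre class="programlisting">
#include &lt;jemalloc/jemalloc.h&gt;

void
explicit_tcache_example(void)
{
    unsigned tc;
    size_t sz = sizeof(tc);
    void *p;

    /* Create an explicit tcache; its identifier is returned in tc. */
    if (mallctl("tcache.create", &amp;tc, &amp;sz, NULL, 0) != 0)
        return;

    /* Route allocation and deallocation through the explicit tcache. */
    p = mallocx(64, MALLOCX_TCACHE(tc));
    if (p != NULL)
        dallocx(p, MALLOCX_TCACHE(tc));

    /* Release the cache and make the identifier available for reuse. */
    mallctl("tcache.destroy", NULL, NULL, &amp;tc, sizeof(tc));
}</pre><p>
The <a class="link" href="#tcache.flush">
"<code class="mallctl">tcache.flush</code>"
</a>
and <a class="link" href="#tcache.destroy">
"<code class="mallctl">tcache.destroy</code>"
</a>
mallctls below likewise take the identifier via
<em class="parameter"><code>newp</code></em>.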
</p></dd><dt><a name="tcache.flush"></a><span class="term">
"<code class="mallctl">tcache.flush</code>"
(<span class="type">unsigned</span>)
<code class="literal">-w</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Flush the specified thread-specific cache (tcache). The
same considerations apply to this interface as to <a class="link" href="#thread.tcache.flush">
"<code class="mallctl">thread.tcache.flush</code>"
</a>,
except that the tcache will never be automatically discarded.
</p></dd><dt><a name="tcache.destroy"></a><span class="term">
"<code class="mallctl">tcache.destroy</code>"
(<span class="type">unsigned</span>)
<code class="literal">-w</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Flush the specified thread-specific cache (tcache) and
make the identifier available for use during a future tcache creation.
</p></dd><dt><a name="arena.i.purge"></a><span class="term">
"<code class="mallctl">arena.&lt;i&gt;.purge</code>"
(<span class="type">void</span>)
<code class="literal">--</code>
</span></dt><dd><p>Purge unused dirty pages for arena &lt;i&gt;, or for
all arenas if &lt;i&gt; equals <a class="link" href="#arenas.narenas">
"<code class="mallctl">arenas.narenas</code>"
</a>.
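A minimal sketch that purges unused dirty pages in all arenas by
substituting the value of <a class="link" href="#arenas.narenas">
"<code class="mallctl">arenas.narenas</code>"
</a> for &lt;i&gt; (the function name is illustrative only):
</p><pre class="programlisting">
#include &lt;stdio.h&gt;
#include &lt;jemalloc/jemalloc.h&gt;

void
purge_all_arenas(void)
{
    unsigned narenas;
    size_t sz = sizeof(narenas);
    char name[32];

    if (mallctl("arenas.narenas", &amp;narenas, &amp;sz, NULL, 0) != 0)
        return;
    /* "arena.&lt;narenas&gt;.purge" addresses all arenas at once. */
    snprintf(name, sizeof(name), "arena.%u.purge", narenas);
    mallctl(name, NULL, NULL, NULL, 0);
}</pre><p>
If the same arena is purged repeatedly, the name lookup can be cached by
translating it once with
<code class="function">mallctlnametomib</code>(<em class="parameter"><code></code></em>)
and issuing subsequent calls via
<code class="function">mallctlbymib</code>(<em class="parameter"><code></code></em>).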
</p></dd><dt><a name="arena.i.dss"></a><span class="term">
"<code class="mallctl">arena.&lt;i&gt;.dss</code>"
(<span class="type">const char *</span>)
<code class="literal">rw</code>
</span></dt><dd><p>Set the precedence of dss allocation as related to mmap
allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
<a class="link" href="#arenas.narenas">
"<code class="mallctl">arenas.narenas</code>"
</a>. See
<a class="link" href="#opt.dss">
"<code class="mallctl">opt.dss</code>"
</a> for supported
settings.</p></dd><dt><a name="arena.i.lg_dirty_mult"></a><span class="term">
"<code class="mallctl">arena.&lt;i&gt;.lg_dirty_mult</code>"
(<span class="type">ssize_t</span>)
<code class="literal">rw</code>
</span></dt><dd><p>Current per-arena minimum ratio (log base 2) of active
to dirty pages for arena &lt;i&gt;. Each time this interface is set and
the ratio is increased, pages are synchronously purged as necessary to
impose the new ratio. See <a class="link" href="#opt.lg_dirty_mult">
"<code class="mallctl">opt.lg_dirty_mult</code>"
</a>
for additional information.</p></dd><dt><a name="arena.i.chunk_hooks"></a><span class="term">
"<code class="mallctl">arena.&lt;i&gt;.chunk_hooks</code>"
(<span class="type">chunk_hooks_t</span>)
<code class="literal">rw</code>
</span></dt><dd><p>Get or set the chunk management hook functions for arena
&lt;i&gt;. The functions must be capable of operating on all extant
chunks associated with arena &lt;i&gt;, usually by passing unknown
chunks to the replaced functions. In practice, it is feasible to
control allocation for arenas created via <a class="link" href="#arenas.extend">
"<code class="mallctl">arenas.extend</code>"
</a> such
that all chunks originate from an application-supplied chunk allocator
(by setting custom chunk hook functions just after arena creation), but
the automatically created arenas may have already created chunks prior
to the application having an opportunity to take over chunk
allocation.</p><pre class="programlisting">
typedef struct {
chunk_alloc_t *alloc;
chunk_dalloc_t *dalloc;
chunk_commit_t *commit;
chunk_decommit_t *decommit;
chunk_purge_t *purge;
chunk_split_t *split;
chunk_merge_t *merge;
} chunk_hooks_t;</pre><p>The <span class="type">chunk_hooks_t</span> structure comprises function
pointers which are described individually below. jemalloc uses these
functions to manage chunk lifetime, which starts off with allocation of
mapped committed memory, in the simplest case followed by deallocation.
However, there are performance and platform reasons to retain chunks for
later reuse. Cleanup attempts cascade from deallocation to decommit to
purging, which gives the chunk management functions opportunities to
reject the most permanent cleanup operations in favor of less permanent
(and often less costly) operations. The chunk splitting and merging
operations can also be opted out of, but this is mainly intended to
support platforms on which virtual memory mappings provided by the
operating system kernel do not automatically coalesce and split, e.g.
Windows.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef void *<b class="fsfunc">(chunk_alloc_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td></td><td>bool *<var class="pdparam">zero</var>, </td></tr><tr><td></td><td>bool *<var class="pdparam">commit</var>, </td></tr><tr><td></td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div></div><div class="literallayout"><p></p></div><p>A chunk allocation function conforms to the
<span class="type">chunk_alloc_t</span> type and upon success returns a pointer to
<em class="parameter"><code>size</code></em> bytes of mapped memory on behalf of arena
<em class="parameter"><code>arena_ind</code></em> such that the chunk's base address is a
multiple of <em class="parameter"><code>alignment</code></em>, as well as setting
<em class="parameter"><code>*zero</code></em> to indicate whether the chunk is zeroed and
<em class="parameter"><code>*commit</code></em> to indicate whether the chunk is
committed. Upon error the function returns <code class="constant">NULL</code>
and leaves <em class="parameter"><code>*zero</code></em> and
<em class="parameter"><code>*commit</code></em> unmodified. The
<em class="parameter"><code>size</code></em> parameter is always a multiple of the chunk
size. The <em class="parameter"><code>alignment</code></em> parameter is always a power
of two at least as large as the chunk size. Zeroing is mandatory if
<em class="parameter"><code>*zero</code></em> is true upon function entry. Committing is
mandatory if <em class="parameter"><code>*commit</code></em> is true upon function entry.
If <em class="parameter"><code>chunk</code></em> is not <code class="constant">NULL</code>, the
returned pointer must be <em class="parameter"><code>chunk</code></em> on success or
<code class="constant">NULL</code> on error. Committed memory may be committed
in absolute terms as on a system that does not overcommit, or in
implicit terms as on a system that overcommits and satisfies physical
memory needs on demand via soft page faults. Note that replacing the
default chunk allocation function makes the arena's <a class="link" href="#arena.i.dss">
"<code class="mallctl">arena.&lt;i&gt;.dss</code>"
</a>
setting irrelevant.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_dalloc_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td></td><td>bool <var class="pdparam">committed</var>, </td></tr><tr><td></td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div></div><div class="literallayout"><p></p></div><p>
A chunk deallocation function conforms to the
<span class="type">chunk_dalloc_t</span> type and deallocates a
<em class="parameter"><code>chunk</code></em> of given <em class="parameter"><code>size</code></em> with
<em class="parameter"><code>committed</code></em>/decommited memory as indicated, on
behalf of arena <em class="parameter"><code>arena_ind</code></em>, returning false upon
success. If the function returns true, this indicates opt-out from
deallocation; the virtual memory mapping associated with the chunk
remains mapped, in the same commit state, and available for future use,
in which case it will be automatically retained for later reuse.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_commit_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">offset</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">length</var>, </td></tr><tr><td></td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div></div><div class="literallayout"><p></p></div><p>A chunk commit function conforms to the
<span class="type">chunk_commit_t</span> type and commits zeroed physical memory to
back pages within a <em class="parameter"><code>chunk</code></em> of given
<em class="parameter"><code>size</code></em> at <em class="parameter"><code>offset</code></em> bytes,
extending for <em class="parameter"><code>length</code></em> on behalf of arena
<em class="parameter"><code>arena_ind</code></em>, returning false upon success.
Committed memory may be committed in absolute terms as on a system that
does not overcommit, or in implicit terms as on a system that
overcommits and satisfies physical memory needs on demand via soft page
faults. If the function returns true, this indicates insufficient
physical memory to satisfy the request.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_decommit_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">offset</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">length</var>, </td></tr><tr><td></td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div></div><div class="literallayout"><p></p></div><p>A chunk decommit function conforms to the
<span class="type">chunk_decommit_t</span> type and decommits any physical memory
that is backing pages within a <em class="parameter"><code>chunk</code></em> of given
<em class="parameter"><code>size</code></em> at <em class="parameter"><code>offset</code></em> bytes,
extending for <em class="parameter"><code>length</code></em> on behalf of arena
<em class="parameter"><code>arena_ind</code></em>, returning false upon success, in which
case the pages will be committed via the chunk commit function before
being reused. If the function returns true, this indicates opt-out from
decommit; the memory remains committed and available for future use, in
which case it will be automatically retained for later reuse.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_purge_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">offset</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">length</var>, </td></tr><tr><td></td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div></div><div class="literallayout"><p></p></div><p>A chunk purge function conforms to the <span class="type">chunk_purge_t</span>
type and optionally discards physical pages within the virtual memory
mapping associated with <em class="parameter"><code>chunk</code></em> of given
<em class="parameter"><code>size</code></em> at <em class="parameter"><code>offset</code></em> bytes,
extending for <em class="parameter"><code>length</code></em> on behalf of arena
<em class="parameter"><code>arena_ind</code></em>, returning false if pages within the
purged virtual memory range will be zero-filled the next time they are
accessed.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_split_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size_a</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size_b</var>, </td></tr><tr><td></td><td>bool <var class="pdparam">committed</var>, </td></tr><tr><td></td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div></div><div class="literallayout"><p></p></div><p>A chunk split function conforms to the <span class="type">chunk_split_t</span>
type and optionally splits <em class="parameter"><code>chunk</code></em> of given
<em class="parameter"><code>size</code></em> into two adjacent chunks, the first of
<em class="parameter"><code>size_a</code></em> bytes, and the second of
<em class="parameter"><code>size_b</code></em> bytes, operating on
<em class="parameter"><code>committed</code></em>/decommitted memory as indicated, on
behalf of arena <em class="parameter"><code>arena_ind</code></em>, returning false upon
success. If the function returns true, this indicates that the chunk
remains unsplit and therefore should continue to be operated on as a
whole.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_merge_t)</b>(</code></td><td>void *<var class="pdparam">chunk_a</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size_a</var>, </td></tr><tr><td></td><td>void *<var class="pdparam">chunk_b</var>, </td></tr><tr><td></td><td>size_t <var class="pdparam">size_b</var>, </td></tr><tr><td></td><td>bool <var class="pdparam">committed</var>, </td></tr><tr><td></td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"></div></div><div class="literallayout"><p></p></div><p>A chunk merge function conforms to the <span class="type">chunk_merge_t</span>
type and optionally merges adjacent chunks,
<em class="parameter"><code>chunk_a</code></em> of given <em class="parameter"><code>size_a</code></em>
and <em class="parameter"><code>chunk_b</code></em> of given
<em class="parameter"><code>size_b</code></em> into one contiguous chunk, operating on
<em class="parameter"><code>committed</code></em>/decommitted memory as indicated, on
behalf of arena <em class="parameter"><code>arena_ind</code></em>, returning false upon
success. If the function returns true, this indicates that the chunks
remain distinct mappings and therefore should continue to be operated on
independently.</p>
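<p>As a minimal sketch (the wrapper and function names are illustrative
only), an application could interpose on chunk allocation for an arena by
reading the current <span class="type">chunk_hooks_t</span>, substituting
one member, and writing the modified structure back:
</p><pre class="programlisting">
#include &lt;stdio.h&gt;
#include &lt;stdbool.h&gt;
#include &lt;jemalloc/jemalloc.h&gt;

static chunk_hooks_t old_hooks;

/* Illustrative wrapper: perform application-specific accounting, then
 * defer to the previously installed allocation hook. */
static void *
wrapped_chunk_alloc(void *chunk, size_t size, size_t alignment,
    bool *zero, bool *commit, unsigned arena_ind)
{
    /* ... application-specific accounting ... */
    return (old_hooks.alloc(chunk, size, alignment, zero, commit,
        arena_ind));
}

void
install_chunk_hooks(unsigned arena_ind)
{
    char name[64];
    chunk_hooks_t new_hooks;
    size_t sz = sizeof(old_hooks);

    snprintf(name, sizeof(name), "arena.%u.chunk_hooks", arena_ind);
    if (mallctl(name, &amp;old_hooks, &amp;sz, NULL, 0) != 0)
        return;
    new_hooks = old_hooks;
    new_hooks.alloc = wrapped_chunk_alloc;
    mallctl(name, NULL, NULL, &amp;new_hooks, sizeof(new_hooks));
}</pre><p>
As noted above, replacement hooks must still be prepared to handle chunks
that the arena created before they were installed, usually by passing
such chunks to the replaced functions.
</p></dd><dt><a name="arenas.narenas"></a><span class="term">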
"<code class="mallctl">arenas.narenas</code>"
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Current limit on number of arenas.</p></dd><dt><a name="arenas.initialized"></a><span class="term">
"<code class="mallctl">arenas.initialized</code>"
(<span class="type">bool *</span>)
<code class="literal">r-</code>
</span></dt><dd><p>An array of <a class="link" href="#arenas.narenas">
"<code class="mallctl">arenas.narenas</code>"
</a>
booleans. Each boolean indicates whether the corresponding arena is
initialized.</p></dd><dt><a name="arenas.lg_dirty_mult"></a><span class="term">
"<code class="mallctl">arenas.lg_dirty_mult</code>"
(<span class="type">ssize_t</span>)
<code class="literal">rw</code>
</span></dt><dd><p>Current default per-arena minimum ratio (log base 2) of
active to dirty pages, used to initialize <a class="link" href="#arena.i.lg_dirty_mult">
"<code class="mallctl">arena.&lt;i&gt;.lg_dirty_mult</code>"
</a>
during arena creation. See <a class="link" href="#opt.lg_dirty_mult">
"<code class="mallctl">opt.lg_dirty_mult</code>"
</a>
for additional information.</p></dd><dt><a name="arenas.quantum"></a><span class="term">
"<code class="mallctl">arenas.quantum</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Quantum size.</p></dd><dt><a name="arenas.page"></a><span class="term">
"<code class="mallctl">arenas.page</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Page size.</p></dd><dt><a name="arenas.tcache_max"></a><span class="term">
"<code class="mallctl">arenas.tcache_max</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Maximum thread-cached size class.</p></dd><dt><a name="arenas.nbins"></a><span class="term">
"<code class="mallctl">arenas.nbins</code>"
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Number of bin size classes.</p></dd><dt><a name="arenas.nhbins"></a><span class="term">
"<code class="mallctl">arenas.nhbins</code>"
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Total number of thread cache bin size
classes.</p></dd><dt><a name="arenas.bin.i.size"></a><span class="term">
"<code class="mallctl">arenas.bin.&lt;i&gt;.size</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Maximum size supported by size class.</p></dd><dt><a name="arenas.bin.i.nregs"></a><span class="term">
"<code class="mallctl">arenas.bin.&lt;i&gt;.nregs</code>"
(<span class="type">uint32_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Number of regions per page run.</p></dd><dt><a name="arenas.bin.i.run_size"></a><span class="term">
"<code class="mallctl">arenas.bin.&lt;i&gt;.run_size</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Number of bytes per page run.</p></dd><dt><a name="arenas.nlruns"></a><span class="term">
"<code class="mallctl">arenas.nlruns</code>"
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Total number of large size classes.</p></dd><dt><a name="arenas.lrun.i.size"></a><span class="term">
"<code class="mallctl">arenas.lrun.&lt;i&gt;.size</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Maximum size supported by this large size
class.</p></dd><dt><a name="arenas.nhchunks"></a><span class="term">
"<code class="mallctl">arenas.nhchunks</code>"
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Total number of huge size classes.</p></dd><dt><a name="arenas.hchunk.i.size"></a><span class="term">
"<code class="mallctl">arenas.hchunk.&lt;i&gt;.size</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Maximum size supported by this huge size
class.</p></dd><dt><a name="arenas.extend"></a><span class="term">
"<code class="mallctl">arenas.extend</code>"
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Extend the array of arenas by appending a new arena,
and returning the new arena index.</p></dd><dt><a name="prof.thread_active_init"></a><span class="term">
"<code class="mallctl">prof.thread_active_init</code>"
(<span class="type">bool</span>)
<code class="literal">rw</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Control the initial setting for <a class="link" href="#thread.prof.active">
"<code class="mallctl">thread.prof.active</code>"
</a>
in newly created threads. See the <a class="link" href="#opt.prof_thread_active_init">
"<code class="mallctl">opt.prof_thread_active_init</code>"
</a>
option for additional information.</p></dd><dt><a name="prof.active"></a><span class="term">
"<code class="mallctl">prof.active</code>"
(<span class="type">bool</span>)
<code class="literal">rw</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Control whether sampling is currently active. See the
<a class="link" href="#opt.prof_active">
"<code class="mallctl">opt.prof_active</code>"
</a>
option for additional information, as well as the interrelated <a class="link" href="#thread.prof.active">
"<code class="mallctl">thread.prof.active</code>"
</a>
mallctl.</p></dd><dt><a name="prof.dump"></a><span class="term">
"<code class="mallctl">prof.dump</code>"
(<span class="type">const char *</span>)
<code class="literal">-w</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Dump a memory profile to the specified file, or if NULL
is specified, to a file according to the pattern
<code class="filename">&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.m&lt;mseq&gt;.heap</code>,
where <code class="literal">&lt;prefix&gt;</code> is controlled by the
<a class="link" href="#opt.prof_prefix">
"<code class="mallctl">opt.prof_prefix</code>"
</a>
option.</p></dd><dt><a name="prof.gdump"></a><span class="term">
"<code class="mallctl">prof.gdump</code>"
(<span class="type">bool</span>)
<code class="literal">rw</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>When enabled, trigger a memory profile dump every time
the total virtual memory exceeds the previous maximum. Profiles are
dumped to files named according to the pattern
<code class="filename">&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</code>,
where <code class="literal">&lt;prefix&gt;</code> is controlled by the <a class="link" href="#opt.prof_prefix">
"<code class="mallctl">opt.prof_prefix</code>"
</a>
option.</p></dd><dt><a name="prof.reset"></a><span class="term">
"<code class="mallctl">prof.reset</code>"
(<span class="type">size_t</span>)
<code class="literal">-w</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Reset all memory profile statistics, and optionally
update the sample rate (see <a class="link" href="#opt.lg_prof_sample">
"<code class="mallctl">opt.lg_prof_sample</code>"
</a>
and <a class="link" href="#prof.lg_sample">
"<code class="mallctl">prof.lg_sample</code>"
</a>).
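A minimal sketch that resets the profiling statistics and switches to a
1 MiB average sample interval (assuming <code class="option">--enable-prof</code>):
</p><pre class="programlisting">
#include &lt;jemalloc/jemalloc.h&gt;

void
reset_profiling(void)
{
    size_t lg_sample = 20; /* 2^20 bytes of allocation activity. */

    mallctl("prof.reset", NULL, NULL, &amp;lg_sample, sizeof(lg_sample));
}</pre><p>
The updated rate can be read back via the <a class="link" href="#prof.lg_sample">
"<code class="mallctl">prof.lg_sample</code>"
</a> mallctl below.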
</p></dd><dt><a name="prof.lg_sample"></a><span class="term">
"<code class="mallctl">prof.lg_sample</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Get the current sample rate (see <a class="link" href="#opt.lg_prof_sample">
"<code class="mallctl">opt.lg_prof_sample</code>"
</a>).
</p></dd><dt><a name="prof.interval"></a><span class="term">
"<code class="mallctl">prof.interval</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Average number of bytes allocated between
interval-based profile dumps. See the
<a class="link" href="#opt.lg_prof_interval">
"<code class="mallctl">opt.lg_prof_interval</code>"
</a>
option for additional information.</p></dd><dt><a name="stats.cactive"></a><span class="term">
"<code class="mallctl">stats.cactive</code>"
(<span class="type">size_t *</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Pointer to a counter that contains an approximate count
of the current number of bytes in active pages. The estimate may be
high, but never low, because each arena rounds up when computing its
contribution to the counter. Note that the <a class="link" href="#epoch">
"<code class="mallctl">epoch</code>"
</a> mallctl has no bearing
on this counter. Furthermore, counter consistency is maintained via
atomic operations, so it is necessary to use an atomic operation in
order to guarantee a consistent read when dereferencing the pointer.
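A minimal sketch that obtains the pointer once and samples it later
(assuming <code class="option">--enable-stats</code>; the variable names are
illustrative):
</p><pre class="programlisting">
#include &lt;jemalloc/jemalloc.h&gt;

void
sample_cactive(void)
{
    size_t *cactive;
    size_t sz = sizeof(cactive);

    if (mallctl("stats.cactive", &amp;cactive, &amp;sz, NULL, 0) == 0) {
        /* Use an atomic load here if a fully consistent value is
         * required; a plain read yields an approximation. */
        size_t approx_active_bytes = *cactive;
        (void)approx_active_bytes;
    }
}</pre><p>
Because a pointer is returned, the lookup typically need not be repeated;
the target counter is updated by the allocator as usage changes.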
</p></dd><dt><a name="stats.allocated"></a><span class="term">
"<code class="mallctl">stats.allocated</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Total number of bytes allocated by the
application.</p></dd><dt><a name="stats.active"></a><span class="term">
"<code class="mallctl">stats.active</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Total number of bytes in active pages allocated by the
application. This is a multiple of the page size, and greater than or
equal to <a class="link" href="#stats.allocated">
"<code class="mallctl">stats.allocated</code>"
</a>.
This does not include <a class="link" href="#stats.arenas.i.pdirty">
"<code class="mallctl">stats.arenas.&lt;i&gt;.pdirty</code>"
</a>, nor pages
entirely devoted to allocator metadata.</p></dd><dt><a name="stats.metadata"></a><span class="term">
"<code class="mallctl">stats.metadata</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Total number of bytes dedicated to metadata, which
comprise base allocations used for bootstrap-sensitive internal
allocator data structures, arena chunk headers (see <a class="link" href="#stats.arenas.i.metadata.mapped">
"<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.mapped</code>"
</a>),
and internal allocations (see <a class="link" href="#stats.arenas.i.metadata.allocated">
"<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.allocated</code>"
</a>).</p></dd><dt><a name="stats.resident"></a><span class="term">
"<code class="mallctl">stats.resident</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Maximum number of bytes in physically resident data
pages mapped by the allocator, comprising all pages dedicated to
allocator metadata, pages backing active allocations, and unused dirty
pages. This is a maximum rather than a precise value because pages may not
actually be physically resident if they correspond to demand-zeroed
virtual memory that has not yet been touched. This is a multiple of the
page size, and is larger than <a class="link" href="#stats.active">
"<code class="mallctl">stats.active</code>"
</a>.</p></dd><dt><a name="stats.mapped"></a><span class="term">
"<code class="mallctl">stats.mapped</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Total number of bytes in active chunks mapped by the
allocator. This is a multiple of the chunk size, and is larger than
<a class="link" href="#stats.active">
"<code class="mallctl">stats.active</code>"
</a>.
This does not include inactive chunks, even those that contain unused
dirty pages, which means that there is no strict ordering between this
and <a class="link" href="#stats.resident">
"<code class="mallctl">stats.resident</code>"
</a>.</p></dd><dt><a name="stats.arenas.i.dss"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.dss</code>"
(<span class="type">const char *</span>)
<code class="literal">r-</code>
</span></dt><dd><p>dss (<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>) allocation precedence as
related to <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> allocation. See <a class="link" href="#opt.dss">
"<code class="mallctl">opt.dss</code>"
</a> for details.
</p></dd><dt><a name="stats.arenas.i.lg_dirty_mult"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.lg_dirty_mult</code>"
(<span class="type">ssize_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Minimum ratio (log base 2) of active to dirty pages.
See <a class="link" href="#opt.lg_dirty_mult">
"<code class="mallctl">opt.lg_dirty_mult</code>"
</a>
for details.</p></dd><dt><a name="stats.arenas.i.nthreads"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.nthreads</code>"
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Number of threads currently assigned to
arena.</p></dd><dt><a name="stats.arenas.i.pactive"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.pactive</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Number of pages in active runs.</p></dd><dt><a name="stats.arenas.i.pdirty"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.pdirty</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Number of pages within unused runs that are potentially
dirty, and for which <code class="function">madvise</code>(<em class="parameter"><code>...</code></em>,
<em class="parameter"><code><code class="constant">MADV_DONTNEED</code></code></em>) or
similar has not been called.</p></dd><dt><a name="stats.arenas.i.mapped"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.mapped</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of mapped bytes.</p></dd><dt><a name="stats.arenas.i.metadata.mapped"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.mapped</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of mapped bytes in arena chunk headers, which
track the states of the non-metadata pages.</p></dd><dt><a name="stats.arenas.i.metadata.allocated"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.allocated</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of bytes dedicated to internal allocations.
Internal allocations differ from application-originated allocations in
that they are for internal use, and that they are omitted from heap
profiles. This statistic is reported separately from <a class="link" href="#stats.metadata">
"<code class="mallctl">stats.metadata</code>"
</a> and
<a class="link" href="#stats.arenas.i.metadata.mapped">
"<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.mapped</code>"
</a>
because it overlaps with e.g. the <a class="link" href="#stats.allocated">
"<code class="mallctl">stats.allocated</code>"
</a> and
<a class="link" href="#stats.active">
"<code class="mallctl">stats.active</code>"
</a>
statistics, whereas the other metadata statistics do
not.</p></dd><dt><a name="stats.arenas.i.npurge"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.npurge</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of dirty page purge sweeps performed.
</p></dd><dt><a name="stats.arenas.i.nmadvise"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.nmadvise</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of <code class="function">madvise</code>(<em class="parameter"><code>...</code></em>,
<em class="parameter"><code><code class="constant">MADV_DONTNEED</code></code></em>) or
similar calls made to purge dirty pages.</p></dd><dt><a name="stats.arenas.i.purged"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.purged</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of pages purged.</p></dd><dt><a name="stats.arenas.i.small.allocated"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.small.allocated</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of bytes currently allocated by small objects.
</p></dd><dt><a name="stats.arenas.i.small.nmalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.small.nmalloc</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation requests served by
small bins.</p></dd><dt><a name="stats.arenas.i.small.ndalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.small.ndalloc</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of small objects returned to bins.
</p></dd><dt><a name="stats.arenas.i.small.nrequests"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.small.nrequests</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of small allocation requests.
</p></dd><dt><a name="stats.arenas.i.large.allocated"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.large.allocated</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of bytes currently allocated by large objects.
</p></dd><dt><a name="stats.arenas.i.large.nmalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.large.nmalloc</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of large allocation requests served
directly by the arena.</p></dd><dt><a name="stats.arenas.i.large.ndalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.large.ndalloc</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of large deallocation requests served
directly by the arena.</p></dd><dt><a name="stats.arenas.i.large.nrequests"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.large.nrequests</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of large allocation requests.
</p></dd><dt><a name="stats.arenas.i.huge.allocated"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.huge.allocated</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of bytes currently allocated by huge objects.
</p></dd><dt><a name="stats.arenas.i.huge.nmalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.huge.nmalloc</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of huge allocation requests served
directly by the arena.</p></dd><dt><a name="stats.arenas.i.huge.ndalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.huge.ndalloc</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of huge deallocation requests served
directly by the arena.</p></dd><dt><a name="stats.arenas.i.huge.nrequests"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.huge.nrequests</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of huge allocation requests.
</p></dd><dt><a name="stats.arenas.i.bins.j.nmalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocations served by bin.
</p></dd><dt><a name="stats.arenas.i.bins.j.ndalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.ndalloc</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocations returned to bin.
</p></dd><dt><a name="stats.arenas.i.bins.j.nrequests"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nrequests</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation
requests.</p></dd><dt><a name="stats.arenas.i.bins.j.curregs"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curregs</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Current number of regions for this size
class.</p></dd><dt><a name="stats.arenas.i.bins.j.nfills"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code> <code class="option">--enable-tcache</code>]
</span></dt><dd><p>Cumulative number of tcache fills.</p></dd><dt><a name="stats.arenas.i.bins.j.nflushes"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nflushes</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code> <code class="option">--enable-tcache</code>]
</span></dt><dd><p>Cumulative number of tcache flushes.</p></dd><dt><a name="stats.arenas.i.bins.j.nruns"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nruns</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of runs created.</p></dd><dt><a name="stats.arenas.i.bins.j.nreruns"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nreruns</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of times the current run from which
to allocate changed.</p></dd><dt><a name="stats.arenas.i.bins.j.curruns"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curruns</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Current number of runs.</p></dd><dt><a name="stats.arenas.i.lruns.j.nmalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nmalloc</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation requests for this size
class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.lruns.j.ndalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.ndalloc</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of deallocation requests for this
size class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.lruns.j.nrequests"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nrequests</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation requests for this size
class.</p></dd><dt><a name="stats.arenas.i.lruns.j.curruns"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.curruns</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Current number of runs for this size class.
</p></dd><dt><a name="stats.arenas.i.hchunks.j.nmalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nmalloc</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation requests for this size
class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.hchunks.j.ndalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.ndalloc</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of deallocation requests for this
size class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.hchunks.j.nrequests"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nrequests</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation requests for this size
class.</p></dd><dt><a name="stats.arenas.i.hchunks.j.curhchunks"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.curhchunks</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Current number of huge allocations for this size class.
</p></dd></dl></div></div><div class="refsect1"><a name="debugging_malloc_problems"></a><h2>DEBUGGING MALLOC PROBLEMS</h2><p>When debugging, it is a good idea to configure/build jemalloc with
the <code class="option">--enable-debug</code> and <code class="option">--enable-fill</code>
options, and recompile the program with suitable options and symbols for
debugger support. When so configured, jemalloc incorporates a wide variety
of run-time assertions that catch application errors such as double-free,
write-after-free, etc.</p><p>Programs often accidentally depend on &#8220;uninitialized&#8221;
memory actually being filled with zero bytes. Junk filling
(see the <a class="link" href="#opt.junk">
"<code class="mallctl">opt.junk</code>"
</a>
option) tends to expose such bugs in the form of obviously incorrect
results and/or coredumps. Conversely, zero
filling (see the <a class="link" href="#opt.zero">
"<code class="mallctl">opt.zero</code>"
</a> option) eliminates
the symptoms of such bugs. Between these two options, it is usually
possible to quickly detect, diagnose, and eliminate such bugs.</p><p>This implementation does not provide much detail about the problems
it detects, because the performance impact for storing such information
would be prohibitive. However, jemalloc does integrate with the most
excellent <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a> tool if the
<code class="option">--enable-valgrind</code> configuration option is enabled.</p></div><div class="refsect1"><a name="diagnostic_messages"></a><h2>DIAGNOSTIC MESSAGES</h2><p>If any of the memory allocation/deallocation functions detect an
error or warning condition, a message will be printed to file descriptor
<code class="constant">STDERR_FILENO</code>. Errors will result in the process
dumping core. If the <a class="link" href="#opt.abort">
"<code class="mallctl">opt.abort</code>"
</a> option is set, most
warnings are treated as errors.</p><p>The <code class="varname">malloc_message</code> variable allows the programmer
to override the function which emits the text strings forming the errors
and warnings if for some reason the <code class="constant">STDERR_FILENO</code> file
descriptor is not suitable for this.
<code class="function">malloc_message</code>(<em class="parameter"><code></code></em>) takes the
<em class="parameter"><code>cbopaque</code></em> pointer argument that is
<code class="constant">NULL</code> unless overridden by the arguments in a call to
<code class="function">malloc_stats_print</code>(<em class="parameter"><code></code></em>), followed by a string
pointer. Please note that doing anything which tries to allocate memory in
this function is likely to result in a crash or deadlock.</p><p>All messages are prefixed by
&#8220;<code class="computeroutput">&lt;jemalloc&gt;: </code>&#8221;.</p></div><div class="refsect1"><a name="return_values"></a><h2>RETURN VALUES</h2><div class="refsect2"><a name="idp46949776"></a><h3>Standard API</h3><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>) and
<code class="function">calloc</code>(<em class="parameter"><code></code></em>) functions return a pointer to the
allocated memory if successful; otherwise a <code class="constant">NULL</code>
pointer is returned and <code class="varname">errno</code> is set to
<span class="errorname">ENOMEM</span>.</p><p>The <code class="function">posix_memalign</code>(<em class="parameter"><code></code></em>) function
returns the value 0 if successful; otherwise it returns an error value.
The <code class="function">posix_memalign</code>(<em class="parameter"><code></code></em>) function will fail
if:
</p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p>The <em class="parameter"><code>alignment</code></em> parameter is
not a power of 2 at least as large as
<code class="code">sizeof(<span class="type">void *</span>)</code>.
</p></dd><dt><span class="term"><span class="errorname">ENOMEM</span></span></dt><dd><p>Memory allocation error.</p></dd></dl></div><p>
</p><p>The <code class="function">aligned_alloc</code>(<em class="parameter"><code></code></em>) function returns
a pointer to the allocated memory if successful; otherwise a
<code class="constant">NULL</code> pointer is returned and
<code class="varname">errno</code> is set. The
<code class="function">aligned_alloc</code>(<em class="parameter"><code></code></em>) function will fail if:
</p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p>The <em class="parameter"><code>alignment</code></em> parameter is
not a power of 2.
</p></dd><dt><span class="term"><span class="errorname">ENOMEM</span></span></dt><dd><p>Memory allocation error.</p></dd></dl></div><p>
</p><p>The <code class="function">realloc</code>(<em class="parameter"><code></code></em>) function returns a
pointer, possibly identical to <em class="parameter"><code>ptr</code></em>, to the
allocated memory if successful; otherwise a <code class="constant">NULL</code>
pointer is returned, and <code class="varname">errno</code> is set to
<span class="errorname">ENOMEM</span> if the error was the result of an
allocation failure. The <code class="function">realloc</code>(<em class="parameter"><code></code></em>)
function always leaves the original buffer intact when an error occurs.
</p><p>The <code class="function">free</code>(<em class="parameter"><code></code></em>) function returns no
value.</p></div><div class="refsect2"><a name="idp46974576"></a><h3>Non-standard API</h3><p>The <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) and
<code class="function">rallocx</code>(<em class="parameter"><code></code></em>) functions return a pointer to
the allocated memory if successful; otherwise a <code class="constant">NULL</code>
pointer is returned to indicate insufficient contiguous memory was
available to service the allocation request. </p><p>The <code class="function">xallocx</code>(<em class="parameter"><code></code></em>) function returns the
real size of the resulting resized allocation pointed to by
<em class="parameter"><code>ptr</code></em>, which is a value less than
<em class="parameter"><code>size</code></em> if the allocation could not be adequately
grown in place. </p><p>The <code class="function">sallocx</code>(<em class="parameter"><code></code></em>) function returns the
real size of the allocation pointed to by <em class="parameter"><code>ptr</code></em>.
</p><p>The <code class="function">nallocx</code>(<em class="parameter"><code></code></em>) returns the real size
that would result from a successful equivalent
<code class="function">mallocx</code>(<em class="parameter"><code></code></em>) function call, or zero if
insufficient memory is available to perform the size computation. </p><p>The <code class="function">mallctl</code>(<em class="parameter"><code></code></em>),
<code class="function">mallctlnametomib</code>(<em class="parameter"><code></code></em>), and
<code class="function">mallctlbymib</code>(<em class="parameter"><code></code></em>) functions return 0 on
success; otherwise they return an error value. The functions will fail
if:
</p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p><em class="parameter"><code>newp</code></em> is not
<code class="constant">NULL</code>, and <em class="parameter"><code>newlen</code></em> is too
large or too small. Alternatively, <em class="parameter"><code>*oldlenp</code></em>
is too large or too small; in this case as much data as possible
are read despite the error.</p></dd><dt><span class="term"><span class="errorname">ENOENT</span></span></dt><dd><p><em class="parameter"><code>name</code></em> or
<em class="parameter"><code>mib</code></em> specifies an unknown/invalid
value.</p></dd><dt><span class="term"><span class="errorname">EPERM</span></span></dt><dd><p>Attempt to read or write void value, or attempt to
write read-only value.</p></dd><dt><span class="term"><span class="errorname">EAGAIN</span></span></dt><dd><p>A memory allocation failure
occurred.</p></dd><dt><span class="term"><span class="errorname">EFAULT</span></span></dt><dd><p>An interface with side effects failed in some way
not directly related to <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>)
read/write processing.</p></dd></dl></div><p>
</p><p>The <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function
returns the usable size of the allocation pointed to by
<em class="parameter"><code>ptr</code></em>. </p></div></div><div class="refsect1"><a name="environment"></a><h2>ENVIRONMENT</h2><p>The following environment variable affects the execution of the
allocation functions:
</p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><code class="envar">MALLOC_CONF</code></span></dt><dd><p>If the environment variable
<code class="envar">MALLOC_CONF</code> is set, the characters it contains
will be interpreted as options.</p></dd></dl></div><p>
</p></div><div class="refsect1"><a name="examples"></a><h2>EXAMPLES</h2><p>To dump core whenever a problem occurs:
</p><pre class="screen">ln -s 'abort:true' /etc/malloc.conf</pre><p>
</p><p>To specify in the source a chunk size that is 16 MiB:
</p><pre class="programlisting">
malloc_conf = "lg_chunk:24";</pre></div><div class="refsect1"><a name="see_also"></a><h2>SEE ALSO</h2><p><span class="citerefentry"><span class="refentrytitle">madvise</span>(2)</span>,
<span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span>,
<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>,
<span class="citerefentry"><span class="refentrytitle">utrace</span>(2)</span>,
<span class="citerefentry"><span class="refentrytitle">alloca</span>(3)</span>,
<span class="citerefentry"><span class="refentrytitle">atexit</span>(3)</span>,
<span class="citerefentry"><span class="refentrytitle">getpagesize</span>(3)</span></p></div><div class="refsect1"><a name="standards"></a><h2>STANDARDS</h2><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>),
<code class="function">calloc</code>(<em class="parameter"><code></code></em>),
<code class="function">realloc</code>(<em class="parameter"><code></code></em>), and
<code class="function">free</code>(<em class="parameter"><code></code></em>) functions conform to ISO/IEC
9899:1990 (&#8220;ISO C90&#8221;).</p><p>The <code class="function">posix_memalign</code>(<em class="parameter"><code></code></em>) function conforms
to IEEE Std 1003.1-2001 (&#8220;POSIX.1&#8221;).</p></div></div></body></html>
@@ -52,7 +52,7 @@
<title>LIBRARY</title>
<para>This manual describes jemalloc @jemalloc_version@. More information
can be found at the <ulink
url="http://jemalloc.net/">jemalloc website</ulink>.</para>
</refsect1>
<refsynopsisdiv>
<title>SYNOPSIS</title>
@@ -180,20 +180,20 @@
<refsect2>
<title>Standard API</title>
<para>The <function>malloc()</function> function allocates
<parameter>size</parameter> bytes of uninitialized memory. The allocated
space is suitably aligned (after possible pointer coercion) for storage
of any type of object.</para>
<para>The <function>calloc()</function> function allocates
space for <parameter>number</parameter> objects, each
<parameter>size</parameter> bytes in length. The result is identical to
calling <function>malloc()</function> with an argument of
<parameter>number</parameter> * <parameter>size</parameter>, with the
exception that the allocated memory is explicitly initialized to zero
bytes.</para>
<para>The <function>posix_memalign()</function> function
allocates <parameter>size</parameter> bytes of memory such that the
allocation's base address is a multiple of
<parameter>alignment</parameter>, and returns the allocation in the value
@@ -201,7 +201,7 @@
<parameter>alignment</parameter> must be a power of 2 at least as large as
<code language="C">sizeof(<type>void *</type>)</code>.</para>
<para>The <function>aligned_alloc()</function> function
allocates <parameter>size</parameter> bytes of memory such that the
allocation's base address is a multiple of
<parameter>alignment</parameter>. The requested
@@ -209,7 +209,7 @@
undefined if <parameter>size</parameter> is not an integral multiple of
<parameter>alignment</parameter>.</para>
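<para>As an illustrative sketch only (not part of the interface
description above; the 64-byte alignment and 1024-byte size are arbitrary
example values), an aligned allocation might be obtained as follows:</para>
<programlisting language="C"><![CDATA[
#include <stdlib.h>

int
main(void) {
	void *p = NULL;
	/* alignment must be a power of 2 no smaller than sizeof(void *). */
	if (posix_memalign(&p, 64, 1024) == 0) {
		free(p);
	}

	/* aligned_alloc() requires size to be a multiple of alignment. */
	void *q = aligned_alloc(64, 1024);
	if (q != NULL) {
		free(q);
	}
	return 0;
}]]></programlisting>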
<para>The <function>realloc()</function> function changes the
size of the previously allocated memory referenced by
<parameter>ptr</parameter> to <parameter>size</parameter> bytes. The
contents of the memory are unchanged up to the lesser of the new and old
@@ -217,26 +217,26 @@
portion of the memory are undefined. Upon success, the memory referenced
by <parameter>ptr</parameter> is freed and a pointer to the newly
allocated memory is returned. Note that
<function>realloc()</function> may move the memory allocation,
resulting in a different return value than <parameter>ptr</parameter>.
If <parameter>ptr</parameter> is <constant>NULL</constant>, the
<function>realloc()</function> function behaves identically to
<function>malloc()</function> for the specified size.</para>
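<para>Because a failed <function>realloc()</function> leaves the original
buffer intact, callers commonly assign the result to a temporary pointer
first. The following sketch is illustrative only (the helper name is
hypothetical):</para>
<programlisting language="C"><![CDATA[
#include <stdlib.h>

/* Grow *bufp to new_size; on failure *bufp is left valid and unchanged. */
static int
grow_buffer(void **bufp, size_t new_size) {
	void *tmp = realloc(*bufp, new_size);
	if (tmp == NULL) {
		return -1;	/* Caller still owns and must free *bufp. */
	}
	*bufp = tmp;
	return 0;
}]]></programlisting>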
<para>The <function>free()</function> function causes the
allocated memory referenced by <parameter>ptr</parameter> to be made
available for future allocations. If <parameter>ptr</parameter> is
<constant>NULL</constant>, no action occurs.</para>
</refsect2>
<refsect2>
<title>Non-standard API</title>
<para>The <function>mallocx()</function>,
<function>rallocx()</function>,
<function>xallocx()</function>,
<function>sallocx()</function>,
<function>dallocx()</function>,
<function>sdallocx()</function>, and
<function>nallocx()</function> functions all have a
<parameter>flags</parameter> argument that can be used to specify
options. The functions only check the options that are contextually
relevant. Use bitwise or (<code language="C">|</code>) operations to
@@ -307,21 +307,19 @@
</variablelist>
</para>
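<para>For illustration only (the 4096-byte request, 64-byte alignment, and
helper name are arbitrary example choices), several flags can be combined
in a single request:</para>
<programlisting language="C"><![CDATA[
#include <jemalloc/jemalloc.h>

static void *
alloc_aligned_zeroed(void) {
	/* At least 4096 bytes, zero-filled and 64-byte aligned. */
	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
	return p;	/* NULL on failure; release with dallocx(p, 0). */
}]]></programlisting>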
<para>The <function>mallocx()</function> function allocates at
least <parameter>size</parameter> bytes of memory, and returns a pointer
to the base address of the allocation. Behavior is undefined if
<parameter>size</parameter> is <constant>0</constant>.</para>
<para>The <function>rallocx()</function> function resizes the
allocation at <parameter>ptr</parameter> to be at least
<parameter>size</parameter> bytes, and returns a pointer to the base
address of the resulting allocation, which may or may not have moved from
its original location. Behavior is undefined if
<parameter>size</parameter> is <constant>0</constant>.</para>
<para>The <function>xallocx()</function> function resizes the
allocation at <parameter>ptr</parameter> in place to be at least
<parameter>size</parameter> bytes, and returns the real size of the
allocation. If <parameter>extra</parameter> is non-zero, an attempt is
@@ -334,32 +332,32 @@
language="C">(<parameter>size</parameter> + <parameter>extra</parameter>
&gt; <constant>SIZE_T_MAX</constant>)</code>.</para>
<para>The <function>sallocx()</function> function returns the
real size of the allocation at <parameter>ptr</parameter>.</para>
<para>The <function>dallocx()</function> function causes the
memory referenced by <parameter>ptr</parameter> to be made available for
future allocations.</para>
<para>The <function>sdallocx()</function> function is an
extension of <function>dallocx()</function> with a
<parameter>size</parameter> parameter to allow the caller to pass in the
allocation size as an optimization. The minimum valid input size is the
original requested size of the allocation, and the maximum valid input
size is the corresponding value returned by
<function>nallocx()</function> or
<function>sallocx()</function>.</para>
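<para>The following sketch (with arbitrary example sizes and a hypothetical
helper name) shows one way these functions can be combined: the return
value of <function>xallocx()</function> indicates whether the in-place
resize reached the requested size, and <function>sdallocx()</function>
accepts the usable size reported by <function>sallocx()</function>:</para>
<programlisting language="C"><![CDATA[
#include <jemalloc/jemalloc.h>

static void
resize_then_free(void *p) {
	/* Try to grow the allocation in place to at least 8192 bytes. */
	if (xallocx(p, 8192, 0, 0) < 8192) {
		/* Could not grow in place; a caller might fall back to rallocx(). */
	}
	/* Pass the usable size back as a deallocation hint. */
	sdallocx(p, sallocx(p, 0), 0);
}]]></programlisting>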
<para>The <function>nallocx()</function> function allocates no
memory, but it performs the same size computation as the
<function>mallocx()</function> function, and returns the real
size of the allocation that would result from the equivalent
<function>mallocx()</function> function call, or
<constant>0</constant> if the inputs exceed the maximum supported size
class and/or alignment. Behavior is undefined if
<parameter>size</parameter> is <constant>0</constant>.</para>
<para>The <function>mallctl()</function> function provides a
general interface for introspecting the memory allocator, as well as
setting modifiable parameters and triggering actions. The
period-separated <parameter>name</parameter> argument specifies a
@@ -374,12 +372,12 @@
<parameter>newlen</parameter>; otherwise pass <constant>NULL</constant>
and <constant>0</constant>.</para>
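<para>As a sketch (assuming a build configured with
<option>--enable-stats</option>; the helper name is illustrative), a
cached statistic can be refreshed and then read as follows:</para>
<programlisting language="C"><![CDATA[
#include <stdint.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static size_t
read_allocated(void) {
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	/* Writing to "epoch" refreshes the data reported by the stats.* names. */
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	size_t allocated = 0;
	sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0) {
		return 0;	/* e.g. statistics disabled at build time */
	}
	return allocated;
}]]></programlisting>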
<para>The <function>mallctlnametomib()</function> function
provides a way to avoid repeated name lookups for applications that
repeatedly query the same portion of the namespace, by translating a name
to a <quote>Management Information Base</quote> (MIB) that can be passed
repeatedly to <function>mallctlbymib()</function>. Upon
successful return from <function>mallctlnametomib()</function>,
<parameter>mibp</parameter> contains an array of
<parameter>*miblenp</parameter> integers, where
<parameter>*miblenp</parameter> is the lesser of the number of components
@@ -408,43 +406,47 @@ for (i = 0; i < nbins; i++) {
mib[2] = i;
len = sizeof(bin_size);
mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0);
/* Do something with bin_size... */
}]]></programlisting></para>
<varlistentry id="malloc_stats_print_opts">
</varlistentry>
<para>The <function>malloc_stats_print()</function> function writes
summary statistics via the <parameter>write_cb</parameter> callback
function pointer and <parameter>cbopaque</parameter> data passed to
<parameter>write_cb</parameter>, or <function>malloc_message()</function>
if <parameter>write_cb</parameter> is <constant>NULL</constant>. The
statistics are presented in human-readable form unless <quote>J</quote> is
specified as a character within the <parameter>opts</parameter> string, in
which case the statistics are presented in <ulink
url="http://www.json.org/">JSON format</ulink>. This function can be
called repeatedly. General information that never changes during
execution can be omitted by specifying <quote>g</quote> as a character
within the <parameter>opts</parameter> string. Note that
<function>malloc_message()</function> uses the
<function>mallctl*()</function> functions internally, so inconsistent
statistics can be reported if multiple threads use these functions
simultaneously. If <option>--enable-stats</option> is specified during
configuration, <quote>m</quote>, <quote>d</quote>, and <quote>a</quote>
can be specified to omit merged arena, destroyed merged arena, and per
arena statistics, respectively; <quote>b</quote> and <quote>l</quote> can
be specified to omit per size class statistics for bins and large objects,
respectively; <quote>x</quote> can be specified to omit all mutex
statistics. Unrecognized characters are silently ignored. Note that
thread caching may prevent some statistics from being completely up to
date, since extra locking would be required to merge counters that track
thread cache operations.</para>
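<para>A minimal callback sketch follows; the function names are
illustrative, and the <quote>J</quote> and <quote>g</quote> characters in
<parameter>opts</parameter> are the options described above:</para>
<programlisting language="C"><![CDATA[
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
stats_write_cb(void *cbopaque, const char *s) {
	fputs(s, (FILE *)cbopaque);
}

static void
dump_stats(void) {
	/* Emit JSON-formatted statistics to stderr, omitting general info. */
	malloc_stats_print(stats_write_cb, (void *)stderr, "Jg");
}]]></programlisting>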
<para>The <function>malloc_usable_size()</function> function
returns the usable size of the allocation pointed to by
<parameter>ptr</parameter>. The return value may be larger than the size
that was requested during allocation. The
<function>malloc_usable_size()</function> function is not a
mechanism for in-place <function>realloc()</function>; rather
it is provided solely as a tool for introspection purposes. Any
discrepancy between the requested allocation size and the size reported
by <function>malloc_usable_size()</function> should not be
depended on, since such behavior is entirely implementation-dependent.
</para>
</refsect2>
@@ -455,19 +457,20 @@
routines, the allocator initializes its internals based in part on various
options that can be specified at compile- or run-time.</para>
<para>The string specified via <option>--with-malloc-conf</option>, the
string pointed to by the global variable <varname>malloc_conf</varname>, the
<quote>name</quote> of the file referenced by the symbolic link named
<filename class="symlink">/etc/malloc.conf</filename>, and the value of the
environment variable <envar>MALLOC_CONF</envar>, will be interpreted, in
that order, from left to right as options. Note that
<varname>malloc_conf</varname> may be read before
<function>main()</function> is entered, so the declaration of
<varname>malloc_conf</varname> should specify an initializer that contains
the final value to be read by jemalloc. <option>--with-malloc-conf</option>
and <varname>malloc_conf</varname> are compile-time mechanisms, whereas
<filename class="symlink">/etc/malloc.conf</filename> and
<envar>MALLOC_CONF</envar> can be safely set any time prior to program
invocation.</para>
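<para>For example (a sketch only; the option values shown are arbitrary),
an application can embed defaults by defining the global variable, while
the other mechanisms described above remain available at run time:</para>
<programlisting language="C"><![CDATA[
#include <jemalloc/jemalloc.h>

/* Read by jemalloc before main() is entered. */
const char *malloc_conf = "narenas:4,abort_conf:true";]]></programlisting>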
<para>An options string is a comma-separated list of option:value pairs.
There is one key corresponding to each <link
@@ -509,33 +512,21 @@
sense to reduce the number of arenas if an application does not make much
use of the allocation functions.</para>
<para>In addition to multiple arenas, this allocator supports
thread-specific caching, in order to make it possible to completely avoid
synchronization for most allocation requests. Such caching allows very fast
allocation in the common case, but it increases memory usage and
fragmentation, since a bounded number of objects can remain allocated in
each thread cache.</para>
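<para>Cached objects can also be returned to the arenas on demand; as an
illustrative sketch (the helper name is hypothetical), a thread may flush
its own cache through the mallctl interface:</para>
<programlisting language="C"><![CDATA[
#include <jemalloc/jemalloc.h>

static void
flush_thread_cache(void) {
	/* Release the calling thread's cached objects back to the arena. */
	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
}]]></programlisting>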
<para>Memory is conceptually broken into extents. Extents are always
aligned to multiples of the page size. This alignment makes it possible to
find metadata for user objects quickly. User objects are broken into two
categories according to size: small and large. Contiguous small objects
comprise a slab, which resides within a single extent, whereas large objects
each have their own extents backing them.</para>
<para>Small objects are managed in groups by slabs. Each slab maintains
a bitmap to track which regions are in use. Allocation requests that are no
more than half the quantum (8 or 16, depending on architecture) are rounded
up to the nearest power of two that is at least <code
@@ -543,11 +534,9 @@
classes are multiples of the quantum, spaced such that there are four size
classes for each doubling in size, which limits internal fragmentation to
approximately 20% for all but the smallest size classes. Small size classes
are smaller than four times the page size, and large size classes extend
from four times the page size up to the largest size class that does not
exceed <constant>PTRDIFF_MAX</constant>.</para>
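<para>The size class that a request maps to can be probed without
allocating, as sketched below; the helper name is illustrative, and the
results in the comments assume the common configuration described above
(16-byte quantum) and are typical rather than guaranteed:</para>
<programlisting language="C"><![CDATA[
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
show_size_classes(void) {
	/* nallocx() reports the usable size a request would be rounded to. */
	printf("9 bytes   -> %zu\n", nallocx(9, 0));	/* typically 16 */
	printf("100 bytes -> %zu\n", nallocx(100, 0));	/* typically 112 */
}]]></programlisting>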
<para>Allocations are packed tightly together, which can be an issue for
multi-threaded applications. If you need to assure that allocations do not
@@ -555,30 +544,28 @@
nearest multiple of the cacheline size, or specify cacheline alignment when
allocating.</para>
<para>The <function>realloc()</function>,
<function>rallocx()</function>, and
<function>xallocx()</function> functions may resize allocations
without moving them under limited circumstances. Unlike the
<function>*allocx()</function> API, the standard API does not
officially round up the usable size of an allocation to the nearest size
class, so technically it is necessary to call
<function>realloc()</function> to grow e.g. a 9-byte allocation to
16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage
trivially succeed in place as long as the pre-size and post-size both round
up to the same size class. No other API guarantees are made regarding
in-place resizing, but the current implementation also tries to resize large
allocations in place, as long as the pre-size and post-size are both large.
For shrinkage to succeed, the extent allocator must support splitting (see
<link
linkend="arena.i.extent_hooks"><mallctl>arena.&lt;i&gt;.extent_hooks</mallctl></link>).
Growth only succeeds if the trailing memory is currently available, and the
extent allocator supports merging.</para>
<para>Assuming 4 KiB pages and a 16-byte quantum on a 64-bit system, the
size classes in each category are as shown in <xref linkend="size_classes"
xrefstyle="template:Table %n"/>.</para>
<table xml:id="size_classes" frame="all">
<title>Size classes</title>
@@ -632,7 +619,7 @@
<entry>[10 KiB, 12 KiB, 14 KiB]</entry>
</row>
<row>
<entry morerows="15">Large</entry>
<entry>2 KiB</entry>
<entry>[16 KiB]</entry>
</row>
@@ -662,12 +649,7 @@
</row>
<row>
<entry>256 KiB</entry>
<entry>[1280 KiB, 1536 KiB, 1792 KiB, 2 MiB]</entry>
</row>
<row>
<entry>512 KiB</entry>
@@ -693,6 +675,14 @@
<entry>...</entry>
<entry>...</entry>
</row>
<row>
<entry>512 PiB</entry>
<entry>[2560 PiB, 3 EiB, 3584 PiB, 4 EiB]</entry>
</row>
<row>
<entry>1 EiB</entry>
<entry>[5 EiB, 6 EiB, 7 EiB]</entry>
</row>
</tbody>
</tgroup>
</table>
@@ -700,19 +690,32 @@
<refsect1 id="mallctl_namespace">
<title>MALLCTL NAMESPACE</title>
<para>The following names are defined in the namespace accessible via the
<function>mallctl*()</function> functions. Value types are specified in
parentheses, their readable/writable statuses are encoded as
<literal>rw</literal>, <literal>r-</literal>, <literal>-w</literal>, or
<literal>--</literal>, and required build configuration flags follow, if
any. A name element encoded as <literal>&lt;i&gt;</literal> or
<literal>&lt;j&gt;</literal> indicates an integer component, where the
integer varies from 0 to some upper value that must be determined via
introspection. In the case of <mallctl>stats.arenas.&lt;i&gt;.*</mallctl>
and <mallctl>arena.&lt;i&gt;.{initialized,purge,decay,dss}</mallctl>,
<literal>&lt;i&gt;</literal> equal to
<constant>MALLCTL_ARENAS_ALL</constant> can be used to operate on all arenas
or access the summation of statistics from all arenas; similarly
<literal>&lt;i&gt;</literal> equal to
<constant>MALLCTL_ARENAS_DESTROYED</constant> can be used to access the
summation of statistics from all destroyed arenas. These constants can be
utilized either via <function>mallctlnametomib()</function> followed by
<function>mallctlbymib()</function>, or via code such as the following:
<programlisting language="C"><![CDATA[
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
NULL, NULL, NULL, 0);]]></programlisting>
Take special note of the <link
linkend="epoch"><mallctl>epoch</mallctl></link> mallctl, which controls
refreshing of cached dynamic statistics.</para>
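<para>Boolean controls are written through the same interface; for
instance (a sketch, with an illustrative helper name), the
<mallctl>background_thread</mallctl> control described below can be
enabled as follows:</para>
<programlisting language="C"><![CDATA[
#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static int
enable_background_threads(void) {
	bool enable = true;
	/* Non-zero return indicates e.g. an unsupported platform. */
	return mallctl("background_thread", NULL, NULL, &enable, sizeof(enable));
}]]></programlisting>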
<variablelist>
<varlistentry id="version">
@@ -731,11 +734,45 @@
<literal>rw</literal>
</term>
<listitem><para>If a value is passed in, refresh the data from which
the <function>mallctl*()</function> functions report values,
and increment the epoch. Return the current epoch. This is useful for
detecting whether another thread caused a refresh.</para></listitem>
</varlistentry>
<varlistentry id="background_thread">
<term>
<mallctl>background_thread</mallctl>
(<type>bool</type>)
<literal>rw</literal>
</term>
<listitem><para>Enable/disable internal background worker threads. When
set to true, background threads are created on demand (the number of
background threads will be no more than the number of CPUs or active
arenas). Threads run periodically, and handle <link
linkend="arena.i.decay">purging</link> asynchronously. When switching
off, background threads are terminated synchronously. Note that after a
<citerefentry><refentrytitle>fork</refentrytitle><manvolnum>2</manvolnum></citerefentry>
call, the state in the child process will be disabled regardless of
the state in the parent process. See <link
linkend="stats.background_thread.num_threads"><mallctl>stats.background_thread</mallctl></link>
for related stats. <link
linkend="opt.background_thread"><mallctl>opt.background_thread</mallctl></link>
can be used to set the default option. This option is only available on
selected pthread-based platforms.</para></listitem>
</varlistentry>
<varlistentry id="max_background_threads">
<term>
<mallctl>max_background_threads</mallctl>
(<type>size_t</type>)
<literal>rw</literal>
</term>
<listitem><para>Maximum number of background worker threads that will
be created. This value is capped at <link
linkend="opt.max_background_threads"><mallctl>opt.max_background_threads</mallctl></link> at
startup.</para></listitem>
</varlistentry>
<varlistentry id="config.cache_oblivious"> <varlistentry id="config.cache_oblivious">
<term> <term>
<mallctl>config.cache_oblivious</mallctl> <mallctl>config.cache_oblivious</mallctl>
@@ -776,14 +813,15 @@
during build configuration.</para></listitem>
</varlistentry>
<varlistentry id="config.malloc_conf">
<term>
<mallctl>config.malloc_conf</mallctl>
(<type>const char *</type>)
<literal>r-</literal>
</term>
<listitem><para>Embedded configure-time-specified run-time options
string, empty unless <option>--with-malloc-conf</option> was specified
during build configuration.</para></listitem>
</varlistentry>
<varlistentry id="config.prof">
@@ -826,68 +864,94 @@
build configuration.</para></listitem>
</varlistentry>
<varlistentry id="config.utrace">
<term>
<mallctl>config.utrace</mallctl>
(<type>bool</type>)
<literal>r-</literal>
</term>
<listitem><para><option>--enable-utrace</option> was specified during
build configuration.</para></listitem>
</varlistentry>
<varlistentry id="config.xmalloc">
<term>
<mallctl>config.xmalloc</mallctl>
(<type>bool</type>)
<literal>r-</literal>
</term>
<listitem><para><option>--enable-xmalloc</option> was specified during
build configuration.</para></listitem>
</varlistentry>
<varlistentry id="config.utrace"> <varlistentry id="opt.abort">
<term> <term>
<mallctl>config.utrace</mallctl> <mallctl>opt.abort</mallctl>
(<type>bool</type>) (<type>bool</type>)
<literal>r-</literal> <literal>r-</literal>
</term> </term>
<listitem><para><option>--enable-utrace</option> was specified during <listitem><para>Abort-on-warning enabled/disabled. If true, most
build configuration.</para></listitem> warnings are fatal. Note that runtime option warnings are not included
(see <link
linkend="opt.abort_conf"><mallctl>opt.abort_conf</mallctl></link> for
that). The process will call
<citerefentry><refentrytitle>abort</refentrytitle>
<manvolnum>3</manvolnum></citerefentry> in these cases. This option is
disabled by default unless <option>--enable-debug</option> is
specified during configuration, in which case it is enabled by default.
</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="config.valgrind"> <varlistentry id="opt.abort_conf">
<term> <term>
<mallctl>config.valgrind</mallctl> <mallctl>opt.abort_conf</mallctl>
(<type>bool</type>) (<type>bool</type>)
<literal>r-</literal> <literal>r-</literal>
</term> </term>
<listitem><para><option>--enable-valgrind</option> was specified during <listitem><para>Abort-on-invalid-configuration enabled/disabled. If
build configuration.</para></listitem> true, invalid runtime options are fatal. The process will call
<citerefentry><refentrytitle>abort</refentrytitle>
<manvolnum>3</manvolnum></citerefentry> in these cases. This option is
disabled by default unless <option>--enable-debug</option> is
specified during configuration, in which case it is enabled by default.
</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="config.xmalloc"> <varlistentry id="opt.metadata_thp">
<term> <term>
<mallctl>config.xmalloc</mallctl> <mallctl>opt.metadata_thp</mallctl>
(<type>bool</type>) (<type>const char *</type>)
<literal>r-</literal> <literal>r-</literal>
</term> </term>
<listitem><para><option>--enable-xmalloc</option> was specified during <listitem><para>Controls whether to allow jemalloc to use transparent
build configuration.</para></listitem> huge page (THP) for internal metadata (see <link
linkend="stats.metadata">stats.metadata</link>). <quote>always</quote>
allows such usage. <quote>auto</quote> uses no THP initially, but may
begin to do so when metadata usage reaches a certain level. The default
is <quote>disabled</quote>.</para></listitem>
</varlistentry>
<varlistentry id="opt.retain">
<term>
<mallctl>opt.retain</mallctl>
(<type>bool</type>)
<literal>r-</literal>
</term>
<listitem><para>If true, retain unused virtual memory for later reuse
rather than discarding it by calling
<citerefentry><refentrytitle>munmap</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> or equivalent (see <link
linkend="stats.retained">stats.retained</link> for related details).
This option is disabled by default unless discarding virtual memory is
known to trigger
platform-specific performance problems, e.g. for [64-bit] Linux, which
has a quirk in its virtual memory allocation algorithm that causes
semi-permanent VM map holes under normal jemalloc operation. Although
<citerefentry><refentrytitle>munmap</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> causes issues on 32-bit Linux as
well, retaining virtual memory for 32-bit Linux is disabled by default
due to the practical possibility of address space exhaustion.
</para></listitem> </para></listitem>
</varlistentry> </varlistentry>
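        <para>As a minimal sketch, both options can be selected at load time
        through the <literal>malloc_conf</literal> string used elsewhere in
        this manual; the particular values below are illustrative only.</para>
        <programlisting language="C"><![CDATA[
/* Illustrative only: make unrecognized option strings fatal and retain
 * unused virtual memory rather than unmapping it. */
const char *malloc_conf = "abort_conf:true,retain:true";
]]></programlisting>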
@@ -904,61 +968,136 @@ for (i = 0; i < nbins; i++) {
settings are supported if settings are supported if
<citerefentry><refentrytitle>sbrk</refentrytitle> <citerefentry><refentrytitle>sbrk</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> is supported by the operating <manvolnum>2</manvolnum></citerefentry> is supported by the operating
system: &ldquo;disabled&rdquo;, &ldquo;primary&rdquo;, and system: <quote>disabled</quote>, <quote>primary</quote>, and
&ldquo;secondary&rdquo;; otherwise only &ldquo;disabled&rdquo; is <quote>secondary</quote>; otherwise only <quote>disabled</quote> is
supported. The default is &ldquo;secondary&rdquo; if supported. The default is <quote>secondary</quote> if
<citerefentry><refentrytitle>sbrk</refentrytitle> <citerefentry><refentrytitle>sbrk</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> is supported by the operating <manvolnum>2</manvolnum></citerefentry> is supported by the operating
system; &ldquo;disabled&rdquo; otherwise. system; <quote>disabled</quote> otherwise.
</para></listitem> </para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="opt.lg_chunk"> <varlistentry id="opt.narenas">
<term> <term>
<mallctl>opt.lg_chunk</mallctl> <mallctl>opt.narenas</mallctl>
(<type>size_t</type>) (<type>unsigned</type>)
<literal>r-</literal>
</term>
<listitem><para>Maximum number of arenas to use for automatic
multiplexing of threads and arenas. The default is four times the
number of CPUs, or one if there is a single CPU.</para></listitem>
</varlistentry>
<varlistentry id="opt.percpu_arena">
<term>
<mallctl>opt.percpu_arena</mallctl>
(<type>const char *</type>)
<literal>r-</literal> <literal>r-</literal>
</term> </term>
<listitem><para>Virtual memory chunk size (log base 2). If a chunk <listitem><para>Per CPU arena mode. Use the <quote>percpu</quote>
size outside the supported size range is specified, the size is setting to enable this feature, which uses the number of CPUs to determine
silently clipped to the minimum/maximum supported size. The default the number of arenas, and binds threads to arenas dynamically based on the
chunk size is 2 MiB (2^21). CPU the thread currently runs on. The <quote>phycpu</quote> setting uses
one arena per physical CPU, which means the two hyper threads on the
same CPU share one arena. Note that no runtime checking regarding the
availability of hyper threading is done at the moment. When set to
<quote>disabled</quote>, narenas and the thread-to-arena association will
not be impacted by this option. The default is <quote>disabled</quote>.
</para></listitem> </para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="opt.narenas"> <varlistentry id="opt.background_thread">
<term> <term>
<mallctl>opt.narenas</mallctl> <mallctl>opt.background_thread</mallctl>
(<type>size_t</type>) (<type>const bool</type>)
<literal>r-</literal> <literal>r-</literal>
</term> </term>
<listitem><para>Maximum number of arenas to use for automatic <listitem><para>Internal background worker threads enabled/disabled.
multiplexing of threads and arenas. The default is four times the Because of potential circular dependencies, enabling background threads
number of CPUs, or one if there is a single CPU.</para></listitem> using this option may cause a crash or deadlock during initialization. For
a reliable way to use this feature, see <link
linkend="background_thread">background_thread</link> for dynamic control
options and details. This option is disabled by
default.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="opt.lg_dirty_mult"> <varlistentry id="opt.max_background_threads">
<term> <term>
<mallctl>opt.lg_dirty_mult</mallctl> <mallctl>opt.max_background_threads</mallctl>
(<type>const size_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Maximum number of background threads that will be created
if <link linkend="background_thread">background_thread</link> is set.
Defaults to the number of CPUs.</para></listitem>
</varlistentry>
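        <para>A minimal sketch of the dynamic path, assuming the <link
        linkend="background_thread"><mallctl>background_thread</mallctl></link>
        control referenced above; <function>enable_background_threads()</function>
        is a hypothetical helper name.</para>
        <programlisting language="C"><![CDATA[
#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Enable background purging threads after startup via the dynamic
 * "background_thread" mallctl, avoiding the boot-time hazard noted above. */
static int
enable_background_threads(void)
{
	bool enable = true;
	return mallctl("background_thread", NULL, NULL, &enable,
	    sizeof(enable));
}
]]></programlisting>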
<varlistentry id="opt.dirty_decay_ms">
<term>
<mallctl>opt.dirty_decay_ms</mallctl>
(<type>ssize_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Approximate time in milliseconds from the creation of a
set of unused dirty pages until an equivalent set of unused dirty pages
is purged (i.e. converted to muzzy via e.g.
<function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>
if supported by the operating system, or converted to clean otherwise)
and/or reused. Dirty pages are defined as previously having been
potentially written to by the application, and therefore consuming
physical memory, yet having no current use. The pages are incrementally
purged according to a sigmoidal decay curve that starts and ends with
zero purge rate. A decay time of 0 causes all unused dirty pages to be
purged immediately upon creation. A decay time of -1 disables purging.
The default decay time is 10 seconds. See <link
linkend="arenas.dirty_decay_ms"><mallctl>arenas.dirty_decay_ms</mallctl></link>
and <link
linkend="arena.i.dirty_decay_ms"><mallctl>arena.&lt;i&gt;.dirty_decay_ms</mallctl></link>
for related dynamic control options. See <link
linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
for a description of muzzy pages.</para></listitem>
</varlistentry>
<varlistentry id="opt.muzzy_decay_ms">
<term>
<mallctl>opt.muzzy_decay_ms</mallctl>
(<type>ssize_t</type>) (<type>ssize_t</type>)
<literal>r-</literal> <literal>r-</literal>
</term> </term>
<listitem><para>Per-arena minimum ratio (log base 2) of active to dirty <listitem><para>Approximate time in milliseconds from the creation of a
pages. Some dirty unused pages may be allowed to accumulate, within set of unused muzzy pages until an equivalent set of unused muzzy pages
the limit set by the ratio (or one chunk worth of dirty pages, is purged (i.e. converted to clean) and/or reused. Muzzy pages are
whichever is greater), before informing the kernel about some of those defined as previously having been unused dirty pages that were
pages via <citerefentry><refentrytitle>madvise</refentrytitle> subsequently purged in a manner that left them subject to the
<manvolnum>2</manvolnum></citerefentry> or a similar system call. This reclamation whims of the operating system (e.g.
provides the kernel with sufficient information to recycle dirty pages <function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>),
if physical memory becomes scarce and the pages remain unused. The and therefore in an indeterminate state. The pages are incrementally
default minimum ratio is 8:1 (2^3:1); an option value of -1 will purged according to a sigmoidal decay curve that starts and ends with
disable dirty page purging. See <link zero purge rate. A decay time of 0 causes all unused muzzy pages to be
linkend="arenas.lg_dirty_mult"><mallctl>arenas.lg_dirty_mult</mallctl></link> purged immediately upon creation. A decay time of -1 disables purging.
The default decay time is 10 seconds. See <link
linkend="arenas.muzzy_decay_ms"><mallctl>arenas.muzzy_decay_ms</mallctl></link>
and <link and <link
linkend="arena.i.lg_dirty_mult"><mallctl>arena.&lt;i&gt;.lg_dirty_mult</mallctl></link> linkend="arena.i.muzzy_decay_ms"><mallctl>arena.&lt;i&gt;.muzzy_decay_ms</mallctl></link>
for related dynamic control options.</para></listitem> for related dynamic control options.</para></listitem>
</varlistentry> </varlistentry>
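        <para>For example, both decay times can be tuned together through the
        <literal>malloc_conf</literal> string; the values below are
        illustrative, not recommendations.</para>
        <programlisting language="C"><![CDATA[
/* Illustrative decay tuning: purge dirty pages after roughly 5 seconds and
 * purge muzzy pages immediately. */
const char *malloc_conf = "dirty_decay_ms:5000,muzzy_decay_ms:0";
]]></programlisting>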
<varlistentry id="opt.lg_extent_max_active_fit">
<term>
<mallctl>opt.lg_extent_max_active_fit</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
</term>
<listitem><para>When reusing dirty extents, this determines the (log
base 2 of the) maximum ratio between the size of the active extent
selected (to split off from) and the size of the requested allocation.
This prevents the splitting of large active extents for smaller
allocations, which can reduce fragmentation over the long run
(especially for non-active extents). A lower value may reduce
fragmentation, at the cost of extra active extents. The default value
is 6, which gives a maximum ratio of 64 (2^6).</para></listitem>
</varlistentry>
<varlistentry id="opt.stats_print"> <varlistentry id="opt.stats_print">
<term> <term>
<mallctl>opt.stats_print</mallctl> <mallctl>opt.stats_print</mallctl>
@@ -966,82 +1105,61 @@ for (i = 0; i < nbins; i++) {
<literal>r-</literal> <literal>r-</literal>
</term> </term>
<listitem><para>Enable/disable statistics printing at exit. If <listitem><para>Enable/disable statistics printing at exit. If
enabled, the <function>malloc_stats_print<parameter/></function> enabled, the <function>malloc_stats_print()</function>
function is called at program exit via an function is called at program exit via an
<citerefentry><refentrytitle>atexit</refentrytitle> <citerefentry><refentrytitle>atexit</refentrytitle>
<manvolnum>3</manvolnum></citerefentry> function. If <manvolnum>3</manvolnum></citerefentry> function. The <link
linkend="opt.stats_print_opts"><mallctl>opt.stats_print_opts</mallctl></link>
option can be combined with this one to specify output options. If
<option>--enable-stats</option> is specified during configuration, this <option>--enable-stats</option> is specified during configuration, this
has the potential to cause deadlock for a multi-threaded process that has the potential to cause deadlock for a multi-threaded process that
exits while one or more threads are executing in the memory allocation exits while one or more threads are executing in the memory allocation
functions. Furthermore, <function>atexit<parameter/></function> may functions. Furthermore, <function>atexit()</function> may
allocate memory during application initialization and then deadlock allocate memory during application initialization and then deadlock
internally when jemalloc in turn calls internally when jemalloc in turn calls
<function>atexit<parameter/></function>, so this option is not <function>atexit()</function>, so this option is not
univerally usable (though the application can register its own universally usable (though the application can register its own
<function>atexit<parameter/></function> function with equivalent <function>atexit()</function> function with equivalent
functionality). Therefore, this option should only be used with care; functionality). Therefore, this option should only be used with care;
it is primarily intended as a performance tuning aid during application it is primarily intended as a performance tuning aid during application
development. This option is disabled by default.</para></listitem> development. This option is disabled by default.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="opt.junk"> <varlistentry id="opt.stats_print_opts">
<term> <term>
<mallctl>opt.junk</mallctl> <mallctl>opt.stats_print_opts</mallctl>
(<type>const char *</type>) (<type>const char *</type>)
<literal>r-</literal> <literal>r-</literal>
[<option>--enable-fill</option>]
</term> </term>
<listitem><para>Junk filling. If set to "alloc", each byte of <listitem><para>Options (the <parameter>opts</parameter> string) to pass
uninitialized allocated memory will be initialized to to <function>malloc_stats_print()</function> at exit (enabled
<literal>0xa5</literal>. If set to "free", all deallocated memory will through <link
be initialized to <literal>0x5a</literal>. If set to "true", both linkend="opt.stats_print"><mallctl>opt.stats_print</mallctl></link>). See
allocated and deallocated memory will be initialized, and if set to available options in <link
"false", junk filling be disabled entirely. This is intended for linkend="malloc_stats_print_opts"><function>malloc_stats_print()</function></link>.
debugging and will impact performance negatively. This option is Has no effect unless <link
"false" by default unless <option>--enable-debug</option> is specified linkend="opt.stats_print"><mallctl>opt.stats_print</mallctl></link> is
during configuration, in which case it is "true" by default unless enabled. The default is <quote></quote>.</para></listitem>
running inside <ulink
url="http://valgrind.org/">Valgrind</ulink>.</para></listitem>
</varlistentry> </varlistentry>
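        <para>As a sketch, statistics can also be emitted on demand by calling
        <function>malloc_stats_print()</function> directly, which avoids the
        <function>atexit()</function> caveats above;
        <function>dump_stats()</function> is a hypothetical helper, and the
        <quote>J</quote> opts character (JSON output) is assumed from the
        <function>malloc_stats_print()</function> documentation.</para>
        <programlisting language="C"><![CDATA[
#include <jemalloc/jemalloc.h>

/* Emit allocator statistics on demand; a NULL write_cb writes to stderr. */
static void
dump_stats(void)
{
	malloc_stats_print(NULL, NULL, "J");
}
]]></programlisting>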
<varlistentry id="opt.quarantine"> <varlistentry id="opt.junk">
<term>
<mallctl>opt.quarantine</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-fill</option>]
</term>
<listitem><para>Per thread quarantine size in bytes. If non-zero, each
thread maintains a FIFO object quarantine that stores up to the
specified number of bytes of memory. The quarantined memory is not
freed until it is released from quarantine, though it is immediately
junk-filled if the <link
linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is
enabled. This feature is of particular use in combination with <ulink
url="http://valgrind.org/">Valgrind</ulink>, which can detect attempts
to access quarantined objects. This is intended for debugging and will
impact performance negatively. The default quarantine size is 0 unless
running inside Valgrind, in which case the default is 16
MiB.</para></listitem>
</varlistentry>
<varlistentry id="opt.redzone">
<term> <term>
<mallctl>opt.redzone</mallctl> <mallctl>opt.junk</mallctl>
(<type>bool</type>) (<type>const char *</type>)
<literal>r-</literal> <literal>r-</literal>
[<option>--enable-fill</option>] [<option>--enable-fill</option>]
</term> </term>
<listitem><para>Redzones enabled/disabled. If enabled, small <listitem><para>Junk filling. If set to <quote>alloc</quote>, each byte
allocations have redzones before and after them. Furthermore, if the of uninitialized allocated memory will be initialized to
<link linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is <literal>0xa5</literal>. If set to <quote>free</quote>, all deallocated
enabled, the redzones are checked for corruption during deallocation. memory will be initialized to <literal>0x5a</literal>. If set to
However, the primary intended purpose of this feature is to be used in <quote>true</quote>, both allocated and deallocated memory will be
combination with <ulink url="http://valgrind.org/">Valgrind</ulink>, initialized, and if set to <quote>false</quote>, junk filling will be
which needs redzones in order to do effective buffer overflow/underflow disabled entirely. This is intended for debugging and will impact
detection. This option is intended for debugging and will impact performance negatively. This option is <quote>false</quote> by default
performance negatively. This option is disabled by unless <option>--enable-debug</option> is specified during
default unless running inside Valgrind.</para></listitem> configuration, in which case it is <quote>true</quote> by
default.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="opt.zero"> <varlistentry id="opt.zero">
@@ -1054,8 +1172,8 @@ for (i = 0; i < nbins; i++) {
<listitem><para>Zero filling enabled/disabled. If enabled, each byte <listitem><para>Zero filling enabled/disabled. If enabled, each byte
of uninitialized allocated memory will be initialized to 0. Note that of uninitialized allocated memory will be initialized to 0. Note that
this initialization only happens once for each byte, so this initialization only happens once for each byte, so
<function>realloc<parameter/></function> and <function>realloc()</function> and
<function>rallocx<parameter/></function> calls do not zero memory that <function>rallocx()</function> calls do not zero memory that
was previously allocated. This is intended for debugging and will was previously allocated. This is intended for debugging and will
impact performance negatively. This option is disabled by default. impact performance negatively. This option is disabled by default.
</para></listitem> </para></listitem>
@@ -1099,7 +1217,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<mallctl>opt.tcache</mallctl> <mallctl>opt.tcache</mallctl>
(<type>bool</type>) (<type>bool</type>)
<literal>r-</literal> <literal>r-</literal>
[<option>--enable-tcache</option>]
</term> </term>
<listitem><para>Thread-specific caching (tcache) enabled/disabled. When <listitem><para>Thread-specific caching (tcache) enabled/disabled. When
there are multiple threads, each thread uses a tcache for objects up to there are multiple threads, each thread uses a tcache for objects up to
@@ -1108,9 +1225,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
increased memory use. See the <link increased memory use. See the <link
linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link> linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
option for related tuning information. This option is enabled by option for related tuning information. This option is enabled by
default unless running inside <ulink default.</para></listitem>
url="http://valgrind.org/">Valgrind</ulink>, in which case it is
forcefully disabled.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="opt.lg_tcache_max"> <varlistentry id="opt.lg_tcache_max">
@@ -1118,7 +1233,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<mallctl>opt.lg_tcache_max</mallctl> <mallctl>opt.lg_tcache_max</mallctl>
(<type>size_t</type>) (<type>size_t</type>)
<literal>r-</literal> <literal>r-</literal>
[<option>--enable-tcache</option>]
</term> </term>
<listitem><para>Maximum size class (log base 2) to cache in the <listitem><para>Maximum size class (log base 2) to cache in the
thread-specific cache (tcache). At a minimum, all small size classes thread-specific cache (tcache). At a minimum, all small size classes
@@ -1126,6 +1240,28 @@ malloc_conf = "xmalloc:true";]]></programlisting>
default maximum is 32 KiB (2^15).</para></listitem> default maximum is 32 KiB (2^15).</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="opt.thp">
<term>
<mallctl>opt.thp</mallctl>
(<type>const char *</type>)
<literal>r-</literal>
</term>
<listitem><para>Transparent hugepage (THP) mode. Settings "always",
"never" and "default" are available if THP is supported by the operating
system. The "always" setting enables transparent hugepage for all user
memory mappings with
<parameter><constant>MADV_HUGEPAGE</constant></parameter>; "never"
ensures no transparent hugepage with
<parameter><constant>MADV_NOHUGEPAGE</constant></parameter>; the default
setting "default" makes no changes. Note that this option does not
affect THP for jemalloc internal metadata (see <link
linkend="opt.metadata_thp"><mallctl>opt.metadata_thp</mallctl></link>);
in addition, for arenas with customized <link
linkend="arena.i.extent_hooks"><mallctl>extent_hooks</mallctl></link>,
this option is bypassed as it is implemented as part of the default
extent hooks.</para></listitem>
</varlistentry>
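        <para>Illustrative <literal>malloc_conf</literal> settings combining
        this option with <link
        linkend="opt.metadata_thp"><mallctl>opt.metadata_thp</mallctl></link>;
        the chosen values are examples only.</para>
        <programlisting language="C"><![CDATA[
/* Illustrative only: avoid THP for user mappings while allowing the
 * allocator's own metadata to adopt THP automatically. */
const char *malloc_conf = "thp:never,metadata_thp:auto";
]]></programlisting>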
<varlistentry id="opt.prof"> <varlistentry id="opt.prof">
<term> <term>
<mallctl>opt.prof</mallctl> <mallctl>opt.prof</mallctl>
@@ -1150,7 +1286,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
the <command>jeprof</command> command, which is based on the the <command>jeprof</command> command, which is based on the
<command>pprof</command> that is developed as part of the <ulink <command>pprof</command> that is developed as part of the <ulink
url="http://code.google.com/p/gperftools/">gperftools url="http://code.google.com/p/gperftools/">gperftools
package</ulink>.</para></listitem> package</ulink>. See <link linkend="heap_profile_format">HEAP PROFILE
FORMAT</link> for heap profile format documentation.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="opt.prof_prefix"> <varlistentry id="opt.prof_prefix">
@@ -1277,11 +1414,11 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.f.heap</filename>, <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.f.heap</filename>,
where <literal>&lt;prefix&gt;</literal> is controlled by the <link where <literal>&lt;prefix&gt;</literal> is controlled by the <link
linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
option. Note that <function>atexit<parameter/></function> may allocate option. Note that <function>atexit()</function> may allocate
memory during application initialization and then deadlock internally memory during application initialization and then deadlock internally
when jemalloc in turn calls <function>atexit<parameter/></function>, so when jemalloc in turn calls <function>atexit()</function>, so
this option is not univerally usable (though the application can this option is not universally usable (though the application can
register its own <function>atexit<parameter/></function> function with register its own <function>atexit()</function> function with
equivalent functionality). This option is disabled by equivalent functionality). This option is disabled by
default.</para></listitem> default.</para></listitem>
</varlistentry> </varlistentry>
@@ -1311,7 +1448,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Get or set the arena associated with the calling <listitem><para>Get or set the arena associated with the calling
thread. If the specified arena was not initialized beforehand (see the thread. If the specified arena was not initialized beforehand (see the
<link <link
linkend="arenas.initialized"><mallctl>arenas.initialized</mallctl></link> linkend="arena.i.initialized"><mallctl>arena.i.initialized</mallctl></link>
mallctl), it will be automatically initialized as a side effect of mallctl), it will be automatically initialized as a side effect of
calling this interface.</para></listitem> calling this interface.</para></listitem>
</varlistentry> </varlistentry>
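        <para>A sketch of reading and rebinding the calling thread's arena via
        <function>mallctl()</function>; <function>pin_thread_to_arena()</function>
        and the target index are hypothetical.</para>
        <programlisting language="C"><![CDATA[
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Read the calling thread's arena, then bind the thread to target_arena. */
static int
pin_thread_to_arena(unsigned target_arena)
{
	unsigned cur;
	size_t sz = sizeof(cur);

	if (mallctl("thread.arena", &cur, &sz, NULL, 0) != 0)
		return -1;
	if (cur == target_arena)
		return 0;
	return mallctl("thread.arena", NULL, NULL, &target_arena,
	    sizeof(target_arena));
}
]]></programlisting>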
@@ -1340,7 +1477,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<link <link
linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link> linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link>
mallctl. This is useful for avoiding the overhead of repeated mallctl. This is useful for avoiding the overhead of repeated
<function>mallctl*<parameter/></function> calls.</para></listitem> <function>mallctl*()</function> calls.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="thread.deallocated"> <varlistentry id="thread.deallocated">
@@ -1367,7 +1504,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<link <link
linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link> linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link>
mallctl. This is useful for avoiding the overhead of repeated mallctl. This is useful for avoiding the overhead of repeated
<function>mallctl*<parameter/></function> calls.</para></listitem> <function>mallctl*()</function> calls.</para></listitem>
</varlistentry> </varlistentry>
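        <para>A sketch of using the pointer variants;
        <function>net_thread_bytes()</function> is a hypothetical helper, and a
        real application could cache the returned pointers in thread-local
        storage so that subsequent reads involve no
        <function>mallctl*()</function> calls at all.</para>
        <programlisting language="C"><![CDATA[
#include <stdint.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Net bytes currently attributed to the calling thread (assuming a build
 * with statistics enabled). */
static uint64_t
net_thread_bytes(void)
{
	uint64_t *allocatedp, *deallocatedp;
	size_t sz = sizeof(uint64_t *);

	if (mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0) != 0 ||
	    mallctl("thread.deallocatedp", &deallocatedp, &sz, NULL, 0) != 0)
		return 0;
	return *allocatedp - *deallocatedp;
}
]]></programlisting>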
<varlistentry id="thread.tcache.enabled"> <varlistentry id="thread.tcache.enabled">
@@ -1375,7 +1512,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<mallctl>thread.tcache.enabled</mallctl> <mallctl>thread.tcache.enabled</mallctl>
(<type>bool</type>) (<type>bool</type>)
<literal>rw</literal> <literal>rw</literal>
[<option>--enable-tcache</option>]
</term> </term>
<listitem><para>Enable/disable calling thread's tcache. The tcache is <listitem><para>Enable/disable calling thread's tcache. The tcache is
implicitly flushed as a side effect of becoming implicitly flushed as a side effect of becoming
@@ -1389,7 +1525,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<mallctl>thread.tcache.flush</mallctl> <mallctl>thread.tcache.flush</mallctl>
(<type>void</type>) (<type>void</type>)
<literal>--</literal> <literal>--</literal>
[<option>--enable-tcache</option>]
</term> </term>
<listitem><para>Flush calling thread's thread-specific cache (tcache). <listitem><para>Flush calling thread's thread-specific cache (tcache).
This interface releases all cached objects and internal data structures This interface releases all cached objects and internal data structures
@@ -1418,8 +1553,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
can cause asynchronous string deallocation. Furthermore, each can cause asynchronous string deallocation. Furthermore, each
invocation of this interface can only read or write; simultaneous invocation of this interface can only read or write; simultaneous
read/write is not supported due to string lifetime limitations. The read/write is not supported due to string lifetime limitations. The
name string must nil-terminated and comprised only of characters in the name string must be nil-terminated and comprised only of characters in
sets recognized the sets recognized
by <citerefentry><refentrytitle>isgraph</refentrytitle> by <citerefentry><refentrytitle>isgraph</refentrytitle>
<manvolnum>3</manvolnum></citerefentry> and <manvolnum>3</manvolnum></citerefentry> and
<citerefentry><refentrytitle>isblank</refentrytitle> <citerefentry><refentrytitle>isblank</refentrytitle>
@@ -1445,7 +1580,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<mallctl>tcache.create</mallctl> <mallctl>tcache.create</mallctl>
(<type>unsigned</type>) (<type>unsigned</type>)
<literal>r-</literal> <literal>r-</literal>
[<option>--enable-tcache</option>]
</term> </term>
<listitem><para>Create an explicit thread-specific cache (tcache) and <listitem><para>Create an explicit thread-specific cache (tcache) and
return an identifier that can be passed to the <link return an identifier that can be passed to the <link
@@ -1462,12 +1596,11 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<mallctl>tcache.flush</mallctl> <mallctl>tcache.flush</mallctl>
(<type>unsigned</type>) (<type>unsigned</type>)
<literal>-w</literal> <literal>-w</literal>
[<option>--enable-tcache</option>]
</term> </term>
<listitem><para>Flush the specified thread-specific cache (tcache). The <listitem><para>Flush the specified thread-specific cache (tcache). The
same considerations apply to this interface as to <link same considerations apply to this interface as to <link
linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>, linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>,
except that the tcache will never be automatically be discarded. except that the tcache will never be automatically discarded.
</para></listitem> </para></listitem>
</varlistentry> </varlistentry>
@@ -1476,25 +1609,86 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<mallctl>tcache.destroy</mallctl> <mallctl>tcache.destroy</mallctl>
(<type>unsigned</type>) (<type>unsigned</type>)
<literal>-w</literal> <literal>-w</literal>
[<option>--enable-tcache</option>]
</term> </term>
<listitem><para>Flush the specified thread-specific cache (tcache) and <listitem><para>Flush the specified thread-specific cache (tcache) and
make the identifier available for use during a future tcache creation. make the identifier available for use during a future tcache creation.
</para></listitem> </para></listitem>
</varlistentry> </varlistentry>
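        <para>A sketch of the explicit tcache lifecycle, assuming the
        non-standard <function>mallocx()</function>/<function>dallocx()</function>
        API and its <constant>MALLOCX_TCACHE()</constant> flag;
        <function>explicit_tcache_example()</function> is a hypothetical
        name.</para>
        <programlisting language="C"><![CDATA[
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Create an explicit tcache, allocate through it, then destroy it. */
static void
explicit_tcache_example(void)
{
	unsigned tc;
	size_t sz = sizeof(tc);
	void *p;

	if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
		return;
	p = mallocx(4096, MALLOCX_TCACHE(tc));
	if (p != NULL)
		dallocx(p, MALLOCX_TCACHE(tc));
	mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
}
]]></programlisting>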
<varlistentry id="arena.i.initialized">
<term>
<mallctl>arena.&lt;i&gt;.initialized</mallctl>
(<type>bool</type>)
<literal>r-</literal>
</term>
<listitem><para>Get whether the specified arena's statistics are
initialized (i.e. the arena was initialized prior to the current epoch).
This interface can also be nominally used to query whether the merged
statistics corresponding to <constant>MALLCTL_ARENAS_ALL</constant> are
initialized (always true).</para></listitem>
</varlistentry>
<varlistentry id="arena.i.decay">
<term>
<mallctl>arena.&lt;i&gt;.decay</mallctl>
(<type>void</type>)
<literal>--</literal>
</term>
<listitem><para>Trigger decay-based purging of unused dirty/muzzy pages
for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
<constant>MALLCTL_ARENAS_ALL</constant>. The proportion of unused
dirty/muzzy pages to be purged depends on the current time; see <link
linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
and <link
linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
for details.</para></listitem>
</varlistentry>
<varlistentry id="arena.i.purge"> <varlistentry id="arena.i.purge">
<term> <term>
<mallctl>arena.&lt;i&gt;.purge</mallctl> <mallctl>arena.&lt;i&gt;.purge</mallctl>
(<type>void</type>) (<type>void</type>)
<literal>--</literal> <literal>--</literal>
</term> </term>
<listitem><para>Purge unused dirty pages for arena &lt;i&gt;, or for <listitem><para>Purge all unused dirty pages for arena &lt;i&gt;, or for
all arenas if &lt;i&gt; equals <link all arenas if &lt;i&gt; equals <constant>MALLCTL_ARENAS_ALL</constant>.
linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.
</para></listitem> </para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="arena.i.reset">
<term>
<mallctl>arena.&lt;i&gt;.reset</mallctl>
(<type>void</type>)
<literal>--</literal>
</term>
<listitem><para>Discard all of the arena's extant allocations. This
interface can only be used with arenas explicitly created via <link
linkend="arenas.create"><mallctl>arenas.create</mallctl></link>. None
of the arena's discarded/cached allocations may be accessed afterward. As
part of this requirement, all thread caches which were used to
allocate/deallocate in conjunction with the arena must be flushed
beforehand.</para></listitem>
</varlistentry>
<varlistentry id="arena.i.destroy">
<term>
<mallctl>arena.&lt;i&gt;.destroy</mallctl>
(<type>void</type>)
<literal>--</literal>
</term>
<listitem><para>Destroy the arena. Discard all of the arena's extant
allocations using the same mechanism as for <link
linkend="arena.i.reset"><mallctl>arena.&lt;i&gt;.reset</mallctl></link>
(with all the same constraints and side effects), merge the arena stats
into those accessible at arena index
<constant>MALLCTL_ARENAS_DESTROYED</constant>, and then completely
discard all metadata associated with the arena. Future calls to <link
linkend="arenas.create"><mallctl>arenas.create</mallctl></link> may
recycle the arena index. Destruction will fail if any threads are
currently associated with the arena as a result of calls to <link
linkend="thread.arena"><mallctl>thread.arena</mallctl></link>.</para></listitem>
</varlistentry>
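        <para>A sketch tying <mallctl>arenas.create</mallctl>, the
        <constant>MALLOCX_ARENA()</constant>/<constant>MALLOCX_TCACHE_NONE</constant>
        flags, and <mallctl>arena.&lt;i&gt;.destroy</mallctl> together for a
        short-lived scratch arena; <function>scratch_arena_example()</function>
        is hypothetical and error handling is abbreviated.</para>
        <programlisting language="C"><![CDATA[
#include <stdio.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Create a throwaway arena, allocate from it while bypassing the tcache
 * (so no flush is needed), then destroy it. */
static void
scratch_arena_example(void)
{
	unsigned arena;
	size_t sz = sizeof(arena);
	char cmd[64];
	void *p;

	if (mallctl("arenas.create", &arena, &sz, NULL, 0) != 0)
		return;
	p = mallocx(1 << 20, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
	if (p != NULL)
		dallocx(p, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
	snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena);
	mallctl(cmd, NULL, NULL, NULL, 0);
}
]]></programlisting>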
<varlistentry id="arena.i.dss"> <varlistentry id="arena.i.dss">
<term> <term>
<mallctl>arena.&lt;i&gt;.dss</mallctl> <mallctl>arena.&lt;i&gt;.dss</mallctl>
@@ -1503,71 +1697,109 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</term> </term>
<listitem><para>Set the precedence of dss allocation as related to mmap <listitem><para>Set the precedence of dss allocation as related to mmap
allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
<link <constant>MALLCTL_ARENAS_ALL</constant>. See <link
linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>. See linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
<link linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
settings.</para></listitem> settings.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="arena.i.lg_dirty_mult"> <varlistentry id="arena.i.dirty_decay_ms">
<term> <term>
<mallctl>arena.&lt;i&gt;.lg_dirty_mult</mallctl> <mallctl>arena.&lt;i&gt;.dirty_decay_ms</mallctl>
(<type>ssize_t</type>) (<type>ssize_t</type>)
<literal>rw</literal> <literal>rw</literal>
</term> </term>
<listitem><para>Current per-arena minimum ratio (log base 2) of active <listitem><para>Current per-arena approximate time in milliseconds from
to dirty pages for arena &lt;i&gt;. Each time this interface is set and the creation of a set of unused dirty pages until an equivalent set of
the ratio is increased, pages are synchronously purged as necessary to unused dirty pages is purged and/or reused. Each time this interface is
impose the new ratio. See <link set, all currently unused dirty pages are considered to have fully
linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link> decayed, which causes immediate purging of all unused dirty pages unless
the decay time is set to -1 (i.e. purging disabled). See <link
linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
for additional information.</para></listitem> for additional information.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="arena.i.chunk_hooks"> <varlistentry id="arena.i.muzzy_decay_ms">
<term> <term>
<mallctl>arena.&lt;i&gt;.chunk_hooks</mallctl> <mallctl>arena.&lt;i&gt;.muzzy_decay_ms</mallctl>
(<type>chunk_hooks_t</type>) (<type>ssize_t</type>)
<literal>rw</literal>
</term>
<listitem><para>Current per-arena approximate time in milliseconds from
the creation of a set of unused muzzy pages until an equivalent set of
unused muzzy pages is purged and/or reused. Each time this interface is
set, all currently unused muzzy pages are considered to have fully
decayed, which causes immediate purging of all unused muzzy pages unless
the decay time is set to -1 (i.e. purging disabled). See <link
linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
for additional information.</para></listitem>
</varlistentry>
<varlistentry id="arena.i.retain_grow_limit">
<term>
<mallctl>arena.&lt;i&gt;.retain_grow_limit</mallctl>
(<type>size_t</type>)
<literal>rw</literal>
</term>
<listitem><para>Maximum size to grow the retained region (only relevant when
<link linkend="opt.retain"><mallctl>opt.retain</mallctl></link> is
enabled). This controls the maximum increment to expand virtual memory,
or allocation through <link
linkend="arena.i.extent_hooks"><mallctl>arena.&lt;i&gt;.extent_hooks</mallctl></link>.
In particular, if customized extent hooks reserve physical memory
(e.g. 1G huge pages), this is useful to control the allocation hook's
input size. The default is no limit.</para></listitem>
</varlistentry>
<varlistentry id="arena.i.extent_hooks">
<term>
<mallctl>arena.&lt;i&gt;.extent_hooks</mallctl>
(<type>extent_hooks_t *</type>)
<literal>rw</literal> <literal>rw</literal>
</term> </term>
<listitem><para>Get or set the chunk management hook functions for arena <listitem><para>Get or set the extent management hook functions for
&lt;i&gt;. The functions must be capable of operating on all extant arena &lt;i&gt;. The functions must be capable of operating on all
chunks associated with arena &lt;i&gt;, usually by passing unknown extant extents associated with arena &lt;i&gt;, usually by passing
chunks to the replaced functions. In practice, it is feasible to unknown extents to the replaced functions. In practice, it is feasible
control allocation for arenas created via <link to control allocation for arenas explicitly created via <link
linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link> such linkend="arenas.create"><mallctl>arenas.create</mallctl></link> such
that all chunks originate from an application-supplied chunk allocator that all extents originate from an application-supplied extent allocator
(by setting custom chunk hook functions just after arena creation), but (by specifying the custom extent hook functions during arena creation),
the automatically created arenas may have already created chunks prior but the automatically created arenas will have already created extents
to the application having an opportunity to take over chunk prior to the application having an opportunity to take over extent
allocation.</para> allocation.</para>
<programlisting language="C"><![CDATA[ <programlisting language="C"><![CDATA[
typedef struct { typedef extent_hooks_s extent_hooks_t;
chunk_alloc_t *alloc; struct extent_hooks_s {
chunk_dalloc_t *dalloc; extent_alloc_t *alloc;
chunk_commit_t *commit; extent_dalloc_t *dalloc;
chunk_decommit_t *decommit; extent_destroy_t *destroy;
chunk_purge_t *purge; extent_commit_t *commit;
chunk_split_t *split; extent_decommit_t *decommit;
chunk_merge_t *merge; extent_purge_t *purge_lazy;
} chunk_hooks_t;]]></programlisting> extent_purge_t *purge_forced;
<para>The <type>chunk_hooks_t</type> structure comprises function extent_split_t *split;
extent_merge_t *merge;
};]]></programlisting>
<para>The <type>extent_hooks_t</type> structure comprises function
pointers which are described individually below. jemalloc uses these pointers which are described individually below. jemalloc uses these
functions to manage chunk lifetime, which starts off with allocation of functions to manage extent lifetime, which starts off with allocation of
mapped committed memory, in the simplest case followed by deallocation. mapped committed memory, in the simplest case followed by deallocation.
However, there are performance and platform reasons to retain chunks for However, there are performance and platform reasons to retain extents
later reuse. Cleanup attempts cascade from deallocation to decommit to for later reuse. Cleanup attempts cascade from deallocation to decommit
purging, which gives the chunk management functions opportunities to to forced purging to lazy purging, which gives the extent management
reject the most permanent cleanup operations in favor of less permanent functions opportunities to reject the most permanent cleanup operations
(and often less costly) operations. The chunk splitting and merging in favor of less permanent (and often less costly) operations. All
operations can also be opted out of, but this is mainly intended to operations except allocation can be universally opted out of by setting
support platforms on which virtual memory mappings provided by the the hook pointers to <constant>NULL</constant>, or selectively opted out
operating system kernel do not automatically coalesce and split, e.g. of by returning failure. Note that once the extent hook is set, the
Windows.</para> structure is accessed directly by the associated arenas, so it must
remain valid for the entire lifetime of the arenas.</para>
<funcsynopsis><funcprototype> <funcsynopsis><funcprototype>
<funcdef>typedef void *<function>(chunk_alloc_t)</function></funcdef> <funcdef>typedef void *<function>(extent_alloc_t)</function></funcdef>
<paramdef>void *<parameter>chunk</parameter></paramdef> <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
<paramdef>void *<parameter>new_addr</parameter></paramdef>
<paramdef>size_t <parameter>size</parameter></paramdef> <paramdef>size_t <parameter>size</parameter></paramdef>
<paramdef>size_t <parameter>alignment</parameter></paramdef> <paramdef>size_t <parameter>alignment</parameter></paramdef>
<paramdef>bool *<parameter>zero</parameter></paramdef> <paramdef>bool *<parameter>zero</parameter></paramdef>
@@ -1575,62 +1807,83 @@ typedef struct {
<paramdef>unsigned <parameter>arena_ind</parameter></paramdef> <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
</funcprototype></funcsynopsis> </funcprototype></funcsynopsis>
<literallayout></literallayout> <literallayout></literallayout>
<para>A chunk allocation function conforms to the <para>An extent allocation function conforms to the
<type>chunk_alloc_t</type> type and upon success returns a pointer to <type>extent_alloc_t</type> type and upon success returns a pointer to
<parameter>size</parameter> bytes of mapped memory on behalf of arena <parameter>size</parameter> bytes of mapped memory on behalf of arena
<parameter>arena_ind</parameter> such that the chunk's base address is a <parameter>arena_ind</parameter> such that the extent's base address is
multiple of <parameter>alignment</parameter>, as well as setting a multiple of <parameter>alignment</parameter>, as well as setting
<parameter>*zero</parameter> to indicate whether the chunk is zeroed and <parameter>*zero</parameter> to indicate whether the extent is zeroed
<parameter>*commit</parameter> to indicate whether the chunk is and <parameter>*commit</parameter> to indicate whether the extent is
committed. Upon error the function returns <constant>NULL</constant> committed. Upon error the function returns <constant>NULL</constant>
and leaves <parameter>*zero</parameter> and and leaves <parameter>*zero</parameter> and
<parameter>*commit</parameter> unmodified. The <parameter>*commit</parameter> unmodified. The
<parameter>size</parameter> parameter is always a multiple of the chunk <parameter>size</parameter> parameter is always a multiple of the page
size. The <parameter>alignment</parameter> parameter is always a power size. The <parameter>alignment</parameter> parameter is always a power
of two at least as large as the chunk size. Zeroing is mandatory if of two at least as large as the page size. Zeroing is mandatory if
<parameter>*zero</parameter> is true upon function entry. Committing is <parameter>*zero</parameter> is true upon function entry. Committing is
mandatory if <parameter>*commit</parameter> is true upon function entry. mandatory if <parameter>*commit</parameter> is true upon function entry.
If <parameter>chunk</parameter> is not <constant>NULL</constant>, the If <parameter>new_addr</parameter> is not <constant>NULL</constant>, the
returned pointer must be <parameter>chunk</parameter> on success or returned pointer must be <parameter>new_addr</parameter> on success or
<constant>NULL</constant> on error. Committed memory may be committed <constant>NULL</constant> on error. Committed memory may be committed
in absolute terms as on a system that does not overcommit, or in in absolute terms as on a system that does not overcommit, or in
implicit terms as on a system that overcommits and satisfies physical implicit terms as on a system that overcommits and satisfies physical
memory needs on demand via soft page faults. Note that replacing the memory needs on demand via soft page faults. Note that replacing the
default chunk allocation function makes the arena's <link default extent allocation function makes the arena's <link
linkend="arena.i.dss"><mallctl>arena.&lt;i&gt;.dss</mallctl></link> linkend="arena.i.dss"><mallctl>arena.&lt;i&gt;.dss</mallctl></link>
setting irrelevant.</para> setting irrelevant.</para>
<funcsynopsis><funcprototype> <funcsynopsis><funcprototype>
<funcdef>typedef bool <function>(chunk_dalloc_t)</function></funcdef> <funcdef>typedef bool <function>(extent_dalloc_t)</function></funcdef>
<paramdef>void *<parameter>chunk</parameter></paramdef> <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
<paramdef>void *<parameter>addr</parameter></paramdef>
<paramdef>size_t <parameter>size</parameter></paramdef> <paramdef>size_t <parameter>size</parameter></paramdef>
<paramdef>bool <parameter>committed</parameter></paramdef> <paramdef>bool <parameter>committed</parameter></paramdef>
<paramdef>unsigned <parameter>arena_ind</parameter></paramdef> <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
</funcprototype></funcsynopsis> </funcprototype></funcsynopsis>
<literallayout></literallayout> <literallayout></literallayout>
<para> <para>
A chunk deallocation function conforms to the An extent deallocation function conforms to the
<type>chunk_dalloc_t</type> type and deallocates a <type>extent_dalloc_t</type> type and deallocates an extent at given
<parameter>chunk</parameter> of given <parameter>size</parameter> with <parameter>addr</parameter> and <parameter>size</parameter> with
<parameter>committed</parameter>/decommitted memory as indicated, on <parameter>committed</parameter>/decommitted memory as indicated, on
behalf of arena <parameter>arena_ind</parameter>, returning false upon behalf of arena <parameter>arena_ind</parameter>, returning false upon
success. If the function returns true, this indicates opt-out from success. If the function returns true, this indicates opt-out from
deallocation; the virtual memory mapping associated with the chunk deallocation; the virtual memory mapping associated with the extent
remains mapped, in the same commit state, and available for future use, remains mapped, in the same commit state, and available for future use,
in which case it will be automatically retained for later reuse.</para> in which case it will be automatically retained for later reuse.</para>
<funcsynopsis><funcprototype> <funcsynopsis><funcprototype>
<funcdef>typedef bool <function>(chunk_commit_t)</function></funcdef> <funcdef>typedef void <function>(extent_destroy_t)</function></funcdef>
<paramdef>void *<parameter>chunk</parameter></paramdef> <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
<paramdef>void *<parameter>addr</parameter></paramdef>
<paramdef>size_t <parameter>size</parameter></paramdef>
<paramdef>bool <parameter>committed</parameter></paramdef>
<paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
</funcprototype></funcsynopsis>
<literallayout></literallayout>
<para>
An extent destruction function conforms to the
<type>extent_destroy_t</type> type and unconditionally destroys an
extent at given <parameter>addr</parameter> and
<parameter>size</parameter> with
<parameter>committed</parameter>/decommitted memory as indicated, on
behalf of arena <parameter>arena_ind</parameter>. This function may be
called to destroy retained extents during arena destruction (see <link
linkend="arena.i.destroy"><mallctl>arena.&lt;i&gt;.destroy</mallctl></link>).</para>
<funcsynopsis><funcprototype>
<funcdef>typedef bool <function>(extent_commit_t)</function></funcdef>
<paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
<paramdef>void *<parameter>addr</parameter></paramdef>
<paramdef>size_t <parameter>size</parameter></paramdef> <paramdef>size_t <parameter>size</parameter></paramdef>
<paramdef>size_t <parameter>offset</parameter></paramdef> <paramdef>size_t <parameter>offset</parameter></paramdef>
<paramdef>size_t <parameter>length</parameter></paramdef> <paramdef>size_t <parameter>length</parameter></paramdef>
<paramdef>unsigned <parameter>arena_ind</parameter></paramdef> <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
</funcprototype></funcsynopsis> </funcprototype></funcsynopsis>
<literallayout></literallayout> <literallayout></literallayout>
<para>A chunk commit function conforms to the <para>An extent commit function conforms to the
<type>chunk_commit_t</type> type and commits zeroed physical memory to <type>extent_commit_t</type> type and commits zeroed physical memory to
back pages within a <parameter>chunk</parameter> of given back pages within an extent at given <parameter>addr</parameter> and
<parameter>size</parameter> at <parameter>offset</parameter> bytes, <parameter>size</parameter> at <parameter>offset</parameter> bytes,
extending for <parameter>length</parameter> on behalf of arena extending for <parameter>length</parameter> on behalf of arena
<parameter>arena_ind</parameter>, returning false upon success. <parameter>arena_ind</parameter>, returning false upon success.
@@ -1641,46 +1894,56 @@ typedef struct {
physical memory to satisfy the request.</para> physical memory to satisfy the request.</para>
<funcsynopsis><funcprototype> <funcsynopsis><funcprototype>
<funcdef>typedef bool <function>(chunk_decommit_t)</function></funcdef> <funcdef>typedef bool <function>(extent_decommit_t)</function></funcdef>
<paramdef>void *<parameter>chunk</parameter></paramdef> <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
<paramdef>void *<parameter>addr</parameter></paramdef>
<paramdef>size_t <parameter>size</parameter></paramdef> <paramdef>size_t <parameter>size</parameter></paramdef>
<paramdef>size_t <parameter>offset</parameter></paramdef> <paramdef>size_t <parameter>offset</parameter></paramdef>
<paramdef>size_t <parameter>length</parameter></paramdef> <paramdef>size_t <parameter>length</parameter></paramdef>
<paramdef>unsigned <parameter>arena_ind</parameter></paramdef> <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
</funcprototype></funcsynopsis> </funcprototype></funcsynopsis>
<literallayout></literallayout> <literallayout></literallayout>
<para>A chunk decommit function conforms to the <para>An extent decommit function conforms to the
<type>chunk_decommit_t</type> type and decommits any physical memory <type>extent_decommit_t</type> type and decommits any physical memory
that is backing pages within a <parameter>chunk</parameter> of given that is backing pages within an extent at given
<parameter>size</parameter> at <parameter>offset</parameter> bytes, <parameter>addr</parameter> and <parameter>size</parameter> at
extending for <parameter>length</parameter> on behalf of arena <parameter>offset</parameter> bytes, extending for
<parameter>length</parameter> on behalf of arena
<parameter>arena_ind</parameter>, returning false upon success, in which <parameter>arena_ind</parameter>, returning false upon success, in which
case the pages will be committed via the chunk commit function before case the pages will be committed via the extent commit function before
being reused. If the function returns true, this indicates opt-out from being reused. If the function returns true, this indicates opt-out from
decommit; the memory remains committed and available for future use, in decommit; the memory remains committed and available for future use, in
which case it will be automatically retained for later reuse.</para> which case it will be automatically retained for later reuse.</para>
<funcsynopsis><funcprototype> <funcsynopsis><funcprototype>
<funcdef>typedef bool <function>(chunk_purge_t)</function></funcdef> <funcdef>typedef bool <function>(extent_purge_t)</function></funcdef>
<paramdef>void *<parameter>chunk</parameter></paramdef> <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
<paramdef>size_t<parameter>size</parameter></paramdef> <paramdef>void *<parameter>addr</parameter></paramdef>
<paramdef>size_t <parameter>size</parameter></paramdef>
<paramdef>size_t <parameter>offset</parameter></paramdef> <paramdef>size_t <parameter>offset</parameter></paramdef>
<paramdef>size_t <parameter>length</parameter></paramdef> <paramdef>size_t <parameter>length</parameter></paramdef>
<paramdef>unsigned <parameter>arena_ind</parameter></paramdef> <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
</funcprototype></funcsynopsis> </funcprototype></funcsynopsis>
<literallayout></literallayout> <literallayout></literallayout>
<para>A chunk purge function conforms to the <type>chunk_purge_t</type> <para>An extent purge function conforms to the
type and optionally discards physical pages within the virtual memory <type>extent_purge_t</type> type and discards physical pages
mapping associated with <parameter>chunk</parameter> of given within the virtual memory mapping associated with an extent at given
<parameter>size</parameter> at <parameter>offset</parameter> bytes, <parameter>addr</parameter> and <parameter>size</parameter> at
extending for <parameter>length</parameter> on behalf of arena <parameter>offset</parameter> bytes, extending for
<parameter>arena_ind</parameter>, returning false if pages within the <parameter>length</parameter> on behalf of arena
purged virtual memory range will be zero-filled the next time they are <parameter>arena_ind</parameter>. A lazy extent purge function (e.g.
accessed.</para> implemented via
<function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>)
can delay purging indefinitely and leave the pages within the purged
virtual memory range in an indeterminate state, whereas a forced extent
purge function immediately purges, and the pages within the virtual
memory range will be zero-filled the next time they are accessed. If
the function returns true, this indicates failure to purge.</para>
<funcsynopsis><funcprototype> <funcsynopsis><funcprototype>
<funcdef>typedef bool <function>(chunk_split_t)</function></funcdef> <funcdef>typedef bool <function>(extent_split_t)</function></funcdef>
<paramdef>void *<parameter>chunk</parameter></paramdef> <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
<paramdef>void *<parameter>addr</parameter></paramdef>
<paramdef>size_t <parameter>size</parameter></paramdef> <paramdef>size_t <parameter>size</parameter></paramdef>
<paramdef>size_t <parameter>size_a</parameter></paramdef> <paramdef>size_t <parameter>size_a</parameter></paramdef>
<paramdef>size_t <parameter>size_b</parameter></paramdef> <paramdef>size_t <parameter>size_b</parameter></paramdef>
@@ -1688,35 +1951,36 @@ typedef struct {
<paramdef>unsigned <parameter>arena_ind</parameter></paramdef> <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
</funcprototype></funcsynopsis> </funcprototype></funcsynopsis>
<literallayout></literallayout> <literallayout></literallayout>
<para>A chunk split function conforms to the <type>chunk_split_t</type> <para>An extent split function conforms to the
type and optionally splits <parameter>chunk</parameter> of given <type>extent_split_t</type> type and optionally splits an extent at
<parameter>size</parameter> into two adjacent chunks, the first of given <parameter>addr</parameter> and <parameter>size</parameter> into
<parameter>size_a</parameter> bytes, and the second of two adjacent extents, the first of <parameter>size_a</parameter> bytes,
<parameter>size_b</parameter> bytes, operating on and the second of <parameter>size_b</parameter> bytes, operating on
<parameter>committed</parameter>/decommitted memory as indicated, on <parameter>committed</parameter>/decommitted memory as indicated, on
behalf of arena <parameter>arena_ind</parameter>, returning false upon behalf of arena <parameter>arena_ind</parameter>, returning false upon
success. If the function returns true, this indicates that the chunk success. If the function returns true, this indicates that the extent
remains unsplit and therefore should continue to be operated on as a remains unsplit and therefore should continue to be operated on as a
whole.</para> whole.</para>
<funcsynopsis><funcprototype> <funcsynopsis><funcprototype>
<funcdef>typedef bool <function>(chunk_merge_t)</function></funcdef> <funcdef>typedef bool <function>(extent_merge_t)</function></funcdef>
<paramdef>void *<parameter>chunk_a</parameter></paramdef> <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
<paramdef>void *<parameter>addr_a</parameter></paramdef>
<paramdef>size_t <parameter>size_a</parameter></paramdef> <paramdef>size_t <parameter>size_a</parameter></paramdef>
<paramdef>void *<parameter>chunk_b</parameter></paramdef> <paramdef>void *<parameter>addr_b</parameter></paramdef>
<paramdef>size_t <parameter>size_b</parameter></paramdef> <paramdef>size_t <parameter>size_b</parameter></paramdef>
<paramdef>bool <parameter>committed</parameter></paramdef> <paramdef>bool <parameter>committed</parameter></paramdef>
<paramdef>unsigned <parameter>arena_ind</parameter></paramdef> <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
</funcprototype></funcsynopsis> </funcprototype></funcsynopsis>
<literallayout></literallayout> <literallayout></literallayout>
<para>A chunk merge function conforms to the <type>chunk_merge_t</type> <para>An extent merge function conforms to the
type and optionally merges adjacent chunks, <type>extent_merge_t</type> type and optionally merges adjacent extents,
<parameter>chunk_a</parameter> of given <parameter>size_a</parameter> at given <parameter>addr_a</parameter> and <parameter>size_a</parameter>
and <parameter>chunk_b</parameter> of given with given <parameter>addr_b</parameter> and
<parameter>size_b</parameter> into one contiguous chunk, operating on <parameter>size_b</parameter> into one contiguous extent, operating on
<parameter>committed</parameter>/decommitted memory as indicated, on <parameter>committed</parameter>/decommitted memory as indicated, on
behalf of arena <parameter>arena_ind</parameter>, returning false upon behalf of arena <parameter>arena_ind</parameter>, returning false upon
success. If the function returns true, this indicates that the chunks success. If the function returns true, this indicates that the extents
remain distinct mappings and therefore should continue to be operated on remain distinct mappings and therefore should continue to be operated on
independently.</para> independently.</para>
</listitem> </listitem>
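<para>As an illustrative sketch only (not an example from the original
manual), custom hooks that refuse both operations could look as follows;
the function names are invented here, and the prototypes simply follow the
<type>extent_split_t</type> and <type>extent_merge_t</type> definitions
above:</para>
<programlisting><![CDATA[
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

/* Hypothetical hooks: refuse to split or merge, so extents are managed as
 * indivisible units.  Returning true reports failure to jemalloc. */
static bool
my_extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
    return true;    /* Leave the extent unsplit. */
}

static bool
my_extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
    return true;    /* Keep the extents as distinct mappings. */
}
]]></programlisting>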
@@ -1731,29 +1995,35 @@ typedef struct {
<listitem><para>Current limit on number of arenas.</para></listitem>
</varlistentry>
<varlistentry id="arenas.dirty_decay_ms">
<term>
<mallctl>arenas.dirty_decay_ms</mallctl>
(<type>ssize_t</type>)
<literal>rw</literal>
</term>
<listitem><para>Current default per-arena approximate time in
milliseconds from the creation of a set of unused dirty pages until an
equivalent set of unused dirty pages is purged and/or reused, used to
initialize <link
linkend="arena.i.dirty_decay_ms"><mallctl>arena.&lt;i&gt;.dirty_decay_ms</mallctl></link>
during arena creation.  See <link
linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
for additional information.</para></listitem>
</varlistentry>
<varlistentry id="arenas.muzzy_decay_ms">
<term>
<mallctl>arenas.muzzy_decay_ms</mallctl>
(<type>ssize_t</type>)
<literal>rw</literal>
</term>
<listitem><para>Current default per-arena approximate time in
milliseconds from the creation of a set of unused muzzy pages until an
equivalent set of unused muzzy pages is purged and/or reused, used to
initialize <link
linkend="arena.i.muzzy_decay_ms"><mallctl>arena.&lt;i&gt;.muzzy_decay_ms</mallctl></link>
during arena creation.  See <link
linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
for additional information.</para></listitem>
</varlistentry>
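<para>As a hedged illustration (not part of the original manual), these
defaults can be adjusted at run time through
<function>mallctl()</function>; the values chosen below are arbitrary:</para>
<programlisting><![CDATA[
#include <jemalloc/jemalloc.h>

/* Sketch: raise the default dirty decay time to 30 seconds and disable
 * muzzy decay (-1 disables purging); affects subsequently created arenas. */
void
set_default_decay(void) {
    ssize_t dirty_ms = 30 * 1000;
    ssize_t muzzy_ms = -1;

    mallctl("arenas.dirty_decay_ms", NULL, NULL, &dirty_ms,
        sizeof(dirty_ms));
    mallctl("arenas.muzzy_decay_ms", NULL, NULL, &muzzy_ms,
        sizeof(muzzy_ms));
}
]]></programlisting>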
@@ -1780,7 +2050,6 @@ typedef struct {
<mallctl>arenas.tcache_max</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Maximum thread-cached size class.</para></listitem>
</varlistentry>
@@ -1799,7 +2068,6 @@ typedef struct {
<mallctl>arenas.nhbins</mallctl>
(<type>unsigned</type>)
<literal>r-</literal>
</term>
<listitem><para>Total number of thread cache bin size
classes.</para></listitem>
@@ -1820,30 +2088,30 @@ typedef struct {
(<type>uint32_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Number of regions per slab.</para></listitem>
</varlistentry>
<varlistentry id="arenas.bin.i.slab_size">
<term>
<mallctl>arenas.bin.&lt;i&gt;.slab_size</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Number of bytes per slab.</para></listitem>
</varlistentry>
<varlistentry id="arenas.nlextents">
<term>
<mallctl>arenas.nlextents</mallctl>
(<type>unsigned</type>)
<literal>r-</literal>
</term>
<listitem><para>Total number of large size classes.</para></listitem>
</varlistentry>
<varlistentry id="arenas.lextent.i.size">
<term>
<mallctl>arenas.lextent.&lt;i&gt;.size</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
</term>
@@ -1851,33 +2119,24 @@ typedef struct {
class.</para></listitem>
</varlistentry>
<varlistentry id="arenas.create">
<term>
<mallctl>arenas.create</mallctl>
(<type>unsigned</type>, <type>extent_hooks_t *</type>)
<literal>rw</literal>
</term>
<listitem><para>Explicitly create a new arena outside the range of
automatically managed arenas, with optionally specified extent hooks,
and return the new arena index.</para></listitem>
</varlistentry>
<varlistentry id="arenas.extend"> <varlistentry id="arenas.lookup">
<term> <term>
<mallctl>arenas.extend</mallctl> <mallctl>arenas.lookup</mallctl>
(<type>unsigned</type>) (<type>unsigned</type>, <type>void*</type>)
<literal>r-</literal> <literal>rw</literal>
</term> </term>
<listitem><para>Extend the array of arenas by appending a new arena, <listitem><para>Index of the arena to which an allocation belongs to.</para></listitem>
and returning the new arena index.</para></listitem>
</varlistentry> </varlistentry>
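<para>The following sketch (hypothetical, with invented variable names)
shows how <mallctl>arenas.create</mallctl> and
<mallctl>arenas.lookup</mallctl> might be used together with
<function>mallocx()</function>:</para>
<programlisting><![CDATA[
#include <jemalloc/jemalloc.h>

void
arena_create_example(void) {
    unsigned arena_ind, owner;
    size_t sz = sizeof(unsigned);
    void *p;

    /* Create a manually managed arena with the default extent hooks
     * (custom hooks could instead be supplied via newp). */
    mallctl("arenas.create", &arena_ind, &sz, NULL, 0);

    /* Allocate from that arena explicitly. */
    p = mallocx(42, MALLOCX_ARENA(arena_ind));

    /* Ask which arena the allocation belongs to; owner == arena_ind. */
    mallctl("arenas.lookup", &owner, &sz, &p, sizeof(p));

    dallocx(p, 0);
}
]]></programlisting>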
<varlistentry id="prof.thread_active_init"> <varlistentry id="prof.thread_active_init">
...@@ -1976,30 +2235,12 @@ typedef struct { ...@@ -1976,30 +2235,12 @@ typedef struct {
[<option>--enable-prof</option>] [<option>--enable-prof</option>]
</term> </term>
<listitem><para>Average number of bytes allocated between <listitem><para>Average number of bytes allocated between
inverval-based profile dumps. See the interval-based profile dumps. See the
<link <link
linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link> linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
option for additional information.</para></listitem> option for additional information.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.cactive">
<term>
<mallctl>stats.cactive</mallctl>
(<type>size_t *</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Pointer to a counter that contains an approximate count
of the current number of bytes in active pages. The estimate may be
high, but never low, because each arena rounds up when computing its
contribution to the counter. Note that the <link
linkend="epoch"><mallctl>epoch</mallctl></link> mallctl has no bearing
on this counter. Furthermore, counter consistency is maintained via
atomic operations, so it is necessary to use an atomic operation in
order to guarantee a consistent read when dereferencing the pointer.
</para></listitem>
</varlistentry>
<varlistentry id="stats.allocated"> <varlistentry id="stats.allocated">
<term> <term>
<mallctl>stats.allocated</mallctl> <mallctl>stats.allocated</mallctl>
...@@ -2023,7 +2264,9 @@ typedef struct { ...@@ -2023,7 +2264,9 @@ typedef struct {
equal to <link equal to <link
linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link>. linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link>.
This does not include <link linkend="stats.arenas.i.pdirty"> This does not include <link linkend="stats.arenas.i.pdirty">
<mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl></link>, nor pages <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl></link>,
<link linkend="stats.arenas.i.pmuzzy">
<mallctl>stats.arenas.&lt;i&gt;.pmuzzy</mallctl></link>, nor pages
entirely devoted to allocator metadata.</para></listitem> entirely devoted to allocator metadata.</para></listitem>
</varlistentry> </varlistentry>
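<para>As an illustrative sketch (not from the original manual), the global
counters above can be read through <function>mallctl()</function>; the
<mallctl>epoch</mallctl> control documented in the MALLCTL NAMESPACE
section is assumed to be used to refresh the cached statistics first:</para>
<programlisting><![CDATA[
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

void
print_global_stats(void) {
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);
    size_t allocated, active, resident, mapped;

    /* Refresh the statistics snapshot before reading it. */
    mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

    sz = sizeof(size_t);
    mallctl("stats.allocated", &allocated, &sz, NULL, 0);
    mallctl("stats.active", &active, &sz, NULL, 0);
    mallctl("stats.resident", &resident, &sz, NULL, 0);
    mallctl("stats.mapped", &mapped, &sz, NULL, 0);
    printf("allocated/active/resident/mapped: %zu/%zu/%zu/%zu\n",
        allocated, active, resident, mapped);
}
]]></programlisting>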
@@ -2035,11 +2278,28 @@ typedef struct {
[<option>--enable-stats</option>]
</term>
<listitem><para>Total number of bytes dedicated to metadata, which
comprise base allocations used for bootstrap-sensitive allocator
metadata structures (see <link
linkend="stats.arenas.i.base"><mallctl>stats.arenas.&lt;i&gt;.base</mallctl></link>)
and internal allocations (see <link
linkend="stats.arenas.i.internal"><mallctl>stats.arenas.&lt;i&gt;.internal</mallctl></link>).
Transparent huge page (enabled with <link
linkend="opt.metadata_thp">opt.metadata_thp</link>) usage is not
considered.</para></listitem>
</varlistentry>
<varlistentry id="stats.metadata_thp">
<term>
<mallctl>stats.metadata_thp</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Number of transparent huge pages (THP) used for
metadata. See <link
linkend="stats.metadata"><mallctl>stats.metadata</mallctl></link> and
<link linkend="opt.metadata_thp">opt.metadata_thp</link>) for
details.</para></listitem>
</varlistentry>
<varlistentry id="stats.resident">
@@ -2066,15 +2326,155 @@ typedef struct {
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Total number of bytes in active extents mapped by the
allocator.  This is larger than <link
linkend="stats.active"><mallctl>stats.active</mallctl></link>.  This
does not include inactive extents, even those that contain unused dirty
pages, which means that there is no strict ordering between this and
<link
linkend="stats.resident"><mallctl>stats.resident</mallctl></link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.retained">
<term>
<mallctl>stats.retained</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Total number of bytes in virtual memory mappings that
were retained rather than being returned to the operating system via
e.g. <citerefentry><refentrytitle>munmap</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> or similar. Retained virtual
memory is typically untouched, decommitted, or purged, so it has no
strongly associated physical memory (see <link
linkend="arena.i.extent_hooks">extent hooks</link> for details).
Retained memory is excluded from mapped memory statistics, e.g. <link
linkend="stats.mapped"><mallctl>stats.mapped</mallctl></link>.
</para></listitem>
</varlistentry>
<varlistentry id="stats.background_thread.num_threads">
<term>
<mallctl>stats.background_thread.num_threads</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para> Number of <link linkend="background_thread">background
threads</link> running currently.</para></listitem>
</varlistentry>
<varlistentry id="stats.background_thread.num_runs">
<term>
<mallctl>stats.background_thread.num_runs</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para> Total number of runs from all <link
linkend="background_thread">background threads</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.background_thread.run_interval">
<term>
<mallctl>stats.background_thread.run_interval</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para> Average run interval in nanoseconds of <link
linkend="background_thread">background threads</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.mutexes.ctl">
<term>
<mallctl>stats.mutexes.ctl.{counter}</mallctl>
(<type>counter specific type</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on <varname>ctl</varname> mutex (global
scope; mallctl related). <mallctl>{counter}</mallctl> is one of the
counters below:</para>
<varlistentry id="mutex_counters">
<listitem><para><varname>num_ops</varname> (<type>uint64_t</type>):
Total number of lock acquisition operations on this mutex.</para>
<para><varname>num_spin_acq</varname> (<type>uint64_t</type>): Number
of times the mutex was spin-acquired.  When the mutex is currently
locked and cannot be acquired immediately, jemalloc performs a short
period of spin-retry.  Acquiring the mutex through spinning generally
means the contention was lightweight and did not cause context
switches.</para>
<para><varname>num_wait</varname> (<type>uint64_t</type>): Number of
times the mutex was wait-acquired, meaning the contention was not
resolved by spin-retry and a blocking operation was likely required to
acquire the mutex.  This event generally implies higher cost / longer
delay, and should be investigated if it happens often.</para>
<para><varname>max_wait_time</varname> (<type>uint64_t</type>):
Maximum length of time in nanoseconds spent on a single wait-acquired
lock operation. Note that to avoid profiling overhead on the common
path, this does not consider spin-acquired cases.</para>
<para><varname>total_wait_time</varname> (<type>uint64_t</type>):
Cumulative time in nanoseconds spent on wait-acquired lock operations.
Similarly, spin-acquired cases are not considered.</para>
<para><varname>max_num_thds</varname> (<type>uint32_t</type>): Maximum
number of threads waiting on this mutex simultaneously. Similarly,
spin-acquired cases are not considered.</para>
<para><varname>num_owner_switch</varname> (<type>uint64_t</type>):
Number of times the current mutex owner is different from the previous
one. This event does not generally imply an issue; rather it is an
indicator of how often the protected data are accessed by different
threads.
</para>
</listitem>
</varlistentry>
</listitem>
</varlistentry>
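<para>For illustration (a hedged sketch, not from the original manual),
individual counters are read by appending the counter name to the mutex
prefix:</para>
<programlisting><![CDATA[
#include <stdint.h>
#include <jemalloc/jemalloc.h>

/* Sketch: read two profiling counters for the global ctl mutex. */
void
read_ctl_mutex_counters(uint64_t *num_wait, uint64_t *max_wait_time) {
    size_t sz = sizeof(uint64_t);

    mallctl("stats.mutexes.ctl.num_wait", num_wait, &sz, NULL, 0);
    mallctl("stats.mutexes.ctl.max_wait_time", max_wait_time, &sz,
        NULL, 0);
}
]]></programlisting>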
<varlistentry id="stats.mutexes.background_thread">
<term>
<mallctl>stats.mutexes.background_thread.{counter}</mallctl>
(<type>counter specific type</type>) <literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on <varname>background_thread</varname> mutex
(global scope; <link
linkend="background_thread"><mallctl>background_thread</mallctl></link>
related). <mallctl>{counter}</mallctl> is one of the counters in <link
linkend="mutex_counters">mutex profiling
counters</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.mutexes.prof">
<term>
<mallctl>stats.mutexes.prof.{counter}</mallctl>
(<type>counter specific type</type>) <literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on <varname>prof</varname> mutex (global
scope; profiling related). <mallctl>{counter}</mallctl> is one of the
counters in <link linkend="mutex_counters">mutex profiling
counters</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.mutexes.reset">
<term>
<mallctl>stats.mutexes.reset</mallctl>
(<type>void</type>) <literal>--</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Reset all mutex profile statistics, including global
mutexes, arena mutexes and bin mutexes.</para></listitem>
</varlistentry>
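<para>A minimal sketch (assuming statistics are enabled) of clearing the
counters at the start of a measurement window:</para>
<programlisting><![CDATA[
#include <jemalloc/jemalloc.h>

/* Sketch: reset all mutex profiling statistics. */
void
reset_mutex_stats(void) {
    mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
}
]]></programlisting>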
<varlistentry id="stats.arenas.i.dss"> <varlistentry id="stats.arenas.i.dss">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.dss</mallctl> <mallctl>stats.arenas.&lt;i&gt;.dss</mallctl>
...@@ -2089,15 +2489,29 @@ typedef struct { ...@@ -2089,15 +2489,29 @@ typedef struct {
</para></listitem> </para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.lg_dirty_mult"> <varlistentry id="stats.arenas.i.dirty_decay_ms">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.lg_dirty_mult</mallctl> <mallctl>stats.arenas.&lt;i&gt;.dirty_decay_ms</mallctl>
(<type>ssize_t</type>) (<type>ssize_t</type>)
<literal>r-</literal> <literal>r-</literal>
</term> </term>
<listitem><para>Minimum ratio (log base 2) of active to dirty pages. <listitem><para>Approximate time in milliseconds from the creation of a
See <link set of unused dirty pages until an equivalent set of unused dirty pages
linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link> is purged and/or reused. See <link
linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
for details.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.muzzy_decay_ms">
<term>
<mallctl>stats.arenas.&lt;i&gt;.muzzy_decay_ms</mallctl>
(<type>ssize_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Approximate time in milliseconds from the creation of a
set of unused muzzy pages until an equivalent set of unused muzzy pages
is purged and/or reused. See <link
linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
for details.</para></listitem>
</varlistentry>
@@ -2111,13 +2525,25 @@ typedef struct {
arena.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.uptime">
<term>
<mallctl>stats.arenas.&lt;i&gt;.uptime</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Time elapsed (in nanoseconds) since the arena was
created. If &lt;i&gt; equals <constant>0</constant> or
<constant>MALLCTL_ARENAS_ALL</constant>, this is the uptime since malloc
initialization.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.pactive"> <varlistentry id="stats.arenas.i.pactive">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.pactive</mallctl> <mallctl>stats.arenas.&lt;i&gt;.pactive</mallctl>
(<type>size_t</type>) (<type>size_t</type>)
<literal>r-</literal> <literal>r-</literal>
</term> </term>
<listitem><para>Number of pages in active runs.</para></listitem> <listitem><para>Number of pages in active extents.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.pdirty"> <varlistentry id="stats.arenas.i.pdirty">
@@ -2126,10 +2552,23 @@ typedef struct {
(<type>size_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Number of pages within unused extents that are
potentially dirty, and for which <function>madvise()</function> or
similar has not been called.  See <link
linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
for a description of dirty pages.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.pmuzzy">
<term>
<mallctl>stats.arenas.&lt;i&gt;.pmuzzy</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Number of pages within unused extents that are muzzy.
See <link
linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
for a description of muzzy pages.</para></listitem>
</varlistentry>
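<para>As a hedged sketch (not part of the original manual), per-arena
statistics such as these are conveniently read by translating the name to a
MIB once and then patching the arena index, using the
<function>mallctlnametomib()</function> and
<function>mallctlbymib()</function> functions:</para>
<programlisting><![CDATA[
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Sketch: print the dirty page count for the first narenas arenas.  The
 * arena index is MIB component 2 in "stats.arenas.<i>.pdirty". */
void
print_pdirty(unsigned narenas) {
    size_t mib[4];
    size_t miblen = sizeof(mib) / sizeof(mib[0]);
    unsigned i;

    mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen);
    for (i = 0; i < narenas; i++) {
        size_t pdirty, sz = sizeof(pdirty);

        mib[2] = (size_t)i;
        mallctlbymib(mib, miblen, &pdirty, &sz, NULL, 0);
        printf("arena %u: %zu dirty pages\n", i, pdirty);
    }
}
]]></programlisting>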
<varlistentry id="stats.arenas.i.mapped"> <varlistentry id="stats.arenas.i.mapped">
...@@ -2142,20 +2581,33 @@ typedef struct { ...@@ -2142,20 +2581,33 @@ typedef struct {
<listitem><para>Number of mapped bytes.</para></listitem> <listitem><para>Number of mapped bytes.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.metadata.mapped"> <varlistentry id="stats.arenas.i.retained">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.metadata.mapped</mallctl> <mallctl>stats.arenas.&lt;i&gt;.retained</mallctl>
(<type>size_t</type>) (<type>size_t</type>)
<literal>r-</literal> <literal>r-</literal>
[<option>--enable-stats</option>] [<option>--enable-stats</option>]
</term> </term>
<listitem><para>Number of mapped bytes in arena chunk headers, which <listitem><para>Number of retained bytes. See <link
track the states of the non-metadata pages.</para></listitem> linkend="stats.retained"><mallctl>stats.retained</mallctl></link> for
details.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.metadata.allocated"> <varlistentry id="stats.arenas.i.base">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.metadata.allocated</mallctl> <mallctl>stats.arenas.&lt;i&gt;.base</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>
Number of bytes dedicated to bootstrap-sensitive allocator metadata
structures.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.internal">
<term>
<mallctl>stats.arenas.&lt;i&gt;.internal</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
@@ -2163,180 +2615,199 @@ typedef struct {
<listitem><para>Number of bytes dedicated to internal allocations.
Internal allocations differ from application-originated allocations in
that they are for internal use, and that they are omitted from heap
profiles.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.npurge"> <varlistentry id="stats.arenas.i.metadata_thp">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.npurge</mallctl> <mallctl>stats.arenas.&lt;i&gt;.metadata_thp</mallctl>
(<type>uint64_t</type>) (<type>size_t</type>)
<literal>r-</literal> <literal>r-</literal>
[<option>--enable-stats</option>] [<option>--enable-stats</option>]
</term> </term>
<listitem><para>Number of dirty page purge sweeps performed. <listitem><para>Number of transparent huge pages (THP) used for
</para></listitem> metadata. See <link linkend="opt.metadata_thp">opt.metadata_thp</link>
for details.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.nmadvise"> <varlistentry id="stats.arenas.i.resident">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.nmadvise</mallctl> <mallctl>stats.arenas.&lt;i&gt;.resident</mallctl>
(<type>uint64_t</type>) (<type>size_t</type>)
<literal>r-</literal> <literal>r-</literal>
[<option>--enable-stats</option>] [<option>--enable-stats</option>]
</term> </term>
<listitem><para>Number of <function>madvise<parameter>...</parameter> <listitem><para>Maximum number of bytes in physically resident data
<parameter><constant>MADV_DONTNEED</constant></parameter></function> or pages mapped by the arena, comprising all pages dedicated to allocator
similar calls made to purge dirty pages.</para></listitem> metadata, pages backing active allocations, and unused dirty pages.
This is a maximum rather than precise because pages may not actually be
physically resident if they correspond to demand-zeroed virtual memory
that has not yet been touched. This is a multiple of the page
size.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.purged"> <varlistentry id="stats.arenas.i.dirty_npurge">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.purged</mallctl> <mallctl>stats.arenas.&lt;i&gt;.dirty_npurge</mallctl>
(<type>uint64_t</type>) (<type>uint64_t</type>)
<literal>r-</literal> <literal>r-</literal>
[<option>--enable-stats</option>] [<option>--enable-stats</option>]
</term> </term>
<listitem><para>Number of pages purged.</para></listitem> <listitem><para>Number of dirty page purge sweeps performed.
</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.small.allocated"> <varlistentry id="stats.arenas.i.dirty_nmadvise">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.small.allocated</mallctl> <mallctl>stats.arenas.&lt;i&gt;.dirty_nmadvise</mallctl>
(<type>size_t</type>) (<type>uint64_t</type>)
<literal>r-</literal> <literal>r-</literal>
[<option>--enable-stats</option>] [<option>--enable-stats</option>]
</term> </term>
<listitem><para>Number of bytes currently allocated by small objects. <listitem><para>Number of <function>madvise()</function> or similar
</para></listitem> calls made to purge dirty pages.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.small.nmalloc"> <varlistentry id="stats.arenas.i.dirty_purged">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.small.nmalloc</mallctl> <mallctl>stats.arenas.&lt;i&gt;.dirty_purged</mallctl>
(<type>uint64_t</type>) (<type>uint64_t</type>)
<literal>r-</literal> <literal>r-</literal>
[<option>--enable-stats</option>] [<option>--enable-stats</option>]
</term> </term>
<listitem><para>Cumulative number of allocation requests served by <listitem><para>Number of dirty pages purged.</para></listitem>
small bins.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.small.ndalloc"> <varlistentry id="stats.arenas.i.muzzy_npurge">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.small.ndalloc</mallctl> <mallctl>stats.arenas.&lt;i&gt;.muzzy_npurge</mallctl>
(<type>uint64_t</type>) (<type>uint64_t</type>)
<literal>r-</literal> <literal>r-</literal>
[<option>--enable-stats</option>] [<option>--enable-stats</option>]
</term> </term>
<listitem><para>Cumulative number of small objects returned to bins. <listitem><para>Number of muzzy page purge sweeps performed.
</para></listitem> </para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.small.nrequests"> <varlistentry id="stats.arenas.i.muzzy_nmadvise">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.small.nrequests</mallctl> <mallctl>stats.arenas.&lt;i&gt;.muzzy_nmadvise</mallctl>
(<type>uint64_t</type>) (<type>uint64_t</type>)
<literal>r-</literal> <literal>r-</literal>
[<option>--enable-stats</option>] [<option>--enable-stats</option>]
</term> </term>
<listitem><para>Cumulative number of small allocation requests. <listitem><para>Number of <function>madvise()</function> or similar
</para></listitem> calls made to purge muzzy pages.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.large.allocated"> <varlistentry id="stats.arenas.i.muzzy_purged">
<term> <term>
<mallctl>stats.arenas.&lt;i&gt;.large.allocated</mallctl> <mallctl>stats.arenas.&lt;i&gt;.muzzy_purged</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Number of muzzy pages purged.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.small.allocated">
<term>
<mallctl>stats.arenas.&lt;i&gt;.small.allocated</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Number of bytes currently allocated by small objects.
</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.small.nmalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.small.nmalloc</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of times a small allocation was
requested from the arena's bins, whether to fill the relevant tcache if
<link linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is
enabled, or to directly satisfy an allocation request
otherwise.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.small.ndalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.small.ndalloc</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of times a small allocation was
returned to the arena's bins, whether to flush the relevant tcache if
<link linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is
enabled, or to directly deallocate an allocation
otherwise.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.small.nrequests">
<term>
<mallctl>stats.arenas.&lt;i&gt;.small.nrequests</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of allocation requests satisfied by
all bin size classes.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.large.allocated">
<term>
<mallctl>stats.arenas.&lt;i&gt;.large.allocated</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Number of bytes currently allocated by large objects.
</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.large.nmalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.large.nmalloc</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of times a large extent was allocated
from the arena, whether to fill the relevant tcache if <link
linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
the size class is within the range being cached, or to directly satisfy
an allocation request otherwise.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.large.ndalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.large.ndalloc</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of times a large extent was returned
to the arena, whether to flush the relevant tcache if <link
linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
the size class is within the range being cached, or to directly
deallocate an allocation otherwise.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.large.nrequests">
<term>
<mallctl>stats.arenas.&lt;i&gt;.large.nrequests</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of allocation requests satisfied by
all large size classes.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.j.nmalloc">
@@ -2346,8 +2817,11 @@ typedef struct {
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of times a bin region of the
corresponding size class was allocated from the arena, whether to fill
the relevant tcache if <link
linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled, or
to directly satisfy an allocation request otherwise.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.j.ndalloc">
@@ -2357,8 +2831,11 @@ typedef struct {
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of times a bin region of the
corresponding size class was returned to the arena, whether to flush the
relevant tcache if <link
linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled, or
to directly deallocate an allocation otherwise.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.j.nrequests">
@@ -2368,8 +2845,8 @@ typedef struct {
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of allocation requests satisfied by
bin regions of the corresponding size class.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.j.curregs">
@@ -2388,7 +2865,6 @@ typedef struct {
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Cumulative number of tcache fills.</para></listitem>
</varlistentry>
@@ -2398,131 +2874,273 @@ typedef struct {
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nflushes</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Cumulative number of tcache flushes.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.j.nslabs">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nslabs</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of slabs created.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.j.nreslabs">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nreslabs</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of times the current slab from which
to allocate changed.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.j.curslabs">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curslabs</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Current number of slabs.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.mutex">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.mutex.{counter}</mallctl>
(<type>counter specific type</type>) <literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on
<varname>arena.&lt;i&gt;.bins.&lt;j&gt;</varname> mutex (arena bin
scope; bin operation related). <mallctl>{counter}</mallctl> is one of
the counters in <link linkend="mutex_counters">mutex profiling
counters</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.lextents.j.nmalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.nmalloc</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of times a large extent of the
corresponding size class was allocated from the arena, whether to fill
the relevant tcache if <link
linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
the size class is within the range being cached, or to directly satisfy
an allocation request otherwise.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.lextents.j.ndalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.ndalloc</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of times a large extent of the
corresponding size class was returned to the arena, whether to flush the
relevant tcache if <link
linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
the size class is within the range being cached, or to directly
deallocate an allocation otherwise.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.lextents.j.nrequests">
<term>
<mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.nrequests</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Cumulative number of allocation requests satisfied by
large extents of the corresponding size class.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.lextents.j.curlextents">
<term>
<mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.curlextents</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Current number of large allocations for this size class.
</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.mutexes.large">
<term>
<mallctl>stats.arenas.&lt;i&gt;.mutexes.large.{counter}</mallctl>
(<type>counter specific type</type>) <literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on <varname>arena.&lt;i&gt;.large</varname>
mutex (arena scope; large allocation related).
<mallctl>{counter}</mallctl> is one of the counters in <link
linkend="mutex_counters">mutex profiling
counters</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.mutexes.extent_avail">
<term>
<mallctl>stats.arenas.&lt;i&gt;.mutexes.extent_avail.{counter}</mallctl>
(<type>counter specific type</type>) <literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on <varname>arena.&lt;i&gt;.extent_avail
</varname> mutex (arena scope; extent avail related).
<mallctl>{counter}</mallctl> is one of the counters in <link
linkend="mutex_counters">mutex profiling
counters</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.mutexes.extents_dirty">
<term>
<mallctl>stats.arenas.&lt;i&gt;.mutexes.extents_dirty.{counter}</mallctl>
(<type>counter specific type</type>) <literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on <varname>arena.&lt;i&gt;.extents_dirty
</varname> mutex (arena scope; dirty extents related).
<mallctl>{counter}</mallctl> is one of the counters in <link
linkend="mutex_counters">mutex profiling
counters</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.mutexes.extents_muzzy">
<term>
<mallctl>stats.arenas.&lt;i&gt;.mutexes.extents_muzzy.{counter}</mallctl>
(<type>counter specific type</type>) <literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on <varname>arena.&lt;i&gt;.extents_muzzy
</varname> mutex (arena scope; muzzy extents related).
<mallctl>{counter}</mallctl> is one of the counters in <link
linkend="mutex_counters">mutex profiling
counters</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.mutexes.extents_retained">
<term>
<mallctl>stats.arenas.&lt;i&gt;.mutexes.extents_retained.{counter}</mallctl>
(<type>counter specific type</type>) <literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on <varname>arena.&lt;i&gt;.extents_retained
</varname> mutex (arena scope; retained extents related).
<mallctl>{counter}</mallctl> is one of the counters in <link
linkend="mutex_counters">mutex profiling
counters</link>.</para></listitem>
</varlistentry> </varlistentry>
<varlistentry id="stats.arenas.i.mutexes.decay_dirty">
<term>
<mallctl>stats.arenas.&lt;i&gt;.mutexes.decay_dirty.{counter}</mallctl>
(<type>counter specific type</type>) <literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on <varname>arena.&lt;i&gt;.decay_dirty
</varname> mutex (arena scope; decay for dirty pages related).
<mallctl>{counter}</mallctl> is one of the counters in <link
linkend="mutex_counters">mutex profiling
counters</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.mutexes.decay_muzzy">
<term>
<mallctl>stats.arenas.&lt;i&gt;.mutexes.decay_muzzy.{counter}</mallctl>
(<type>counter specific type</type>) <literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on <varname>arena.&lt;i&gt;.decay_muzzy
</varname> mutex (arena scope; decay for muzzy pages related).
<mallctl>{counter}</mallctl> is one of the counters in <link
linkend="mutex_counters">mutex profiling
counters</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.mutexes.base">
<term>
<mallctl>stats.arenas.&lt;i&gt;.mutexes.base.{counter}</mallctl>
(<type>counter specific type</type>) <literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on <varname>arena.&lt;i&gt;.base</varname>
mutex (arena scope; base allocator related).
<mallctl>{counter}</mallctl> is one of the counters in <link
linkend="mutex_counters">mutex profiling
counters</link>.</para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.mutexes.tcache_list">
<term>
<mallctl>stats.arenas.&lt;i&gt;.mutexes.tcache_list.{counter}</mallctl>
(<type>counter specific type</type>) <literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Statistics on
<varname>arena.&lt;i&gt;.tcache_list</varname> mutex (arena scope;
tcache to arena association related). This mutex is expected to be
accessed less often. <mallctl>{counter}</mallctl> is one of the
counters in <link linkend="mutex_counters">mutex profiling
counters</link>.</para></listitem>
</varlistentry>
</variablelist>
</refsect1>
<refsect1 id="heap_profile_format">
<title>HEAP PROFILE FORMAT</title>
<para>Although the heap profiling functionality was originally designed to
be compatible with the
<command>pprof</command> command that is developed as part of the <ulink
url="http://code.google.com/p/gperftools/">gperftools
package</ulink>, the addition of per thread heap profiling functionality
required a different heap profile format. The <command>jeprof</command>
command is derived from <command>pprof</command>, with enhancements to
support the heap profile format described here.</para>
<para>In the following hypothetical heap profile, <constant>[...]</constant>
indicates elision for the sake of compactness. <programlisting><![CDATA[
heap_v2/524288
t*: 28106: 56637512 [0: 0]
[...]
t3: 352: 16777344 [0: 0]
[...]
t99: 17754: 29341640 [0: 0]
[...]
@ 0x5f86da8 0x5f5a1dc [...] 0x29e4d4e 0xa200316 0xabb2988 [...]
t*: 13: 6688 [0: 0]
  t3: 12: 6496 [0: 0]
t99: 1: 192 [0: 0]
[...]
MAPPED_LIBRARIES:
[...]]]></programlisting> The following matches the above heap profile, but most
tokens are replaced with <constant>&lt;description&gt;</constant> to indicate
descriptions of the corresponding fields. <programlisting><![CDATA[
<heap_profile_format_version>/<mean_sample_interval>
<aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
[...]
<thread_3_aggregate>: <curobjs>: <curbytes>[<cumobjs>: <cumbytes>]
[...]
<thread_99_aggregate>: <curobjs>: <curbytes>[<cumobjs>: <cumbytes>]
[...]
@ <top_frame> <frame> [...] <frame> <frame> <frame> [...]
<backtrace_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
<backtrace_thread_3>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
<backtrace_thread_99>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
[...]
MAPPED_LIBRARIES:
</proc/<pid>/maps>]]></programlisting></para>
</refsect1>
<refsect1 id="debugging_malloc_problems"> <refsect1 id="debugging_malloc_problems">
<title>DEBUGGING MALLOC PROBLEMS</title> <title>DEBUGGING MALLOC PROBLEMS</title>
<para>When debugging, it is a good idea to configure/build jemalloc with <para>When debugging, it is a good idea to configure/build jemalloc with
...@@ -2532,7 +3150,7 @@ typedef struct { ...@@ -2532,7 +3150,7 @@ typedef struct {
of run-time assertions that catch application errors such as double-free, of run-time assertions that catch application errors such as double-free,
write-after-free, etc.</para> write-after-free, etc.</para>
<para>Programs often accidentally depend on &ldquo;uninitialized&rdquo; <para>Programs often accidentally depend on <quote>uninitialized</quote>
memory actually being filled with zero bytes. Junk filling memory actually being filled with zero bytes. Junk filling
(see the <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link> (see the <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link>
option) tends to expose such bugs in the form of obviously incorrect option) tends to expose such bugs in the form of obviously incorrect
...@@ -2544,9 +3162,7 @@ typedef struct { ...@@ -2544,9 +3162,7 @@ typedef struct {
<para>This implementation does not provide much detail about the problems <para>This implementation does not provide much detail about the problems
it detects, because the performance impact for storing such information it detects, because the performance impact for storing such information
would be prohibitive. However, jemalloc does integrate with the most would be prohibitive.</para>
excellent <ulink url="http://valgrind.org/">Valgrind</ulink> tool if the
<option>--enable-valgrind</option> configuration option is enabled.</para>
</refsect1> </refsect1>
<refsect1 id="diagnostic_messages"> <refsect1 id="diagnostic_messages">
<title>DIAGNOSTIC MESSAGES</title> <title>DIAGNOSTIC MESSAGES</title>
...@@ -2561,29 +3177,29 @@ typedef struct { ...@@ -2561,29 +3177,29 @@ typedef struct {
to override the function which emits the text strings forming the errors
and warnings if for some reason the <constant>STDERR_FILENO</constant> file
descriptor is not suitable for this.
<function>malloc_message()</function> takes the
<parameter>cbopaque</parameter> pointer argument that is
<constant>NULL</constant> unless overridden by the arguments in a call to
<function>malloc_stats_print()</function>, followed by a string
pointer.  Please note that doing anything which tries to allocate memory in
this function is likely to result in a crash or deadlock.</para>
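<para>As an illustrative sketch (with invented callback and function names),
a custom write callback can also be supplied directly to
<function>malloc_stats_print()</function>, which avoids writing to
<constant>STDERR_FILENO</constant> without globally overriding
<function>malloc_message()</function>:</para>
<programlisting><![CDATA[
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Sketch: route the statistics report to an arbitrary FILE *. */
static void
write_cb(void *cbopaque, const char *s) {
    fputs(s, (FILE *)cbopaque);
}

void
dump_stats_to(FILE *f) {
    malloc_stats_print(write_cb, f, NULL);  /* NULL opts: full report. */
}
]]></programlisting>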
<para>All messages are prefixed by <para>All messages are prefixed by
&ldquo;<computeroutput>&lt;jemalloc&gt;: </computeroutput>&rdquo;.</para> <quote><computeroutput>&lt;jemalloc&gt;: </computeroutput></quote>.</para>
</refsect1> </refsect1>
<refsect1 id="return_values"> <refsect1 id="return_values">
<title>RETURN VALUES</title> <title>RETURN VALUES</title>
<refsect2> <refsect2>
<title>Standard API</title> <title>Standard API</title>
<para>The <function>malloc<parameter/></function> and <para>The <function>malloc()</function> and
<function>calloc<parameter/></function> functions return a pointer to the <function>calloc()</function> functions return a pointer to the
allocated memory if successful; otherwise a <constant>NULL</constant> allocated memory if successful; otherwise a <constant>NULL</constant>
pointer is returned and <varname>errno</varname> is set to pointer is returned and <varname>errno</varname> is set to
<errorname>ENOMEM</errorname>.</para> <errorname>ENOMEM</errorname>.</para>
<para>The <function>posix_memalign<parameter/></function> function <para>The <function>posix_memalign()</function> function
returns the value 0 if successful; otherwise it returns an error value. returns the value 0 if successful; otherwise it returns an error value.
The <function>posix_memalign<parameter/></function> function will fail The <function>posix_memalign()</function> function will fail
if: if:
<variablelist> <variablelist>
<varlistentry> <varlistentry>
...@@ -2602,11 +3218,11 @@ typedef struct {
</variablelist>
</para>
<para>The <function>aligned_alloc()</function> function returns
a pointer to the allocated memory if successful; otherwise a
<constant>NULL</constant> pointer is returned and
<varname>errno</varname> is set. The
<function>aligned_alloc()</function> function will fail if:
<variablelist>
<varlistentry>
<term><errorname>EINVAL</errorname></term>
...@@ -2623,44 +3239,44 @@ typedef struct {
</variablelist>
</para>
<para>The <function>realloc()</function> function returns a
pointer, possibly identical to <parameter>ptr</parameter>, to the
allocated memory if successful; otherwise a <constant>NULL</constant>
pointer is returned, and <varname>errno</varname> is set to
<errorname>ENOMEM</errorname> if the error was the result of an
allocation failure. The <function>realloc()</function>
function always leaves the original buffer intact when an error occurs.
</para>
<para>The <function>free()</function> function returns no
value.</para>
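<para>For illustration only (an editorial sketch, not part of the original
manual; the sizes and alignment used here are arbitrary), the following
checks these return values from C:
<programlisting language="C"><![CDATA[
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
        void *p;
        /* posix_memalign() reports failure via its return value, not errno. */
        int err = posix_memalign(&p, 64, 1024);
        if (err != 0) {
                fprintf(stderr, "posix_memalign: %s\n", strerror(err));
                return (1);
        }
        /* realloc() returns NULL on failure and leaves the original intact. */
        void *q = realloc(p, 4096);
        if (q == NULL) {
                fprintf(stderr, "realloc: %s\n", strerror(errno));
                free(p);
                return (1);
        }
        free(q);
        return (0);
}]]></programlisting></para>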
</refsect2>
<refsect2>
<title>Non-standard API</title>
<para>The <function>mallocx()</function> and
<function>rallocx()</function> functions return a pointer to
the allocated memory if successful; otherwise a <constant>NULL</constant>
pointer is returned to indicate insufficient contiguous memory was
available to service the allocation request. </para>
<para>The <function>xallocx()</function> function returns the
real size of the resulting resized allocation pointed to by
<parameter>ptr</parameter>, which is a value less than
<parameter>size</parameter> if the allocation could not be adequately
grown in place. </para>
<para>The <function>sallocx()</function> function returns the
real size of the allocation pointed to by <parameter>ptr</parameter>.
</para>
<para>The <function>nallocx()</function> function returns the real size
that would result from a successful equivalent
<function>mallocx()</function> function call, or zero if
insufficient memory is available to perform the size computation. </para>
<para>The <function>mallctl()</function>,
<function>mallctlnametomib()</function>, and
<function>mallctlbymib()</function> functions return 0 on
success; otherwise they return an error value. The functions will fail
if:
<variablelist>
...@@ -2696,13 +3312,13 @@ typedef struct {
<term><errorname>EFAULT</errorname></term>
<listitem><para>An interface with side effects failed in some way
not directly related to <function>mallctl*()</function>
read/write processing.</para></listitem>
</varlistentry>
</variablelist>
</para>
<para>The <function>malloc_usable_size()</function> function
returns the usable size of the allocation pointed to by
<parameter>ptr</parameter>. </para>
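<para>As an illustrative sketch (not part of the original manual), the
following checks the <function>mallctl()</function> return value while
reading the <mallctl>version</mallctl> string:
<programlisting language="C"><![CDATA[
#include <stdio.h>
#include <string.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
        const char *version;
        size_t sz = sizeof(version);
        /* mallctl() returns 0 on success, or an errno-style error value. */
        int err = mallctl("version", &version, &sz, NULL, 0);
        if (err != 0) {
                fprintf(stderr, "mallctl(\"version\"): %s\n", strerror(err));
                return (1);
        }
        printf("jemalloc version: %s\n", version);
        return (0);
}]]></programlisting></para>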
</refsect2>
...@@ -2727,9 +3343,10 @@ typedef struct {
<para>To dump core whenever a problem occurs:
<screen>ln -s 'abort:true' /etc/malloc.conf</screen>
</para>
<para>To specify in the source that only one arena should be automatically
created:
<programlisting language="C"><![CDATA[
malloc_conf = "narenas:1";]]></programlisting></para>
</refsect1>
<refsect1 id="see_also">
<title>SEE ALSO</title>
...@@ -2750,13 +3367,13 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
</refsect1>
<refsect1 id="standards">
<title>STANDARDS</title>
<para>The <function>malloc()</function>,
<function>calloc()</function>,
<function>realloc()</function>, and
<function>free()</function> functions conform to ISO/IEC
9899:1990 (<quote>ISO C90</quote>).</para>
<para>The <function>posix_memalign()</function> function conforms
to IEEE Std 1003.1-2001 (<quote>POSIX.1</quote>).</para>
</refsect1>
</refentry>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:param name="funcsynopsis.style">ansi</xsl:param>
<xsl:param name="function.parens" select="0"/>
<xsl:template match="function">
<xsl:call-template name="inline.monoseq"/>
</xsl:template>
<xsl:template match="mallctl">
<quote><xsl:call-template name="inline.monoseq"/></quote>
</xsl:template>
</xsl:stylesheet>
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
/* Maximum number of regions in one run. */
#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN)
#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
/*
* Minimum redzone size. Redzones may be larger than this if necessary to
* preserve region alignment.
*/
#define REDZONE_MINSIZE 16
/*
* The minimum ratio of active:dirty pages per arena is computed as:
*
* (nactive >> lg_dirty_mult) >= ndirty
*
* So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as
* many active pages as dirty pages.
*/
#define LG_DIRTY_MULT_DEFAULT 3
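/*
 * Illustrative arithmetic (editorial sketch): with the default lg_dirty_mult
 * of 3 and nactive = 1024 pages, (1024 >> 3) = 128, so the ratio above allows
 * at most 128 dirty pages, i.e. dirty pages are capped at 1/8 of active pages.
 */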
typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#ifdef JEMALLOC_ARENA_STRUCTS_A
struct arena_run_s {
/* Index of bin this run is associated with. */
szind_t binind;
/* Number of free regions in run. */
unsigned nfree;
/* Per region allocated/deallocated bitmap. */
bitmap_t bitmap[BITMAP_GROUPS_MAX];
};
/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_bits_s {
/*
* Run address (or size) and various flags are stored together. The bit
* layout looks like (assuming 32-bit system):
*
* ???????? ???????? ???nnnnn nnndumla
*
* ? : Unallocated: Run address for first/last pages, unset for internal
* pages.
* Small: Run page offset.
* Large: Run page count for first page, unset for trailing pages.
* n : binind for small size class, BININD_INVALID for large size class.
* d : dirty?
* u : unzeroed?
* m : decommitted?
* l : large?
* a : allocated?
*
* Following are example bit patterns for the three types of runs.
*
* p : run page offset
* s : run size
* n : binind for size class; large objects set these to BININD_INVALID
* x : don't care
* - : 0
* + : 1
* [DUMLA] : bit set
* [dumla] : bit unset
*
* Unallocated (clean):
* ssssssss ssssssss sss+++++ +++dum-a
* xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx
* ssssssss ssssssss sss+++++ +++dUm-a
*
* Unallocated (dirty):
* ssssssss ssssssss sss+++++ +++D-m-a
* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
* ssssssss ssssssss sss+++++ +++D-m-a
*
* Small:
* pppppppp pppppppp pppnnnnn nnnd---A
* pppppppp pppppppp pppnnnnn nnn----A
* pppppppp pppppppp pppnnnnn nnnd---A
*
* Large:
* ssssssss ssssssss sss+++++ +++D--LA
* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
* -------- -------- ---+++++ +++D--LA
*
* Large (sampled, size <= LARGE_MINCLASS):
* ssssssss ssssssss sssnnnnn nnnD--LA
* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
* -------- -------- ---+++++ +++D--LA
*
* Large (not sampled, size == LARGE_MINCLASS):
* ssssssss ssssssss sss+++++ +++D--LA
* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
* -------- -------- ---+++++ +++D--LA
*/
size_t bits;
#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
#define CHUNK_MAP_LARGE ((size_t)0x02U)
#define CHUNK_MAP_STATE_MASK ((size_t)0x3U)
#define CHUNK_MAP_DECOMMITTED ((size_t)0x04U)
#define CHUNK_MAP_UNZEROED ((size_t)0x08U)
#define CHUNK_MAP_DIRTY ((size_t)0x10U)
#define CHUNK_MAP_FLAGS_MASK ((size_t)0x1cU)
#define CHUNK_MAP_BININD_SHIFT 5
#define BININD_INVALID ((size_t)0xffU)
#define CHUNK_MAP_BININD_MASK (BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
#define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8)
#define CHUNK_MAP_SIZE_SHIFT (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE)
#define CHUNK_MAP_SIZE_MASK \
(~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK))
};
struct arena_runs_dirty_link_s {
qr(arena_runs_dirty_link_t) rd_link;
};
/*
* Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
* like arena_chunk_map_bits_t. Two separate arrays are stored within each
* chunk header in order to improve cache locality.
*/
struct arena_chunk_map_misc_s {
/*
* Linkage for run trees. There are two disjoint uses:
*
* 1) arena_t's runs_avail tree.
* 2) arena_run_t conceptually uses this linkage for in-use non-full
* runs, rather than directly embedding linkage.
*/
rb_node(arena_chunk_map_misc_t) rb_link;
union {
/* Linkage for list of dirty runs. */
arena_runs_dirty_link_t rd;
/* Profile counters, used for large object runs. */
union {
void *prof_tctx_pun;
prof_tctx_t *prof_tctx;
};
/* Small region run metadata. */
arena_run_t run;
};
};
typedef rb_tree(arena_chunk_map_misc_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
#endif /* JEMALLOC_ARENA_STRUCTS_A */
#ifdef JEMALLOC_ARENA_STRUCTS_B
/* Arena chunk header. */
struct arena_chunk_s {
/*
* A pointer to the arena that owns the chunk is stored within the node.
* This field as a whole is used by chunks_rtree to support both
* ivsalloc() and core-based debugging.
*/
extent_node_t node;
/*
* Map of pages within chunk that keeps track of free/large/small. The
* first map_bias entries are omitted, since the chunk header does not
* need to be tracked in the map. This omission saves a header page
* for common chunk sizes (e.g. 4 MiB).
*/
arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */
};
/*
* Read-only information associated with each element of arena_t's bins array
* is stored separately, partly to reduce memory usage (only one copy, rather
* than one per arena), but mainly to avoid false cacheline sharing.
*
* Each run has the following layout:
*
* /--------------------\
* | pad? |
* |--------------------|
* | redzone |
* reg0_offset | region 0 |
* | redzone |
* |--------------------| \
* | redzone | |
* | region 1 | > reg_interval
* | redzone | /
* |--------------------|
* | ... |
* | ... |
* | ... |
* |--------------------|
* | redzone |
* | region nregs-1 |
* | redzone |
* |--------------------|
* | alignment pad? |
* \--------------------/
*
* reg_interval has at least the same minimum alignment as reg_size; this
* preserves the alignment constraint that sa2u() depends on. Alignment pad is
* either 0 or redzone_size; it is present only if needed to align reg0_offset.
*/
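/*
 * Worked example (editorial sketch; the values are hypothetical): with
 * reg_size = 32 and redzone_size = 16, reg_interval = 32 + (16 << 1) = 64,
 * so consecutive regions start 64 bytes apart, each bracketed by 16-byte
 * redzones.
 */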
struct arena_bin_info_s {
/* Size of regions in a run for this bin's size class. */
size_t reg_size;
/* Redzone size. */
size_t redzone_size;
/* Interval between regions (reg_size + (redzone_size << 1)). */
size_t reg_interval;
/* Total size of a run for this bin's size class. */
size_t run_size;
/* Total number of regions in a run for this bin's size class. */
uint32_t nregs;
/*
* Metadata used to manipulate bitmaps for runs associated with this
* bin.
*/
bitmap_info_t bitmap_info;
/* Offset of first region in a run for this bin's size class. */
uint32_t reg0_offset;
};
struct arena_bin_s {
/*
* All operations on runcur, runs, and stats require that lock be
* locked. Run allocation/deallocation are protected by the arena lock,
* which may be acquired while holding one or more bin locks, but not
* vice versa.
*/
malloc_mutex_t lock;
/*
* Current run being used to service allocations of this bin's size
* class.
*/
arena_run_t *runcur;
/*
* Tree of non-full runs. This tree is used when looking for an
* existing run when runcur is no longer usable. We choose the
* non-full run that is lowest in memory; this policy tends to keep
* objects packed well, and it can also help reduce the number of
* almost-empty chunks.
*/
arena_run_tree_t runs;
/* Bin statistics. */
malloc_bin_stats_t stats;
};
struct arena_s {
/* This arena's index within the arenas array. */
unsigned ind;
/*
* Number of threads currently assigned to this arena. This field is
* protected by arenas_lock.
*/
unsigned nthreads;
/*
* There are three classes of arena operations from a locking
* perspective:
* 1) Thread assignment (modifies nthreads) is protected by arenas_lock.
* 2) Bin-related operations are protected by bin locks.
* 3) Chunk- and run-related operations are protected by this mutex.
*/
malloc_mutex_t lock;
arena_stats_t stats;
/*
* List of tcaches for extant threads associated with this arena.
* Stats from these are merged incrementally, and at exit if
* opt_stats_print is enabled.
*/
ql_head(tcache_t) tcache_ql;
uint64_t prof_accumbytes;
/*
* PRNG state for cache index randomization of large allocation base
* pointers.
*/
uint64_t offset_state;
dss_prec_t dss_prec;
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
* oscillates right on the cusp of needing a new chunk, cache the most
* recently freed chunk. The spare is left in the arena's chunk trees
* until it is deleted.
*
* There is one spare chunk per arena, rather than one spare total, in
* order to avoid interactions between multiple threads that could make
* a single spare inadequate.
*/
arena_chunk_t *spare;
/* Minimum ratio (log base 2) of nactive:ndirty. */
ssize_t lg_dirty_mult;
/* True if a thread is currently executing arena_purge(). */
bool purging;
/* Number of pages in active runs and huge regions. */
size_t nactive;
/*
* Current count of pages within unused runs that are potentially
* dirty, and for which madvise(... MADV_DONTNEED) has not been called.
* By tracking this, we can institute a limit on how much dirty unused
* memory is mapped for each arena.
*/
size_t ndirty;
/*
* Size/address-ordered tree of this arena's available runs. The tree
* is used for first-best-fit run allocation.
*/
arena_avail_tree_t runs_avail;
/*
* Unused dirty memory this arena manages. Dirty memory is conceptually
* tracked as an arbitrarily interleaved LRU of dirty runs and cached
* chunks, but the list linkage is actually semi-duplicated in order to
* avoid extra arena_chunk_map_misc_t space overhead.
*
* LRU-----------------------------------------------------------MRU
*
* /-- arena ---\
* | |
* | |
* |------------| /- chunk -\
* ...->|chunks_cache|<--------------------------->| /----\ |<--...
* |------------| | |node| |
* | | | | | |
* | | /- run -\ /- run -\ | | | |
* | | | | | | | | | |
* | | | | | | | | | |
* |------------| |-------| |-------| | |----| |
* ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----...
* |------------| |-------| |-------| | |----| |
* | | | | | | | | | |
* | | | | | | | \----/ |
* | | \-------/ \-------/ | |
* | | | |
* | | | |
* \------------/ \---------/
*/
arena_runs_dirty_link_t runs_dirty;
extent_node_t chunks_cache;
/* Extant huge allocations. */
ql_head(extent_node_t) huge;
/* Synchronizes all huge allocation/update/deallocation. */
malloc_mutex_t huge_mtx;
/*
* Trees of chunks that were previously allocated (trees differ only in
* node ordering). These are used when allocating chunks, in an attempt
* to re-use address space. Depending on function, different tree
* orderings are needed, which is why there are two trees with the same
* contents.
*/
extent_tree_t chunks_szad_cached;
extent_tree_t chunks_ad_cached;
extent_tree_t chunks_szad_retained;
extent_tree_t chunks_ad_retained;
malloc_mutex_t chunks_mtx;
/* Cache of nodes that were allocated via base_alloc(). */
ql_head(extent_node_t) node_cache;
malloc_mutex_t node_cache_mtx;
/* User-configurable chunk hook functions. */
chunk_hooks_t chunk_hooks;
/* bins is used to store trees of free regions. */
arena_bin_t bins[NBINS];
};
#endif /* JEMALLOC_ARENA_STRUCTS_B */
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
static const size_t large_pad =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
PAGE
#else
0
#endif
;
extern ssize_t opt_lg_dirty_mult;
extern arena_bin_info_t arena_bin_info[NBINS];
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t map_misc_offset;
extern size_t arena_maxrun; /* Max run size for arenas. */
extern size_t large_maxclass; /* Max large size class. */
extern unsigned nlclasses; /* Number of large size classes. */
extern unsigned nhclasses; /* Number of huge size classes. */
void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
bool cache);
void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
bool cache);
extent_node_t *arena_node_alloc(arena_t *arena);
void arena_node_dalloc(arena_t *arena, extent_node_t *node);
void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
bool *zero);
void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
size_t oldsize, size_t usize);
void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
size_t oldsize, size_t usize);
bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
size_t oldsize, size_t usize, bool *zero);
ssize_t arena_lg_dirty_mult_get(arena_t *arena);
bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
void arena_maybe_purge(arena_t *arena);
void arena_purge_all(arena_t *arena);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
uint8_t);
extern arena_redzone_corruption_t *arena_redzone_corruption;
typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
#else
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void arena_quarantine_junk_small(void *ptr, size_t usize);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promoted(const void *ptr, size_t size);
void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#else
void arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache);
dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_lg_dirty_mult_default_get(void);
bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
void arena_stats_merge(arena_t *arena, const char **dss,
ssize_t *lg_dirty_mult, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
arena_t *arena_new(unsigned ind);
bool arena_boot(void);
void arena_prefork(arena_t *arena);
void arena_postfork_parent(arena_t *arena);
void arena_postfork_child(arena_t *arena);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
size_t pageind);
arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
size_t pageind);
size_t arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm);
void *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_size_decode(size_t mapbits);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
size_t arena_mapbits_size_encode(size_t size);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size);
void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
size_t flags);
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
szind_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
size_t runind, szind_t binind, size_t flags);
void arena_metadata_allocated_add(arena_t *arena, size_t size);
void arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
prof_tctx_t *arena_prof_tctx_get(const void *ptr);
void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
void arena_prof_tctx_reset(const void *ptr, size_t usize,
const void *old_ptr, prof_tctx_t *old_tctx);
void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
tcache_t *tcache);
arena_t *arena_aalloc(const void *ptr);
size_t arena_salloc(const void *ptr, bool demote);
void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
assert(pageind < chunk_npages);
return (&chunk->map_bits[pageind-map_bias]);
}
JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
assert(pageind < chunk_npages);
return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
(uintptr_t)map_misc_offset) + pageind-map_bias);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;
assert(pageind >= map_bias);
assert(pageind < chunk_npages);
return (pageind);
}
JEMALLOC_ALWAYS_INLINE void *
arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
}
JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
{
arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
*)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));
assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
return (miscelm);
}
JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_run_to_miscelm(arena_run_t *run)
{
arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
*)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));
assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
return (miscelm);
}
JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{
return (&arena_bitselm_get(chunk, pageind)->bits);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbitsp_read(size_t *mapbitsp)
{
return (*mapbitsp);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{
return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_decode(size_t mapbits)
{
size_t size;
#if CHUNK_MAP_SIZE_SHIFT > 0
size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
size = mapbits & CHUNK_MAP_SIZE_MASK;
#else
size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
#endif
return (size);
}
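/*
 * Worked example (editorial sketch, assuming 4 KiB pages, i.e. LG_PAGE == 12):
 * CHUNK_MAP_RUNIND_SHIFT == 13, so CHUNK_MAP_SIZE_SHIFT == 1 and
 * arena_mapbits_size_decode(0x2000) == 0x1000; arena_mapbits_size_encode()
 * below is the inverse, mapping 0x1000 back to 0x2000.
 */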
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
(CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
CHUNK_MAP_ALLOCATED);
return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
}
JEMALLOC_ALWAYS_INLINE szind_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
szind_t binind;
mapbits = arena_mapbits_get(chunk, pageind);
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
assert(binind < NBINS || binind == BININD_INVALID);
return (binind);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
return (mapbits & CHUNK_MAP_DIRTY);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
return (mapbits & CHUNK_MAP_UNZEROED);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
return (mapbits & CHUNK_MAP_DECOMMITTED);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
return (mapbits & CHUNK_MAP_LARGE);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
return (mapbits & CHUNK_MAP_ALLOCATED);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
{
*mapbitsp = mapbits;
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_encode(size_t size)
{
size_t mapbits;
#if CHUNK_MAP_SIZE_SHIFT > 0
mapbits = size << CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
mapbits = size;
#else
mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
#endif
assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
return (mapbits);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
CHUNK_MAP_BININD_INVALID | flags);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert((size & PAGE_MASK) == 0);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
(mapbits & ~CHUNK_MAP_SIZE_MASK));
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((flags & CHUNK_MAP_UNZEROED) == flags);
arena_mapbitsp_write(mapbitsp, flags);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
szind_t binind)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert(binind <= BININD_INVALID);
assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
large_pad);
arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
(binind << CHUNK_MAP_BININD_SHIFT));
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
szind_t binind, size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(binind < BININD_INVALID);
assert(pageind - runind >= map_bias);
assert((flags & CHUNK_MAP_UNZEROED) == flags);
arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) |
(binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED);
}
JEMALLOC_INLINE void
arena_metadata_allocated_add(arena_t *arena, size_t size)
{
atomic_add_z(&arena->stats.metadata_allocated, size);
}
JEMALLOC_INLINE void
arena_metadata_allocated_sub(arena_t *arena, size_t size)
{
atomic_sub_z(&arena->stats.metadata_allocated, size);
}
JEMALLOC_INLINE size_t
arena_metadata_allocated_get(arena_t *arena)
{
return (atomic_read_z(&arena->stats.metadata_allocated));
}
JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
assert(prof_interval != 0);
arena->prof_accumbytes += accumbytes;
if (arena->prof_accumbytes >= prof_interval) {
arena->prof_accumbytes -= prof_interval;
return (true);
}
return (false);
}
JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
if (likely(prof_interval == 0))
return (false);
return (arena_prof_accum_impl(arena, accumbytes));
}
JEMALLOC_INLINE bool
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
if (likely(prof_interval == 0))
return (false);
{
bool ret;
malloc_mutex_lock(&arena->lock);
ret = arena_prof_accum_impl(arena, accumbytes);
malloc_mutex_unlock(&arena->lock);
return (ret);
}
}
JEMALLOC_ALWAYS_INLINE szind_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
szind_t binind;
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
if (config_debug) {
arena_chunk_t *chunk;
arena_t *arena;
size_t pageind;
size_t actual_mapbits;
size_t rpages_ind;
arena_run_t *run;
arena_bin_t *bin;
szind_t run_binind, actual_binind;
arena_bin_info_t *bin_info;
arena_chunk_map_misc_t *miscelm;
void *rpages;
assert(binind != BININD_INVALID);
assert(binind < NBINS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = extent_node_arena_get(&chunk->node);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
actual_mapbits = arena_mapbits_get(chunk, pageind);
assert(mapbits == actual_mapbits);
assert(arena_mapbits_large_get(chunk, pageind) == 0);
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
pageind);
miscelm = arena_miscelm_get(chunk, rpages_ind);
run = &miscelm->run;
run_binind = run->binind;
bin = &arena->bins[run_binind];
actual_binind = bin - arena->bins;
assert(run_binind == actual_binind);
bin_info = &arena_bin_info[actual_binind];
rpages = arena_miscelm_to_rpages(miscelm);
assert(((uintptr_t)ptr - ((uintptr_t)rpages +
(uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
== 0);
}
return (binind);
}
# endif /* JEMALLOC_ARENA_INLINE_A */
# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
szind_t binind = bin - arena->bins;
assert(binind < NBINS);
return (binind);
}
JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
unsigned shift, diff, regind;
size_t interval;
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
void *rpages = arena_miscelm_to_rpages(miscelm);
/*
* Freeing a pointer lower than region zero can cause assertion
* failure.
*/
assert((uintptr_t)ptr >= (uintptr_t)rpages +
(uintptr_t)bin_info->reg0_offset);
/*
* Avoid doing division with a variable divisor if possible. Using
* actual division here can reduce allocator throughput by over 20%!
*/
diff = (unsigned)((uintptr_t)ptr - (uintptr_t)rpages -
bin_info->reg0_offset);
/* Rescale (factor powers of 2 out of the numerator and denominator). */
interval = bin_info->reg_interval;
shift = jemalloc_ffs(interval) - 1;
diff >>= shift;
interval >>= shift;
if (interval == 1) {
/* The divisor was a power of 2. */
regind = diff;
} else {
/*
* To divide by a number D that is not a power of two we
* multiply by (2^21 / D) and then right shift by 21 positions.
*
* X / D
*
* becomes
*
* (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
*
* We can omit the first three elements, because we never
* divide by 0, and 1 and 2 are both powers of two, which are
* handled above.
*/
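/*
 * Worked example (editorial sketch, using the 21-bit shift from the
 * comment above): for D = 3, (1 << 21) / 3 + 1 = 699051, and for X = 96,
 * (96 * 699051) >> 21 = 32 = 96 / 3.
 */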
#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1)
static const unsigned interval_invs[] = {
SIZE_INV(3),
SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
};
if (likely(interval <= ((sizeof(interval_invs) /
sizeof(unsigned)) + 2))) {
regind = (diff * interval_invs[interval - 3]) >>
SIZE_INV_SHIFT;
} else
regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
}
assert(diff == regind * interval);
assert(regind < bin_info->nregs);
return (regind);
}
JEMALLOC_INLINE prof_tctx_t *
arena_prof_tctx_get(const void *ptr)
{
prof_tctx_t *ret;
arena_chunk_t *chunk;
cassert(config_prof);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
pageind);
ret = atomic_read_p(&elm->prof_tctx_pun);
}
} else
ret = huge_prof_tctx_get(ptr);
return (ret);
}
JEMALLOC_INLINE void
arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
arena_chunk_t *chunk;
cassert(config_prof);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
(uintptr_t)1U)) {
arena_chunk_map_misc_t *elm;
assert(arena_mapbits_large_get(chunk, pageind) != 0);
elm = arena_miscelm_get(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun, tctx);
} else {
/*
* tctx must always be initialized for large runs.
* Assert that the surrounding conditional logic is
* equivalent to checking whether ptr refers to a large
* run.
*/
assert(arena_mapbits_large_get(chunk, pageind) == 0);
}
} else
huge_prof_tctx_set(ptr, tctx);
}
JEMALLOC_INLINE void
arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
prof_tctx_t *old_tctx)
{
cassert(config_prof);
assert(ptr != NULL);
if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
(uintptr_t)old_tctx > (uintptr_t)1U))) {
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
size_t pageind;
arena_chunk_map_misc_t *elm;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
LG_PAGE;
assert(arena_mapbits_allocated_get(chunk, pageind) !=
0);
assert(arena_mapbits_large_get(chunk, pageind) != 0);
elm = arena_miscelm_get(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun,
(prof_tctx_t *)(uintptr_t)1U);
} else
huge_prof_tctx_reset(ptr);
}
}
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
tcache_t *tcache)
{
assert(size != 0);
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL))
return (NULL);
if (likely(size <= SMALL_MAXCLASS)) {
if (likely(tcache != NULL)) {
return (tcache_alloc_small(tsd, arena, tcache, size,
zero));
} else
return (arena_malloc_small(arena, size, zero));
} else if (likely(size <= large_maxclass)) {
/*
* Initialize tcache after checking size in order to avoid
* infinite recursion during tcache initialization.
*/
if (likely(tcache != NULL) && size <= tcache_maxclass) {
return (tcache_alloc_large(tsd, arena, tcache, size,
zero));
} else
return (arena_malloc_large(arena, size, zero));
} else
return (huge_malloc(tsd, arena, size, zero, tcache));
}
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(const void *ptr)
{
arena_chunk_t *chunk;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr))
return (extent_node_arena_get(&chunk->node));
else
return (huge_aalloc(ptr));
}
/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
size_t pageind;
szind_t binind;
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
binind = arena_mapbits_binind_get(chunk, pageind);
if (unlikely(binind == BININD_INVALID || (config_prof && !demote
&& arena_mapbits_large_get(chunk, pageind) != 0))) {
/*
* Large allocation. In the common case (demote), and
* as this is an inline function, most callers will only
* end up looking at binind to determine that ptr is a
* small allocation.
*/
assert(config_cache_oblivious || ((uintptr_t)ptr &
PAGE_MASK) == 0);
ret = arena_mapbits_large_size_get(chunk, pageind) -
large_pad;
assert(ret != 0);
assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) ==
arena_mapbits_dirty_get(chunk,
pageind+((ret+large_pad)>>LG_PAGE)-1));
} else {
/*
* Small allocation (possibly promoted to a large
* object).
*/
assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
arena_ptr_small_binind_get(ptr,
arena_mapbits_get(chunk, pageind)) == binind);
ret = index2size(binind);
}
} else
ret = huge_salloc(ptr);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
arena_chunk_t *chunk;
size_t pageind, mapbits;
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapbits = arena_mapbits_get(chunk, pageind);
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
szind_t binind = arena_ptr_small_binind_get(ptr,
mapbits);
tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
arena_dalloc_small(extent_node_arena_get(
&chunk->node), chunk, ptr, pageind);
}
} else {
size_t size = arena_mapbits_large_size_get(chunk,
pageind);
assert(config_cache_oblivious || ((uintptr_t)ptr &
PAGE_MASK) == 0);
if (likely(tcache != NULL) && size - large_pad <=
tcache_maxclass) {
tcache_dalloc_large(tsd, tcache, ptr, size -
large_pad);
} else {
arena_dalloc_large(extent_node_arena_get(
&chunk->node), chunk, ptr);
}
}
} else
huge_dalloc(tsd, ptr, tcache);
}
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{
arena_chunk_t *chunk;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
if (config_prof && opt_prof) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
LG_PAGE;
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
if (arena_mapbits_large_get(chunk, pageind) != 0) {
/*
* Make sure to use promoted size, not request
* size.
*/
size = arena_mapbits_large_size_get(chunk,
pageind) - large_pad;
}
}
assert(s2u(size) == s2u(arena_salloc(ptr, false)));
if (likely(size <= SMALL_MAXCLASS)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
szind_t binind = size2index(size);
tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_dalloc_small(extent_node_arena_get(
&chunk->node), chunk, ptr, pageind);
}
} else {
assert(config_cache_oblivious || ((uintptr_t)ptr &
PAGE_MASK) == 0);
if (likely(tcache != NULL) && size <= tcache_maxclass)
tcache_dalloc_large(tsd, tcache, ptr, size);
else {
arena_dalloc_large(extent_node_arena_get(
&chunk->node), chunk, ptr);
}
}
} else
huge_dalloc(tsd, ptr, tcache);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
extern ssize_t opt_dirty_decay_ms;
extern ssize_t opt_muzzy_decay_ms;
extern percpu_arena_mode_t opt_percpu_arena;
extern const char *percpu_arena_mode_names[];
extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
extern malloc_mutex_t arenas_lock;
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
bin_stats_t *bstats, arena_stats_large_t *lstats);
void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
#ifdef JEMALLOC_JET
size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
#endif
extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool *zero);
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
extent_t *extent);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
ssize_t arena_dirty_decay_ms_get(arena_t *arena);
bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info,
bool zero);
typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *);
extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
szind_t ind, bool zero);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
bool slow_path);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, void *ptr);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache);
dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_dirty_decay_ms_default_get(void);
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_default_get(void);
bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
size_t *old_limit, size_t *new_limit);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
size_t arena_extent_sn_next(arena_t *arena);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void arena_boot(void);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */
#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H
static inline unsigned
arena_ind_get(const arena_t *arena) {
return base_ind_get(arena->base);
}
static inline void
arena_internal_add(arena_t *arena, size_t size) {
atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}
static inline void
arena_internal_sub(arena_t *arena, size_t size) {
atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}
static inline size_t
arena_internal_get(arena_t *arena) {
return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
}
static inline bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
cassert(config_prof);
if (likely(prof_interval == 0 || !prof_active_get_unlocked())) {
return false;
}
return prof_accum_add(tsdn, &arena->prof_accum, accumbytes);
}
static inline void
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
assert(have_percpu_arena);
arena_t *oldarena = tsd_arena_get(tsd);
assert(oldarena != NULL);
unsigned oldind = arena_ind_get(oldarena);
if (oldind != cpu) {
unsigned newind = cpu;
arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
assert(newarena != NULL);
/* Set new arena/tcache associations. */
arena_migrate(tsd, oldind, newind);
tcache_t *tcache = tcache_get(tsd);
if (tcache != NULL) {
tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
newarena);
}
}
}
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */
#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
cassert(config_prof);
assert(ptr != NULL);
/* Static check. */
if (alloc_ctx == NULL) {
const extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(!extent_slab_get(extent))) {
return large_prof_tctx_get(tsdn, extent);
}
} else {
if (unlikely(!alloc_ctx->slab)) {
return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
}
}
return (prof_tctx_t *)(uintptr_t)1U;
}
JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize,
alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
/* Static check. */
if (alloc_ctx == NULL) {
extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(!extent_slab_get(extent))) {
large_prof_tctx_set(tsdn, extent, tctx);
}
} else {
if (unlikely(!alloc_ctx->slab)) {
large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
}
}
}
static inline void
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, UNUSED prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
extent_t *extent = iealloc(tsdn, ptr);
assert(!extent_slab_get(extent));
large_prof_tctx_reset(tsdn, extent);
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
tsd_t *tsd;
ticker_t *decay_ticker;
if (unlikely(tsdn_null(tsdn))) {
return;
}
tsd = tsdn_tsd(tsdn);
decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
if (unlikely(decay_ticker == NULL)) {
return;
}
if (unlikely(ticker_ticks(decay_ticker, nticks))) {
arena_decay(tsdn, arena, false, false);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);
arena_decay_ticks(tsdn, arena, 1);
}
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(size != 0);
if (likely(tcache != NULL)) {
if (likely(size <= SMALL_MAXCLASS)) {
return tcache_alloc_small(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
}
if (likely(size <= tcache_maxclass)) {
return tcache_alloc_large(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
}
/* (size > tcache_maxclass) case falls through. */
assert(size > tcache_maxclass);
}
return arena_malloc_hard(tsdn, arena, size, ind, zero);
}
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
return extent_arena_get(iealloc(tsdn, ptr));
}
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr) {
assert(ptr != NULL);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
assert(szind != NSIZES);
return sz_index2size(szind);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
/*
* Return 0 if ptr is not within an extent managed by jemalloc. This
* function has two extra costs relative to isalloc():
* - The rtree calls cannot claim to be dependent lookups, which induces
* rtree lookup load dependencies.
* - The lookup may fail, so there is an extra branch to check for
* failure.
*/
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
extent_t *extent;
szind_t szind;
if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, false, &extent, &szind)) {
return 0;
}
if (extent == NULL) {
return 0;
}
assert(extent_state_get(extent) == extent_state_active);
/* Only slab members should be looked up via interior pointers. */
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
assert(szind != NSIZES);
return sz_index2size(szind);
}
static inline void
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
assert(ptr != NULL);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind;
bool slab;
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
true, &szind, &slab);
if (config_debug) {
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(szind < NSIZES);
assert(slab == extent_slab_get(extent));
}
if (likely(slab)) {
/* Small allocation. */
arena_dalloc_small(tsdn, ptr);
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
alloc_ctx_t *alloc_ctx, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
if (unlikely(tcache == NULL)) {
arena_dalloc_no_tcache(tsdn, ptr);
return;
}
szind_t szind;
bool slab;
rtree_ctx_t *rtree_ctx;
if (alloc_ctx != NULL) {
szind = alloc_ctx->szind;
slab = alloc_ctx->slab;
assert(szind != NSIZES);
} else {
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
}
if (config_debug) {
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(szind < NSIZES);
assert(slab == extent_slab_get(extent));
}
if (likely(slab)) {
/* Small allocation. */
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
slow_path);
} else {
if (szind < nhbins) {
if (config_prof && unlikely(szind < NBINS)) {
arena_dalloc_promoted(tsdn, ptr, tcache,
slow_path);
} else {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
szind, slow_path);
}
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
}
}
}
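/*
 * Summary of the dispatch above: slab-backed (small) regions are returned to
 * the thread cache; non-slab pointers whose size class still fits a tcache bin
 * (szind < nhbins) also go through the tcache, except that with profiling
 * enabled a size index below NBINS marks a sampled allocation promoted to a
 * large extent, which is routed through arena_dalloc_promoted(); anything
 * larger goes straight to large_dalloc().
 */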
static inline void
arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
assert(ptr != NULL);
assert(size <= LARGE_MAXCLASS);
szind_t szind;
bool slab;
if (!config_prof || !opt_prof) {
/*
* There is no risk of being confused by a promoted sampled
* object, so base szind and slab on the given size.
*/
szind = sz_size2index(size);
slab = (szind < NBINS);
}
if ((config_prof && opt_prof) || config_debug) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
&rtree_ctx_fallback);
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
assert(szind == sz_size2index(size));
assert((config_prof && opt_prof) || slab == (szind < NBINS));
if (config_debug) {
extent_t *extent = rtree_extent_read(tsdn,
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(slab == extent_slab_get(extent));
}
}
if (likely(slab)) {
/* Small allocation. */
arena_dalloc_small(tsdn, ptr);
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
alloc_ctx_t *alloc_ctx, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
assert(size <= LARGE_MAXCLASS);
if (unlikely(tcache == NULL)) {
arena_sdalloc_no_tcache(tsdn, ptr, size);
return;
}
szind_t szind;
bool slab;
UNUSED alloc_ctx_t local_ctx;
if (config_prof && opt_prof) {
if (alloc_ctx == NULL) {
/* Uncommon case and should be a static check. */
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
&rtree_ctx_fallback);
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &local_ctx.szind,
&local_ctx.slab);
assert(local_ctx.szind == sz_size2index(size));
alloc_ctx = &local_ctx;
}
slab = alloc_ctx->slab;
szind = alloc_ctx->szind;
} else {
/*
* There is no risk of being confused by a promoted sampled
* object, so base szind and slab on the given size.
*/
szind = sz_size2index(size);
slab = (szind < NBINS);
}
if (config_debug) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
extent_t *extent = rtree_extent_read(tsdn,
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(slab == extent_slab_get(extent));
}
if (likely(slab)) {
/* Small allocation. */
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
slow_path);
} else {
if (szind < nhbins) {
if (config_prof && unlikely(szind < NBINS)) {
arena_dalloc_promoted(tsdn, ptr, tcache,
slow_path);
} else {
tcache_dalloc_large(tsdn_tsd(tsdn),
tcache, ptr, szind, slow_path);
}
} else {
extent_t *extent = iealloc(tsdn, ptr);
large_dalloc(tsdn, extent);
}
}
}
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
#define JEMALLOC_INTERNAL_ARENA_STATS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/size_classes.h"
/*
 * On architectures that support 64-bit atomics, we use atomic updates for our
 * 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
 * externally.
 */
#ifdef JEMALLOC_ATOMIC_U64
typedef atomic_u64_t arena_stats_u64_t;
#else
/* Must hold the arena stats mutex while reading atomically. */
typedef uint64_t arena_stats_u64_t;
#endif
typedef struct arena_stats_large_s arena_stats_large_t;
struct arena_stats_large_s {
/*
* Total number of allocation/deallocation requests served directly by
* the arena.
*/
arena_stats_u64_t nmalloc;
arena_stats_u64_t ndalloc;
/*
* Number of allocation requests that correspond to this size class.
* This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
arena_stats_u64_t nrequests; /* Partially derived. */
/* Current number of allocations of this size class. */
size_t curlextents; /* Derived. */
};
typedef struct arena_stats_decay_s arena_stats_decay_t;
struct arena_stats_decay_s {
/* Total number of purge sweeps. */
arena_stats_u64_t npurge;
/* Total number of madvise calls made. */
arena_stats_u64_t nmadvise;
/* Total number of pages purged. */
arena_stats_u64_t purged;
};
/*
* Arena stats. Note that fields marked "derived" are not directly maintained
* within the arena code; rather their values are derived during stats merge
* requests.
*/
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_t mtx;
#endif
/* Number of bytes currently mapped, excluding retained memory. */
atomic_zu_t mapped; /* Partially derived. */
/*
* Number of unused virtual memory bytes currently retained. Retained
* bytes are technically mapped (though always decommitted or purged),
* but they are excluded from the mapped statistic (above).
*/
atomic_zu_t retained; /* Derived. */
arena_stats_decay_t decay_dirty;
arena_stats_decay_t decay_muzzy;
atomic_zu_t base; /* Derived. */
atomic_zu_t internal;
atomic_zu_t resident; /* Derived. */
atomic_zu_t metadata_thp;
atomic_zu_t allocated_large; /* Derived. */
arena_stats_u64_t nmalloc_large; /* Derived. */
arena_stats_u64_t ndalloc_large; /* Derived. */
arena_stats_u64_t nrequests_large; /* Derived. */
/* Number of bytes cached in tcache associated with this arena. */
atomic_zu_t tcache_bytes; /* Derived. */
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
/* One element for each large size class. */
arena_stats_large_t lstats[NSIZES - NBINS];
/* Arena uptime. */
nstime_t uptime;
};
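/*
 * Note: lstats is indexed by (szind - NBINS), so lstats[0] corresponds to the
 * smallest large size class (see arena_stats_large_nrequests_add() below).
 */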
static inline bool
arena_stats_init(UNUSED tsdn_t *tsdn, arena_stats_t *arena_stats) {
if (config_debug) {
for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
assert(((char *)arena_stats)[i] == 0);
}
}
#ifndef JEMALLOC_ATOMIC_U64
if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
return true;
}
#endif
/* Memory is zeroed, so there is no need to clear stats. */
return false;
}
static inline void
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_lock(tsdn, &arena_stats->mtx);
#endif
}
static inline void
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_unlock(tsdn, &arena_stats->mtx);
#endif
}
static inline uint64_t
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_u64(p, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
return *p;
#endif
}
static inline void
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
*p += x;
#endif
}
UNUSED static inline void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
assert(r - x <= r);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
*p -= x;
assert(*p + x >= *p);
#endif
}
/*
 * Non-atomically sets *dst += src.  *dst needs external synchronization.
 * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
 * the types here are atomic).
 */
static inline void
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
#else
*dst += src;
#endif
}
static inline size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_zu(p, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
return atomic_load_zu(p, ATOMIC_RELAXED);
#endif
}
static inline void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
#endif
}
static inline void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
assert(r - x <= r);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
#endif
}
/* Like the _u64 variant, needs an externally synchronized *dst. */
static inline void
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}
static inline void
arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
szind_t szind, uint64_t nrequests) {
arena_stats_lock(tsdn, arena_stats);
arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
NBINS].nrequests, nrequests);
arena_stats_unlock(tsdn, arena_stats);
}
static inline void
arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
arena_stats_lock(tsdn, arena_stats);
arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
arena_stats_unlock(tsdn, arena_stats);
}
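/*
 * Illustrative sketch (not part of jemalloc): reading a 64-bit counter
 * consistently with the accessors above.  Without JEMALLOC_ATOMIC_U64 the
 * stats mutex must be held around the read, which is exactly what
 * arena_stats_lock()/arena_stats_unlock() provide; with it, the lock/unlock
 * calls compile away.  The helper name is ours.
 */
static inline uint64_t
example_read_large_nrequests(tsdn_t *tsdn, arena_stats_t *arena_stats,
    szind_t szind) {
	arena_stats_lock(tsdn, arena_stats);
	uint64_t nrequests = arena_stats_read_u64(tsdn, arena_stats,
	    &arena_stats->lstats[szind - NBINS].nrequests);
	arena_stats_unlock(tsdn, arena_stats);
	return nrequests;
}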
#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#include "jemalloc/internal/bitmap.h"
struct arena_slab_data_s {
/* Per region allocated/deallocated bitmap. */
bitmap_t bitmap[BITMAP_GROUPS_MAX];
};
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/ticker.h"
struct arena_decay_s {
/* Synchronizes all non-atomic fields. */
malloc_mutex_t mtx;
/*
* True if a thread is currently purging the extents associated with
* this decay structure.
*/
bool purging;
/*
* Approximate time in milliseconds from the creation of a set of unused
* dirty pages until an equivalent set of unused dirty pages is purged
* and/or reused.
*/
atomic_zd_t time_ms;
/* time / SMOOTHSTEP_NSTEPS. */
nstime_t interval;
/*
* Time at which the current decay interval logically started. We do
* not actually advance to a new epoch until sometime after it starts
* because of scheduling and computation delays, and it is even possible
* to completely skip epochs. In all cases, during epoch advancement we
* merge all relevant activity into the most recently recorded epoch.
*/
nstime_t epoch;
/* Deadline randomness generator. */
uint64_t jitter_state;
/*
* Deadline for current epoch. This is the sum of interval and per
* epoch jitter which is a uniform random variable in [0..interval).
* Epochs always advance by precise multiples of interval, but we
* randomize the deadline to reduce the likelihood of arenas purging in
* lockstep.
*/
nstime_t deadline;
/*
* Number of unpurged pages at beginning of current epoch. During epoch
* advancement we use the delta between arena->decay_*.nunpurged and
* extents_npages_get(&arena->extents_*) to determine how many dirty
* pages, if any, were generated.
*/
size_t nunpurged;
/*
* Trailing log of how many unused dirty pages were generated during
* each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
* element is the most recent epoch. Corresponding epoch times are
* relative to epoch.
*/
size_t backlog[SMOOTHSTEP_NSTEPS];
/*
* Pointer to associated stats. These stats are embedded directly in
* the arena's stats due to how stats structures are shared between the
* arena and ctl code.
*
 * Synchronization: Same as associated arena's stats field.
 */
arena_stats_decay_t *stats;
/* Peak number of pages in associated extents. Used for debug only. */
uint64_t ceil_npages;
};
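/*
 * Illustrative sketch (not part of jemalloc): the deadline arithmetic described
 * for the fields above, written with plain nanosecond counters instead of
 * nstime_t.  The jitter term is assumed to be a uniform sample in
 * [0, interval_ns), so epochs still advance in exact multiples of the interval
 * while different arenas' purge deadlines drift apart.
 */
static inline uint64_t
example_decay_deadline_ns(uint64_t epoch_ns, uint64_t interval_ns,
    uint64_t jitter_ns) {
	/* deadline = start of current epoch + one interval + per-epoch jitter. */
	return epoch_ns + interval_ns + jitter_ns;
}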
struct arena_s {
/*
* Number of threads currently assigned to this arena. Each thread has
* two distinct assignments, one for application-serving allocation, and
* the other for internal metadata allocation. Internal metadata must
* not be allocated from arenas explicitly created via the arenas.create
* mallctl, because the arena.<i>.reset mallctl indiscriminately
* discards all allocations for the affected arena.
*
* 0: Application allocation.
* 1: Internal metadata allocation.
*
* Synchronization: atomic.
*/
atomic_u_t nthreads[2];
/*
* When percpu_arena is enabled, to amortize the cost of reading /
* updating the current CPU id, track the most recent thread accessing
* this arena, and only read CPU if there is a mismatch.
*/
tsdn_t *last_thd;
/* Synchronization: internal. */
arena_stats_t stats;
/*
* Lists of tcaches and cache_bin_array_descriptors for extant threads
* associated with this arena. Stats from these are merged
* incrementally, and at exit if opt_stats_print is enabled.
*
* Synchronization: tcache_ql_mtx.
*/
ql_head(tcache_t) tcache_ql;
ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
malloc_mutex_t tcache_ql_mtx;
/* Synchronization: internal. */
prof_accum_t prof_accum;
uint64_t prof_accumbytes;
/*
* PRNG state for cache index randomization of large allocation base
* pointers.
*
* Synchronization: atomic.
*/
atomic_zu_t offset_state;
/*
* Extent serial number generator state.
*
* Synchronization: atomic.
*/
atomic_zu_t extent_sn_next;
/*
* Represents a dss_prec_t, but atomically.
*
* Synchronization: atomic.
*/
atomic_u_t dss_prec;
/*
* Number of pages in active extents.
*
* Synchronization: atomic.
*/
atomic_zu_t nactive;
/*
* Extant large allocations.
*
* Synchronization: large_mtx.
*/
extent_list_t large;
/* Synchronizes all large allocation/update/deallocation. */
malloc_mutex_t large_mtx;
/*
* Collections of extents that were previously allocated. These are
* used when allocating extents, in an attempt to re-use address space.
*
* Synchronization: internal.
*/
extents_t extents_dirty;
extents_t extents_muzzy;
extents_t extents_retained;
/*
* Decay-based purging state, responsible for scheduling extent state
* transitions.
*
* Synchronization: internal.
*/
arena_decay_t decay_dirty; /* dirty --> muzzy */
arena_decay_t decay_muzzy; /* muzzy --> retained */
/*
* Next extent size class in a growing series to use when satisfying a
* request via the extent hooks (only if opt_retain). This limits the
* number of disjoint virtual memory ranges so that extent merging can
* be effective even if multiple arenas' extent allocation requests are
* highly interleaved.
*
* retain_grow_limit is the max allowed size ind to expand (unless the
* required size is greater). Default is no limit, and controlled
* through mallctl only.
*
* Synchronization: extent_grow_mtx
*/
pszind_t extent_grow_next;
pszind_t retain_grow_limit;
malloc_mutex_t extent_grow_mtx;
/*
* Available extent structures that were allocated via
* base_alloc_extent().
*
* Synchronization: extent_avail_mtx.
*/
extent_tree_t extent_avail;
malloc_mutex_t extent_avail_mtx;
/*
* bins is used to store heaps of free regions.
*
* Synchronization: internal.
*/
bin_t bins[NBINS];
/*
* Base allocator, from which arena metadata are allocated.
*
* Synchronization: internal.
*/
base_t *base;
/* Used to determine uptime. Read-only after initialization. */
nstime_t create_time;
};
/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
ticker_t decay_ticker;
};
/* Used to pass rtree lookup context down the path. */
struct alloc_ctx_s {
szind_t szind;
bool slab;
};
#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */
#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
#define JEMALLOC_INTERNAL_ARENA_TYPES_H
/* Maximum number of regions in one slab. */
#define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN)
#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
/* Default decay times in milliseconds. */
#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
#define MUZZY_DECAY_MS_DEFAULT ZD(10 * 1000)
/* Number of event ticks between time checks. */
#define DECAY_NTICKS_PER_UPDATE 1000
typedef struct arena_slab_data_s arena_slab_data_t;
typedef struct arena_decay_s arena_decay_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;
typedef struct alloc_ctx_s alloc_ctx_t;
typedef enum {
percpu_arena_mode_names_base = 0, /* Used for options processing. */
/*
* *_uninit are used only during bootstrapping, and must correspond
* to initialized variant plus percpu_arena_mode_enabled_base.
*/
percpu_arena_uninit = 0,
per_phycpu_arena_uninit = 1,
/* All non-disabled modes must come after percpu_arena_disabled. */
percpu_arena_disabled = 2,
percpu_arena_mode_names_limit = 3, /* Used for options processing. */
percpu_arena_mode_enabled_base = 3,
percpu_arena = 3,
per_phycpu_arena = 4 /* Hyper threads share arena. */
} percpu_arena_mode_t;
#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base)
#define PERCPU_ARENA_DEFAULT percpu_arena_disabled
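/*
 * Illustrative note (not in the original source): the required correspondence
 * works out as
 *   percpu_arena_uninit     (0) + percpu_arena_mode_enabled_base (3) == percpu_arena     (3)
 *   per_phycpu_arena_uninit (1) + percpu_arena_mode_enabled_base (3) == per_phycpu_arena (4)
 * so bootstrap code can recover the initialized mode by simple addition.
 */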
#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/util.h"
/*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
#define assert(e) do { \
if (unlikely(config_debug && !(e))) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#endif
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} \
unreachable(); \
} while (0)
#endif
#ifndef not_implemented
#define not_implemented() do { \
if (config_debug) { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (unlikely(config_debug && !(e))) { \
not_implemented(); \
} \
} while (0)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#ifndef cassert
#define cassert(c) do { \
if (unlikely(!(c))) { \
not_reached(); \
} \
} while (0)
#endif
#ifndef JEMALLOC_INTERNAL_ATOMIC_H
#define JEMALLOC_INTERNAL_ATOMIC_H
#define ATOMIC_INLINE static inline
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
# include "jemalloc/internal/atomic_gcc_atomic.h"
#elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
# include "jemalloc/internal/atomic_gcc_sync.h"
#elif defined(_MSC_VER)
# include "jemalloc/internal/atomic_msvc.h"
#elif defined(JEMALLOC_C11_ATOMICS)
# include "jemalloc/internal/atomic_c11.h"
#else
# error "Don't have atomics implemented on this platform."
#endif
/*
 * This header gives more or less a backport of C11 atomics. The user can write
 * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
 * counterparts of the C11 atomic functions for type, as so:
 *   JEMALLOC_GENERATE_ATOMICS(int *, pi, 3);
 * and then write things like:
 *   int *some_ptr;
 *   atomic_pi_t atomic_ptr_to_int;
 *   atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED);
 *   int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL);
 *   assert(some_ptr == prev_value);
 * and expect things to work in the obvious way.
 *
 * Also included (with naming differences to avoid conflicts with the standard
 * library):
 *   atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
 *   ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
 */
/*
 * Pure convenience, so that we don't have to type "atomic_memory_order_"
 * quite so often.
 */
#define ATOMIC_RELAXED atomic_memory_order_relaxed
#define ATOMIC_ACQUIRE atomic_memory_order_acquire
#define ATOMIC_RELEASE atomic_memory_order_release
#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
/*
 * Not all platforms have 64-bit atomics. If we do, this #define exposes that
 * fact.
 */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# define JEMALLOC_ATOMIC_U64
#endif
JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
/*
 * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
 * platform that actually needs to know the size, MSVC.
 */
JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)
#ifdef JEMALLOC_ATOMIC_U64
JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
#endif
#undef ATOMIC_INLINE
#endif /* JEMALLOC_INTERNAL_ATOMIC_H */
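/*
 * Illustrative sketch (not part of jemalloc): minimal use of the generated
 * size_t atomics, mirroring how the arena stats code in this document uses
 * them.  The helper name is ours; atomic_zu_t, atomic_fetch_add_zu(), and
 * ATOMIC_RELAXED all come from the generators above.
 */
static inline size_t
example_counter_bump(atomic_zu_t *counter, size_t delta) {
	/* fetch_add returns the value held before the addition. */
	return atomic_fetch_add_zu(counter, delta, ATOMIC_RELAXED);
}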
#ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H
#define JEMALLOC_INTERNAL_ATOMIC_C11_H
#include <stdatomic.h>
#define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__)
#define atomic_memory_order_t memory_order
#define atomic_memory_order_relaxed memory_order_relaxed
#define atomic_memory_order_acquire memory_order_acquire
#define atomic_memory_order_release memory_order_release
#define atomic_memory_order_acq_rel memory_order_acq_rel
#define atomic_memory_order_seq_cst memory_order_seq_cst
#define atomic_fence atomic_thread_fence
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
/* unused */ lg_size) \
typedef _Atomic(type) atomic_##short_type##_t; \
\
ATOMIC_INLINE type \
atomic_load_##short_type(const atomic_##short_type##_t *a, \
atomic_memory_order_t mo) { \
/* \
* A strict interpretation of the C standard prevents \
* atomic_load from taking a const argument, but it's \
* convenient for our purposes. This cast is a workaround. \
*/ \
atomic_##short_type##_t* a_nonconst = \
(atomic_##short_type##_t*)a; \
return atomic_load_explicit(a_nonconst, mo); \
} \
\
ATOMIC_INLINE void \
atomic_store_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
atomic_store_explicit(a, val, mo); \
} \
\
ATOMIC_INLINE type \
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return atomic_exchange_explicit(a, val, mo); \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
return atomic_compare_exchange_weak_explicit(a, expected, \
desired, success_mo, failure_mo); \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
return atomic_compare_exchange_strong_explicit(a, expected, \
desired, success_mo, failure_mo); \
}
/*
* Integral types have some special operations available that non-integral ones
* lack.
*/
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
/* unused */ lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
\
ATOMIC_INLINE type \
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_add_explicit(a, val, mo); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_sub_explicit(a, val, mo); \
} \
ATOMIC_INLINE type \
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_and_explicit(a, val, mo); \
} \
ATOMIC_INLINE type \
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_or_explicit(a, val, mo); \
} \
ATOMIC_INLINE type \
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return atomic_fetch_xor_explicit(a, val, mo); \
}
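/*
 * Illustrative expansion (not part of the original header): with this backend,
 * JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR) produces roughly
 * the following thin wrappers around the _explicit stdatomic functions:
 *
 *   typedef _Atomic(size_t) atomic_zu_t;
 *   size_t atomic_load_zu(const atomic_zu_t *a, atomic_memory_order_t mo);
 *   void atomic_store_zu(atomic_zu_t *a, size_t val, atomic_memory_order_t mo);
 *   size_t atomic_fetch_add_zu(atomic_zu_t *a, size_t val,
 *       atomic_memory_order_t mo);
 *   ...and likewise for exchange, compare_exchange, sub, and, or, xor.
 */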
#endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */
#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
#define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
#include "jemalloc/internal/assert.h"
#define ATOMIC_INIT(...) {__VA_ARGS__}
typedef enum {
atomic_memory_order_relaxed,
atomic_memory_order_acquire,
atomic_memory_order_release,
atomic_memory_order_acq_rel,
atomic_memory_order_seq_cst
} atomic_memory_order_t;
ATOMIC_INLINE int
atomic_enum_to_builtin(atomic_memory_order_t mo) {
switch (mo) {
case atomic_memory_order_relaxed:
return __ATOMIC_RELAXED;
case atomic_memory_order_acquire:
return __ATOMIC_ACQUIRE;
case atomic_memory_order_release:
return __ATOMIC_RELEASE;
case atomic_memory_order_acq_rel:
return __ATOMIC_ACQ_REL;
case atomic_memory_order_seq_cst:
return __ATOMIC_SEQ_CST;
}
/* Can't happen; the switch is exhaustive. */
not_reached();
}
ATOMIC_INLINE void
atomic_fence(atomic_memory_order_t mo) {
__atomic_thread_fence(atomic_enum_to_builtin(mo));
}
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
/* unused */ lg_size) \
typedef struct { \
type repr; \
} atomic_##short_type##_t; \
\
ATOMIC_INLINE type \
atomic_load_##short_type(const atomic_##short_type##_t *a, \
atomic_memory_order_t mo) { \
type result; \
__atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \
return result; \
} \
\
ATOMIC_INLINE void \
atomic_store_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
__atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
type result; \
__atomic_exchange(&a->repr, &val, &result, \
atomic_enum_to_builtin(mo)); \
return result; \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
return __atomic_compare_exchange(&a->repr, expected, &desired, \
true, atomic_enum_to_builtin(success_mo), \
atomic_enum_to_builtin(failure_mo)); \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
return __atomic_compare_exchange(&a->repr, expected, &desired, \
false, \
atomic_enum_to_builtin(success_mo), \
atomic_enum_to_builtin(failure_mo)); \
}
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
/* unused */ lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
\
ATOMIC_INLINE type \
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_add(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_sub(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_and(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_or(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __atomic_fetch_xor(&a->repr, val, \
atomic_enum_to_builtin(mo)); \
}
#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */
#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
#define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
#define ATOMIC_INIT(...) {__VA_ARGS__}
typedef enum {
atomic_memory_order_relaxed,
atomic_memory_order_acquire,
atomic_memory_order_release,
atomic_memory_order_acq_rel,
atomic_memory_order_seq_cst
} atomic_memory_order_t;
ATOMIC_INLINE void
atomic_fence(atomic_memory_order_t mo) {
/* Easy cases first: no barrier, and full barrier. */
if (mo == atomic_memory_order_relaxed) {
asm volatile("" ::: "memory");
return;
}
if (mo == atomic_memory_order_seq_cst) {
asm volatile("" ::: "memory");
__sync_synchronize();
asm volatile("" ::: "memory");
return;
}
asm volatile("" ::: "memory");
# if defined(__i386__) || defined(__x86_64__)
/* This is implicit on x86. */
# elif defined(__ppc__)
asm volatile("lwsync");
# elif defined(__sparc__) && defined(__arch64__)
if (mo == atomic_memory_order_acquire) {
asm volatile("membar #LoadLoad | #LoadStore");
} else if (mo == atomic_memory_order_release) {
asm volatile("membar #LoadStore | #StoreStore");
} else {
asm volatile("membar #LoadLoad | #LoadStore | #StoreStore");
}
# else
__sync_synchronize();
# endif
asm volatile("" ::: "memory");
}
/*
* A correct implementation of seq_cst loads and stores on weakly ordered
* architectures could do either of the following:
* 1. store() is weak-fence -> store -> strong fence, load() is load ->
* strong-fence.
* 2. store() is strong-fence -> store, load() is strong-fence -> load ->
* weak-fence.
* The tricky thing is, load() and store() above can be the load or store
* portions of a gcc __sync builtin, so we have to follow GCC's lead, which
* means going with strategy 2.
* On strongly ordered architectures, the natural strategy is to stick a strong
* fence after seq_cst stores, and have naked loads. So we want the strong
* fences in different places on different architectures.
* atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to
* accomplish this.
*/
ATOMIC_INLINE void
atomic_pre_sc_load_fence() {
# if defined(__i386__) || defined(__x86_64__) || \
(defined(__sparc__) && defined(__arch64__))
atomic_fence(atomic_memory_order_relaxed);
# else
atomic_fence(atomic_memory_order_seq_cst);
# endif
}
ATOMIC_INLINE void
atomic_post_sc_store_fence() {
# if defined(__i386__) || defined(__x86_64__) || \
(defined(__sparc__) && defined(__arch64__))
atomic_fence(atomic_memory_order_seq_cst);
# else
atomic_fence(atomic_memory_order_relaxed);
# endif
}
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
/* unused */ lg_size) \
typedef struct { \
type volatile repr; \
} atomic_##short_type##_t; \
\
ATOMIC_INLINE type \
atomic_load_##short_type(const atomic_##short_type##_t *a, \
atomic_memory_order_t mo) { \
if (mo == atomic_memory_order_seq_cst) { \
atomic_pre_sc_load_fence(); \
} \
type result = a->repr; \
if (mo != atomic_memory_order_relaxed) { \
atomic_fence(atomic_memory_order_acquire); \
} \
return result; \
} \
\
ATOMIC_INLINE void \
atomic_store_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
if (mo != atomic_memory_order_relaxed) { \
atomic_fence(atomic_memory_order_release); \
} \
a->repr = val; \
if (mo == atomic_memory_order_seq_cst) { \
atomic_post_sc_store_fence(); \
} \
} \
\
ATOMIC_INLINE type \
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
/* \
* Because of FreeBSD, we care about gcc 4.2, which doesn't have\
* an atomic exchange builtin. We fake it with a CAS loop. \
*/ \
while (true) { \
type old = a->repr; \
if (__sync_bool_compare_and_swap(&a->repr, old, val)) { \
return old; \
} \
} \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
desired); \
if (prev == *expected) { \
return true; \
} else { \
*expected = prev; \
return false; \
} \
} \
ATOMIC_INLINE bool \
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
desired); \
if (prev == *expected) { \
return true; \
} else { \
*expected = prev; \
return false; \
} \
}
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
/* unused */ lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
\
ATOMIC_INLINE type \
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_add(&a->repr, val); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_sub(&a->repr, val); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_and(&a->repr, val); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_or(&a->repr, val); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return __sync_fetch_and_xor(&a->repr, val); \
}
#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */
#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H
#define JEMALLOC_INTERNAL_ATOMIC_MSVC_H
#define ATOMIC_INIT(...) {__VA_ARGS__}
typedef enum {
atomic_memory_order_relaxed,
atomic_memory_order_acquire,
atomic_memory_order_release,
atomic_memory_order_acq_rel,
atomic_memory_order_seq_cst
} atomic_memory_order_t;
typedef char atomic_repr_0_t;
typedef short atomic_repr_1_t;
typedef long atomic_repr_2_t;
typedef __int64 atomic_repr_3_t;
ATOMIC_INLINE void
atomic_fence(atomic_memory_order_t mo) {
_ReadWriteBarrier();
# if defined(_M_ARM) || defined(_M_ARM64)
/* ARM needs a barrier for everything but relaxed. */
if (mo != atomic_memory_order_relaxed) {
MemoryBarrier();
}
# elif defined(_M_IX86) || defined (_M_X64)
/* x86 needs a barrier only for seq_cst. */
if (mo == atomic_memory_order_seq_cst) {
MemoryBarrier();
}
# else
# error "Don't know how to create atomics for this platform for MSVC."
# endif
_ReadWriteBarrier();
}
#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t
#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b)
#define ATOMIC_RAW_CONCAT(a, b) a ## b
#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \
base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size))
#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \
ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size)
#define ATOMIC_INTERLOCKED_SUFFIX_0 8
#define ATOMIC_INTERLOCKED_SUFFIX_1 16
#define ATOMIC_INTERLOCKED_SUFFIX_2
#define ATOMIC_INTERLOCKED_SUFFIX_3 64
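/*
 * Illustrative expansions (not in the original source): the macros above build
 * the MSVC intrinsic name from the operand size, e.g.
 *   ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 3) -> _InterlockedExchange64
 *   ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 2) -> _InterlockedExchange
 * (the lg_size == 2 suffix is deliberately left empty because the 32-bit
 * intrinsics carry no size suffix).
 */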
#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
typedef struct { \
ATOMIC_INTERLOCKED_REPR(lg_size) repr; \
} atomic_##short_type##_t; \
\
ATOMIC_INLINE type \
atomic_load_##short_type(const atomic_##short_type##_t *a, \
atomic_memory_order_t mo) { \
ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \
if (mo != atomic_memory_order_relaxed) { \
atomic_fence(atomic_memory_order_acquire); \
} \
return (type) ret; \
} \
\
ATOMIC_INLINE void \
atomic_store_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
if (mo != atomic_memory_order_relaxed) { \
atomic_fence(atomic_memory_order_release); \
} \
a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \
if (mo == atomic_memory_order_seq_cst) { \
atomic_fence(atomic_memory_order_seq_cst); \
} \
} \
\
ATOMIC_INLINE type \
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
atomic_memory_order_t mo) { \
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \
lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
ATOMIC_INTERLOCKED_REPR(lg_size) e = \
(ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \
ATOMIC_INTERLOCKED_REPR(lg_size) d = \
(ATOMIC_INTERLOCKED_REPR(lg_size))desired; \
ATOMIC_INTERLOCKED_REPR(lg_size) old = \
ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \
lg_size)(&a->repr, d, e); \
if (old == e) { \
return true; \
} else { \
*expected = (type)old; \
return false; \
} \
} \
\
ATOMIC_INLINE bool \
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \
/* We implement the weak version with strong semantics. */ \
return atomic_compare_exchange_weak_##short_type(a, expected, \
desired, success_mo, failure_mo); \
}
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
\
ATOMIC_INLINE type \
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \
lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
} \
\
ATOMIC_INLINE type \
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
/* \
* MSVC warns on negation of unsigned operands, but for us it \
* gives exactly the right semantics (MAX_TYPE + 1 - operand). \
*/ \
__pragma(warning(push)) \
__pragma(warning(disable: 4146)) \
return atomic_fetch_add_##short_type(a, -val, mo); \
__pragma(warning(pop)) \
} \
ATOMIC_INLINE type \
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \
&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
} \
ATOMIC_INLINE type \
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \
&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
} \
ATOMIC_INLINE type \
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
type val, atomic_memory_order_t mo) { \
return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \
&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
}
#endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
extern bool opt_background_thread;
extern size_t opt_max_background_threads;
extern malloc_mutex_t background_thread_lock;
extern atomic_b_t background_thread_enabled_state;
extern size_t n_background_threads;
extern size_t max_background_threads;
extern background_thread_info_t *background_thread_info;
extern bool can_enable_background_thread;
bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
bool background_threads_enable(tsd_t *tsd);
bool background_threads_disable(tsd_t *tsd);
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, size_t npages_new);
void background_thread_prefork0(tsdn_t *tsdn);
void background_thread_prefork1(tsdn_t *tsdn);
void background_thread_postfork_parent(tsdn_t *tsdn);
void background_thread_postfork_child(tsdn_t *tsdn);
bool background_thread_stats_read(tsdn_t *tsdn,
background_thread_stats_t *stats);
void background_thread_ctl_init(tsdn_t *tsdn);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
#endif
bool background_thread_boot0(void);
bool background_thread_boot1(tsdn_t *tsdn);
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
JEMALLOC_ALWAYS_INLINE bool
background_thread_enabled(void) {
return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED);
}
JEMALLOC_ALWAYS_INLINE void
background_thread_enabled_set(tsdn_t *tsdn, bool state) {
malloc_mutex_assert_owner(tsdn, &background_thread_lock);
atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED);
}
JEMALLOC_ALWAYS_INLINE background_thread_info_t *
arena_background_thread_info_get(arena_t *arena) {
unsigned arena_ind = arena_ind_get(arena);
return &background_thread_info[arena_ind % ncpus];
}
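/*
 * Note: background thread info slots are shared; an arena is mapped to a slot
 * by its index modulo ncpus, as above, so multiple arenas may wake the same
 * background thread.
 */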
JEMALLOC_ALWAYS_INLINE uint64_t
background_thread_wakeup_time_get(background_thread_info_t *info) {
uint64_t next_wakeup = nstime_ns(&info->next_wakeup);
assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) ==
(next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP));
return next_wakeup;
}
JEMALLOC_ALWAYS_INLINE void
background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info,
uint64_t wakeup_time) {
malloc_mutex_assert_owner(tsdn, &info->mtx);
atomic_store_b(&info->indefinite_sleep,
wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE);
nstime_init(&info->next_wakeup, wakeup_time);
}
JEMALLOC_ALWAYS_INLINE bool
background_thread_indefinite_sleep(background_thread_info_t *info) {
return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
}
JEMALLOC_ALWAYS_INLINE void
arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
bool is_background_thread) {
if (!background_thread_enabled() || is_background_thread) {
return;
}
background_thread_info_t *info =
arena_background_thread_info_get(arena);
if (background_thread_indefinite_sleep(info)) {
background_thread_interval_check(tsdn, arena,
&arena->decay_dirty, 0);
}
}
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */