Commit 994ed2bc authored Jun 25, 2011 by antirez
unstable merge conflicts resolved
Parents: 45ec3243, d5b36c51
Changes: 110
deps/jemalloc/doc/stylesheet.xsl
0 → 100644
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
  <xsl:param name="funcsynopsis.style">ansi</xsl:param>
  <xsl:param name="function.parens" select="1"/>

  <xsl:template match="mallctl">
    "<xsl:call-template name="inline.monoseq"/>"
  </xsl:template>
</xsl:stylesheet>
deps/jemalloc/include/jemalloc/internal/arena.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * Subpages are an artificially designated partitioning of pages.  Their only
 * purpose is to support subpage-spaced size classes.
 *
 * There must be at least 4 subpages per page, due to the way size classes are
 * handled.
 */
#define LG_SUBPAGE 8
#define SUBPAGE ((size_t)(1U << LG_SUBPAGE))
#define SUBPAGE_MASK (SUBPAGE - 1)

/* Return the smallest subpage multiple that is >= s. */
#define SUBPAGE_CEILING(s) \
    (((s) + SUBPAGE_MASK) & ~SUBPAGE_MASK)

#ifdef JEMALLOC_TINY
/* Smallest size class to support. */
#  define LG_TINY_MIN LG_SIZEOF_PTR
#  define TINY_MIN (1U << LG_TINY_MIN)
#endif

/*
 * Maximum size class that is a multiple of the quantum, but not (necessarily)
 * a power of 2.  Above this size, allocations are rounded up to the nearest
 * power of 2.
 */
#define LG_QSPACE_MAX_DEFAULT 7

/*
 * Maximum size class that is a multiple of the cacheline, but not (necessarily)
 * a power of 2.  Above this size, allocations are rounded up to the nearest
 * power of 2.
 */
#define LG_CSPACE_MAX_DEFAULT 9

/*
 * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
 * as small as possible such that this setting is still honored, without
 * violating other constraints.  The goal is to make runs as small as possible
 * without exceeding a per run external fragmentation threshold.
 *
 * We use binary fixed point math for overhead computations, where the binary
 * point is implicitly RUN_BFP bits to the left.
 *
 * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
 * honored for some/all object sizes, since when heap profiling is enabled
 * there is one pointer of header overhead per object (plus a constant).  This
 * constraint is relaxed (ignored) for runs that are so small that the
 * per-region overhead is greater than:
 *
 *   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP))
 */
#define RUN_BFP 12
/*                              \/   Implicit binary fixed point. */
#define RUN_MAX_OVRHD 0x0000003dU
#define RUN_MAX_OVRHD_RELAX 0x00001800U

/* Maximum number of regions in one run. */
#define LG_RUN_MAXREGS 11
#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
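
/*
 * Illustrative sketch (not part of the original file): one way to read the
 * fixed-point overhead bound above.  With RUN_BFP == 12 the implicit
 * denominator is 2^12 == 4096, so RUN_MAX_OVRHD == 0x3d == 61 corresponds to
 * a target header overhead of roughly 61/4096, i.e. about 1.5% of a run.
 */
static inline bool
run_ovrhd_ok_sketch(size_t hdr_size, size_t run_size)
{
    /* Equivalent to hdr_size / run_size <= RUN_MAX_OVRHD / 2^RUN_BFP. */
    return (((hdr_size << RUN_BFP) / run_size) <= RUN_MAX_OVRHD);
}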

/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> opt_lg_dirty_mult) >= ndirty
 *
 * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32
 * times as many active pages as dirty pages.
 */
#define LG_DIRTY_MULT_DEFAULT 5
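
/*
 * Illustrative sketch (not part of the original file): the purge condition
 * implied by the ratio above.  With lg_dirty_mult == 5, purging becomes
 * necessary once dirty pages exceed 1/32 of the active pages.
 */
static inline bool
arena_exceeds_dirty_ratio_sketch(size_t nactive, size_t ndirty,
    ssize_t lg_dirty_mult)
{
    return ((nactive >> lg_dirty_mult) < ndirty);
}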

typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_s {
    union {
        /*
         * Linkage for run trees.  There are two disjoint uses:
         *
         * 1) arena_t's runs_avail_{clean,dirty} trees.
         * 2) arena_run_t conceptually uses this linkage for in-use
         *    non-full runs, rather than directly embedding linkage.
         */
        rb_node(arena_chunk_map_t) rb_link;

        /*
         * List of runs currently in purgatory.  arena_chunk_purge()
         * temporarily allocates runs that contain dirty pages while
         * purging, so that other threads cannot use the runs while the
         * purging thread is operating without the arena lock held.
         */
        ql_elm(arena_chunk_map_t) ql_link;
    } u;

#ifdef JEMALLOC_PROF
    /* Profile counters, used for large object runs. */
    prof_ctx_t *prof_ctx;
#endif

    /*
     * Run address (or size) and various flags are stored together.  The bit
     * layout looks like (assuming 32-bit system):
     *
     *   ???????? ???????? ????---- ----dula
     *
     * ? : Unallocated: Run address for first/last pages, unset for internal
     *                  pages.
     *     Small: Run page offset.
     *     Large: Run size for first page, unset for trailing pages.
     * - : Unused.
     * d : dirty?
     * u : unzeroed?
     * l : large?
     * a : allocated?
     *
     * Following are example bit patterns for the three types of runs.
     *
     * p : run page offset
     * s : run size
     * c : (binind+1) for size class (used only if prof_promote is true)
     * x : don't care
     * - : 0
     * + : 1
     * [DULA] : bit set
     * [dula] : bit unset
     *
     *   Unallocated (clean):
     *     ssssssss ssssssss ssss---- ----du-a
     *     xxxxxxxx xxxxxxxx xxxx---- -----Uxx
     *     ssssssss ssssssss ssss---- ----dU-a
     *
     *   Unallocated (dirty):
     *     ssssssss ssssssss ssss---- ----D--a
     *     xxxxxxxx xxxxxxxx xxxx---- ----xxxx
     *     ssssssss ssssssss ssss---- ----D--a
     *
     *   Small:
     *     pppppppp pppppppp pppp---- ----d--A
     *     pppppppp pppppppp pppp---- -------A
     *     pppppppp pppppppp pppp---- ----d--A
     *
     *   Large:
     *     ssssssss ssssssss ssss---- ----D-LA
     *     xxxxxxxx xxxxxxxx xxxx---- ----xxxx
     *     -------- -------- -------- ----D-LA
     *
     *   Large (sampled, size <= PAGE_SIZE):
     *     ssssssss ssssssss sssscccc ccccD-LA
     *
     *   Large (not sampled, size == PAGE_SIZE):
     *     ssssssss ssssssss ssss---- ----D-LA
     */
    size_t bits;
#ifdef JEMALLOC_PROF
#define CHUNK_MAP_CLASS_SHIFT 4
#define CHUNK_MAP_CLASS_MASK ((size_t)0xff0U)
#endif
#define CHUNK_MAP_FLAGS_MASK ((size_t)0xfU)
#define CHUNK_MAP_DIRTY ((size_t)0x8U)
#define CHUNK_MAP_UNZEROED ((size_t)0x4U)
#define CHUNK_MAP_LARGE ((size_t)0x2U)
#define CHUNK_MAP_ALLOCATED ((size_t)0x1U)
#define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED
};
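
/*
 * Illustrative sketch (not part of the original file): decoding a chunk map
 * entry according to the bit layout documented above.  The flag masks are the
 * ones defined in this struct; the size extraction mirrors the
 * "bits & ~PAGE_MASK" idiom used by arena_dalloc() below.
 */
static inline bool
chunk_map_is_large_sketch(size_t bits)
{
    return ((bits & CHUNK_MAP_LARGE) != 0);
}

static inline size_t
chunk_map_large_size_sketch(size_t bits)
{
    /* Valid for the first page of a large run; trailing pages leave it unset. */
    return (bits & ~PAGE_MASK);
}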

typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;

/* Arena chunk header. */
struct arena_chunk_s {
    /* Arena that owns the chunk. */
    arena_t *arena;

    /* Linkage for the arena's chunks_dirty list. */
    ql_elm(arena_chunk_t) link_dirty;

    /*
     * True if the chunk is currently in the chunks_dirty list, due to
     * having at some point contained one or more dirty pages.  Removal
     * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
     */
    bool dirtied;

    /* Number of dirty pages. */
    size_t ndirty;

    /*
     * Map of pages within chunk that keeps track of free/large/small.  The
     * first map_bias entries are omitted, since the chunk header does not
     * need to be tracked in the map.  This omission saves a header page
     * for common chunk sizes (e.g. 4 MiB).
     */
    arena_chunk_map_t map[1]; /* Dynamically sized. */
};
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;

struct arena_run_s {
#ifdef JEMALLOC_DEBUG
    uint32_t magic;
#  define ARENA_RUN_MAGIC 0x384adf93
#endif

    /* Bin this run is associated with. */
    arena_bin_t *bin;

    /* Index of next region that has never been allocated, or nregs. */
    uint32_t nextind;

    /* Number of free regions in run. */
    unsigned nfree;
};

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 */
struct arena_bin_info_s {
    /* Size of regions in a run for this bin's size class. */
    size_t reg_size;

    /* Total size of a run for this bin's size class. */
    size_t run_size;

    /* Total number of regions in a run for this bin's size class. */
    uint32_t nregs;

    /*
     * Offset of first bitmap_t element in a run header for this bin's size
     * class.
     */
    uint32_t bitmap_offset;

    /*
     * Metadata used to manipulate bitmaps for runs associated with this
     * bin.
     */
    bitmap_info_t bitmap_info;

#ifdef JEMALLOC_PROF
    /*
     * Offset of first (prof_ctx_t *) in a run header for this bin's size
     * class, or 0 if (opt_prof == false).
     */
    uint32_t ctx0_offset;
#endif

    /* Offset of first region in a run for this bin's size class. */
    uint32_t reg0_offset;
};

struct arena_bin_s {
    /*
     * All operations on runcur, runs, and stats require that lock be
     * locked.  Run allocation/deallocation are protected by the arena lock,
     * which may be acquired while holding one or more bin locks, but not
     * vice versa.
     */
    malloc_mutex_t lock;

    /*
     * Current run being used to service allocations of this bin's size
     * class.
     */
    arena_run_t *runcur;

    /*
     * Tree of non-full runs.  This tree is used when looking for an
     * existing run when runcur is no longer usable.  We choose the
     * non-full run that is lowest in memory; this policy tends to keep
     * objects packed well, and it can also help reduce the number of
     * almost-empty chunks.
     */
    arena_run_tree_t runs;

#ifdef JEMALLOC_STATS
    /* Bin statistics. */
    malloc_bin_stats_t stats;
#endif
};

struct arena_s {
#ifdef JEMALLOC_DEBUG
    uint32_t magic;
#  define ARENA_MAGIC 0x947d3d24
#endif

    /* This arena's index within the arenas array. */
    unsigned ind;

    /*
     * Number of threads currently assigned to this arena.  This field is
     * protected by arenas_lock.
     */
    unsigned nthreads;

    /*
     * There are three classes of arena operations from a locking
     * perspective:
     * 1) Thread assignment (modifies nthreads) is protected by
     *    arenas_lock.
     * 2) Bin-related operations are protected by bin locks.
     * 3) Chunk- and run-related operations are protected by this mutex.
     */
    malloc_mutex_t lock;

#ifdef JEMALLOC_STATS
    arena_stats_t stats;
#  ifdef JEMALLOC_TCACHE
    /*
     * List of tcaches for extant threads associated with this arena.
     * Stats from these are merged incrementally, and at exit.
     */
    ql_head(tcache_t) tcache_ql;
#  endif
#endif

#ifdef JEMALLOC_PROF
    uint64_t prof_accumbytes;
#endif

    /* List of dirty-page-containing chunks this arena manages. */
    ql_head(arena_chunk_t) chunks_dirty;

    /*
     * In order to avoid rapid chunk allocation/deallocation when an arena
     * oscillates right on the cusp of needing a new chunk, cache the most
     * recently freed chunk.  The spare is left in the arena's chunk trees
     * until it is deleted.
     *
     * There is one spare chunk per arena, rather than one spare total, in
     * order to avoid interactions between multiple threads that could make
     * a single spare inadequate.
     */
    arena_chunk_t *spare;

    /* Number of pages in active runs. */
    size_t nactive;

    /*
     * Current count of pages within unused runs that are potentially
     * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
     * By tracking this, we can institute a limit on how much dirty unused
     * memory is mapped for each arena.
     */
    size_t ndirty;

    /*
     * Approximate number of pages being purged.  It is possible for
     * multiple threads to purge dirty pages concurrently, and they use
     * npurgatory to indicate the total number of pages all threads are
     * attempting to purge.
     */
    size_t npurgatory;

    /*
     * Size/address-ordered trees of this arena's available runs.  The trees
     * are used for first-best-fit run allocation.  The dirty tree contains
     * runs with dirty pages (i.e. very likely to have been touched and
     * therefore have associated physical pages), whereas the clean tree
     * contains runs with pages that either have no associated physical
     * pages, or have pages that the kernel may recycle at any time due to
     * previous madvise(2) calls.  The dirty tree is used in preference to
     * the clean tree for allocations, because using dirty pages reduces
     * the amount of dirty purging necessary to keep the active:dirty page
     * ratio below the purge threshold.
     */
    arena_avail_tree_t runs_avail_clean;
    arena_avail_tree_t runs_avail_dirty;

    /*
     * bins is used to store trees of free regions of the following sizes,
     * assuming a 64-bit system with 16-byte quantum, 4 KiB page size, and
     * default MALLOC_CONF.
     *
     *   bins[i] |   size |
     *   --------+--------+
     *        0  |      8 |
     *   --------+--------+
     *        1  |     16 |
     *        2  |     32 |
     *        3  |     48 |
     *           :        :
     *        6  |     96 |
     *        7  |    112 |
     *        8  |    128 |
     *   --------+--------+
     *        9  |    192 |
     *       10  |    256 |
     *       11  |    320 |
     *       12  |    384 |
     *       13  |    448 |
     *       14  |    512 |
     *   --------+--------+
     *       15  |    768 |
     *       16  |   1024 |
     *       17  |   1280 |
     *           :        :
     *       25  |   3328 |
     *       26  |   3584 |
     *       27  |   3840 |
     *   --------+--------+
     */
    arena_bin_t bins[1]; /* Dynamically sized. */
};
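
/*
 * Illustrative sketch (not part of the original file): the bin index ->
 * region size mapping from the table above, hard-coding the assumptions
 * stated there (64-bit system, 16-byte quantum, 4 KiB pages, default
 * MALLOC_CONF), i.e. 1 tiny bin, 8 quantum-spaced bins, 6 cacheline-spaced
 * bins and 13 subpage-spaced bins.
 */
static inline size_t
arena_bin_size_sketch(unsigned binind)
{
    if (binind == 0)
        return (8);                                /* Tiny. */
    if (binind <= 8)
        return ((size_t)binind * 16);              /* Quantum-spaced. */
    if (binind <= 14)
        return (128 + (size_t)(binind - 8) * 64);  /* Cacheline-spaced. */
    return (512 + (size_t)(binind - 14) * 256);    /* Subpage-spaced. */
}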

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern size_t opt_lg_qspace_max;
extern size_t opt_lg_cspace_max;
extern ssize_t opt_lg_dirty_mult;

/*
 * small_size2bin is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via the SMALL_SIZE2BIN macro.
 */
extern uint8_t const *small_size2bin;
#define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN])
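
#ifdef JEMALLOC_TINY
/*
 * Illustrative sketch (not part of the original file): under the same
 * assumptions as the bins table above (LG_TINY_MIN == 3, 16-byte quantum),
 * a 20-byte request reads small_size2bin[(20-1) >> 3] == small_size2bin[2],
 * which holds the index of the 32-byte bin.
 */
static inline uint8_t
small_size2bin_sketch(size_t s)
{
    /* s must satisfy 0 < s <= small_maxclass. */
    return (SMALL_SIZE2BIN(s));
}
#endif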

extern arena_bin_info_t *arena_bin_info;

/* Various bin-related settings. */
#ifdef JEMALLOC_TINY
/* Number of (2^n)-spaced tiny bins. */
#  define ntbins ((unsigned)(LG_QUANTUM - LG_TINY_MIN))
#else
#  define ntbins 0
#endif
extern unsigned nqbins; /* Number of quantum-spaced bins. */
extern unsigned ncbins; /* Number of cacheline-spaced bins. */
extern unsigned nsbins; /* Number of subpage-spaced bins. */
extern unsigned nbins;
#ifdef JEMALLOC_TINY
#  define tspace_max ((size_t)(QUANTUM >> 1))
#endif
#define qspace_min QUANTUM
extern size_t qspace_max;
extern size_t cspace_min;
extern size_t cspace_max;
extern size_t sspace_min;
extern size_t sspace_max;
#define small_maxclass sspace_max

#define nlclasses (chunk_npages - map_bias)

void arena_purge_all(arena_t *arena);
#ifdef JEMALLOC_PROF
void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
#endif
#ifdef JEMALLOC_TCACHE
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
#  ifdef JEMALLOC_PROF
    , uint64_t prof_accumbytes
#  endif
    );
#endif
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_malloc(size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alloc_size,
    size_t alignment, bool zero);
size_t arena_salloc(const void *ptr);
#ifdef JEMALLOC_PROF
void arena_prof_promoted(const void *ptr, size_t size);
size_t arena_salloc_demote(const void *ptr);
#endif
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_t *mapelm);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#ifdef JEMALLOC_STATS
void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
    arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats);
#endif
void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero);
bool arena_new(arena_t *arena, unsigned ind);
bool arena_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
    const void *ptr);
#  ifdef JEMALLOC_PROF
prof_ctx_t *arena_prof_ctx_get(const void *ptr);
void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
#  endif
void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
JEMALLOC_INLINE size_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
    size_t binind = bin - arena->bins;

    assert(binind < nbins);
    return (binind);
}

JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
    unsigned shift, diff, regind;
    size_t size;

    dassert(run->magic == ARENA_RUN_MAGIC);
    /*
     * Freeing a pointer lower than region zero can cause assertion
     * failure.
     */
    assert((uintptr_t)ptr >= (uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset);

    /*
     * Avoid doing division with a variable divisor if possible.  Using
     * actual division here can reduce allocator throughput by over 20%!
     */
    diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run -
        bin_info->reg0_offset);

    /* Rescale (factor powers of 2 out of the numerator and denominator). */
    size = bin_info->reg_size;
    shift = ffs(size) - 1;
    diff >>= shift;
    size >>= shift;

    if (size == 1) {
        /* The divisor was a power of 2. */
        regind = diff;
    } else {
        /*
         * To divide by a number D that is not a power of two we
         * multiply by (2^21 / D) and then right shift by 21 positions.
         *
         *   X / D
         *
         * becomes
         *
         *   (X * size_invs[D - 3]) >> SIZE_INV_SHIFT
         *
         * We can omit the first three elements, because we never
         * divide by 0, and 1 and 2 are both powers of two, which are
         * handled above.
         */
#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1)
        static const unsigned size_invs[] = {
            SIZE_INV(3),
            SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
            SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
            SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
            SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
            SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
            SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
            SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
        };

        if (size <= ((sizeof(size_invs) / sizeof(unsigned)) + 2))
            regind = (diff * size_invs[size - 3]) >> SIZE_INV_SHIFT;
        else
            regind = diff / size;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
    }
    assert(diff == regind * size);
    assert(regind < bin_info->nregs);

    return (regind);
}
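
/*
 * Illustrative sketch (not part of the original file): the
 * multiply-by-reciprocal trick used above, spelled out for the rescaled
 * divisor 3.  With 32-bit unsigned and LG_RUN_MAXREGS == 11, SIZE_INV_SHIFT
 * evaluates to 21, so the precomputed inverse of 3 is (1U << 21)/3 + 1 ==
 * 699051, and diff/3 == (diff * 699051) >> 21 for every diff that can occur
 * within a run (diff < 3 * RUN_MAXREGS).
 */
static inline unsigned
divide_by_three_sketch(unsigned diff)
{
    return ((diff * (((1U << 21) / 3) + 1)) >> 21);
}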

#ifdef JEMALLOC_PROF
JEMALLOC_INLINE prof_ctx_t *
arena_prof_ctx_get(const void *ptr)
{
    prof_ctx_t *ret;
    arena_chunk_t *chunk;
    size_t pageind, mapbits;

    assert(ptr != NULL);
    assert(CHUNK_ADDR2BASE(ptr) != ptr);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
    mapbits = chunk->map[pageind-map_bias].bits;
    assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
    if ((mapbits & CHUNK_MAP_LARGE) == 0) {
        if (prof_promote)
            ret = (prof_ctx_t *)(uintptr_t)1U;
        else {
            arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
                (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
                PAGE_SHIFT));
            size_t binind = arena_bin_index(chunk->arena, run->bin);
            arena_bin_info_t *bin_info = &arena_bin_info[binind];
            unsigned regind;

            dassert(run->magic == ARENA_RUN_MAGIC);
            regind = arena_run_regind(run, bin_info, ptr);
            ret = *(prof_ctx_t **)((uintptr_t)run +
                bin_info->ctx0_offset + (regind *
                sizeof(prof_ctx_t *)));
        }
    } else
        ret = chunk->map[pageind-map_bias].prof_ctx;

    return (ret);
}

JEMALLOC_INLINE void
arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
    arena_chunk_t *chunk;
    size_t pageind, mapbits;

    assert(ptr != NULL);
    assert(CHUNK_ADDR2BASE(ptr) != ptr);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
    mapbits = chunk->map[pageind-map_bias].bits;
    assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
    if ((mapbits & CHUNK_MAP_LARGE) == 0) {
        if (prof_promote == false) {
            arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
                (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
                PAGE_SHIFT));
            arena_bin_t *bin = run->bin;
            size_t binind;
            arena_bin_info_t *bin_info;
            unsigned regind;

            dassert(run->magic == ARENA_RUN_MAGIC);
            binind = arena_bin_index(chunk->arena, bin);
            bin_info = &arena_bin_info[binind];
            regind = arena_run_regind(run, bin_info, ptr);

            *((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset
                + (regind * sizeof(prof_ctx_t *)))) = ctx;
        } else
            assert((uintptr_t)ctx == (uintptr_t)1U);
    } else
        chunk->map[pageind-map_bias].prof_ctx = ctx;
}
#endif

JEMALLOC_INLINE void
arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
    size_t pageind;
    arena_chunk_map_t *mapelm;

    assert(arena != NULL);
    dassert(arena->magic == ARENA_MAGIC);
    assert(chunk->arena == arena);
    assert(ptr != NULL);
    assert(CHUNK_ADDR2BASE(ptr) != ptr);

    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
    mapelm = &chunk->map[pageind-map_bias];
    assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
    if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
        /* Small allocation. */
#ifdef JEMALLOC_TCACHE
        tcache_t *tcache;

        if ((tcache = tcache_get()) != NULL)
            tcache_dalloc_small(tcache, ptr);
        else {
#endif
            arena_run_t *run;
            arena_bin_t *bin;

            run = (arena_run_t *)((uintptr_t)chunk +
                (uintptr_t)((pageind - (mapelm->bits >>
                PAGE_SHIFT)) << PAGE_SHIFT));
            dassert(run->magic == ARENA_RUN_MAGIC);
            bin = run->bin;
#ifdef JEMALLOC_DEBUG
            {
                size_t binind = arena_bin_index(arena, bin);
                arena_bin_info_t *bin_info =
                    &arena_bin_info[binind];
                assert(((uintptr_t)ptr - ((uintptr_t)run +
                    (uintptr_t)bin_info->reg0_offset)) %
                    bin_info->reg_size == 0);
            }
#endif
            malloc_mutex_lock(&bin->lock);
            arena_dalloc_bin(arena, chunk, ptr, mapelm);
            malloc_mutex_unlock(&bin->lock);
#ifdef JEMALLOC_TCACHE
        }
#endif
    } else {
#ifdef JEMALLOC_TCACHE
        size_t size = mapelm->bits & ~PAGE_MASK;

        assert(((uintptr_t)ptr & PAGE_MASK) == 0);
        if (size <= tcache_maxclass) {
            tcache_t *tcache;

            if ((tcache = tcache_get()) != NULL)
                tcache_dalloc_large(tcache, ptr, size);
            else {
                malloc_mutex_lock(&arena->lock);
                arena_dalloc_large(arena, chunk, ptr);
                malloc_mutex_unlock(&arena->lock);
            }
        } else {
            malloc_mutex_lock(&arena->lock);
            arena_dalloc_large(arena, chunk, ptr);
            malloc_mutex_unlock(&arena->lock);
        }
#else
        assert(((uintptr_t)ptr & PAGE_MASK) == 0);
        malloc_mutex_lock(&arena->lock);
        arena_dalloc_large(arena, chunk, ptr);
        malloc_mutex_unlock(&arena->lock);
#endif
    }
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/atomic.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
#if (LG_SIZEOF_PTR == 3)
# define atomic_read_z(p) \
(size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)0)
# define atomic_add_z(p, x) \
(size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)
# define atomic_sub_z(p, x) \
(size_t)atomic_sub_uint64((uint64_t *)p, (uint64_t)x)
#elif (LG_SIZEOF_PTR == 2)
# define atomic_read_z(p) \
(size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)0)
# define atomic_add_z(p, x) \
(size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)
# define atomic_sub_z(p, x) \
(size_t)atomic_sub_uint32((uint32_t *)p, (uint32_t)x)
#endif
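
/*
 * Illustrative usage sketch (not part of the original file): the
 * pointer-sized wrappers above implement reads as "add zero", so a shared
 * counter can be bumped and sampled with the same primitives.
 */
static inline size_t
atomic_counter_sketch(size_t *counter)
{
    atomic_add_z(counter, 1);        /* Returns the post-add value. */
    return (atomic_read_z(counter));
}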

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
/* 64-bit operations. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_sub_and_fetch(p, x));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (x), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    x = (uint64_t)(-(int64_t)x);
    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (x), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (x);
}
#else
#  if (LG_SIZEOF_PTR == 3)
#    error "Missing implementation for 64-bit atomic operations"
#  endif
#endif

/******************************************************************************/
/* 32-bit operations. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_sub_and_fetch(p, x));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    asm volatile (
        "lock; xaddl %0, %1;"
        : "+r" (x), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    x = (uint32_t)(-(int32_t)x);
    asm volatile (
        "lock; xaddl %0, %1;"
        : "+r" (x), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (x);
}
#else
#  error "Missing implementation for 32-bit atomic operations"
#endif
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/base.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern malloc_mutex_t base_mtx;

void *base_alloc(size_t size);
extent_node_t *base_node_alloc(void);
void base_node_dealloc(extent_node_t *node);
bool base_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/bitmap.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS

typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG

/* Number of bits per group. */
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)

/* Maximum number of levels possible. */
#define BITMAP_MAX_LEVELS \
    (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
    + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct bitmap_level_s {
    /* Offset of this level's groups within the array of groups. */
    size_t group_offset;
};

struct bitmap_info_s {
    /* Logical number of bits in bitmap (stored at bottom level). */
    size_t nbits;

    /* Number of levels necessary for nbits. */
    unsigned nlevels;

    /*
     * Only the first (nlevels+1) elements are used, and levels are ordered
     * bottom to top (e.g. the bottom level is stored in levels[0]).
     */
    bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
size_t bitmap_info_ngroups(const bitmap_info_t *binfo);
size_t bitmap_size(size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
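
/*
 * Illustrative usage sketch (not part of the original file): initializing a
 * bitmap with the API above and grabbing its first free bit.  The caller is
 * assumed to have sized the group array via bitmap_info_ngroups().
 */
static inline size_t
bitmap_usage_sketch(bitmap_t *groups, size_t nbits)
{
    bitmap_info_t binfo;

    bitmap_info_init(&binfo, nbits);
    bitmap_init(groups, &binfo);
    /* bitmap_sfu() (declared below) returns the first unset bit and sets it. */
    return (bitmap_sfu(groups, &binfo));
}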

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
JEMALLOC_INLINE bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
    unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
    bitmap_t rg = bitmap[rgoff];

    /* The bitmap is full iff the root group is 0. */
    return (rg == 0);
}

JEMALLOC_INLINE bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
    size_t goff;
    bitmap_t g;

    assert(bit < binfo->nbits);
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    g = bitmap[goff];
    return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
}

JEMALLOC_INLINE void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
    size_t goff;
    bitmap_t *gp;
    bitmap_t g;

    assert(bit < binfo->nbits);
    assert(bitmap_get(bitmap, binfo, bit) == false);
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    gp = &bitmap[goff];
    g = *gp;
    assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
    g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
    *gp = g;
    assert(bitmap_get(bitmap, binfo, bit));
    /* Propagate group state transitions up the tree. */
    if (g == 0) {
        unsigned i;

        for (i = 1; i < binfo->nlevels; i++) {
            bit = goff;
            goff = bit >> LG_BITMAP_GROUP_NBITS;
            gp = &bitmap[binfo->levels[i].group_offset + goff];
            g = *gp;
            assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
            g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
            *gp = g;
            if (g != 0)
                break;
        }
    }
}

/* sfu: set first unset. */
JEMALLOC_INLINE size_t
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
    size_t bit;
    bitmap_t g;
    unsigned i;

    assert(bitmap_full(bitmap, binfo) == false);

    i = binfo->nlevels - 1;
    g = bitmap[binfo->levels[i].group_offset];
    bit = ffsl(g) - 1;
    while (i > 0) {
        i--;
        g = bitmap[binfo->levels[i].group_offset + bit];
        bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1);
    }

    bitmap_set(bitmap, binfo, bit);
    return (bit);
}

JEMALLOC_INLINE void
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
    size_t goff;
    bitmap_t *gp;
    bitmap_t g;
    bool propagate;

    assert(bit < binfo->nbits);
    assert(bitmap_get(bitmap, binfo, bit));
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    gp = &bitmap[goff];
    g = *gp;
    propagate = (g == 0);
    assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
    g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
    *gp = g;
    assert(bitmap_get(bitmap, binfo, bit) == false);
    /* Propagate group state transitions up the tree. */
    if (propagate) {
        unsigned i;

        for (i = 1; i < binfo->nlevels; i++) {
            bit = goff;
            goff = bit >> LG_BITMAP_GROUP_NBITS;
            gp = &bitmap[binfo->levels[i].group_offset + goff];
            g = *gp;
            propagate = (g == 0);
            assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
            g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
            *gp = g;
            if (propagate == false)
                break;
        }
    }
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/chunk.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * Size and alignment of memory chunks that are allocated by the OS's virtual
 * memory system.
 */
#define LG_CHUNK_DEFAULT 22

/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
    ((void *)((uintptr_t)(a) & ~chunksize_mask))

/* Return the chunk offset of address a. */
#define CHUNK_ADDR2OFFSET(a) \
    ((size_t)((uintptr_t)(a) & chunksize_mask))

/* Return the smallest chunk multiple that is >= s. */
#define CHUNK_CEILING(s) \
    (((s) + chunksize_mask) & ~chunksize_mask)
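
/*
 * Illustrative sketch (not part of the original file): with the default
 * 4 MiB chunks (LG_CHUNK_DEFAULT == 22), chunksize_mask == 0x3fffff, so an
 * address such as 0x7f0000512345 splits into base 0x7f0000400000 and offset
 * 0x112345.
 */
static inline void
chunk_split_sketch(const void *a, void **base, size_t *offset)
{
    *base = CHUNK_ADDR2BASE(a);
    *offset = CHUNK_ADDR2OFFSET(a);
}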

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern size_t opt_lg_chunk;
#ifdef JEMALLOC_SWAP
extern bool opt_overcommit;
#endif

#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
/* Protects stats_chunks; currently not used for any other purpose. */
extern malloc_mutex_t chunks_mtx;
/* Chunk statistics. */
extern chunk_stats_t stats_chunks;
#endif

#ifdef JEMALLOC_IVSALLOC
extern rtree_t *chunks_rtree;
#endif

extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t arena_maxclass; /* Max size class for arenas. */

void *chunk_alloc(size_t size, bool base, bool *zero);
void chunk_dealloc(void *chunk, size_t size);
bool chunk_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

#include "jemalloc/internal/chunk_swap.h"
#include "jemalloc/internal/chunk_dss.h"
#include "jemalloc/internal/chunk_mmap.h"
deps/jemalloc/include/jemalloc/internal/chunk_dss.h
0 → 100644
#ifdef JEMALLOC_DSS
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
extern malloc_mutex_t dss_mtx;

void *chunk_alloc_dss(size_t size, bool *zero);
bool chunk_in_dss(void *chunk);
bool chunk_dealloc_dss(void *chunk, size_t size);
bool chunk_dss_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_DSS */
deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void *chunk_alloc_mmap(size_t size);
void *chunk_alloc_mmap_noreserve(size_t size);
void chunk_dealloc_mmap(void *chunk, size_t size);

bool chunk_mmap_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/chunk_swap.h
0 → 100644
#ifdef JEMALLOC_SWAP
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern malloc_mutex_t swap_mtx;
extern bool swap_enabled;
extern bool swap_prezeroed;
extern size_t swap_nfds;
extern int *swap_fds;
#ifdef JEMALLOC_STATS
extern size_t swap_avail;
#endif

void *chunk_alloc_swap(size_t size, bool *zero);
bool chunk_in_swap(void *chunk);
bool chunk_dealloc_swap(void *chunk, size_t size);
bool chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed);
bool chunk_swap_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_SWAP */
deps/jemalloc/include/jemalloc/internal/ckh.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct ckh_s ckh_t;
typedef struct ckhc_s ckhc_t;

/* Typedefs to allow easy function pointer passing. */
typedef void ckh_hash_t (const void *, unsigned, size_t *, size_t *);
typedef bool ckh_keycomp_t (const void *, const void *);

/* Maintain counters used to get an idea of performance. */
/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
/* #define CKH_VERBOSE */

/*
 * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket.  Try to fit
 * one bucket per L1 cache line.
 */
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
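
/*
 * Illustrative note (not part of the original file): on a 64-bit system with
 * 64-byte cache lines (LG_CACHELINE == 6, LG_SIZEOF_PTR == 3) this evaluates
 * to 6 - 3 - 1 == 2, i.e. 4 cells per bucket.  Each cell holds two pointers
 * (16 bytes), so a bucket occupies 4 * 16 == 64 bytes, exactly one cache
 * line.
 */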

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Hash table cell. */
struct ckhc_s {
    const void *key;
    const void *data;
};

struct ckh_s {
#ifdef JEMALLOC_DEBUG
#define CKH_MAGIC 0x3af2489d
    uint32_t magic;
#endif

#ifdef CKH_COUNT
    /* Counters used to get an idea of performance. */
    uint64_t ngrows;
    uint64_t nshrinks;
    uint64_t nshrinkfails;
    uint64_t ninserts;
    uint64_t nrelocs;
#endif

    /* Used for pseudo-random number generation. */
#define CKH_A 1103515241
#define CKH_C 12347
    uint32_t prn_state;

    /* Total number of items. */
    size_t count;

    /*
     * Minimum and current number of hash table buckets.  There are
     * 2^LG_CKH_BUCKET_CELLS cells per bucket.
     */
    unsigned lg_minbuckets;
    unsigned lg_curbuckets;

    /* Hash and comparison functions. */
    ckh_hash_t *hash;
    ckh_keycomp_t *keycomp;

    /* Hash table with 2^lg_curbuckets buckets. */
    ckhc_t *tab;
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
    ckh_keycomp_t *keycomp);
void ckh_delete(ckh_t *ckh);
size_t ckh_count(ckh_t *ckh);
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
bool ckh_insert(ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
void ckh_string_hash(const void *key, unsigned minbits, size_t *hash1,
    size_t *hash2);
bool ckh_string_keycomp(const void *k1, const void *k2);
void ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
    size_t *hash2);
bool ckh_pointer_keycomp(const void *k1, const void *k2);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/ctl.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct ctl_node_s ctl_node_t;
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
typedef struct ctl_stats_s ctl_stats_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct ctl_node_s {
    bool named;
    union {
        struct {
            const char *name;
            /* If (nchildren == 0), this is a terminal node. */
            unsigned nchildren;
            const ctl_node_t *children;
        } named;
        struct {
            const ctl_node_t *(*index)(const size_t *, size_t, size_t);
        } indexed;
    } u;
    int (*ctl)(const size_t *, size_t, void *, size_t *, void *, size_t);
};

struct ctl_arena_stats_s {
    bool initialized;
    unsigned nthreads;
    size_t pactive;
    size_t pdirty;
#ifdef JEMALLOC_STATS
    arena_stats_t astats;

    /* Aggregate stats for small size classes, based on bin stats. */
    size_t allocated_small;
    uint64_t nmalloc_small;
    uint64_t ndalloc_small;
    uint64_t nrequests_small;

    malloc_bin_stats_t *bstats;   /* nbins elements. */
    malloc_large_stats_t *lstats; /* nlclasses elements. */
#endif
};

struct ctl_stats_s {
#ifdef JEMALLOC_STATS
    size_t allocated;
    size_t active;
    size_t mapped;
    struct {
        size_t current;   /* stats_chunks.curchunks */
        uint64_t total;   /* stats_chunks.nchunks */
        size_t high;      /* stats_chunks.highchunks */
    } chunks;
    struct {
        size_t allocated; /* huge_allocated */
        uint64_t nmalloc; /* huge_nmalloc */
        uint64_t ndalloc; /* huge_ndalloc */
    } huge;
#endif
    ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
#ifdef JEMALLOC_SWAP
    size_t swap_avail;
#endif
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen);
int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen);
bool ctl_boot(void);

#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (JEMALLOC_P(mallctl)(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_write("<jemalloc>: Failure in xmallctl(\""); \
malloc_write(name); \
malloc_write("\", ...)\n"); \
abort(); \
} \
} while (0)
#define xmallctlnametomib(name, mibp, miblenp) do { \
if (JEMALLOC_P(mallctlnametomib)(name, mibp, miblenp) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlnametomib(\""); \
malloc_write(name); \
malloc_write("\", ...)\n"); \
abort(); \
} \
} while (0)
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (JEMALLOC_P(mallctlbymib)(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlbymib()\n"); \
abort(); \
} \
} while (0)
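
/*
 * Illustrative usage sketch (not part of the original file): reading a
 * size_t-valued control through the abort-on-failure wrapper above.  The
 * mallctl name is only an example.
 */
static inline size_t
ctl_read_sketch(void)
{
    size_t v;
    size_t vlen = sizeof(v);

    xmallctl("stats.allocated", &v, &vlen, NULL, 0);
    return (v);
}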

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/extent.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct extent_node_s extent_node_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Tree of extents. */
struct extent_node_s {
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
    /* Linkage for the size/address-ordered tree. */
    rb_node(extent_node_t) link_szad;
#endif

    /* Linkage for the address-ordered tree. */
    rb_node(extent_node_t) link_ad;

#ifdef JEMALLOC_PROF
    /* Profile counters, used for huge objects. */
    prof_ctx_t *prof_ctx;
#endif

    /* Pointer to the extent that this tree node is responsible for. */
    void *addr;

    /* Total region size. */
    size_t size;
};
typedef rb_tree(extent_node_t) extent_tree_t;

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
#endif

rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/hash.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
uint64_t hash(const void *key, size_t len, uint64_t seed);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
/*
 * The following hash function is based on MurmurHash64A(), placed into the
 * public domain by Austin Appleby.  See http://murmurhash.googlepages.com/ for
 * details.
 */
JEMALLOC_INLINE uint64_t
hash(const void *key, size_t len, uint64_t seed)
{
    const uint64_t m = 0xc6a4a7935bd1e995;
    const int r = 47;
    uint64_t h = seed ^ (len * m);
    const uint64_t *data = (const uint64_t *)key;
    const uint64_t *end = data + (len/8);
    const unsigned char *data2;

    assert(((uintptr_t)key & 0x7) == 0);

    while (data != end) {
        uint64_t k = *data++;

        k *= m;
        k ^= k >> r;
        k *= m;

        h ^= k;
        h *= m;
    }

    data2 = (const unsigned char *)data;
    switch (len & 7) {
        case 7: h ^= ((uint64_t)(data2[6])) << 48;
        case 6: h ^= ((uint64_t)(data2[5])) << 40;
        case 5: h ^= ((uint64_t)(data2[4])) << 32;
        case 4: h ^= ((uint64_t)(data2[3])) << 24;
        case 3: h ^= ((uint64_t)(data2[2])) << 16;
        case 2: h ^= ((uint64_t)(data2[1])) << 8;
        case 1: h ^= ((uint64_t)(data2[0]));
            h *= m;
    }

    h ^= h >> r;
    h *= m;
    h ^= h >> r;

    return (h);
}
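
/*
 * Illustrative usage sketch (not part of the original file): hashing an
 * 8-byte-aligned key with an arbitrary seed.  The assertion in hash()
 * requires the key address to be 8-byte aligned.
 */
static inline uint64_t
hash_usage_sketch(void)
{
    static const uint64_t key[2] = {0x0123456789abcdefULL, 0xfedcba9876543210ULL};

    return (hash(key, sizeof(key), 1234));
}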
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/huge.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#ifdef JEMALLOC_STATS
/* Huge allocation statistics. */
extern uint64_t huge_nmalloc;
extern uint64_t huge_ndalloc;
extern size_t huge_allocated;
#endif

/* Protects chunk-related data structures. */
extern malloc_mutex_t huge_mtx;

void *huge_malloc(size_t size, bool zero);
void *huge_palloc(size_t size, size_t alignment, bool zero);
void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
    size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero);
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
#ifdef JEMALLOC_PROF
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
#endif
bool huge_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
0 → 100644
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <math.h>
#define JEMALLOC_MANGLE
#include "../jemalloc@install_suffix@.h"
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif
#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif
#ifdef JEMALLOC_LAZY_LOCK
#include <dlfcn.h>
#endif
#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
/*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
# ifdef JEMALLOC_DEBUG
# define assert(e) do { \
if (!(e)) { \
char line_buf[UMAX2S_BUFSIZE]; \
malloc_write("<jemalloc>: "); \
malloc_write(__FILE__); \
malloc_write(":"); \
malloc_write(u2s(__LINE__, 10, line_buf)); \
malloc_write(": Failed assertion: "); \
malloc_write("\""); \
malloc_write(#e); \
malloc_write("\"\n"); \
abort(); \
} \
} while (0)
# else
# define assert(e)
# endif
#endif
#ifdef JEMALLOC_DEBUG
# define dassert(e) assert(e)
#else
# define dassert(e)
#endif
/*
* jemalloc can conceptually be broken into components (arena, tcache, etc.),
* but there are circular dependencies that cannot be broken without
* substantial performance degradation. In order to reduce the effect on
* visual code flow, read the header files in multiple passes, with one of the
* following cpp variables defined during each pass:
*
 * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
* types.
* JEMALLOC_H_STRUCTS : Data structures.
* JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
* JEMALLOC_H_INLINES : Inline functions.
*/
/******************************************************************************/
#define JEMALLOC_H_TYPES
#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)
#define ZU(z) ((size_t)z)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#ifdef JEMALLOC_DEBUG
/* Disable inlining to make debugging easier. */
# define JEMALLOC_INLINE
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# define JEMALLOC_INLINE static inline
#endif
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
/* Minimum alignment of allocations is 2^LG_QUANTUM bytes. */
#ifdef __i386__
# define LG_QUANTUM 4
#endif
#ifdef __ia64__
# define LG_QUANTUM 4
#endif
#ifdef __alpha__
# define LG_QUANTUM 4
#endif
#ifdef __sparc64__
# define LG_QUANTUM 4
#endif
#if (defined(__amd64__) || defined(__x86_64__))
# define LG_QUANTUM 4
#endif
#ifdef __arm__
# define LG_QUANTUM 3
#endif
#ifdef __mips__
# define LG_QUANTUM 3
#endif
#ifdef __powerpc__
# define LG_QUANTUM 4
#endif
#ifdef __s390x__
# define LG_QUANTUM 4
#endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)
/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)
/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)
/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
* In addition, this controls the spacing of cacheline-spaced size classes.
*/
#define LG_CACHELINE 6
#define CACHELINE ((size_t)(1U << LG_CACHELINE))
#define CACHELINE_MASK (CACHELINE - 1)
/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/*
* Page size. STATIC_PAGE_SHIFT is determined by the configure script. If
* DYNAMIC_PAGE_SHIFT is enabled, only use the STATIC_PAGE_* macros where
* compile-time values are required for the purposes of defining data
* structures.
*/
#define STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
#ifdef PAGE_SHIFT
# undef PAGE_SHIFT
#endif
#ifdef PAGE_SIZE
# undef PAGE_SIZE
#endif
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#ifdef DYNAMIC_PAGE_SHIFT
# define PAGE_SHIFT lg_pagesize
# define PAGE_SIZE pagesize
# define PAGE_MASK pagesize_mask
#else
# define PAGE_SHIFT STATIC_PAGE_SHIFT
# define PAGE_SIZE STATIC_PAGE_SIZE
# define PAGE_MASK STATIC_PAGE_MASK
#endif
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
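/*
 * Illustrative note (not part of the original file): each *_CEILING macro
 * rounds up by adding the mask and clearing the low bits.  For example, with
 * a 16-byte quantum, QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32, while
 * QUANTUM_CEILING(16) stays 16; PAGE_CEILING behaves the same way with
 * PAGE_MASK.
 */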
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"
#ifdef JEMALLOC_STATS
typedef struct {
uint64_t allocated;
uint64_t deallocated;
} thread_allocated_t;
#endif
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
#ifdef JEMALLOC_FILL
extern bool opt_junk;
#endif
#ifdef JEMALLOC_SYSV
extern bool opt_sysv;
#endif
#ifdef JEMALLOC_XMALLOC
extern bool opt_xmalloc;
#endif
#ifdef JEMALLOC_FILL
extern bool opt_zero;
#endif
extern size_t opt_narenas;
#ifdef DYNAMIC_PAGE_SHIFT
extern size_t pagesize;
extern size_t pagesize_mask;
extern size_t lg_pagesize;
#endif
/* Number of CPUs. */
extern unsigned ncpus;
extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
extern pthread_key_t arenas_tsd;
#ifndef NO_TLS
/*
* Map of pthread_self() --> arenas[???], used for selecting an arena to use
* for allocations.
*/
extern __thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
# define ARENA_GET() arenas_tls
# define ARENA_SET(v) do { \
arenas_tls = (v); \
pthread_setspecific(arenas_tsd, (void *)(v)); \
} while (0)
#else
# define ARENA_GET() ((arena_t *)pthread_getspecific(arenas_tsd))
# define ARENA_SET(v) do { \
pthread_setspecific(arenas_tsd, (void *)(v)); \
} while (0)
#endif
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
*/
extern arena_t **arenas;
extern unsigned narenas;
#ifdef JEMALLOC_STATS
# ifndef NO_TLS
extern __thread thread_allocated_t thread_allocated_tls;
# define ALLOCATED_GET() (thread_allocated_tls.allocated)
# define ALLOCATEDP_GET() (&thread_allocated_tls.allocated)
# define DEALLOCATED_GET() (thread_allocated_tls.deallocated)
# define DEALLOCATEDP_GET() (&thread_allocated_tls.deallocated)
# define ALLOCATED_ADD(a, d) do { \
thread_allocated_tls.allocated += a; \
thread_allocated_tls.deallocated += d; \
} while (0)
# else
extern pthread_key_t thread_allocated_tsd;
thread_allocated_t *thread_allocated_get_hard(void);
# define ALLOCATED_GET() (thread_allocated_get()->allocated)
# define ALLOCATEDP_GET() (&thread_allocated_get()->allocated)
# define DEALLOCATED_GET() (thread_allocated_get()->deallocated)
# define DEALLOCATEDP_GET() (&thread_allocated_get()->deallocated)
# define ALLOCATED_ADD(a, d) do { \
thread_allocated_t *thread_allocated = thread_allocated_get(); \
thread_allocated->allocated += (a); \
thread_allocated->deallocated += (d); \
} while (0)
# endif
#endif
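A hedged sketch (not from the source) of what the ALLOCATED_GET/ALLOCATED_ADD bookkeeping amounts to: each thread accumulates lifetime allocated and deallocated byte counts, modeled here with a plain struct instead of the real TLS/TSD machinery:

#include <assert.h>
#include <stdint.h>

/* Simplified stand-in for thread_allocated_t; the real one lives in TLS. */
typedef struct {
    uint64_t allocated;
    uint64_t deallocated;
} example_thread_allocated_t;

/* Mirrors ALLOCATED_ADD(a, d): bump both counters in one step. */
#define EXAMPLE_ALLOCATED_ADD(t, a, d) do {     \
    (t).allocated += (a);                       \
    (t).deallocated += (d);                     \
} while (0)

int
main(void)
{
    example_thread_allocated_t t = {0, 0};

    EXAMPLE_ALLOCATED_ADD(t, 128, 0);   /* e.g. after allocating 128 usable bytes */
    EXAMPLE_ALLOCATED_ADD(t, 0, 128);   /* e.g. after freeing them */
    assert(t.allocated == 128 && t.deallocated == 128);
    return (0);
}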
arena_t *arenas_extend(unsigned ind);
arena_t *choose_arena_hard(void);
int buferror(int errnum, char *buf, size_t buflen);
void jemalloc_prefork(void);
void jemalloc_postfork(void);
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
size_t pow2_ceil(size_t x);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment, size_t *run_size_p);
void malloc_write(const char *s);
arena_t *choose_arena(void);
# if (defined(JEMALLOC_STATS) && defined(NO_TLS))
thread_allocated_t *thread_allocated_get(void);
# endif
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{
x--;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
x |= x >> 32;
#endif
x++;
return (x);
}
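For example, pow2_ceil(33) decrements to 32 (0b100000), the OR cascade smears the high bit into every lower position (0b111111 == 63), and the final increment yields 64. A small standalone check of the same technique (illustrative only, assuming a 64-bit size_t):

#include <assert.h>
#include <stddef.h>

/* Same bit-smearing technique as pow2_ceil() above. */
static size_t
example_pow2_ceil(size_t x)
{
    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    x |= x >> 32;   /* Only valid when size_t is 64 bits (LG_SIZEOF_PTR == 3). */
    x++;
    return (x);
}

int
main(void)
{
    assert(example_pow2_ceil(33) == 64);
    assert(example_pow2_ceil(64) == 64);
    assert(example_pow2_ceil(1) == 1);
    return (0);
}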
/*
* Compute usable size that would result from allocating an object with the
* specified size.
*/
JEMALLOC_INLINE size_t
s2u(size_t size)
{
if (size <= small_maxclass)
return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
if (size <= arena_maxclass)
return (PAGE_CEILING(size));
return (CHUNK_CEILING(size));
}
/*
* Compute usable size that would result from allocating an object with the
* specified size and alignment.
*/
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
{
size_t usize;
/*
* Round size up to the nearest multiple of alignment.
*
* This done, we can take advantage of the fact that for each small
* size class, every object is aligned at the smallest power of two
* that is non-zero in the base two representation of the size. For
* example:
*
* Size | Base 2 | Minimum alignment
* -----+----------+------------------
* 96 | 1100000 | 32
* 144 | 10100000 | 32
* 192 | 11000000 | 64
*
* Depending on runtime settings, it is possible that arena_malloc()
* will further round up to a power of two, but that never causes
* correctness issues.
*/
usize = (size + (alignment - 1)) & (-alignment);
/*
* (usize < size) protects against the combination of maximal
* alignment and size greater than maximal alignment.
*/
if (usize < size) {
/* size_t overflow. */
return (0);
}
if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
if (usize <= small_maxclass)
return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
return (PAGE_CEILING(usize));
} else {
size_t run_size;
/*
* We can't achieve subpage alignment, so round up alignment
* permanently; it makes later calculations simpler.
*/
alignment = PAGE_CEILING(alignment);
usize = PAGE_CEILING(size);
/*
* (usize < size) protects against very large sizes within
* PAGE_SIZE of SIZE_T_MAX.
*
* (usize + alignment < usize) protects against the
* combination of maximal alignment and usize large enough
* to cause overflow. This is similar to the first overflow
* check above, but it needs to be repeated due to the new
* usize value, which may now be *equal* to maximal
* alignment, whereas before we only detected overflow if the
* original size was *greater* than maximal alignment.
*/
if (usize < size || usize + alignment < usize) {
/* size_t overflow. */
return (0);
}
/*
* Calculate the size of the over-size run that arena_palloc()
* would need to allocate in order to guarantee the alignment.
*/
if (usize >= alignment)
run_size = usize + alignment - PAGE_SIZE;
else {
/*
* It is possible that (alignment << 1) will cause
* overflow, but it doesn't matter because we also
* subtract PAGE_SIZE, which in the case of overflow
* leaves us with a very large run_size. That causes
* the first conditional below to fail, which means
* that the bogus run_size value never gets used for
* anything important.
*/
run_size = (alignment << 1) - PAGE_SIZE;
}
if (run_size_p != NULL)
*run_size_p = run_size;
if (run_size <= arena_maxclass)
return (PAGE_CEILING(usize));
return (CHUNK_CEILING(usize));
}
}
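To make the alignment arithmetic above concrete (a worked example, not from the source): with size 100 and alignment 64, usize = (100 + 63) & -64 = 128, the smallest multiple of 64 that is >= 100. A minimal sketch of just that rounding step and its overflow check:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* The rounding step used at the top of sa2u(); alignment must be a power of 2. */
static size_t
example_align_up(size_t size, size_t alignment)
{
    size_t usize = (size + (alignment - 1)) & (-alignment);

    /* Overflow check, as in sa2u(): wrap-around makes usize < size. */
    if (usize < size)
        return (0);
    return (usize);
}

int
main(void)
{
    assert(example_align_up(100, 64) == 128);
    assert(example_align_up(128, 64) == 128);
    assert(example_align_up(SIZE_MAX, 4096) == 0);  /* overflow reported as 0 */
    return (0);
}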
/*
* Wrapper around malloc_message() that avoids the need for
* JEMALLOC_P(malloc_message)(...) throughout the code.
*/
JEMALLOC_INLINE void
malloc_write(const char *s)
{
JEMALLOC_P(malloc_message)(NULL, s);
}
/*
* Choose an arena based on a per-thread value (fast-path code, calls slow-path
* code if necessary).
*/
JEMALLOC_INLINE arena_t *
choose_arena(void)
{
arena_t *ret;
ret = ARENA_GET();
if (ret == NULL) {
ret = choose_arena_hard();
assert(ret != NULL);
}
return (ret);
}
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
JEMALLOC_INLINE thread_allocated_t *
thread_allocated_get(void)
{
thread_allocated_t *thread_allocated = (thread_allocated_t *)
pthread_getspecific(thread_allocated_tsd);
if (thread_allocated == NULL)
return (thread_allocated_get_hard());
return (thread_allocated);
}
#endif
#endif
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#ifndef JEMALLOC_ENABLE_INLINE
void *imalloc(size_t size);
void *icalloc(size_t size);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr);
# ifdef JEMALLOC_IVSALLOC
size_t ivsalloc(const void *ptr);
# endif
void idalloc(void *ptr);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{
assert(size != 0);
if (size <= arena_maxclass)
return (arena_malloc(size, false));
else
return (huge_malloc(size, false));
}
JEMALLOC_INLINE void *
icalloc(size_t size)
{
if (size <= arena_maxclass)
return (arena_malloc(size, true));
else
return (huge_malloc(size, true));
}
JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
void *ret;
assert(usize != 0);
assert(usize == sa2u(usize, alignment, NULL));
if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
ret = arena_malloc(usize, zero);
else {
size_t run_size = 0;
/*
* Ideally we would only ever call sa2u() once per aligned
* allocation request, and the caller of this function has
* already done so once. However, it's rather burdensome to
* require every caller to pass in run_size, especially given
* that it's only relevant to large allocations. Therefore,
* just call it again here in order to get run_size.
*/
sa2u(usize, alignment, &run_size);
if (run_size <= arena_maxclass) {
ret = arena_palloc(choose_arena(), usize, run_size,
alignment, zero);
} else if (alignment <= chunksize)
ret = huge_malloc(usize, zero);
else
ret = huge_palloc(usize, alignment, zero);
}
assert(((uintptr_t)ret & (alignment - 1)) == 0);
return (ret);
}
JEMALLOC_INLINE size_t
isalloc(const void *ptr)
{
size_t ret;
arena_chunk_t *chunk;
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
dassert(chunk->arena->magic == ARENA_MAGIC);
#ifdef JEMALLOC_PROF
ret = arena_salloc_demote(ptr);
#else
ret = arena_salloc(ptr);
#endif
} else
ret = huge_salloc(ptr);
return (ret);
}
#ifdef JEMALLOC_IVSALLOC
JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
{
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
return (0);
return (isalloc(ptr));
}
#endif
JEMALLOC_INLINE void
idalloc(void *ptr)
{
arena_chunk_t *chunk;
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
arena_dalloc(chunk->arena, chunk, ptr);
else
huge_dalloc(ptr, true);
}
JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool no_move)
{
void *ret;
size_t oldsize;
assert(ptr != NULL);
assert(size != 0);
oldsize = isalloc(ptr);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
size_t usize, copysize;
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
if (no_move)
return (NULL);
usize = sa2u(size + extra, alignment, NULL);
if (usize == 0)
return (NULL);
ret = ipalloc(usize, alignment, zero);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, without extra this time. */
usize = sa2u(size, alignment, NULL);
if (usize == 0)
return (NULL);
ret = ipalloc(usize, alignment, zero);
if (ret == NULL)
return (NULL);
}
/*
* Copy at most size bytes (not size+extra), since the caller
* has no expectation that the extra bytes will be reliably
* preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
idalloc(ptr);
return (ret);
}
if (no_move) {
if (size <= arena_maxclass) {
return (arena_ralloc_no_move(ptr, oldsize, size,
extra, zero));
} else {
return (huge_ralloc_no_move(ptr, oldsize, size,
extra));
}
} else {
if (size + extra <= arena_maxclass) {
return (arena_ralloc(ptr, oldsize, size, extra,
alignment, zero));
} else {
return (huge_ralloc(ptr, oldsize, size, extra,
alignment, zero));
}
}
}
#endif
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_INLINES
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/mb.h
0 → 100644
View file @
994ed2bc
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void	mb_write(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
#ifdef __i386__
/*
* According to the Intel Architecture Software Developer's Manual, current
* processors execute instructions in order from the perspective of other
* processors in a multiprocessor system, but 1) Intel reserves the right to
* change that, and 2) the compiler's optimizer could re-order instructions if
* there weren't some form of barrier. Therefore, even if running on an
* architecture that does not need memory barriers (everything through at least
* i686), an "optimizer barrier" is necessary.
*/
JEMALLOC_INLINE void
mb_write(void)
{
# if 0
    /* This is a true memory barrier. */
    asm volatile ("pusha;"
        "xor %%eax,%%eax;"
        "cpuid;"
        "popa;"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
#else
    /*
     * This is hopefully enough to keep the compiler from reordering
     * instructions around this one.
     */
    asm volatile ("nop;"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
#endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
mb_write(void)
{

    asm volatile ("sfence"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
#elif defined(__powerpc__)
JEMALLOC_INLINE void
mb_write(void)
{

    asm volatile ("eieio"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
#elif defined(__sparc64__)
JEMALLOC_INLINE void
mb_write(void)
{

    asm volatile ("membar #StoreStore"
        : /* Outputs. */
        : /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
#else
/*
* This is much slower than a simple memory barrier, but the semantics of mutex
* unlock make this work.
*/
JEMALLOC_INLINE void
mb_write(void)
{
    malloc_mutex_t mtx;

    malloc_mutex_init(&mtx);
    malloc_mutex_lock(&mtx);
    malloc_mutex_unlock(&mtx);
}
#endif
#endif
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/mutex.h
0 → 100644
View file @
994ed2bc
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifdef JEMALLOC_OSSPIN
typedef OSSpinLock malloc_mutex_t;
#else
typedef pthread_mutex_t malloc_mutex_t;
#endif
#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
# define MALLOC_MUTEX_INITIALIZER PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
#else
# define MALLOC_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
#endif
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
# define isthreaded true
#endif
bool	malloc_mutex_init(malloc_mutex_t *mutex);
void	malloc_mutex_destroy(malloc_mutex_t *mutex);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void	malloc_mutex_lock(malloc_mutex_t *mutex);
bool	malloc_mutex_trylock(malloc_mutex_t *mutex);
void	malloc_mutex_unlock(malloc_mutex_t *mutex);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex)
{

    if (isthreaded) {
#ifdef JEMALLOC_OSSPIN
        OSSpinLockLock(mutex);
#else
        pthread_mutex_lock(mutex);
#endif
    }
}

JEMALLOC_INLINE bool
malloc_mutex_trylock(malloc_mutex_t *mutex)
{

    if (isthreaded) {
#ifdef JEMALLOC_OSSPIN
        return (OSSpinLockTry(mutex) == false);
#else
        return (pthread_mutex_trylock(mutex) != 0);
#endif
    } else
        return (false);
}

JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex)
{

    if (isthreaded) {
#ifdef JEMALLOC_OSSPIN
        OSSpinLockUnlock(mutex);
#else
        pthread_mutex_unlock(mutex);
#endif
    }
}
#endif
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/prn.h
0 → 100644
View file @
994ed2bc
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* Simple linear congruential pseudo-random number generator:
*
* prn(x) = (a*x + c) % m
*
* where the following constants ensure maximal period:
*
* a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
* c == Odd number (relatively prime to 2^n).
* m == 2^32
*
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
* proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*
* Macro parameters:
* uint32_t r : Result.
* unsigned lg_range : (0..32], number of least significant bits to return.
* uint32_t state : Seed value.
* const uint32_t a, c : See above discussion.
*/
#define prn32(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 32); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (32 - lg_range); \
} while (false)
/* Same as prn32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prn64(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 64); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (64 - lg_range); \
} while (false)
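A hedged usage sketch (not part of this header) of prn32(): the macro advances the 32-bit LCG state in place and leaves the top lg_range bits of the new state in r. The a/c constants below are the classic ANSI C rand() multiplier and increment, chosen only for illustration; they satisfy the constraints above (a is odd with a-1 a multiple of 4, c is odd).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the header's prn32() macro. */
#define example_prn32(r, lg_range, state, a, c) do {    \
    assert((lg_range) > 0);                             \
    assert((lg_range) <= 32);                           \
                                                        \
    r = (state * (a)) + (c);                            \
    state = r;                                          \
    r >>= (32 - (lg_range));                            \
} while (0)

int
main(void)
{
    uint32_t state = 42;    /* Seed; any value works. */
    uint32_t r;
    int i;

    /* Draw a few 8-bit pseudo-random values from the upper bits. */
    for (i = 0; i < 4; i++) {
        example_prn32(r, 8, state, 1103515245U, 12345U);
        printf("%u\n", r);
    }
    return (0);
}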
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/prof.h
0 → 100644
View file @
994ed2bc
#ifdef JEMALLOC_PROF
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_thr_cnt_s prof_thr_cnt_t;
typedef struct prof_ctx_s prof_ctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#define PROF_PREFIX_DEFAULT "jeprof"
#define LG_PROF_BT_MAX_DEFAULT 7
#define LG_PROF_SAMPLE_DEFAULT 0
#define LG_PROF_INTERVAL_DEFAULT -1
#define LG_PROF_TCMAX_DEFAULT -1
/*
* Hard limit on stack backtrace depth. Note that the version of
* prof_backtrace() that is based on __builtin_return_address() necessarily has
* a hard-coded number of backtrace frame handlers.
*/
#if (defined(JEMALLOC_PROF_LIBGCC) || defined(JEMALLOC_PROF_LIBUNWIND))
# define LG_PROF_BT_MAX ((ZU(1) << (LG_SIZEOF_PTR+3)) - 1)
#else
# define LG_PROF_BT_MAX 7 /* >= LG_PROF_BT_MAX_DEFAULT */
#endif
#define PROF_BT_MAX (1U << LG_PROF_BT_MAX)
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUF_SIZE 65536
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct prof_bt_s {
    /* Backtrace, stored as len program counters. */
    void **vec;
    unsigned len;
};

#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
    prof_bt_t *bt;
    unsigned nignore;
    unsigned max;
} prof_unwind_data_t;
#endif

struct prof_cnt_s {
    /*
     * Profiling counters. An allocation/deallocation pair can operate on
     * different prof_thr_cnt_t objects that are linked into the same
     * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
     * negative. In principle it is possible for the *bytes counters to
     * overflow/underflow, but a general solution would require something
     * like 128-bit counters; this implementation doesn't bother to solve
     * that problem.
     */
    int64_t curobjs;
    int64_t curbytes;
    uint64_t accumobjs;
    uint64_t accumbytes;
};

struct prof_thr_cnt_s {
    /* Linkage into prof_ctx_t's cnts_ql. */
    ql_elm(prof_thr_cnt_t) cnts_link;

    /* Linkage into thread's LRU. */
    ql_elm(prof_thr_cnt_t) lru_link;

    /*
     * Associated context. If a thread frees an object that it did not
     * allocate, it is possible that the context is not cached in the
     * thread's hash table, in which case it must be able to look up the
     * context, insert a new prof_thr_cnt_t into the thread's hash table,
     * and link it into the prof_ctx_t's cnts_ql.
     */
    prof_ctx_t *ctx;

    /*
     * Threads use memory barriers to update the counters. Since there is
     * only ever one writer, the only challenge is for the reader to get a
     * consistent read of the counters.
     *
     * The writer uses this series of operations:
     *
     * 1) Increment epoch to an odd number.
     * 2) Update counters.
     * 3) Increment epoch to an even number.
     *
     * The reader must assure 1) that the epoch is even while it reads the
     * counters, and 2) that the epoch doesn't change between the time it
     * starts and finishes reading the counters.
     */
    unsigned epoch;

    /* Profiling counters. */
    prof_cnt_t cnts;
};

struct prof_ctx_s {
    /* Associated backtrace. */
    prof_bt_t *bt;

    /* Protects cnt_merged and cnts_ql. */
    malloc_mutex_t lock;

    /* Temporary storage for summation during dump. */
    prof_cnt_t cnt_summed;

    /* When threads exit, they merge their stats into cnt_merged. */
    prof_cnt_t cnt_merged;

    /*
     * List of profile counters, one for each thread that has allocated in
     * this context.
     */
    ql_head(prof_thr_cnt_t) cnts_ql;
};

struct prof_tdata_s {
    /*
     * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a
     * cache of backtraces, with associated thread-specific prof_thr_cnt_t
     * objects. Other threads may read the prof_thr_cnt_t contents, but no
     * others will ever write them.
     *
     * Upon thread exit, the thread must merge all the prof_thr_cnt_t
     * counter data into the associated prof_ctx_t objects, and unlink/free
     * the prof_thr_cnt_t objects.
     */
    ckh_t bt2cnt;

    /* LRU for contents of bt2cnt. */
    ql_head(prof_thr_cnt_t) lru_ql;

    /* Backtrace vector, used for calls to prof_backtrace(). */
    void **vec;

    /* Sampling state. */
    uint64_t prn_state;
    uint64_t threshold;
    uint64_t accum;
};
#endif
/* JEMALLOC_H_STRUCTS */
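The epoch protocol described in prof_thr_cnt_s above amounts to a sequence lock: the writer makes the epoch odd, updates the counters, then makes it even again; a reader retries until it sees the same even epoch before and after copying the counters. A hedged reader-side sketch (types simplified, not from the source; volatile stands in for the real code's mb_write() barriers and is only illustrative):

#include <stdint.h>

typedef struct {
    int64_t curobjs;
    int64_t curbytes;
    uint64_t accumobjs;
    uint64_t accumbytes;
} example_cnt_t;

typedef struct {
    volatile unsigned epoch;    /* Even whenever the counters are stable. */
    example_cnt_t cnts;
} example_thr_cnt_t;

/* Retry until a consistent snapshot is observed (single writer assumed). */
static example_cnt_t
example_read_counters(const example_thr_cnt_t *c)
{
    example_cnt_t snap;
    unsigned e0, e1;

    do {
        e0 = c->epoch;
        snap = c->cnts;
        e1 = c->epoch;
    } while ((e0 & 1) != 0 || e0 != e1);
    return (snap);
}

int
main(void)
{
    example_thr_cnt_t c = {0, {1, 64, 1, 64}};
    example_cnt_t snap = example_read_counters(&c);

    return (snap.curobjs == 1 ? 0 : 1);
}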
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_prof;
/*
* Even if opt_prof is true, sampling can be temporarily disabled by setting
* opt_prof_active to false. No locking is used when updating opt_prof_active,
* so there are no guarantees regarding how long it will take for all threads
* to notice state changes.
*/
extern bool opt_prof_active;
extern size_t opt_lg_prof_bt_max;     /* Maximum backtrace depth. */
extern size_t opt_lg_prof_sample;     /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval;  /* lg(prof_interval). */
extern bool opt_prof_gdump;           /* High-water memory dumping. */
extern bool opt_prof_leak;            /* Dump leak summary at exit. */
extern bool opt_prof_accum;           /* Report cumulative bytes. */
extern ssize_t opt_lg_prof_tcmax;     /* lg(max per-thread backtrace cache). */
extern char opt_prof_prefix[PATH_MAX + 1];
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
* profile dump when it reaches this threshold. The effect is that the
* interval between profile dumps averages prof_interval, though the actual
* interval between dumps will tend to be sporadic, and the interval will be a
* maximum of approximately (prof_interval * narenas).
*/
extern uint64_t prof_interval;
/*
* If true, promote small sampled objects to large objects, since small run
* headers do not have embedded profile context pointers.
*/
extern bool prof_promote;

/* (1U << opt_lg_prof_bt_max). */
extern unsigned prof_bt_max;
/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
#ifndef NO_TLS
extern __thread prof_tdata_t *prof_tdata_tls JEMALLOC_ATTR(tls_model("initial-exec"));
# define PROF_TCACHE_GET() prof_tdata_tls
# define PROF_TCACHE_SET(v) do { \
prof_tdata_tls = (v); \
pthread_setspecific(prof_tdata_tsd, (void *)(v)); \
} while (0)
#else
# define PROF_TCACHE_GET() \
((prof_tdata_t *)pthread_getspecific(prof_tdata_tsd))
# define PROF_TCACHE_SET(v) do { \
pthread_setspecific(prof_tdata_tsd, (void *)(v)); \
} while (0)
#endif
/*
* Same contents as b2cnt_tls, but initialized such that the TSD destructor is
* called when a thread exits, so that prof_tdata_tls contents can be merged,
* unlinked, and deallocated.
*/
extern pthread_key_t prof_tdata_tsd;

void	bt_init(prof_bt_t *bt, void **vec);
void	prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max);
prof_thr_cnt_t	*prof_lookup(prof_bt_t *bt);
void	prof_idump(void);
bool	prof_mdump(const char *filename);
void	prof_gdump(void);
prof_tdata_t	*prof_tdata_init(void);
void	prof_boot0(void);
void	prof_boot1(void);
bool	prof_boot2(void);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void	prof_sample_threshold_update(prof_tdata_t *prof_tdata);
prof_thr_cnt_t	*prof_alloc_prep(size_t size);
prof_ctx_t	*prof_ctx_get(const void *ptr);
void	prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
bool	prof_sample_accum_update(size_t size);
void	prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt);
void	prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
    size_t old_size, prof_ctx_t *old_ctx);
void	prof_free(const void *ptr, size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
JEMALLOC_INLINE void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
{
    uint64_t r;
    double u;

    /*
     * Compute sample threshold as a geometrically distributed random
     * variable with mean (2^opt_lg_prof_sample):
     *
     *   prof_tdata->threshold = ceil(log(u) / log(1 - p)),
     *   where p = 1 / 2^opt_lg_prof_sample
     *
     * For more information on the math, see:
     *
     *   Non-Uniform Random Variate Generation
     *   Luc Devroye
     *   Springer-Verlag, New York, 1986
     *   pp 500
     *   (http://cg.scs.carleton.ca/~luc/rnbookindex.html)
     */
    prn64(r, 53, prof_tdata->prn_state,
        (uint64_t)6364136223846793005LLU, (uint64_t)1442695040888963407LLU);
    u = (double)r * (1.0/9007199254740992.0L);
    prof_tdata->threshold = (uint64_t)(log(u) / log(1.0 - (1.0 /
        (double)((uint64_t)1U << opt_lg_prof_sample)))) + (uint64_t)1U;
}
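As a worked example of the sampling math above (not from the source): with opt_lg_prof_sample = 13, p = 1/8192, so the threshold is a geometric draw with mean 8192 bytes, i.e. on average one backtrace capture per 8 KiB of allocated bytes. A standalone sketch of the same computation (u is fixed here instead of coming from prn64(); link with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Geometric threshold with mean 2^lg_sample, mirroring the formula above. */
static uint64_t
example_threshold(double u, unsigned lg_sample)
{
    double p = 1.0 / (double)((uint64_t)1U << lg_sample);

    return ((uint64_t)(log(u) / log(1.0 - p)) + 1U);
}

int
main(void)
{
    /* u would normally be a uniform draw from prn64(); 0.5 is illustrative. */
    printf("%llu\n", (unsigned long long)example_threshold(0.5, 13));
    return (0);
}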
JEMALLOC_INLINE prof_thr_cnt_t *
prof_alloc_prep(size_t size)
{
#ifdef JEMALLOC_ENABLE_INLINE
   /* This function does not have its own stack frame, because it is inlined. */
# define NIGNORE 1
#else
# define NIGNORE 2
#endif
    prof_thr_cnt_t *ret;
    prof_tdata_t *prof_tdata;
    prof_bt_t bt;

    assert(size == s2u(size));

    prof_tdata = PROF_TCACHE_GET();
    if (prof_tdata == NULL) {
        prof_tdata = prof_tdata_init();
        if (prof_tdata == NULL)
            return (NULL);
    }

    if (opt_prof_active == false) {
        /* Sampling is currently inactive, so avoid sampling. */
        ret = (prof_thr_cnt_t *)(uintptr_t)1U;
    } else if (opt_lg_prof_sample == 0) {
        /*
         * Don't bother with sampling logic, since sampling interval is
         * 1.
         */
        bt_init(&bt, prof_tdata->vec);
        prof_backtrace(&bt, NIGNORE, prof_bt_max);
        ret = prof_lookup(&bt);
    } else {
        if (prof_tdata->threshold == 0) {
            /*
             * Initialize. Seed the prng differently for each
             * thread.
             */
            prof_tdata->prn_state = (uint64_t)(uintptr_t)&size;
            prof_sample_threshold_update(prof_tdata);
        }

        /*
         * Determine whether to capture a backtrace based on whether
         * size is enough for prof_accum to reach
         * prof_tdata->threshold. However, delay updating these
         * variables until prof_{m,re}alloc(), because we don't know
         * for sure that the allocation will succeed.
         *
         * Use subtraction rather than addition to avoid potential
         * integer overflow.
         */
        if (size >= prof_tdata->threshold - prof_tdata->accum) {
            bt_init(&bt, prof_tdata->vec);
            prof_backtrace(&bt, NIGNORE, prof_bt_max);
            ret = prof_lookup(&bt);
        } else
            ret = (prof_thr_cnt_t *)(uintptr_t)1U;
    }

    return (ret);
#undef NIGNORE
}
JEMALLOC_INLINE prof_ctx_t *
prof_ctx_get(const void *ptr)
{
    prof_ctx_t *ret;
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr) {
        /* Region. */
        dassert(chunk->arena->magic == ARENA_MAGIC);

        ret = arena_prof_ctx_get(ptr);
    } else
        ret = huge_prof_ctx_get(ptr);

    return (ret);
}
JEMALLOC_INLINE void
prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr) {
        /* Region. */
        dassert(chunk->arena->magic == ARENA_MAGIC);

        arena_prof_ctx_set(ptr, ctx);
    } else
        huge_prof_ctx_set(ptr, ctx);
}
JEMALLOC_INLINE bool
prof_sample_accum_update(size_t size)
{
    prof_tdata_t *prof_tdata;

    /* Sampling logic is unnecessary if the interval is 1. */
    assert(opt_lg_prof_sample != 0);

    prof_tdata = PROF_TCACHE_GET();
    assert(prof_tdata != NULL);

    /* Take care to avoid integer overflow. */
    if (size >= prof_tdata->threshold - prof_tdata->accum) {
        prof_tdata->accum -= (prof_tdata->threshold - size);
        /* Compute new sample threshold. */
        prof_sample_threshold_update(prof_tdata);
        while (prof_tdata->accum >= prof_tdata->threshold) {
            prof_tdata->accum -= prof_tdata->threshold;
            prof_sample_threshold_update(prof_tdata);
        }
        return (false);
    } else {
        prof_tdata->accum += size;
        return (true);
    }
}
JEMALLOC_INLINE void
prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
{

    assert(ptr != NULL);
    assert(size == isalloc(ptr));

    if (opt_lg_prof_sample != 0) {
        if (prof_sample_accum_update(size)) {
            /*
             * Don't sample. For malloc()-like allocation, it is
             * always possible to tell in advance how large an
             * object's usable size will be, so there should never
             * be a difference between the size passed to
             * prof_alloc_prep() and prof_malloc().
             */
            assert((uintptr_t)cnt == (uintptr_t)1U);
        }
    }

    if ((uintptr_t)cnt > (uintptr_t)1U) {
        prof_ctx_set(ptr, cnt->ctx);

        cnt->epoch++;
        /*********/
        mb_write();
        /*********/
        cnt->cnts.curobjs++;
        cnt->cnts.curbytes += size;
        if (opt_prof_accum) {
            cnt->cnts.accumobjs++;
            cnt->cnts.accumbytes += size;
        }
        /*********/
        mb_write();
        /*********/
        cnt->epoch++;
        /*********/
        mb_write();
        /*********/
    } else
        prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
}
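A hedged model (not the allocator's actual code path) of the sentinel convention these hooks share: a prof_thr_cnt_t pointer equal to (uintptr_t)1U means "allocation not sampled", NULL means "profiling setup failed", and anything larger is a real counter object; prof_alloc_prep() decides before the allocation, and prof_malloc() commits afterwards. All names below are placeholders:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int dummy; } model_cnt_t;

/* Placeholder for prof_alloc_prep(): pick a real counter or the sentinel. */
static model_cnt_t *
model_alloc_prep(int sample_this_one)
{
    static model_cnt_t cnt;

    if (sample_this_one)
        return (&cnt);
    return ((model_cnt_t *)(uintptr_t)1U);
}

/* Placeholder for prof_malloc(): only real counters get updated. */
static void
model_commit(void *ptr, model_cnt_t *cnt)
{
    if ((uintptr_t)cnt > (uintptr_t)1U)
        printf("%p: sampled, counters updated\n", ptr);
    else
        printf("%p: not sampled\n", ptr);
}

int
main(void)
{
    model_cnt_t *cnt = model_alloc_prep(0); /* decide before allocating */
    void *p = malloc(64);

    model_commit(p, cnt);                   /* commit after allocating */
    free(p);
    return (0);
}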
JEMALLOC_INLINE void
prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
    size_t old_size, prof_ctx_t *old_ctx)
{
    prof_thr_cnt_t *told_cnt;

    assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);

    if (ptr != NULL) {
        assert(size == isalloc(ptr));
        if (opt_lg_prof_sample != 0) {
            if (prof_sample_accum_update(size)) {
                /*
                 * Don't sample. The size passed to
                 * prof_alloc_prep() was larger than what
                 * actually got allocated, so a backtrace was
                 * captured for this allocation, even though
                 * its actual size was insufficient to cross
                 * the sample threshold.
                 */
                cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
            }
        }
    }

    if ((uintptr_t)old_ctx > (uintptr_t)1U) {
        told_cnt = prof_lookup(old_ctx->bt);
        if (told_cnt == NULL) {
            /*
             * It's too late to propagate OOM for this realloc(),
             * so operate directly on old_cnt->ctx->cnt_merged.
             */
            malloc_mutex_lock(&old_ctx->lock);
            old_ctx->cnt_merged.curobjs--;
            old_ctx->cnt_merged.curbytes -= old_size;
            malloc_mutex_unlock(&old_ctx->lock);
            told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
        }
    } else
        told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;

    if ((uintptr_t)told_cnt > (uintptr_t)1U)
        told_cnt->epoch++;
    if ((uintptr_t)cnt > (uintptr_t)1U) {
        prof_ctx_set(ptr, cnt->ctx);
        cnt->epoch++;
    } else
        prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
    /*********/
    mb_write();
    /*********/
    if ((uintptr_t)told_cnt > (uintptr_t)1U) {
        told_cnt->cnts.curobjs--;
        told_cnt->cnts.curbytes -= old_size;
    }
    if ((uintptr_t)cnt > (uintptr_t)1U) {
        cnt->cnts.curobjs++;
        cnt->cnts.curbytes += size;
        if (opt_prof_accum) {
            cnt->cnts.accumobjs++;
            cnt->cnts.accumbytes += size;
        }
    }
    /*********/
    mb_write();
    /*********/
    if ((uintptr_t)told_cnt > (uintptr_t)1U)
        told_cnt->epoch++;
    if ((uintptr_t)cnt > (uintptr_t)1U)
        cnt->epoch++;
    /*********/
    mb_write(); /* Not strictly necessary. */
}
JEMALLOC_INLINE void
prof_free(const void *ptr, size_t size)
{
    prof_ctx_t *ctx = prof_ctx_get(ptr);

    if ((uintptr_t)ctx > (uintptr_t)1) {
        assert(size == isalloc(ptr));
        prof_thr_cnt_t *tcnt = prof_lookup(ctx->bt);

        if (tcnt != NULL) {
            tcnt->epoch++;
            /*********/
            mb_write();
            /*********/
            tcnt->cnts.curobjs--;
            tcnt->cnts.curbytes -= size;
            /*********/
            mb_write();
            /*********/
            tcnt->epoch++;
            /*********/
            mb_write();
            /*********/
        } else {
            /*
             * OOM during free() cannot be propagated, so operate
             * directly on cnt->ctx->cnt_merged.
             */
            malloc_mutex_lock(&ctx->lock);
            ctx->cnt_merged.curobjs--;
            ctx->cnt_merged.curbytes -= size;
            malloc_mutex_unlock(&ctx->lock);
        }
    }
}
#endif
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif
/* JEMALLOC_PROF */
deps/jemalloc/include/jemalloc/internal/ql.h
0 → 100644
View file @
994ed2bc
/*
* List definitions.
*/
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
#define ql_head_initializer(a_head) {NULL}
#define ql_elm(a_type) qr(a_type)
/* List functions. */
#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
#define ql_first(a_head) ((a_head)->qlh_first)
#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
if (ql_first(a_head) != (a_elm)) { \
qr_remove((a_elm), a_field); \
} else { \
ql_first(a_head) = NULL; \
} \
} while (0)
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
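A hedged usage sketch (not part of this header) of the ql_ list macros, assuming the companion qr.h header from the same directory (which defines the qr_* ring macros these build on) is available on the include path:

#include <stdio.h>
#include "jemalloc/internal/qr.h"   /* ring macros that ql.h builds on */
#include "jemalloc/internal/ql.h"

/* Illustrative type: a list of widgets with embedded linkage. */
typedef struct widget_s widget_t;
struct widget_s {
    int id;
    ql_elm(widget_t) link;
};

int
main(void)
{
    ql_head(widget_t) head = ql_head_initializer(head);
    widget_t a = {1}, b = {2};
    widget_t *w;

    ql_elm_new(&a, link);
    ql_elm_new(&b, link);
    ql_tail_insert(&head, &a, link);
    ql_tail_insert(&head, &b, link);

    /* Walks the list in insertion order: widget 1, then widget 2. */
    ql_foreach(w, &head, link) {
        printf("widget %d\n", w->id);
    }
    return (0);
}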