ruanhaishen / redis · Commits · 08e1c8e8

Commit 08e1c8e8, authored May 24, 2018 by antirez
Jemalloc upgraded to version 5.0.1.
parent 8f4e2075
Changes: 170 files. Too many changes to show; to preserve performance, only 170 of 170+ files are displayed.
deps/jemalloc/include/jemalloc/internal/base.h (deleted, 100644 → 0)

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void *base_alloc(size_t size);
void base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
bool base_boot(void);
void base_prefork(void);
void base_postfork_parent(void);
void base_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/base_externs.h (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
#define JEMALLOC_INTERNAL_BASE_EXTERNS_H

extern metadata_thp_mode_t opt_metadata_thp;
extern const char *metadata_thp_mode_names[];

base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
extent_hooks_t *base_extent_hooks_get(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
    extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
    size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);

#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/base_inlines.h (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
#define JEMALLOC_INTERNAL_BASE_INLINES_H

static inline unsigned
base_ind_get(const base_t *base) {
    return base->ind;
}

static inline bool
metadata_thp_enabled(void) {
    return (opt_metadata_thp != metadata_thp_disabled);
}

#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
deps/jemalloc/include/jemalloc/internal/base_structs.h (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
#define JEMALLOC_INTERNAL_BASE_STRUCTS_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"

/* Embedded at the beginning of every block of base-managed virtual memory. */
struct base_block_s {
    /* Total size of block's virtual memory mapping. */
    size_t size;

    /* Next block in list of base's blocks. */
    base_block_t *next;

    /* Tracks unused trailing space. */
    extent_t extent;
};

struct base_s {
    /* Associated arena's index within the arenas array. */
    unsigned ind;

    /*
     * User-configurable extent hook functions.  Points to an
     * extent_hooks_t.
     */
    atomic_p_t extent_hooks;

    /* Protects base_alloc() and base_stats_get() operations. */
    malloc_mutex_t mtx;

    /* Using THP when true (metadata_thp auto mode). */
    bool auto_thp_switched;

    /*
     * Most recent size class in the series of increasingly large base
     * extents.  Logarithmic spacing between subsequent allocations ensures
     * that the total number of distinct mappings remains small.
     */
    pszind_t pind_last;

    /* Serial number generation state. */
    size_t extent_sn_next;

    /* Chain of all blocks associated with base. */
    base_block_t *blocks;

    /* Heap of extents that track unused trailing space within blocks. */
    extent_heap_t avail[NSIZES];

    /* Stats, only maintained if config_stats. */
    size_t allocated;
    size_t resident;
    size_t mapped;
    /* Number of THP regions touched. */
    size_t n_thp;
};

#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */
deps/jemalloc/include/jemalloc/internal/base_types.h (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
#define JEMALLOC_INTERNAL_BASE_TYPES_H

typedef struct base_block_s base_block_t;
typedef struct base_s base_t;

#define METADATA_THP_DEFAULT metadata_thp_disabled

/*
 * In auto mode, arenas switch to huge pages for the base allocator on the
 * second base block.  a0 switches to thp on the 5th block (after 20 megabytes
 * of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
 */
#define BASE_AUTO_THP_THRESHOLD 2
#define BASE_AUTO_THP_THRESHOLD_A0 5

typedef enum {
    metadata_thp_disabled = 0,
    /*
     * Lazily enable hugepage for metadata.  To avoid high RSS caused by THP
     * + low usage arena (i.e. THP becomes a significant percentage), the
     * "auto" option only starts using THP after a base allocator used up
     * the first THP region.  Starting from the second hugepage (in a single
     * arena), "auto" behaves the same as "always", i.e. madvise hugepage
     * right away.
     */
    metadata_thp_auto = 1,
    metadata_thp_always = 2,
    metadata_thp_mode_limit = 3
} metadata_thp_mode_t;

#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
deps/jemalloc/include/jemalloc/internal/bin.h (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_BIN_H
#define JEMALLOC_INTERNAL_BIN_H

#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/bin_stats.h"

/*
 * A bin contains a set of extents that are currently being used for slab
 * allocations.
 */

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each slab has the following layout:
 *
 *   /--------------------\
 *   | region 0           |
 *   |--------------------|
 *   | region 1           |
 *   |--------------------|
 *   | ...                |
 *   | ...                |
 *   | ...                |
 *   |--------------------|
 *   | region nregs-1     |
 *   \--------------------/
 */
typedef struct bin_info_s bin_info_t;
struct bin_info_s {
    /* Size of regions in a slab for this bin's size class. */
    size_t reg_size;

    /* Total size of a slab for this bin's size class. */
    size_t slab_size;

    /* Total number of regions in a slab for this bin's size class. */
    uint32_t nregs;

    /*
     * Metadata used to manipulate bitmaps for slabs associated with this
     * bin.
     */
    bitmap_info_t bitmap_info;
};

extern const bin_info_t bin_infos[NBINS];

typedef struct bin_s bin_t;
struct bin_s {
    /* All operations on bin_t fields require lock ownership. */
    malloc_mutex_t lock;

    /*
     * Current slab being used to service allocations of this bin's size
     * class.  slabcur is independent of slabs_{nonfull,full}; whenever
     * slabcur is reassigned, the previous slab must be deallocated or
     * inserted into slabs_{nonfull,full}.
     */
    extent_t *slabcur;

    /*
     * Heap of non-full slabs.  This heap is used to assure that new
     * allocations come from the non-full slab that is oldest/lowest in
     * memory.
     */
    extent_heap_t slabs_nonfull;

    /* List used to track full slabs. */
    extent_list_t slabs_full;

    /* Bin statistics. */
    bin_stats_t stats;
};

/* Initializes a bin to empty.  Returns true on error. */
bool bin_init(bin_t *bin);

/* Forking. */
void bin_prefork(tsdn_t *tsdn, bin_t *bin);
void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);

/* Stats. */
static inline void
bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
    malloc_mutex_lock(tsdn, &bin->lock);
    malloc_mutex_prof_read(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
    dst_bin_stats->nmalloc += bin->stats.nmalloc;
    dst_bin_stats->ndalloc += bin->stats.ndalloc;
    dst_bin_stats->nrequests += bin->stats.nrequests;
    dst_bin_stats->curregs += bin->stats.curregs;
    dst_bin_stats->nfills += bin->stats.nfills;
    dst_bin_stats->nflushes += bin->stats.nflushes;
    dst_bin_stats->nslabs += bin->stats.nslabs;
    dst_bin_stats->reslabs += bin->stats.reslabs;
    dst_bin_stats->curslabs += bin->stats.curslabs;
    malloc_mutex_unlock(tsdn, &bin->lock);
}

#endif /* JEMALLOC_INTERNAL_BIN_H */
deps/jemalloc/include/jemalloc/internal/bin_stats.h (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_BIN_STATS_H
#define JEMALLOC_INTERNAL_BIN_STATS_H

#include "jemalloc/internal/mutex_prof.h"

typedef struct bin_stats_s bin_stats_t;
struct bin_stats_s {
    /*
     * Total number of allocation/deallocation requests served directly by
     * the bin.  Note that tcache may allocate an object, then recycle it
     * many times, resulting many increments to nrequests, but only one
     * each to nmalloc and ndalloc.
     */
    uint64_t nmalloc;
    uint64_t ndalloc;

    /*
     * Number of allocation requests that correspond to the size of this
     * bin.  This includes requests served by tcache, though tcache only
     * periodically merges into this counter.
     */
    uint64_t nrequests;

    /*
     * Current number of regions of this size class, including regions
     * currently cached by tcache.
     */
    size_t curregs;

    /* Number of tcache fills from this bin. */
    uint64_t nfills;

    /* Number of tcache flushes to this bin. */
    uint64_t nflushes;

    /* Total number of slabs created for this bin's size class. */
    uint64_t nslabs;

    /*
     * Total number of slabs reused by extracting them from the slabs heap
     * for this bin's size class.
     */
    uint64_t reslabs;

    /* Current number of slabs in this bin. */
    size_t curslabs;

    mutex_prof_data_t mutex_data;
};

#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */
deps/jemalloc/include/jemalloc/internal/bit_util.h (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H
#define JEMALLOC_INTERNAL_BIT_UTIL_H

#include "jemalloc/internal/assert.h"

#define BIT_UTIL_INLINE static inline

/* Sanity check. */
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
    || !defined(JEMALLOC_INTERNAL_FFS)
#  error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
#endif

BIT_UTIL_INLINE unsigned
ffs_llu(unsigned long long bitmap) {
    return JEMALLOC_INTERNAL_FFSLL(bitmap);
}

BIT_UTIL_INLINE unsigned
ffs_lu(unsigned long bitmap) {
    return JEMALLOC_INTERNAL_FFSL(bitmap);
}

BIT_UTIL_INLINE unsigned
ffs_u(unsigned bitmap) {
    return JEMALLOC_INTERNAL_FFS(bitmap);
}

BIT_UTIL_INLINE unsigned
ffs_zu(size_t bitmap) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
    return ffs_u(bitmap);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
    return ffs_lu(bitmap);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
    return ffs_llu(bitmap);
#else
#error No implementation for size_t ffs()
#endif
}

BIT_UTIL_INLINE unsigned
ffs_u64(uint64_t bitmap) {
#if LG_SIZEOF_LONG == 3
    return ffs_lu(bitmap);
#elif LG_SIZEOF_LONG_LONG == 3
    return ffs_llu(bitmap);
#else
#error No implementation for 64-bit ffs()
#endif
}

BIT_UTIL_INLINE unsigned
ffs_u32(uint32_t bitmap) {
#if LG_SIZEOF_INT == 2
    return ffs_u(bitmap);
#else
#error No implementation for 32-bit ffs()
#endif
    return ffs_u(bitmap);
}

BIT_UTIL_INLINE uint64_t
pow2_ceil_u64(uint64_t x) {
    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    x |= x >> 32;
    x++;
    return x;
}

BIT_UTIL_INLINE uint32_t
pow2_ceil_u32(uint32_t x) {
    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    x++;
    return x;
}

/* Compute the smallest power of 2 that is >= x. */
BIT_UTIL_INLINE size_t
pow2_ceil_zu(size_t x) {
#if (LG_SIZEOF_PTR == 3)
    return pow2_ceil_u64(x);
#else
    return pow2_ceil_u32(x);
#endif
}

#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
    size_t ret;
    assert(x != 0);

    asm ("bsr %1, %0"
        : "=r"(ret) // Outputs.
        : "r"(x)    // Inputs.
        );
    assert(ret < UINT_MAX);
    return (unsigned)ret;
}
#elif (defined(_MSC_VER))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
    unsigned long ret;
    assert(x != 0);

#if (LG_SIZEOF_PTR == 3)
    _BitScanReverse64(&ret, x);
#elif (LG_SIZEOF_PTR == 2)
    _BitScanReverse(&ret, x);
#else
#  error "Unsupported type size for lg_floor()"
#endif
    assert(ret < UINT_MAX);
    return (unsigned)ret;
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
    assert(x != 0);

#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
    return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x);
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
    return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x);
#else
#  error "Unsupported type size for lg_floor()"
#endif
}
#else
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
    assert(x != 0);

    x |= (x >> 1);
    x |= (x >> 2);
    x |= (x >> 4);
    x |= (x >> 8);
    x |= (x >> 16);
#if (LG_SIZEOF_PTR == 3)
    x |= (x >> 32);
#endif
    if (x == SIZE_T_MAX) {
        return (8 << LG_SIZEOF_PTR) - 1;
    }
    x++;
    return ffs_zu(x) - 2;
}
#endif

#undef BIT_UTIL_INLINE

#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */
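The pow2_ceil_* helpers above use the classic bit-smearing idiom: decrement, OR the value with progressively larger right shifts of itself until every position below the highest set bit is 1, then increment. A minimal standalone sketch (not part of the commit) that exercises the same steps on a few inputs:

/*
 * Standalone demo of the bit-smearing trick used by pow2_ceil_u32() above.
 * After x--, each OR folds the highest set bit into every lower position,
 * so x++ lands exactly on the next power of two.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pow2_ceil_u32_demo(uint32_t x) {
    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    x++;
    return x;
}

int main(void) {
    /* Prints: 16 16 32 */
    printf("%u %u %u\n", pow2_ceil_u32_demo(13), pow2_ceil_u32_demo(16),
        pow2_ceil_u32_demo(17));
    return 0;
}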
deps/jemalloc/include/jemalloc/internal/bitmap.h (modified; post-upgrade version of the hunks shown below, which replace the old JEMALLOC_H_TYPES/STRUCTS/EXTERNS/INLINES phases)

#ifndef JEMALLOC_INTERNAL_BITMAP_H
#define JEMALLOC_INTERNAL_BITMAP_H

#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/size_classes.h"

typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG

/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#if LG_SLAB_MAXREGS > LG_CEIL_NSIZES
/* Maximum bitmap bit count is determined by maximum regions per slab. */
#  define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
#else
/* Maximum bitmap bit count is determined by number of extent size classes. */
#  define LG_BITMAP_MAXBITS LG_CEIL_NSIZES
#endif
#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)

/* Number of bits per group. */
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)

/*
 * Do some analysis on how big the bitmap is before we use a tree.  For a brute
 * force linear search, if we would have to call ffs_lu() more than 2^3 times,
 * use a tree instead.
 */
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
#  define BITMAP_USE_TREE
#endif

/* Number of groups required to store a given number of bits. */
#define BITMAP_BITS2GROUPS(nbits) \
    (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)

/*
 * Number of groups required at a particular level for a given number of bits.
 */
#define BITMAP_GROUPS_L0(nbits) \
    BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_L1(nbits) \
    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
#define BITMAP_GROUPS_L2(nbits) \
    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
#define BITMAP_GROUPS_L3(nbits) \
    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
        BITMAP_BITS2GROUPS((nbits)))))
#define BITMAP_GROUPS_L4(nbits) \
    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
        BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))

/*
 * Assuming the number of levels, number of groups required for a given number
 * of bits.
 */
#define BITMAP_GROUPS_1_LEVEL(nbits) \
    BITMAP_GROUPS_L0(nbits)
#define BITMAP_GROUPS_2_LEVEL(nbits) \
    (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
#define BITMAP_GROUPS_3_LEVEL(nbits) \
    (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
#define BITMAP_GROUPS_4_LEVEL(nbits) \
    (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
#define BITMAP_GROUPS_5_LEVEL(nbits) \
    (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))

/*
 * Maximum number of groups required to support LG_BITMAP_MAXBITS.
 */
#ifdef BITMAP_USE_TREE

#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
#  define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits)
#  define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
#  define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits)
#  define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
#  define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits)
#  define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
#  define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits)
#  define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
#  define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits)
#  define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
#else
#  error "Unsupported bitmap size"
#endif

/*
 * Maximum number of levels possible.  This could be statically computed based
 * on LG_BITMAP_MAXBITS:
 *
 * #define BITMAP_MAX_LEVELS \
 *     (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
 *     + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
 *
 * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so
 * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the
 * various cascading macros.  The only additional cost this incurs is some
 * unused trailing entries in bitmap_info_t structures; the bitmaps themselves
 * are not impacted.
 */
#define BITMAP_MAX_LEVELS 5

#define BITMAP_INFO_INITIALIZER(nbits) { \
    /* nbits. */ \
    nbits, \
    /* nlevels. */ \
    (BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \
        (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \
        (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \
        (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \
    /* levels. */ \
    { \
        {0}, \
        {BITMAP_GROUPS_L0(nbits)}, \
        {BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
        {BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \
            BITMAP_GROUPS_L0(nbits)}, \
        {BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \
            BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
        {BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \
            BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \
            + BITMAP_GROUPS_L0(nbits)} \
    } \
}

#else /* BITMAP_USE_TREE */

#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)

#define BITMAP_INFO_INITIALIZER(nbits) { \
    /* nbits. */ \
    nbits, \
    /* ngroups. */ \
    BITMAP_BITS2GROUPS(nbits) \
}

#endif /* BITMAP_USE_TREE */

typedef struct bitmap_level_s {
    /* Offset of this level's groups within the array of groups. */
    size_t group_offset;
} bitmap_level_t;

typedef struct bitmap_info_s {
    /* Logical number of bits in bitmap (stored at bottom level). */
    size_t nbits;

#ifdef BITMAP_USE_TREE
    /* Number of levels necessary for nbits. */
    unsigned nlevels;
...
@@ -86,54 +162,48 @@ struct bitmap_info_s {
     * bottom to top (e.g. the bottom level is stored in levels[0]).
     */
    bitmap_level_t levels[BITMAP_MAX_LEVELS + 1];
#else /* BITMAP_USE_TREE */
    /* Number of groups necessary for nbits. */
    size_t ngroups;
#endif /* BITMAP_USE_TREE */
} bitmap_info_t;

void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
size_t bitmap_size(const bitmap_info_t *binfo);

static inline bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
#ifdef BITMAP_USE_TREE
    size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
    bitmap_t rg = bitmap[rgoff];
    /* The bitmap is full iff the root group is 0. */
    return (rg == 0);
#else
    size_t i;
    for (i = 0; i < binfo->ngroups; i++) {
        if (bitmap[i] != 0) {
            return false;
        }
    }
    return true;
#endif
}

static inline bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
    size_t goff;
    bitmap_t g;

    assert(bit < binfo->nbits);
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    g = bitmap[goff];
    return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
}

static inline void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
    size_t goff;
    bitmap_t *gp;
    bitmap_t g;
...
@@ -143,10 +213,11 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    gp = &bitmap[goff];
    g = *gp;
    assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
    g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
    *gp = g;
    assert(bitmap_get(bitmap, binfo, bit));
#ifdef BITMAP_USE_TREE
    /* Propagate group state transitions up the tree. */
    if (g == 0) {
        unsigned i;
...
@@ -155,45 +226,113 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
            goff = bit >> LG_BITMAP_GROUP_NBITS;
            gp = &bitmap[binfo->levels[i].group_offset + goff];
            g = *gp;
            assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
            g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
            *gp = g;
            if (g != 0) {
                break;
            }
        }
    }
#endif
}

/* ffu: find first unset >= bit. */
static inline size_t
bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
    assert(min_bit < binfo->nbits);

#ifdef BITMAP_USE_TREE
    size_t bit = 0;
    for (unsigned level = binfo->nlevels; level--;) {
        size_t lg_bits_per_group =
            (LG_BITMAP_GROUP_NBITS * (level + 1));
        bitmap_t group = bitmap[binfo->levels[level].group_offset +
            (bit >> lg_bits_per_group)];
        unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit -
            bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
        assert(group_nmask <= BITMAP_GROUP_NBITS);
        bitmap_t group_mask = ~((1LU << group_nmask) - 1);
        bitmap_t group_masked = group & group_mask;
        if (group_masked == 0LU) {
            if (group == 0LU) {
                return binfo->nbits;
            }
            /*
             * min_bit was preceded by one or more unset bits in
             * this group, but there are no other unset bits in this
             * group.  Try again starting at the first bit of the
             * next sibling.  This will recurse at most once per
             * non-root level.
             */
            size_t sib_base = bit + (ZU(1) << lg_bits_per_group);
            assert(sib_base > min_bit);
            assert(sib_base > bit);
            if (sib_base >= binfo->nbits) {
                return binfo->nbits;
            }
            return bitmap_ffu(bitmap, binfo, sib_base);
        }
        bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
            (lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
    }
    assert(bit >= min_bit);
    assert(bit < binfo->nbits);
    return bit;
#else
    size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
    bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK))
        - 1);
    size_t bit;
    do {
        bit = ffs_lu(g);
        if (bit != 0) {
            return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
        }
        i++;
        g = bitmap[i];
    } while (i < binfo->ngroups);
    return binfo->nbits;
#endif
}

/* sfu: set first unset. */
static inline size_t
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
    size_t bit;
    bitmap_t g;
    unsigned i;

    assert(!bitmap_full(bitmap, binfo));

#ifdef BITMAP_USE_TREE
    i = binfo->nlevels - 1;
    g = bitmap[binfo->levels[i].group_offset];
    bit = ffs_lu(g) - 1;
    while (i > 0) {
        i--;
        g = bitmap[binfo->levels[i].group_offset + bit];
        bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
    }
#else
    i = 0;
    g = bitmap[0];
    while ((bit = ffs_lu(g)) == 0) {
        i++;
        g = bitmap[i];
    }
    bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
#endif
    bitmap_set(bitmap, binfo, bit);
    return bit;
}

static inline void
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
    size_t goff;
    bitmap_t *gp;
    bitmap_t g;
    UNUSED bool propagate;

    assert(bit < binfo->nbits);
    assert(bitmap_get(bitmap, binfo, bit));
...
@@ -201,10 +340,11 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
    gp = &bitmap[goff];
    g = *gp;
    propagate = (g == 0);
    assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
    g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
    *gp = g;
    assert(!bitmap_get(bitmap, binfo, bit));
#ifdef BITMAP_USE_TREE
    /* Propagate group state transitions up the tree. */
    if (propagate) {
        unsigned i;
...
@@ -214,17 +354,16 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
            gp = &bitmap[binfo->levels[i].group_offset + goff];
            g = *gp;
            propagate = (g == 0);
            assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
                == 0);
            g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
            *gp = g;
            if (!propagate) {
                break;
            }
        }
    }
#endif /* BITMAP_USE_TREE */
}

#endif /* JEMALLOC_INTERNAL_BITMAP_H */
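The bitmap header above exposes a small API (BITMAP_INFO_INITIALIZER/bitmap_info_init, bitmap_init, bitmap_sfu, bitmap_get, bitmap_unset) that the slab allocator uses to track which regions are in use. A hypothetical usage sketch of that flow; it relies on jemalloc-internal types and macros and is not compilable outside the jemalloc tree:

/*
 * Hypothetical sketch, assuming the jemalloc-internal declarations above.
 * bitmap_init() with fill=false leaves every logical bit unset,
 * bitmap_sfu() claims the first unset bit, bitmap_unset() releases it.
 */
static void
bitmap_usage_sketch(void) {
    bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(512);
    bitmap_t bits[BITMAP_GROUPS(512)];

    bitmap_init(bits, &binfo, false);          /* All 512 bits start unset. */
    size_t first = bitmap_sfu(bits, &binfo);   /* Claims and returns bit 0. */
    assert(first == 0);
    assert(bitmap_get(bits, &binfo, first));   /* Bit 0 is now set. */
    bitmap_unset(bits, &binfo, first);         /* Make it available again. */
}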
deps/jemalloc/include/jemalloc/internal/cache_bin.h (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H
#define JEMALLOC_INTERNAL_CACHE_BIN_H

#include "jemalloc/internal/ql.h"

/*
 * The cache_bins are the mechanism that the tcache and the arena use to
 * communicate.  The tcache fills from and flushes to the arena by passing a
 * cache_bin_t to fill/flush.  When the arena needs to pull stats from the
 * tcaches associated with it, it does so by iterating over its
 * cache_bin_array_descriptor_t objects and reading out per-bin stats it
 * contains.  This makes it so that the arena need not know about the existence
 * of the tcache at all.
 */

/*
 * The count of the number of cached allocations in a bin.  We make this signed
 * so that negative numbers can encode "invalid" states (e.g. a low water mark
 * of -1 for a cache that has been depleted).
 */
typedef int32_t cache_bin_sz_t;

typedef struct cache_bin_stats_s cache_bin_stats_t;
struct cache_bin_stats_s {
    /*
     * Number of allocation requests that corresponded to the size of this
     * bin.
     */
    uint64_t nrequests;
};

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
typedef struct cache_bin_info_s cache_bin_info_t;
struct cache_bin_info_s {
    /* Upper limit on ncached. */
    cache_bin_sz_t ncached_max;
};

typedef struct cache_bin_s cache_bin_t;
struct cache_bin_s {
    /* Min # cached since last GC. */
    cache_bin_sz_t low_water;
    /* # of cached objects. */
    cache_bin_sz_t ncached;
    /*
     * ncached and stats are both modified frequently.  Let's keep them
     * close so that they have a higher chance of being on the same
     * cacheline, thus less write-backs.
     */
    cache_bin_stats_t tstats;
    /*
     * Stack of available objects.
     *
     * To make use of adjacent cacheline prefetch, the items in the avail
     * stack goes to higher address for newer allocations.  avail points
     * just above the available space, which means that
     * avail[-ncached, ... -1] are available items and the lowest item will
     * be allocated first.
     */
    void **avail;
};

typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
struct cache_bin_array_descriptor_s {
    /*
     * The arena keeps a list of the cache bins associated with it, for
     * stats collection.
     */
    ql_elm(cache_bin_array_descriptor_t) link;
    /* Pointers to the tcache bins. */
    cache_bin_t *bins_small;
    cache_bin_t *bins_large;
};

static inline void
cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
    cache_bin_t *bins_small, cache_bin_t *bins_large) {
    ql_elm_new(descriptor, link);
    descriptor->bins_small = bins_small;
    descriptor->bins_large = bins_large;
}

JEMALLOC_ALWAYS_INLINE void *
cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
    void *ret;

    if (unlikely(bin->ncached == 0)) {
        bin->low_water = -1;
        *success = false;
        return NULL;
    }
    /*
     * success (instead of ret) should be checked upon the return of this
     * function.  We avoid checking (ret == NULL) because there is never a
     * null stored on the avail stack (which is unknown to the compiler),
     * and eagerly checking ret would cause pipeline stall (waiting for the
     * cacheline).
     */
    *success = true;
    ret = *(bin->avail - bin->ncached);
    bin->ncached--;

    if (unlikely(bin->ncached < bin->low_water)) {
        bin->low_water = bin->ncached;
    }

    return ret;
}

#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
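The avail-stack convention described in the comments above is easy to miss: avail points just past the storage, the ncached items live at avail[-ncached] .. avail[-1], and the lowest slot is popped first, exactly as cache_bin_alloc_easy() does. A standalone sketch (hypothetical names, not part of the commit) that demonstrates the same pointer arithmetic:

/*
 * Standalone sketch of the avail-stack layout used by cache_bin_t above.
 * All names here are hypothetical and for illustration only.
 */
#include <assert.h>
#include <stdint.h>

typedef struct {
    int32_t ncached;
    void **avail;   /* Points one past the top of the backing storage. */
} mini_bin_t;

static void *mini_pop(mini_bin_t *bin) {
    if (bin->ncached == 0) {
        return NULL;
    }
    void *ret = *(bin->avail - bin->ncached);   /* Lowest occupied slot first. */
    bin->ncached--;
    return ret;
}

int main(void) {
    int a = 1, b = 2;
    void *storage[4];
    mini_bin_t bin = { 2, storage + 4 };    /* avail sits just past the space. */
    storage[2] = &a;    /* avail[-2]: handed out first. */
    storage[3] = &b;    /* avail[-1]: handed out second. */
    assert(mini_pop(&bin) == &a);
    assert(mini_pop(&bin) == &b);
    assert(mini_pop(&bin) == NULL);     /* Depleted. */
    return 0;
}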
deps/jemalloc/include/jemalloc/internal/chunk.h (deleted, 100644 → 0)

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * Size and alignment of memory chunks that are allocated by the OS's virtual
 * memory system.
 */
#define LG_CHUNK_DEFAULT 21

/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
    ((void *)((uintptr_t)(a) & ~chunksize_mask))

/* Return the chunk offset of address a. */
#define CHUNK_ADDR2OFFSET(a) \
    ((size_t)((uintptr_t)(a) & chunksize_mask))

/* Return the smallest chunk multiple that is >= s. */
#define CHUNK_CEILING(s) \
    (((s) + chunksize_mask) & ~chunksize_mask)

#define CHUNK_HOOKS_INITIALIZER { \
    NULL, \
    NULL, \
    NULL, \
    NULL, \
    NULL, \
    NULL, \
    NULL \
}

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern size_t opt_lg_chunk;
extern const char *opt_dss;

extern rtree_t chunks_rtree;

extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;

extern const chunk_hooks_t chunk_hooks_default;

chunk_hooks_t chunk_hooks_get(arena_t *arena);
chunk_hooks_t chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks);

bool chunk_register(const void *chunk, const extent_node_t *node);
void chunk_deregister(const void *chunk, const extent_node_t *node);
void *chunk_alloc_base(size_t size);
void *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, bool *zero,
    bool dalloc_node);
void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, bool committed);
void chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, bool zeroed, bool committed);
void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, bool committed);
bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
    size_t length);
bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t offset, size_t length);
bool chunk_boot(void);
void chunk_prefork(void);
void chunk_postfork_parent(void);
void chunk_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
extent_node_t *chunk_lookup(const void *chunk, bool dependent);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
JEMALLOC_INLINE extent_node_t *
chunk_lookup(const void *ptr, bool dependent)
{
    return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent));
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

#include "jemalloc/internal/chunk_dss.h"
#include "jemalloc/internal/chunk_mmap.h"
deps/jemalloc/include/jemalloc/internal/chunk_dss.h (deleted, 100644 → 0)

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef enum {
    dss_prec_disabled  = 0,
    dss_prec_primary   = 1,
    dss_prec_secondary = 2,

    dss_prec_limit     = 3
} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

extern const char *dss_prec_names[];

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

dss_prec_t chunk_dss_prec_get(void);
bool chunk_dss_prec_set(dss_prec_t dss_prec);
void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit);
bool chunk_in_dss(void *chunk);
bool chunk_dss_boot(void);
void chunk_dss_prefork(void);
void chunk_dss_postfork_parent(void);
void chunk_dss_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/chunk_mmap.h (deleted, 100644 → 0)

/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero,
    bool *commit);
bool chunk_dalloc_mmap(void *chunk, size_t size);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
deps/jemalloc/include/jemalloc/internal/ckh.h (modified; post-upgrade version shown, replacing the old JEMALLOC_H_TYPES/STRUCTS/EXTERNS/INLINES phases)

#ifndef JEMALLOC_INTERNAL_CKH_H
#define JEMALLOC_INTERNAL_CKH_H

#include "jemalloc/internal/tsd.h"

/* Cuckoo hashing implementation.  Skip to the end for the interface. */

/******************************************************************************/
/* INTERNAL DEFINITIONS -- IGNORE */
/******************************************************************************/

/* Maintain counters used to get an idea of performance. */
/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
/* #define CKH_VERBOSE */

/*
 * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket.  Try to fit
 * one bucket per L1 cache line.
 */
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)

/* Typedefs to allow easy function pointer passing. */
typedef void ckh_hash_t (const void *, size_t[2]);
typedef bool ckh_keycomp_t (const void *, const void *);

/* Hash table cell. */
typedef struct {
    const void *key;
    const void *data;
} ckhc_t;

/* The hash table itself. */
typedef struct {
#ifdef CKH_COUNT
    /* Counters used to get an idea of performance. */
    uint64_t ngrows;
    uint64_t nshrinks;
    uint64_t nshrinkfails;
    uint64_t ninserts;
    uint64_t nrelocs;
#endif

    /* Used for pseudo-random number generation. */
    uint64_t prng_state;

    /* Total number of items. */
    size_t count;

    /*
     * Minimum and current number of hash table buckets.  There are
     * 2^LG_CKH_BUCKET_CELLS cells per bucket.
     */
    unsigned lg_minbuckets;
    unsigned lg_curbuckets;

    /* Hash and comparison functions. */
    ckh_hash_t *hash;
    ckh_keycomp_t *keycomp;

    /* Hash table with 2^lg_curbuckets buckets. */
    ckhc_t *tab;
} ckh_t;

/******************************************************************************/
/* BEGIN PUBLIC API */
/******************************************************************************/

/* Lifetime management.  Minitems is the initial capacity. */
bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
    ckh_keycomp_t *keycomp);
void ckh_delete(tsd_t *tsd, ckh_t *ckh);

/* Get the number of elements in the set. */
size_t ckh_count(ckh_t *ckh);

/*
 * To iterate over the elements in the table, initialize *tabind to 0 and call
 * this function until it returns true.  Each call that returns false will
 * update *key and *data to the next element in the table, assuming the pointers
 * are non-NULL.
 */
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);

/*
 * Basic hash table operations -- insert, removal, lookup.  For ckh_remove and
 * ckh_search, key or data can be NULL.  The hash-table only stores pointers to
 * the key and value, and doesn't do any lifetime management.
 */
bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
    void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);

/* Some useful hash and comparison functions for strings and pointers. */
void ckh_string_hash(const void *key, size_t r_hash[2]);
bool ckh_string_keycomp(const void *k1, const void *k2);
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
bool ckh_pointer_keycomp(const void *k1, const void *k2);

#endif /* JEMALLOC_INTERNAL_CKH_H */
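The iteration contract documented on ckh_iter() above translates into a simple loop. A sketch of that pattern (hypothetical helper name; tsd/ckh construction is elided, so this is illustrative only and assumes the jemalloc-internal declarations above):

/*
 * Hypothetical sketch of the ckh_iter() contract: keep calling until it
 * returns true; each false return has stored the next element into key/data.
 */
static void
ckh_dump_sketch(ckh_t *ckh) {
    size_t tabind = 0;
    void *key, *data;

    while (!ckh_iter(ckh, &tabind, &key, &data)) {
        malloc_printf("key=%p data=%p\n", key, data);
    }
}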
deps/jemalloc/include/jemalloc/internal/ctl.h (modified; post-upgrade version of the hunks shown, replacing the old JEMALLOC_H_TYPES/STRUCTS/EXTERNS/INLINES phases)

#ifndef JEMALLOC_INTERNAL_CTL_H
#define JEMALLOC_INTERNAL_CTL_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"

/* Maximum ctl tree depth. */
#define CTL_MAX_DEPTH 7

typedef struct ctl_node_s {
    bool named;
} ctl_node_t;

typedef struct ctl_named_node_s {
    ctl_node_t node;
    const char *name;
    /* If (nchildren == 0), this is a terminal node. */
    size_t nchildren;
    const ctl_node_t *children;
    int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
        size_t);
} ctl_named_node_t;

typedef struct ctl_indexed_node_s {
    struct ctl_node_s node;
    const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
        size_t);
} ctl_indexed_node_t;

typedef struct ctl_arena_stats_s {
    arena_stats_t astats;

    /* Aggregate stats for small size classes, based on bin stats. */
    size_t allocated_small;
    uint64_t nmalloc_small;
    uint64_t ndalloc_small;
    uint64_t nrequests_small;

    bin_stats_t bstats[NBINS];
    arena_stats_large_t lstats[NSIZES - NBINS];
} ctl_arena_stats_t;

typedef struct ctl_stats_s {
    size_t allocated;
    size_t active;
    size_t metadata;
    size_t metadata_thp;
    size_t resident;
    size_t mapped;
    size_t retained;

    background_thread_stats_t background_thread;
    mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
} ctl_stats_t;

typedef struct ctl_arena_s ctl_arena_t;
struct ctl_arena_s {
    unsigned arena_ind;
    bool initialized;
    ql_elm(ctl_arena_t) destroyed_link;

    /* Basic stats, supported even if !config_stats. */
    unsigned nthreads;
    const char *dss;
    ssize_t dirty_decay_ms;
    ssize_t muzzy_decay_ms;
    size_t pactive;
    size_t pdirty;
    size_t pmuzzy;

    /* NULL if !config_stats. */
    ctl_arena_stats_t *astats;
};

typedef struct ctl_arenas_s {
    uint64_t epoch;
    unsigned narenas;
    ql_head(ctl_arena_t) destroyed;

    /*
     * Element 0 corresponds to merged stats for extant arenas (accessed via
     * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
     * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
     * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas.
     */
    ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
} ctl_arenas_t;

int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen);
int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);

#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
    if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
        != 0) { \
        malloc_printf( \
...
@@ -85,7 +111,7 @@ void ctl_postfork_child(void);
    } \
} while (0)

#define xmallctlnametomib(name, mibp, miblenp) do { \
    if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
        malloc_printf("<jemalloc>: Failure in " \
            "xmallctlnametomib(\"%s\", ...)\n", name); \
...
@@ -93,7 +119,7 @@ void ctl_postfork_child(void);
    } \
} while (0)

#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
    if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
        newlen) != 0) { \
        malloc_write( \
...
@@ -102,10 +128,4 @@ void ctl_postfork_child(void);
    } \
} while (0)

#endif /* JEMALLOC_INTERNAL_CTL_H */
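The xmallctl() wrapper above is the abort-on-failure form of je_mallctl() that jemalloc uses internally (for example in its stats printer). A sketch of the usual pattern, refreshing the statistics epoch and then reading a counter; the helper name is hypothetical, while "epoch" and "stats.allocated" are documented mallctl names:

/*
 * Hypothetical sketch of the usual xmallctl() pattern. xmallctl() aborts on
 * failure, so no per-call error handling is needed here.
 */
static size_t
read_allocated_sketch(void) {
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);
    xmallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch)); /* Refresh stats. */

    size_t allocated;
    sz = sizeof(allocated);
    xmallctl("stats.allocated", &allocated, &sz, NULL, 0);
    return allocated;
}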
deps/jemalloc/include/jemalloc/internal/div.h (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_DIV_H
#define JEMALLOC_INTERNAL_DIV_H

#include "jemalloc/internal/assert.h"

/*
 * This module does the division that computes the index of a region in a slab,
 * given its offset relative to the base.
 * That is, given a divisor d, an n = i * d (all integers), we'll return i.
 * We do some pre-computation to do this more quickly than a CPU division
 * instruction.
 * We bound n < 2^32, and don't support dividing by one.
 */

typedef struct div_info_s div_info_t;
struct div_info_s {
    uint32_t magic;
#ifdef JEMALLOC_DEBUG
    size_t d;
#endif
};

void div_init(div_info_t *div_info, size_t divisor);

static inline size_t
div_compute(div_info_t *div_info, size_t n) {
    assert(n <= (uint32_t)-1);
    /*
     * This generates, e.g. mov; imul; shr on x86-64.  On a 32-bit machine,
     * the compilers I tried were all smart enough to turn this into the
     * appropriate "get the high 32 bits of the result of a multiply" (e.g.
     * mul; mov edx eax; on x86, umull on arm, etc.).
     */
    size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32;
#ifdef JEMALLOC_DEBUG
    assert(i * div_info->d == n);
#endif
    return i;
}

#endif /* JEMALLOC_INTERNAL_DIV_H */
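div_compute() above recovers i from n = i * d by multiplying by a precomputed fixed-point reciprocal and keeping the high 32 bits. The diff only declares div_init(), so the magic value used below, ceil(2^32 / d), is an assumption for illustration; for exact multiples n < 2^32 with d > 1 the high bits come out exact. A standalone sketch:

/*
 * Standalone sketch of the magic-number division used by div_compute().
 * The magic constant here (ceil(2^32 / d)) is an assumption for illustration,
 * not taken from the commit, which does not show div_init()'s body.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t make_magic(uint32_t d) {
    uint64_t two_to_32 = (uint64_t)1 << 32;
    uint32_t magic = (uint32_t)(two_to_32 / d);
    if (two_to_32 % d != 0) {
        magic++;    /* Round up to ceil(2^32 / d). */
    }
    return magic;
}

int main(void) {
    uint32_t d = 48;    /* E.g. a region size within a slab. */
    uint32_t magic = make_magic(d);
    for (uint32_t i = 0; i < 100000; i++) {
        uint64_t n = (uint64_t)i * d;
        uint64_t got = (n * magic) >> 32;   /* Same step as div_compute(). */
        assert(got == i);
    }
    return 0;
}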
deps/jemalloc/include/jemalloc/internal/emitter.h
0 → 100644
View file @
08e1c8e8
#ifndef JEMALLOC_INTERNAL_EMITTER_H
#define JEMALLOC_INTERNAL_EMITTER_H

#include "jemalloc/internal/ql.h"

typedef enum emitter_output_e emitter_output_t;
enum emitter_output_e {
    emitter_output_json,
    emitter_output_table
};

typedef enum emitter_justify_e emitter_justify_t;
enum emitter_justify_e {
    emitter_justify_left,
    emitter_justify_right,
    /* Not for users; just to pass to internal functions. */
    emitter_justify_none
};

typedef enum emitter_type_e emitter_type_t;
enum emitter_type_e {
    emitter_type_bool,
    emitter_type_int,
    emitter_type_unsigned,
    emitter_type_uint32,
    emitter_type_uint64,
    emitter_type_size,
    emitter_type_ssize,
    emitter_type_string,
    /*
     * A title is a column title in a table; it's just a string, but it's
     * not quoted.
     */
    emitter_type_title,
};

typedef struct emitter_col_s emitter_col_t;
struct emitter_col_s {
    /* Filled in by the user. */
    emitter_justify_t justify;
    int width;
    emitter_type_t type;
    union {
        bool bool_val;
        int int_val;
        unsigned unsigned_val;
        uint32_t uint32_val;
        uint64_t uint64_val;
        size_t size_val;
        ssize_t ssize_val;
        const char *str_val;
    };

    /* Filled in by initialization. */
    ql_elm(emitter_col_t) link;
};

typedef struct emitter_row_s emitter_row_t;
struct emitter_row_s {
    ql_head(emitter_col_t) cols;
};

static inline void
emitter_row_init(emitter_row_t *row) {
    ql_new(&row->cols);
}

static inline void
emitter_col_init(emitter_col_t *col, emitter_row_t *row) {
    ql_elm_new(col, link);
    ql_tail_insert(&row->cols, col, link);
}

typedef struct emitter_s emitter_t;
struct emitter_s {
    emitter_output_t output;
    /* The output information. */
    void (*write_cb)(void *, const char *);
    void *cbopaque;
    int nesting_depth;
    /* True if we've already emitted a value at the given depth. */
    bool item_at_depth;
};

static inline void
emitter_init(emitter_t *emitter, emitter_output_t emitter_output,
    void (*write_cb)(void *, const char *), void *cbopaque) {
    emitter->output = emitter_output;
    emitter->write_cb = write_cb;
    emitter->cbopaque = cbopaque;
    emitter->item_at_depth = false;
    emitter->nesting_depth = 0;
}

/* Internal convenience function. Write to the emitter the given string. */
JEMALLOC_FORMAT_PRINTF(2, 3)
static inline void
emitter_printf(emitter_t *emitter, const char *format, ...) {
    va_list ap;

    va_start(ap, format);
    malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
    va_end(ap);
}

/* Write to the emitter the given string, but only in table mode. */
JEMALLOC_FORMAT_PRINTF(2, 3)
static inline void
emitter_table_printf(emitter_t *emitter, const char *format, ...) {
    if (emitter->output == emitter_output_table) {
        va_list ap;
        va_start(ap, format);
        malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
        va_end(ap);
    }
}

static inline void
emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier,
    emitter_justify_t justify, int width) {
    size_t written;
    if (justify == emitter_justify_none) {
        written = malloc_snprintf(out_fmt, out_size,
            "%%%s", fmt_specifier);
    } else if (justify == emitter_justify_left) {
        written = malloc_snprintf(out_fmt, out_size,
            "%%-%d%s", width, fmt_specifier);
    } else {
        written = malloc_snprintf(out_fmt, out_size,
            "%%%d%s", width, fmt_specifier);
    }
    /* Only happens in case of bad format string, which *we* choose. */
    assert(written < out_size);
}

/*
 * Internal. Emit the given value type in the relevant encoding (so that the
 * bool true gets mapped to json "true", but the string "true" gets mapped to
 * json "\"true\"", for instance).
 *
 * Width is ignored if justify is emitter_justify_none.
 */
static inline void
emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
    emitter_type_t value_type, const void *value) {
    size_t str_written;
#define BUF_SIZE 256
#define FMT_SIZE 10
    /*
     * We dynamically generate a format string to emit, to let us use the
     * snprintf machinery. This is kinda hacky, but gets the job done
     * quickly without having to think about the various snprintf edge
     * cases.
     */
    char fmt[FMT_SIZE];
    char buf[BUF_SIZE];

#define EMIT_SIMPLE(type, format)					\
    emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width);		\
    emitter_printf(emitter, fmt, *(const type *)value);		\

    switch (value_type) {
    case emitter_type_bool:
        emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width);
        emitter_printf(emitter, fmt, *(const bool *)value ?
            "true" : "false");
        break;
    case emitter_type_int:
        EMIT_SIMPLE(int, "d")
        break;
    case emitter_type_unsigned:
        EMIT_SIMPLE(unsigned, "u")
        break;
    case emitter_type_ssize:
        EMIT_SIMPLE(ssize_t, "zd")
        break;
    case emitter_type_size:
        EMIT_SIMPLE(size_t, "zu")
        break;
    case emitter_type_string:
        str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"",
            *(const char *const *)value);
        /*
         * We control the strings we output; we shouldn't get anything
         * anywhere near the fmt size.
         */
        assert(str_written < BUF_SIZE);
        emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width);
        emitter_printf(emitter, fmt, buf);
        break;
    case emitter_type_uint32:
        EMIT_SIMPLE(uint32_t, FMTu32)
        break;
    case emitter_type_uint64:
        EMIT_SIMPLE(uint64_t, FMTu64)
        break;
    case emitter_type_title:
        EMIT_SIMPLE(char *const, "s");
        break;
    default:
        unreachable();
    }
#undef BUF_SIZE
#undef FMT_SIZE
}

/* Internal functions. In json mode, tracks nesting state. */
static inline void
emitter_nest_inc(emitter_t *emitter) {
    emitter->nesting_depth++;
    emitter->item_at_depth = false;
}

static inline void
emitter_nest_dec(emitter_t *emitter) {
    emitter->nesting_depth--;
    emitter->item_at_depth = true;
}

static inline void
emitter_indent(emitter_t *emitter) {
    int amount = emitter->nesting_depth;
    const char *indent_str;
    if (emitter->output == emitter_output_json) {
        indent_str = "\t";
    } else {
        amount *= 2;
        indent_str = " ";
    }
    for (int i = 0; i < amount; i++) {
        emitter_printf(emitter, "%s", indent_str);
    }
}

static inline void
emitter_json_key_prefix(emitter_t *emitter) {
    emitter_printf(emitter, "%s\n", emitter->item_at_depth ? "," : "");
    emitter_indent(emitter);
}

static inline void
emitter_begin(emitter_t *emitter) {
    if (emitter->output == emitter_output_json) {
        assert(emitter->nesting_depth == 0);
        emitter_printf(emitter, "{");
        emitter_nest_inc(emitter);
    } else {
        // tabular init
        emitter_printf(emitter, "%s", "");
    }
}

static inline void
emitter_end(emitter_t *emitter) {
    if (emitter->output == emitter_output_json) {
        assert(emitter->nesting_depth == 1);
        emitter_nest_dec(emitter);
        emitter_printf(emitter, "\n}\n");
    }
}

/*
 * Note emits a different kv pair as well, but only in table mode. Omits the
 * note if table_note_key is NULL.
 */
static inline void
emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
    emitter_type_t value_type, const void *value,
    const char *table_note_key, emitter_type_t table_note_value_type,
    const void *table_note_value) {
    if (emitter->output == emitter_output_json) {
        assert(emitter->nesting_depth > 0);
        emitter_json_key_prefix(emitter);
        emitter_printf(emitter, "\"%s\": ", json_key);
        emitter_print_value(emitter, emitter_justify_none, -1,
            value_type, value);
    } else {
        emitter_indent(emitter);
        emitter_printf(emitter, "%s: ", table_key);
        emitter_print_value(emitter, emitter_justify_none, -1,
            value_type, value);
        if (table_note_key != NULL) {
            emitter_printf(emitter, " (%s: ", table_note_key);
            emitter_print_value(emitter, emitter_justify_none, -1,
                table_note_value_type, table_note_value);
            emitter_printf(emitter, ")");
        }
        emitter_printf(emitter, "\n");
    }
    emitter->item_at_depth = true;
}

static inline void
emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
    emitter_type_t value_type, const void *value) {
    emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL,
        emitter_type_bool, NULL);
}

static inline void
emitter_json_kv(emitter_t *emitter, const char *json_key,
    emitter_type_t value_type, const void *value) {
    if (emitter->output == emitter_output_json) {
        emitter_kv(emitter, json_key, NULL, value_type, value);
    }
}

static inline void
emitter_table_kv(emitter_t *emitter, const char *table_key,
    emitter_type_t value_type, const void *value) {
    if (emitter->output == emitter_output_table) {
        emitter_kv(emitter, NULL, table_key, value_type, value);
    }
}

static inline void
emitter_dict_begin(emitter_t *emitter, const char *json_key,
    const char *table_header) {
    if (emitter->output == emitter_output_json) {
        emitter_json_key_prefix(emitter);
        emitter_printf(emitter, "\"%s\": {", json_key);
        emitter_nest_inc(emitter);
    } else {
        emitter_indent(emitter);
        emitter_printf(emitter, "%s\n", table_header);
        emitter_nest_inc(emitter);
    }
}

static inline void
emitter_dict_end(emitter_t *emitter) {
    if (emitter->output == emitter_output_json) {
        assert(emitter->nesting_depth > 0);
        emitter_nest_dec(emitter);
        emitter_printf(emitter, "\n");
        emitter_indent(emitter);
        emitter_printf(emitter, "}");
    } else {
        emitter_nest_dec(emitter);
    }
}

static inline void
emitter_json_dict_begin(emitter_t *emitter, const char *json_key) {
    if (emitter->output == emitter_output_json) {
        emitter_dict_begin(emitter, json_key, NULL);
    }
}

static inline void
emitter_json_dict_end(emitter_t *emitter) {
    if (emitter->output == emitter_output_json) {
        emitter_dict_end(emitter);
    }
}

static inline void
emitter_table_dict_begin(emitter_t *emitter, const char *table_key) {
    if (emitter->output == emitter_output_table) {
        emitter_dict_begin(emitter, NULL, table_key);
    }
}

static inline void
emitter_table_dict_end(emitter_t *emitter) {
    if (emitter->output == emitter_output_table) {
        emitter_dict_end(emitter);
    }
}

static inline void
emitter_json_arr_begin(emitter_t *emitter, const char *json_key) {
    if (emitter->output == emitter_output_json) {
        emitter_json_key_prefix(emitter);
        emitter_printf(emitter, "\"%s\": [", json_key);
        emitter_nest_inc(emitter);
    }
}

static inline void
emitter_json_arr_end(emitter_t *emitter) {
    if (emitter->output == emitter_output_json) {
        assert(emitter->nesting_depth > 0);
        emitter_nest_dec(emitter);
        emitter_printf(emitter, "\n");
        emitter_indent(emitter);
        emitter_printf(emitter, "]");
    }
}

static inline void
emitter_json_arr_obj_begin(emitter_t *emitter) {
    if (emitter->output == emitter_output_json) {
        emitter_json_key_prefix(emitter);
        emitter_printf(emitter, "{");
        emitter_nest_inc(emitter);
    }
}

static inline void
emitter_json_arr_obj_end(emitter_t *emitter) {
    if (emitter->output == emitter_output_json) {
        assert(emitter->nesting_depth > 0);
        emitter_nest_dec(emitter);
        emitter_printf(emitter, "\n");
        emitter_indent(emitter);
        emitter_printf(emitter, "}");
    }
}

static inline void
emitter_json_arr_value(emitter_t *emitter, emitter_type_t value_type,
    const void *value) {
    if (emitter->output == emitter_output_json) {
        emitter_json_key_prefix(emitter);
        emitter_print_value(emitter, emitter_justify_none, -1,
            value_type, value);
    }
}

static inline void
emitter_table_row(emitter_t *emitter, emitter_row_t *row) {
    if (emitter->output != emitter_output_table) {
        return;
    }
    emitter_col_t *col;
    ql_foreach(col, &row->cols, link) {
        emitter_print_value(emitter, col->justify, col->width,
            col->type, (const void *)&col->bool_val);
    }
    emitter_table_printf(emitter, "\n");
}

#endif /* JEMALLOC_INTERNAL_EMITTER_H */
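The same call sequence drives both output modes, which is the point of this header: jemalloc's stats code can describe each datum once and get either JSON or the human-readable table. A minimal sketch using only the functions declared above; the stdio write callback and the key names are illustrative, not jemalloc's actual malloc_write plumbing, and it only compiles inside the tree where this header's dependencies exist:

#include <stdio.h>

/* Illustrative callback matching emitter_init()'s write_cb signature. */
static void
write_to_stream(void *cbopaque, const char *s) {
    fputs(s, (FILE *)cbopaque);
}

static void
emit_example(emitter_output_t output) {
    emitter_t emitter;
    emitter_init(&emitter, output, write_to_stream, stdout);

    bool background_thread = false;
    unsigned narenas = 8;

    emitter_begin(&emitter);
    /* "jemalloc" is the JSON key; "Allocator:" is the table heading. */
    emitter_dict_begin(&emitter, "jemalloc", "Allocator:");
    emitter_kv(&emitter, "background_thread", "background thread",
        emitter_type_bool, &background_thread);
    emitter_kv(&emitter, "narenas", "number of arenas",
        emitter_type_unsigned, &narenas);
    emitter_dict_end(&emitter);
    emitter_end(&emitter);
}

With emitter_output_json this nests the two values inside a "jemalloc" object; with emitter_output_table the identical calls print the heading followed by indented "key: value" lines, the commas and braces being suppressed by the mode checks in each helper.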
deps/jemalloc/include/jemalloc/internal/extent.h
deleted
100644 → 0
View file @
8f4e2075
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct extent_node_s extent_node_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Tree of extents.  Use accessor functions for en_* fields. */
struct extent_node_s {
    /* Arena from which this extent came, if any. */
    arena_t *en_arena;

    /* Pointer to the extent that this tree node is responsible for. */
    void *en_addr;

    /* Total region size. */
    size_t en_size;

    /*
     * The zeroed flag is used by chunk recycling code to track whether
     * memory is zero-filled.
     */
    bool en_zeroed;

    /*
     * True if physical memory is committed to the extent, whether
     * explicitly or implicitly as on a system that overcommits and
     * satisfies physical memory needs on demand via soft page faults.
     */
    bool en_committed;

    /*
     * The achunk flag is used to validate that huge allocation lookups
     * don't return arena chunks.
     */
    bool en_achunk;

    /* Profile counters, used for huge objects. */
    prof_tctx_t *en_prof_tctx;

    /* Linkage for arena's runs_dirty and chunks_cache rings. */
    arena_runs_dirty_link_t rd;
    qr(extent_node_t) cc_link;

    union {
        /* Linkage for the size/address-ordered tree. */
        rb_node(extent_node_t) szad_link;

        /* Linkage for arena's huge and node_cache lists. */
        ql_elm(extent_node_t) ql_link;
    };

    /* Linkage for the address-ordered tree. */
    rb_node(extent_node_t) ad_link;
};
typedef rb_tree(extent_node_t) extent_tree_t;

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)

rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
arena_t	*extent_node_arena_get(const extent_node_t *node);
void	*extent_node_addr_get(const extent_node_t *node);
size_t	extent_node_size_get(const extent_node_t *node);
bool	extent_node_zeroed_get(const extent_node_t *node);
bool	extent_node_committed_get(const extent_node_t *node);
bool	extent_node_achunk_get(const extent_node_t *node);
prof_tctx_t	*extent_node_prof_tctx_get(const extent_node_t *node);
void	extent_node_arena_set(extent_node_t *node, arena_t *arena);
void	extent_node_addr_set(extent_node_t *node, void *addr);
void	extent_node_size_set(extent_node_t *node, size_t size);
void	extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void	extent_node_committed_set(extent_node_t *node, bool committed);
void	extent_node_achunk_set(extent_node_t *node, bool achunk);
void	extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void	extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
    size_t size, bool zeroed, bool committed);
void	extent_node_dirty_linkage_init(extent_node_t *node);
void	extent_node_dirty_insert(extent_node_t *node,
    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
void	extent_node_dirty_remove(extent_node_t *node);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
JEMALLOC_INLINE arena_t *
extent_node_arena_get(const extent_node_t *node)
{
    return (node->en_arena);
}

JEMALLOC_INLINE void *
extent_node_addr_get(const extent_node_t *node)
{
    return (node->en_addr);
}

JEMALLOC_INLINE size_t
extent_node_size_get(const extent_node_t *node)
{
    return (node->en_size);
}

JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
{
    return (node->en_zeroed);
}

JEMALLOC_INLINE bool
extent_node_committed_get(const extent_node_t *node)
{
    assert(!node->en_achunk);
    return (node->en_committed);
}

JEMALLOC_INLINE bool
extent_node_achunk_get(const extent_node_t *node)
{
    return (node->en_achunk);
}

JEMALLOC_INLINE prof_tctx_t *
extent_node_prof_tctx_get(const extent_node_t *node)
{
    return (node->en_prof_tctx);
}

JEMALLOC_INLINE void
extent_node_arena_set(extent_node_t *node, arena_t *arena)
{
    node->en_arena = arena;
}

JEMALLOC_INLINE void
extent_node_addr_set(extent_node_t *node, void *addr)
{
    node->en_addr = addr;
}

JEMALLOC_INLINE void
extent_node_size_set(extent_node_t *node, size_t size)
{
    node->en_size = size;
}

JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{
    node->en_zeroed = zeroed;
}

JEMALLOC_INLINE void
extent_node_committed_set(extent_node_t *node, bool committed)
{
    node->en_committed = committed;
}

JEMALLOC_INLINE void
extent_node_achunk_set(extent_node_t *node, bool achunk)
{
    node->en_achunk = achunk;
}

JEMALLOC_INLINE void
extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
{
    node->en_prof_tctx = tctx;
}

JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
    bool zeroed, bool committed)
{
    extent_node_arena_set(node, arena);
    extent_node_addr_set(node, addr);
    extent_node_size_set(node, size);
    extent_node_zeroed_set(node, zeroed);
    extent_node_committed_set(node, committed);
    extent_node_achunk_set(node, false);
    if (config_prof)
        extent_node_prof_tctx_set(node, NULL);
}

JEMALLOC_INLINE void
extent_node_dirty_linkage_init(extent_node_t *node)
{
    qr_new(&node->rd, rd_link);
    qr_new(node, cc_link);
}

JEMALLOC_INLINE void
extent_node_dirty_insert(extent_node_t *node,
    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
{
    qr_meld(runs_dirty, &node->rd, rd_link);
    qr_meld(chunks_dirty, node, cc_link);
}

JEMALLOC_INLINE void
extent_node_dirty_remove(extent_node_t *node)
{
    qr_remove(&node->rd, rd_link);
    qr_remove(node, cc_link);
}

#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
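For contrast with the extent_t/extents_t API that replaces this file below, here is roughly how the deleted 4.x node type was exercised; a sketch built only from the accessors declared above, with the arena pointer, chunk address, and size as placeholders:

/* Sketch only: initialize and sanity-check a 4.x extent node. */
static void
record_chunk(extent_node_t *node, arena_t *arena, void *chunk, size_t size) {
    /* zeroed = true, committed = true; init clears the achunk flag. */
    extent_node_init(node, arena, chunk, size, true, true);
    extent_node_dirty_linkage_init(node);

    assert(extent_node_addr_get(node) == chunk);
    assert(extent_node_size_get(node) == size);
    assert(!extent_node_achunk_get(node));
}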
deps/jemalloc/include/jemalloc/internal/extent_dss.h
0 → 100644
View file @
08e1c8e8
#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
#define JEMALLOC_INTERNAL_EXTENT_DSS_H

typedef enum {
    dss_prec_disabled  = 0,
    dss_prec_primary   = 1,
    dss_prec_secondary = 2,

    dss_prec_limit     = 3
} dss_prec_t;
#define DSS_PREC_DEFAULT	dss_prec_secondary
#define DSS_DEFAULT		"secondary"

extern const char *dss_prec_names[];

extern const char *opt_dss;

dss_prec_t extent_dss_prec_get(void);
bool extent_dss_prec_set(dss_prec_t dss_prec);
void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit);
bool extent_in_dss(void *addr);
bool extent_dss_mergeable(void *addr_a, void *addr_b);
void extent_dss_boot(void);

#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */
deps/jemalloc/include/jemalloc/internal/extent_externs.h
0 → 100644
View file @
08e1c8e8
#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H

#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"

extern size_t opt_lg_extent_max_active_fit;

extern rtree_t extents_rtree;
extern const extent_hooks_t extent_hooks_default;
extern mutex_pool_t extent_mutex_pool;

extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);

extent_hooks_t *extent_hooks_get(arena_t *arena);
extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
    extent_hooks_t *extent_hooks);

#ifdef JEMALLOC_JET
size_t extent_size_quantize_floor(size_t size);
size_t extent_size_quantize_ceil(size_t size);
#endif
rb_proto(, extent_avail_, extent_tree_t, extent_t)
ph_proto(, extent_heap_, extent_heap_t, extent_t)

bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
    bool delay_coalesce);
extent_state_t extents_state_get(const extents_t *extents);
size_t extents_npages_get(extents_t *extents);
extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit);
void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
void extents_prefork(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent);
void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent);
bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);

bool extent_boot(void);

#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */