ruanhaishen / redis · Commits

Commit 4a884343, authored Oct 10, 2021 by Yoav Steinberg
Parent: 7ff7536e

    Delete old jemalloc before pulling in subtree.
Changes: 169
Too many changes to show. To preserve performance only 169 of 169+ files are displayed.
deps/jemalloc/include/jemalloc/internal/background_thread_inlines.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H

JEMALLOC_ALWAYS_INLINE bool
background_thread_enabled(void) {
    return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED);
}

JEMALLOC_ALWAYS_INLINE void
background_thread_enabled_set(tsdn_t *tsdn, bool state) {
    malloc_mutex_assert_owner(tsdn, &background_thread_lock);
    atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED);
}

JEMALLOC_ALWAYS_INLINE background_thread_info_t *
arena_background_thread_info_get(arena_t *arena) {
    unsigned arena_ind = arena_ind_get(arena);
    return &background_thread_info[arena_ind % ncpus];
}

JEMALLOC_ALWAYS_INLINE uint64_t
background_thread_wakeup_time_get(background_thread_info_t *info) {
    uint64_t next_wakeup = nstime_ns(&info->next_wakeup);
    assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) ==
        (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP));
    return next_wakeup;
}

JEMALLOC_ALWAYS_INLINE void
background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info,
    uint64_t wakeup_time) {
    malloc_mutex_assert_owner(tsdn, &info->mtx);
    atomic_store_b(&info->indefinite_sleep,
        wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE);
    nstime_init(&info->next_wakeup, wakeup_time);
}

JEMALLOC_ALWAYS_INLINE bool
background_thread_indefinite_sleep(background_thread_info_t *info) {
    return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
}

JEMALLOC_ALWAYS_INLINE void
arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
    bool is_background_thread) {
    if (!background_thread_enabled() || is_background_thread) {
        return;
    }
    background_thread_info_t *info =
        arena_background_thread_info_get(arena);
    if (background_thread_indefinite_sleep(info)) {
        background_thread_interval_check(tsdn, arena,
            &arena->decay_dirty, 0);
    }
}

#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */
deps/jemalloc/include/jemalloc/internal/background_thread_structs.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H

/* This file really combines "structs" and "types", but only transitionally. */

#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK)
#  define JEMALLOC_PTHREAD_CREATE_WRAPPER
#endif

#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX
#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT

typedef enum {
    background_thread_stopped,
    background_thread_started,
    /* Thread waits on the global lock when paused (for arena_reset). */
    background_thread_paused,
} background_thread_state_t;

struct background_thread_info_s {
#ifdef JEMALLOC_BACKGROUND_THREAD
    /* Background thread is pthread specific. */
    pthread_t thread;
    pthread_cond_t cond;
#endif
    malloc_mutex_t mtx;
    background_thread_state_t state;
    /* When true, it means no wakeup scheduled. */
    atomic_b_t indefinite_sleep;
    /* Next scheduled wakeup time (absolute time in ns). */
    nstime_t next_wakeup;
    /*
     * Since the last background thread run, newly added number of pages
     * that need to be purged by the next wakeup.  This is adjusted on
     * epoch advance, and is used to determine whether we should signal the
     * background thread to wake up earlier.
     */
    size_t npages_to_purge_new;
    /* Stats: total number of runs since started. */
    uint64_t tot_n_runs;
    /* Stats: total sleep time since started. */
    nstime_t tot_sleep_time;
};
typedef struct background_thread_info_s background_thread_info_t;

struct background_thread_stats_s {
    size_t num_threads;
    uint64_t num_runs;
    nstime_t run_interval;
};
typedef struct background_thread_stats_s background_thread_stats_t;

#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */
deps/jemalloc/include/jemalloc/internal/base_externs.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
#define JEMALLOC_INTERNAL_BASE_EXTERNS_H

extern metadata_thp_mode_t opt_metadata_thp;
extern const char *metadata_thp_mode_names[];

base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
extent_hooks_t *base_extent_hooks_get(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
    extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
    size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);

#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/base_inlines.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
#define JEMALLOC_INTERNAL_BASE_INLINES_H

static inline unsigned
base_ind_get(const base_t *base) {
    return base->ind;
}

static inline bool
metadata_thp_enabled(void) {
    return (opt_metadata_thp != metadata_thp_disabled);
}

#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
deps/jemalloc/include/jemalloc/internal/base_structs.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
#define JEMALLOC_INTERNAL_BASE_STRUCTS_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"

/* Embedded at the beginning of every block of base-managed virtual memory. */
struct base_block_s {
    /* Total size of block's virtual memory mapping. */
    size_t size;
    /* Next block in list of base's blocks. */
    base_block_t *next;
    /* Tracks unused trailing space. */
    extent_t extent;
};

struct base_s {
    /* Associated arena's index within the arenas array. */
    unsigned ind;
    /*
     * User-configurable extent hook functions.  Points to an
     * extent_hooks_t.
     */
    atomic_p_t extent_hooks;
    /* Protects base_alloc() and base_stats_get() operations. */
    malloc_mutex_t mtx;
    /* Using THP when true (metadata_thp auto mode). */
    bool auto_thp_switched;
    /*
     * Most recent size class in the series of increasingly large base
     * extents.  Logarithmic spacing between subsequent allocations ensures
     * that the total number of distinct mappings remains small.
     */
    pszind_t pind_last;
    /* Serial number generation state. */
    size_t extent_sn_next;
    /* Chain of all blocks associated with base. */
    base_block_t *blocks;
    /* Heap of extents that track unused trailing space within blocks. */
    extent_heap_t avail[NSIZES];
    /* Stats, only maintained if config_stats. */
    size_t allocated;
    size_t resident;
    size_t mapped;
    /* Number of THP regions touched. */
    size_t n_thp;
};

#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */
deps/jemalloc/include/jemalloc/internal/base_types.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
#define JEMALLOC_INTERNAL_BASE_TYPES_H

typedef struct base_block_s base_block_t;
typedef struct base_s base_t;

#define METADATA_THP_DEFAULT metadata_thp_disabled

/*
 * In auto mode, arenas switch to huge pages for the base allocator on the
 * second base block.  a0 switches to thp on the 5th block (after 20 megabytes
 * of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
 */
#define BASE_AUTO_THP_THRESHOLD    2
#define BASE_AUTO_THP_THRESHOLD_A0 5

typedef enum {
    metadata_thp_disabled   = 0,
    /*
     * Lazily enable hugepage for metadata.  To avoid high RSS caused by THP
     * + low usage arena (i.e. THP becomes a significant percentage), the
     * "auto" option only starts using THP after a base allocator used up
     * the first THP region.  Starting from the second hugepage (in a single
     * arena), "auto" behaves the same as "always", i.e. madvise hugepage
     * right away.
     */
    metadata_thp_auto       = 1,
    metadata_thp_always     = 2,
    metadata_thp_mode_limit = 3
} metadata_thp_mode_t;

#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
deps/jemalloc/include/jemalloc/internal/bin.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_BIN_H
#define JEMALLOC_INTERNAL_BIN_H

#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/bin_stats.h"

/*
 * A bin contains a set of extents that are currently being used for slab
 * allocations.
 */

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each slab has the following layout:
 *
 *   /--------------------\
 *   | region 0           |
 *   |--------------------|
 *   | region 1           |
 *   |--------------------|
 *   | ...                |
 *   | ...                |
 *   | ...                |
 *   |--------------------|
 *   | region nregs-1     |
 *   \--------------------/
 */
typedef struct bin_info_s bin_info_t;
struct bin_info_s {
    /* Size of regions in a slab for this bin's size class. */
    size_t reg_size;
    /* Total size of a slab for this bin's size class. */
    size_t slab_size;
    /* Total number of regions in a slab for this bin's size class. */
    uint32_t nregs;
    /*
     * Metadata used to manipulate bitmaps for slabs associated with this
     * bin.
     */
    bitmap_info_t bitmap_info;
};

extern const bin_info_t bin_infos[NBINS];

typedef struct bin_s bin_t;
struct bin_s {
    /* All operations on bin_t fields require lock ownership. */
    malloc_mutex_t lock;
    /*
     * Current slab being used to service allocations of this bin's size
     * class.  slabcur is independent of slabs_{nonfull,full}; whenever
     * slabcur is reassigned, the previous slab must be deallocated or
     * inserted into slabs_{nonfull,full}.
     */
    extent_t *slabcur;
    /*
     * Heap of non-full slabs.  This heap is used to assure that new
     * allocations come from the non-full slab that is oldest/lowest in
     * memory.
     */
    extent_heap_t slabs_nonfull;
    /* List used to track full slabs. */
    extent_list_t slabs_full;
    /* Bin statistics. */
    bin_stats_t stats;
};

/* Initializes a bin to empty.  Returns true on error. */
bool bin_init(bin_t *bin);

/* Forking. */
void bin_prefork(tsdn_t *tsdn, bin_t *bin);
void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);

/* Stats. */
static inline void
bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
    malloc_mutex_lock(tsdn, &bin->lock);
    malloc_mutex_prof_read(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
    dst_bin_stats->nmalloc += bin->stats.nmalloc;
    dst_bin_stats->ndalloc += bin->stats.ndalloc;
    dst_bin_stats->nrequests += bin->stats.nrequests;
    dst_bin_stats->curregs += bin->stats.curregs;
    dst_bin_stats->nfills += bin->stats.nfills;
    dst_bin_stats->nflushes += bin->stats.nflushes;
    dst_bin_stats->nslabs += bin->stats.nslabs;
    dst_bin_stats->reslabs += bin->stats.reslabs;
    dst_bin_stats->curslabs += bin->stats.curslabs;
    malloc_mutex_unlock(tsdn, &bin->lock);
}

#endif /* JEMALLOC_INTERNAL_BIN_H */
deps/jemalloc/include/jemalloc/internal/bin_stats.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_BIN_STATS_H
#define JEMALLOC_INTERNAL_BIN_STATS_H

#include "jemalloc/internal/mutex_prof.h"

typedef struct bin_stats_s bin_stats_t;
struct bin_stats_s {
    /*
     * Total number of allocation/deallocation requests served directly by
     * the bin.  Note that tcache may allocate an object, then recycle it
     * many times, resulting many increments to nrequests, but only one
     * each to nmalloc and ndalloc.
     */
    uint64_t nmalloc;
    uint64_t ndalloc;
    /*
     * Number of allocation requests that correspond to the size of this
     * bin.  This includes requests served by tcache, though tcache only
     * periodically merges into this counter.
     */
    uint64_t nrequests;
    /*
     * Current number of regions of this size class, including regions
     * currently cached by tcache.
     */
    size_t curregs;
    /* Number of tcache fills from this bin. */
    uint64_t nfills;
    /* Number of tcache flushes to this bin. */
    uint64_t nflushes;
    /* Total number of slabs created for this bin's size class. */
    uint64_t nslabs;
    /*
     * Total number of slabs reused by extracting them from the slabs heap
     * for this bin's size class.
     */
    uint64_t reslabs;
    /* Current number of slabs in this bin. */
    size_t curslabs;
    mutex_prof_data_t mutex_data;
};

#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */
deps/jemalloc/include/jemalloc/internal/bit_util.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H
#define JEMALLOC_INTERNAL_BIT_UTIL_H

#include "jemalloc/internal/assert.h"

#define BIT_UTIL_INLINE static inline

/* Sanity check. */
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
    || !defined(JEMALLOC_INTERNAL_FFS)
#  error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
#endif

BIT_UTIL_INLINE unsigned
ffs_llu(unsigned long long bitmap) {
    return JEMALLOC_INTERNAL_FFSLL(bitmap);
}

BIT_UTIL_INLINE unsigned
ffs_lu(unsigned long bitmap) {
    return JEMALLOC_INTERNAL_FFSL(bitmap);
}

BIT_UTIL_INLINE unsigned
ffs_u(unsigned bitmap) {
    return JEMALLOC_INTERNAL_FFS(bitmap);
}

BIT_UTIL_INLINE unsigned
ffs_zu(size_t bitmap) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
    return ffs_u(bitmap);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
    return ffs_lu(bitmap);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
    return ffs_llu(bitmap);
#else
#error No implementation for size_t ffs()
#endif
}

BIT_UTIL_INLINE unsigned
ffs_u64(uint64_t bitmap) {
#if LG_SIZEOF_LONG == 3
    return ffs_lu(bitmap);
#elif LG_SIZEOF_LONG_LONG == 3
    return ffs_llu(bitmap);
#else
#error No implementation for 64-bit ffs()
#endif
}

BIT_UTIL_INLINE unsigned
ffs_u32(uint32_t bitmap) {
#if LG_SIZEOF_INT == 2
    return ffs_u(bitmap);
#else
#error No implementation for 32-bit ffs()
#endif
    return ffs_u(bitmap);
}

BIT_UTIL_INLINE uint64_t
pow2_ceil_u64(uint64_t x) {
    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    x |= x >> 32;
    x++;
    return x;
}

BIT_UTIL_INLINE uint32_t
pow2_ceil_u32(uint32_t x) {
    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    x++;
    return x;
}

/* Compute the smallest power of 2 that is >= x. */
BIT_UTIL_INLINE size_t
pow2_ceil_zu(size_t x) {
#if (LG_SIZEOF_PTR == 3)
    return pow2_ceil_u64(x);
#else
    return pow2_ceil_u32(x);
#endif
}

#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
    size_t ret;
    assert(x != 0);

    asm ("bsr %1, %0"
        : "=r"(ret) // Outputs.
        : "r"(x)    // Inputs.
        );
    assert(ret < UINT_MAX);
    return (unsigned)ret;
}
#elif (defined(_MSC_VER))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
    unsigned long ret;
    assert(x != 0);
#if (LG_SIZEOF_PTR == 3)
    _BitScanReverse64(&ret, x);
#elif (LG_SIZEOF_PTR == 2)
    _BitScanReverse(&ret, x);
#else
#  error "Unsupported type size for lg_floor()"
#endif
    assert(ret < UINT_MAX);
    return (unsigned)ret;
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
    assert(x != 0);
#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
    return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x);
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
    return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x);
#else
#  error "Unsupported type size for lg_floor()"
#endif
}
#else
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
    assert(x != 0);
    x |= (x >> 1);
    x |= (x >> 2);
    x |= (x >> 4);
    x |= (x >> 8);
    x |= (x >> 16);
#if (LG_SIZEOF_PTR == 3)
    x |= (x >> 32);
#endif
    if (x == SIZE_T_MAX) {
        return (8 << LG_SIZEOF_PTR) - 1;
    }
    x++;
    return ffs_zu(x) - 2;
}
#endif

#undef BIT_UTIL_INLINE

#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */
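The pow2_ceil_* helpers above round an integer up to the next power of two by smearing the highest set bit into every lower position and then adding one. A minimal standalone sketch of the same trick, outside jemalloc (the function name here is illustrative, not the header's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Round x up to the next power of two (x > 0, result fits in 32 bits). */
static uint32_t
pow2_ceil_u32_demo(uint32_t x) {
    x--;          /* So that exact powers of two map to themselves. */
    x |= x >> 1;  /* Smear the highest set bit into every lower bit... */
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16; /* ...now x == 2^k - 1 for the smallest adequate k. */
    return x + 1;
}

int
main(void) {
    assert(pow2_ceil_u32_demo(1) == 1);
    assert(pow2_ceil_u32_demo(3) == 4);
    assert(pow2_ceil_u32_demo(4096) == 4096);
    assert(pow2_ceil_u32_demo(4097) == 8192);
    printf("pow2_ceil demo ok\n");
    return 0;
}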
deps/jemalloc/include/jemalloc/internal/bitmap.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_BITMAP_H
#define JEMALLOC_INTERNAL_BITMAP_H

#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/size_classes.h"

typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG

/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#if LG_SLAB_MAXREGS > LG_CEIL_NSIZES
/* Maximum bitmap bit count is determined by maximum regions per slab. */
#  define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
#else
/* Maximum bitmap bit count is determined by number of extent size classes. */
#  define LG_BITMAP_MAXBITS LG_CEIL_NSIZES
#endif
#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)

/* Number of bits per group. */
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)

/*
 * Do some analysis on how big the bitmap is before we use a tree.  For a brute
 * force linear search, if we would have to call ffs_lu() more than 2^3 times,
 * use a tree instead.
 */
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
#  define BITMAP_USE_TREE
#endif

/* Number of groups required to store a given number of bits. */
#define BITMAP_BITS2GROUPS(nbits) \
    (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)

/*
 * Number of groups required at a particular level for a given number of bits.
 */
#define BITMAP_GROUPS_L0(nbits) \
    BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_L1(nbits) \
    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
#define BITMAP_GROUPS_L2(nbits) \
    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
#define BITMAP_GROUPS_L3(nbits) \
    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
        BITMAP_BITS2GROUPS((nbits)))))
#define BITMAP_GROUPS_L4(nbits) \
    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
        BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))

/*
 * Assuming the number of levels, number of groups required for a given number
 * of bits.
 */
#define BITMAP_GROUPS_1_LEVEL(nbits) \
    BITMAP_GROUPS_L0(nbits)
#define BITMAP_GROUPS_2_LEVEL(nbits) \
    (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
#define BITMAP_GROUPS_3_LEVEL(nbits) \
    (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
#define BITMAP_GROUPS_4_LEVEL(nbits) \
    (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
#define BITMAP_GROUPS_5_LEVEL(nbits) \
    (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))

/*
 * Maximum number of groups required to support LG_BITMAP_MAXBITS.
 */
#ifdef BITMAP_USE_TREE

#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
#  define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits)
#  define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
#  define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits)
#  define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
#  define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits)
#  define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
#  define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits)
#  define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
#  define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits)
#  define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
#else
#  error "Unsupported bitmap size"
#endif

/*
 * Maximum number of levels possible.  This could be statically computed based
 * on LG_BITMAP_MAXBITS:
 *
 * #define BITMAP_MAX_LEVELS \
 *     (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
 *     + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
 *
 * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so
 * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the
 * various cascading macros.  The only additional cost this incurs is some
 * unused trailing entries in bitmap_info_t structures; the bitmaps themselves
 * are not impacted.
 */
#define BITMAP_MAX_LEVELS 5

#define BITMAP_INFO_INITIALIZER(nbits) { \
    /* nbits. */ \
    nbits, \
    /* nlevels. */ \
    (BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \
        (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \
        (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \
        (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \
    /* levels. */ \
    { \
        {0}, \
        {BITMAP_GROUPS_L0(nbits)}, \
        {BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
        {BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \
            BITMAP_GROUPS_L0(nbits)}, \
        {BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \
            BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
        {BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \
            BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \
            + BITMAP_GROUPS_L0(nbits)} \
    } \
}

#else /* BITMAP_USE_TREE */

#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits)
#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)

#define BITMAP_INFO_INITIALIZER(nbits) { \
    /* nbits. */ \
    nbits, \
    /* ngroups. */ \
    BITMAP_BITS2GROUPS(nbits) \
}

#endif /* BITMAP_USE_TREE */

typedef struct bitmap_level_s {
    /* Offset of this level's groups within the array of groups. */
    size_t group_offset;
} bitmap_level_t;

typedef struct bitmap_info_s {
    /* Logical number of bits in bitmap (stored at bottom level). */
    size_t nbits;

#ifdef BITMAP_USE_TREE
    /* Number of levels necessary for nbits. */
    unsigned nlevels;

    /*
     * Only the first (nlevels+1) elements are used, and levels are ordered
     * bottom to top (e.g. the bottom level is stored in levels[0]).
     */
    bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
#else /* BITMAP_USE_TREE */
    /* Number of groups necessary for nbits. */
    size_t ngroups;
#endif /* BITMAP_USE_TREE */
} bitmap_info_t;

void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
size_t bitmap_size(const bitmap_info_t *binfo);

static inline bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
#ifdef BITMAP_USE_TREE
    size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
    bitmap_t rg = bitmap[rgoff];
    /* The bitmap is full iff the root group is 0. */
    return (rg == 0);
#else
    size_t i;

    for (i = 0; i < binfo->ngroups; i++) {
        if (bitmap[i] != 0) {
            return false;
        }
    }
    return true;
#endif
}

static inline bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
    size_t goff;
    bitmap_t g;

    assert(bit < binfo->nbits);
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    g = bitmap[goff];
    return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
}

static inline void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
    size_t goff;
    bitmap_t *gp;
    bitmap_t g;

    assert(bit < binfo->nbits);
    assert(!bitmap_get(bitmap, binfo, bit));
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    gp = &bitmap[goff];
    g = *gp;
    assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
    g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
    *gp = g;
    assert(bitmap_get(bitmap, binfo, bit));
#ifdef BITMAP_USE_TREE
    /* Propagate group state transitions up the tree. */
    if (g == 0) {
        unsigned i;
        for (i = 1; i < binfo->nlevels; i++) {
            bit = goff;
            goff = bit >> LG_BITMAP_GROUP_NBITS;
            gp = &bitmap[binfo->levels[i].group_offset + goff];
            g = *gp;
            assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
            g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
            *gp = g;
            if (g != 0) {
                break;
            }
        }
    }
#endif
}

/* ffu: find first unset >= bit. */
static inline size_t
bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
    assert(min_bit < binfo->nbits);

#ifdef BITMAP_USE_TREE
    size_t bit = 0;
    for (unsigned level = binfo->nlevels; level--;) {
        size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level + 1));
        bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit
            >> lg_bits_per_group)];
        unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit - bit)
            : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
        assert(group_nmask <= BITMAP_GROUP_NBITS);
        bitmap_t group_mask = ~((1LU << group_nmask) - 1);
        bitmap_t group_masked = group & group_mask;
        if (group_masked == 0LU) {
            if (group == 0LU) {
                return binfo->nbits;
            }
            /*
             * min_bit was preceded by one or more unset bits in
             * this group, but there are no other unset bits in this
             * group.  Try again starting at the first bit of the
             * next sibling.  This will recurse at most once per
             * non-root level.
             */
            size_t sib_base = bit + (ZU(1) << lg_bits_per_group);
            assert(sib_base > min_bit);
            assert(sib_base > bit);
            if (sib_base >= binfo->nbits) {
                return binfo->nbits;
            }
            return bitmap_ffu(bitmap, binfo, sib_base);
        }
        bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
            (lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
    }
    assert(bit >= min_bit);
    assert(bit < binfo->nbits);
    return bit;
#else
    size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
    bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK))
        - 1);
    size_t bit;
    do {
        bit = ffs_lu(g);
        if (bit != 0) {
            return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
        }
        i++;
        g = bitmap[i];
    } while (i < binfo->ngroups);
    return binfo->nbits;
#endif
}

/* sfu: set first unset. */
static inline size_t
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
    size_t bit;
    bitmap_t g;
    unsigned i;

    assert(!bitmap_full(bitmap, binfo));

#ifdef BITMAP_USE_TREE
    i = binfo->nlevels - 1;
    g = bitmap[binfo->levels[i].group_offset];
    bit = ffs_lu(g) - 1;
    while (i > 0) {
        i--;
        g = bitmap[binfo->levels[i].group_offset + bit];
        bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
    }
#else
    i = 0;
    g = bitmap[0];
    while ((bit = ffs_lu(g)) == 0) {
        i++;
        g = bitmap[i];
    }
    bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
#endif
    bitmap_set(bitmap, binfo, bit);
    return bit;
}

static inline void
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
    size_t goff;
    bitmap_t *gp;
    bitmap_t g;
    UNUSED bool propagate;

    assert(bit < binfo->nbits);
    assert(bitmap_get(bitmap, binfo, bit));
    goff = bit >> LG_BITMAP_GROUP_NBITS;
    gp = &bitmap[goff];
    g = *gp;
    propagate = (g == 0);
    assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
    g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
    *gp = g;
    assert(!bitmap_get(bitmap, binfo, bit));
#ifdef BITMAP_USE_TREE
    /* Propagate group state transitions up the tree. */
    if (propagate) {
        unsigned i;
        for (i = 1; i < binfo->nlevels; i++) {
            bit = goff;
            goff = bit >> LG_BITMAP_GROUP_NBITS;
            gp = &bitmap[binfo->levels[i].group_offset + goff];
            g = *gp;
            propagate = (g == 0);
            assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
            g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
            *gp = g;
            if (!propagate) {
                break;
            }
        }
    }
#endif /* BITMAP_USE_TREE */
}

#endif /* JEMALLOC_INTERNAL_BITMAP_H */
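In the non-tree configuration, bitmap_ffu() above is just a masked linear scan over the groups using ffs; jemalloc stores a logically "unset" (free) position as a physical 1 bit, which is why ffs finds it directly. A standalone sketch of that flat scan with fixed-size 64-bit groups and GCC/Clang's __builtin_ffsll (the names here are illustrative, not the header's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GROUP_NBITS 64

/*
 * Return the index of the first 1 bit at position >= min_bit in an array of
 * ngroups 64-bit words, or ngroups * 64 if there is none.
 */
static size_t
flat_ffu(const uint64_t *groups, size_t ngroups, size_t min_bit) {
    assert(min_bit < ngroups * GROUP_NBITS);
    size_t i = min_bit / GROUP_NBITS;
    /* Mask off bits below min_bit in the first group scanned. */
    uint64_t g = groups[i] & ~((UINT64_C(1) << (min_bit % GROUP_NBITS)) - 1);
    for (;;) {
        int bit = __builtin_ffsll((long long)g);
        if (bit != 0) {
            return i * GROUP_NBITS + (size_t)(bit - 1);
        }
        if (++i == ngroups) {
            return ngroups * GROUP_NBITS;
        }
        g = groups[i];
    }
}

int
main(void) {
    uint64_t groups[2] = { 0, UINT64_C(1) << 5 }; /* only index 69 is free */
    assert(flat_ffu(groups, 2, 0) == 69);
    assert(flat_ffu(groups, 2, 70) == 128); /* nothing free at or after 70 */
    printf("flat ffu demo ok\n");
    return 0;
}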
deps/jemalloc/include/jemalloc/internal/cache_bin.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H
#define JEMALLOC_INTERNAL_CACHE_BIN_H

#include "jemalloc/internal/ql.h"

/*
 * The cache_bins are the mechanism that the tcache and the arena use to
 * communicate.  The tcache fills from and flushes to the arena by passing a
 * cache_bin_t to fill/flush.  When the arena needs to pull stats from the
 * tcaches associated with it, it does so by iterating over its
 * cache_bin_array_descriptor_t objects and reading out per-bin stats it
 * contains.  This makes it so that the arena need not know about the existence
 * of the tcache at all.
 */

/*
 * The count of the number of cached allocations in a bin.  We make this signed
 * so that negative numbers can encode "invalid" states (e.g. a low water mark
 * of -1 for a cache that has been depleted).
 */
typedef int32_t cache_bin_sz_t;

typedef struct cache_bin_stats_s cache_bin_stats_t;
struct cache_bin_stats_s {
    /*
     * Number of allocation requests that corresponded to the size of this
     * bin.
     */
    uint64_t nrequests;
};

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
typedef struct cache_bin_info_s cache_bin_info_t;
struct cache_bin_info_s {
    /* Upper limit on ncached. */
    cache_bin_sz_t ncached_max;
};

typedef struct cache_bin_s cache_bin_t;
struct cache_bin_s {
    /* Min # cached since last GC. */
    cache_bin_sz_t low_water;
    /* # of cached objects. */
    cache_bin_sz_t ncached;
    /*
     * ncached and stats are both modified frequently.  Let's keep them
     * close so that they have a higher chance of being on the same
     * cacheline, thus less write-backs.
     */
    cache_bin_stats_t tstats;
    /*
     * Stack of available objects.
     *
     * To make use of adjacent cacheline prefetch, the items in the avail
     * stack goes to higher address for newer allocations.  avail points
     * just above the available space, which means that
     * avail[-ncached, ... -1] are available items and the lowest item will
     * be allocated first.
     */
    void **avail;
};

typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
struct cache_bin_array_descriptor_s {
    /*
     * The arena keeps a list of the cache bins associated with it, for
     * stats collection.
     */
    ql_elm(cache_bin_array_descriptor_t) link;
    /* Pointers to the tcache bins. */
    cache_bin_t *bins_small;
    cache_bin_t *bins_large;
};

static inline void
cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
    cache_bin_t *bins_small, cache_bin_t *bins_large) {
    ql_elm_new(descriptor, link);
    descriptor->bins_small = bins_small;
    descriptor->bins_large = bins_large;
}

JEMALLOC_ALWAYS_INLINE void *
cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
    void *ret;

    if (unlikely(bin->ncached == 0)) {
        bin->low_water = -1;
        *success = false;
        return NULL;
    }
    /*
     * success (instead of ret) should be checked upon the return of this
     * function.  We avoid checking (ret == NULL) because there is never a
     * null stored on the avail stack (which is unknown to the compiler),
     * and eagerly checking ret would cause pipeline stall (waiting for the
     * cacheline).
     */
    *success = true;
    ret = *(bin->avail - bin->ncached);
    bin->ncached--;

    if (unlikely(bin->ncached < bin->low_water)) {
        bin->low_water = bin->ncached;
    }

    return ret;
}

#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
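cache_bin_alloc_easy() above pops from a stack addressed off its top: avail points one element past the storage, the ncached items live at avail[-ncached .. -1], and the lowest-addressed slot is handed out first. A standalone sketch of just that indexing scheme (all names here are illustrative, not the tcache's):

#include <assert.h>
#include <stdio.h>

#define NCACHED_MAX 8

int
main(void) {
    int objs[3] = {10, 20, 30};
    void *storage[NCACHED_MAX];

    /* avail points just above the storage, as in cache_bin_t. */
    void **avail = storage + NCACHED_MAX;
    int ncached = 3;

    /* With ncached == 3, the cached items occupy avail[-3 .. -1]. */
    avail[-3] = &objs[0];
    avail[-2] = &objs[1];
    avail[-1] = &objs[2];

    /* A pop reads the lowest-addressed slot, then shrinks the count. */
    void *ret = *(avail - ncached);
    ncached--;
    assert(ret == &objs[0]);
    assert(*(avail - ncached) == &objs[1]);
    printf("cache_bin avail-stack demo ok\n");
    return 0;
}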
deps/jemalloc/include/jemalloc/internal/ckh.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_CKH_H
#define JEMALLOC_INTERNAL_CKH_H

#include "jemalloc/internal/tsd.h"

/* Cuckoo hashing implementation.  Skip to the end for the interface. */

/******************************************************************************/
/* INTERNAL DEFINITIONS -- IGNORE */
/******************************************************************************/

/* Maintain counters used to get an idea of performance. */
/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
/* #define CKH_VERBOSE */

/*
 * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket.  Try to fit
 * one bucket per L1 cache line.
 */
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)

/* Typedefs to allow easy function pointer passing. */
typedef void ckh_hash_t(const void *, size_t[2]);
typedef bool ckh_keycomp_t(const void *, const void *);

/* Hash table cell. */
typedef struct {
    const void *key;
    const void *data;
} ckhc_t;

/* The hash table itself. */
typedef struct {
#ifdef CKH_COUNT
    /* Counters used to get an idea of performance. */
    uint64_t ngrows;
    uint64_t nshrinks;
    uint64_t nshrinkfails;
    uint64_t ninserts;
    uint64_t nrelocs;
#endif

    /* Used for pseudo-random number generation. */
    uint64_t prng_state;

    /* Total number of items. */
    size_t count;

    /*
     * Minimum and current number of hash table buckets.  There are
     * 2^LG_CKH_BUCKET_CELLS cells per bucket.
     */
    unsigned lg_minbuckets;
    unsigned lg_curbuckets;

    /* Hash and comparison functions. */
    ckh_hash_t *hash;
    ckh_keycomp_t *keycomp;

    /* Hash table with 2^lg_curbuckets buckets. */
    ckhc_t *tab;
} ckh_t;

/******************************************************************************/
/* BEGIN PUBLIC API */
/******************************************************************************/

/* Lifetime management.  Minitems is the initial capacity. */
bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
    ckh_keycomp_t *keycomp);
void ckh_delete(tsd_t *tsd, ckh_t *ckh);

/* Get the number of elements in the set. */
size_t ckh_count(ckh_t *ckh);

/*
 * To iterate over the elements in the table, initialize *tabind to 0 and call
 * this function until it returns true.  Each call that returns false will
 * update *key and *data to the next element in the table, assuming the pointers
 * are non-NULL.
 */
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);

/*
 * Basic hash table operations -- insert, removal, lookup.  For ckh_remove and
 * ckh_search, key or data can be NULL.  The hash-table only stores pointers to
 * the key and value, and doesn't do any lifetime management.
 */
bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
    void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);

/* Some useful hash and comparison functions for strings and pointers. */
void ckh_string_hash(const void *key, size_t r_hash[2]);
bool ckh_string_keycomp(const void *k1, const void *k2);
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
bool ckh_pointer_keycomp(const void *k1, const void *k2);

#endif /* JEMALLOC_INTERNAL_CKH_H */
deps/jemalloc/include/jemalloc/internal/ctl.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_CTL_H
#define JEMALLOC_INTERNAL_CTL_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"

/* Maximum ctl tree depth. */
#define CTL_MAX_DEPTH 7

typedef struct ctl_node_s {
    bool named;
} ctl_node_t;

typedef struct ctl_named_node_s {
    ctl_node_t node;
    const char *name;
    /* If (nchildren == 0), this is a terminal node. */
    size_t nchildren;
    const ctl_node_t *children;
    int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
        size_t);
} ctl_named_node_t;

typedef struct ctl_indexed_node_s {
    struct ctl_node_s node;
    const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
        size_t);
} ctl_indexed_node_t;

typedef struct ctl_arena_stats_s {
    arena_stats_t astats;

    /* Aggregate stats for small size classes, based on bin stats. */
    size_t allocated_small;
    uint64_t nmalloc_small;
    uint64_t ndalloc_small;
    uint64_t nrequests_small;

    bin_stats_t bstats[NBINS];
    arena_stats_large_t lstats[NSIZES - NBINS];
} ctl_arena_stats_t;

typedef struct ctl_stats_s {
    size_t allocated;
    size_t active;
    size_t metadata;
    size_t metadata_thp;
    size_t resident;
    size_t mapped;
    size_t retained;

    background_thread_stats_t background_thread;
    mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
} ctl_stats_t;

typedef struct ctl_arena_s ctl_arena_t;
struct ctl_arena_s {
    unsigned arena_ind;
    bool initialized;
    ql_elm(ctl_arena_t) destroyed_link;

    /* Basic stats, supported even if !config_stats. */
    unsigned nthreads;
    const char *dss;
    ssize_t dirty_decay_ms;
    ssize_t muzzy_decay_ms;
    size_t pactive;
    size_t pdirty;
    size_t pmuzzy;

    /* NULL if !config_stats. */
    ctl_arena_stats_t *astats;
};

typedef struct ctl_arenas_s {
    uint64_t epoch;
    unsigned narenas;
    ql_head(ctl_arena_t) destroyed;

    /*
     * Element 0 corresponds to merged stats for extant arenas (accessed via
     * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
     * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
     * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas.
     */
    ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
} ctl_arenas_t;

int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen);
int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);

int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);

#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
    if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
        != 0) { \
        malloc_printf( \
            "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
            name); \
        abort(); \
    } \
} while (0)

#define xmallctlnametomib(name, mibp, miblenp) do { \
    if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
        malloc_printf("<jemalloc>: Failure in " \
            "xmallctlnametomib(\"%s\", ...)\n", name); \
        abort(); \
    } \
} while (0)

#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
    if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
        newlen) != 0) { \
        malloc_write( \
            "<jemalloc>: Failure in xmallctlbymib()\n"); \
        abort(); \
    } \
} while (0)

#endif /* JEMALLOC_INTERNAL_CTL_H */
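The ctl tree above is what backs jemalloc's public mallctl() interface; the xmallctl* macros are simply abort-on-failure wrappers around it. A hedged usage sketch of that public API (assumes a program linked against a stock jemalloc with <jemalloc/jemalloc.h> on the include path; Redis builds its bundled copy with a je_ symbol prefix, so inside Redis the same calls are spelled je_mallctl):

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
    /* Advance the stats epoch so that subsequent reads are fresh. */
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);
    mallctl("epoch", &epoch, &sz, &epoch, sz);

    /* Read one statistic by name through the ctl tree. */
    size_t allocated;
    sz = sizeof(allocated);
    if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
        printf("stats.allocated = %zu bytes\n", allocated);
    }
    return 0;
}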
deps/jemalloc/include/jemalloc/internal/div.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_DIV_H
#define JEMALLOC_INTERNAL_DIV_H

#include "jemalloc/internal/assert.h"

/*
 * This module does the division that computes the index of a region in a slab,
 * given its offset relative to the base.
 * That is, given a divisor d, an n = i * d (all integers), we'll return i.
 * We do some pre-computation to do this more quickly than a CPU division
 * instruction.
 * We bound n < 2^32, and don't support dividing by one.
 */

typedef struct div_info_s div_info_t;
struct div_info_s {
    uint32_t magic;
#ifdef JEMALLOC_DEBUG
    size_t d;
#endif
};

void div_init(div_info_t *div_info, size_t divisor);

static inline size_t
div_compute(div_info_t *div_info, size_t n) {
    assert(n <= (uint32_t)-1);
    /*
     * This generates, e.g. mov; imul; shr on x86-64.  On a 32-bit machine,
     * the compilers I tried were all smart enough to turn this into the
     * appropriate "get the high 32 bits of the result of a multiply" (e.g.
     * mul; mov edx eax; on x86, umull on arm, etc.).
     */
    size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32;
#ifdef JEMALLOC_DEBUG
    assert(i * div_info->d == n);
#endif
    return i;
}

#endif /* JEMALLOC_INTERNAL_DIV_H */
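The comment in div.h above describes division by a precomputed "magic" reciprocal: for a divisor d > 1 and an n that is a multiple of d with n < 2^32, multiplying by ceil(2^32 / d) and keeping the high 32 bits of the product recovers n / d exactly (because the error term i * (magic * d - 2^32) stays below 2^32). A standalone sketch of that trick, with illustrative names rather than jemalloc's div_init():

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* magic = ceil(2^32 / d), for divisors d > 1. */
static uint32_t
magic_for(uint32_t d) {
    uint64_t two_to_32 = UINT64_C(1) << 32;
    return (uint32_t)((two_to_32 + d - 1) / d);
}

/* Exact n / d when n is a multiple of d and n < 2^32. */
static uint32_t
div_by_magic(uint32_t n, uint32_t magic) {
    return (uint32_t)(((uint64_t)n * magic) >> 32);
}

int
main(void) {
    /* e.g. region size 48 in a slab: index = offset / 48. */
    uint32_t d = 48;
    uint32_t magic = magic_for(d);
    for (uint32_t i = 0; i < 100000; i++) {
        assert(div_by_magic(i * d, magic) == i);
    }
    printf("magic division demo ok\n");
    return 0;
}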
deps/jemalloc/include/jemalloc/internal/emitter.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_EMITTER_H
#define JEMALLOC_INTERNAL_EMITTER_H

#include "jemalloc/internal/ql.h"

typedef enum emitter_output_e emitter_output_t;
enum emitter_output_e {
    emitter_output_json,
    emitter_output_table
};

typedef enum emitter_justify_e emitter_justify_t;
enum emitter_justify_e {
    emitter_justify_left,
    emitter_justify_right,
    /* Not for users; just to pass to internal functions. */
    emitter_justify_none
};

typedef enum emitter_type_e emitter_type_t;
enum emitter_type_e {
    emitter_type_bool,
    emitter_type_int,
    emitter_type_unsigned,
    emitter_type_uint32,
    emitter_type_uint64,
    emitter_type_size,
    emitter_type_ssize,
    emitter_type_string,
    /*
     * A title is a column title in a table; it's just a string, but it's
     * not quoted.
     */
    emitter_type_title,
};

typedef struct emitter_col_s emitter_col_t;
struct emitter_col_s {
    /* Filled in by the user. */
    emitter_justify_t justify;
    int width;
    emitter_type_t type;
    union {
        bool bool_val;
        int int_val;
        unsigned unsigned_val;
        uint32_t uint32_val;
        uint64_t uint64_val;
        size_t size_val;
        ssize_t ssize_val;
        const char *str_val;
    };

    /* Filled in by initialization. */
    ql_elm(emitter_col_t) link;
};

typedef struct emitter_row_s emitter_row_t;
struct emitter_row_s {
    ql_head(emitter_col_t) cols;
};

static inline void
emitter_row_init(emitter_row_t *row) {
    ql_new(&row->cols);
}

static inline void
emitter_col_init(emitter_col_t *col, emitter_row_t *row) {
    ql_elm_new(col, link);
    ql_tail_insert(&row->cols, col, link);
}

typedef struct emitter_s emitter_t;
struct emitter_s {
    emitter_output_t output;
    /* The output information. */
    void (*write_cb)(void *, const char *);
    void *cbopaque;
    int nesting_depth;
    /* True if we've already emitted a value at the given depth. */
    bool item_at_depth;
};

static inline void
emitter_init(emitter_t *emitter, emitter_output_t emitter_output,
    void (*write_cb)(void *, const char *), void *cbopaque) {
    emitter->output = emitter_output;
    emitter->write_cb = write_cb;
    emitter->cbopaque = cbopaque;
    emitter->item_at_depth = false;
    emitter->nesting_depth = 0;
}

/* Internal convenience function.  Write to the emitter the given string. */
JEMALLOC_FORMAT_PRINTF(2, 3)
static inline void
emitter_printf(emitter_t *emitter, const char *format, ...) {
    va_list ap;

    va_start(ap, format);
    malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
    va_end(ap);
}

/* Write to the emitter the given string, but only in table mode. */
JEMALLOC_FORMAT_PRINTF(2, 3)
static inline void
emitter_table_printf(emitter_t *emitter, const char *format, ...) {
    if (emitter->output == emitter_output_table) {
        va_list ap;
        va_start(ap, format);
        malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
        va_end(ap);
    }
}

static inline void
emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier,
    emitter_justify_t justify, int width) {
    size_t written;
    if (justify == emitter_justify_none) {
        written = malloc_snprintf(out_fmt, out_size,
            "%%%s", fmt_specifier);
    } else if (justify == emitter_justify_left) {
        written = malloc_snprintf(out_fmt, out_size,
            "%%-%d%s", width, fmt_specifier);
    } else {
        written = malloc_snprintf(out_fmt, out_size,
            "%%%d%s", width, fmt_specifier);
    }
    /* Only happens in case of bad format string, which *we* choose. */
    assert(written < out_size);
}

/*
 * Internal.  Emit the given value type in the relevant encoding (so that the
 * bool true gets mapped to json "true", but the string "true" gets mapped to
 * json "\"true\"", for instance.
 *
 * Width is ignored if justify is emitter_justify_none.
 */
static inline void
emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
    emitter_type_t value_type, const void *value) {
    size_t str_written;
#define BUF_SIZE 256
#define FMT_SIZE 10
    /*
     * We dynamically generate a format string to emit, to let us use the
     * snprintf machinery.  This is kinda hacky, but gets the job done
     * quickly without having to think about the various snprintf edge
     * cases.
     */
    char fmt[FMT_SIZE];
    char buf[BUF_SIZE];

#define EMIT_SIMPLE(type, format) \
    emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width); \
    emitter_printf(emitter, fmt, *(const type *)value); \

    switch (value_type) {
    case emitter_type_bool:
        emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width);
        emitter_printf(emitter, fmt, *(const bool *)value ? "true" : "false");
        break;
    case emitter_type_int:
        EMIT_SIMPLE(int, "d")
        break;
    case emitter_type_unsigned:
        EMIT_SIMPLE(unsigned, "u")
        break;
    case emitter_type_ssize:
        EMIT_SIMPLE(ssize_t, "zd")
        break;
    case emitter_type_size:
        EMIT_SIMPLE(size_t, "zu")
        break;
    case emitter_type_string:
        str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"",
            *(const char *const *)value);
        /*
         * We control the strings we output; we shouldn't get anything
         * anywhere near the fmt size.
         */
        assert(str_written < BUF_SIZE);
        emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width);
        emitter_printf(emitter, fmt, buf);
        break;
    case emitter_type_uint32:
        EMIT_SIMPLE(uint32_t, FMTu32)
        break;
    case emitter_type_uint64:
        EMIT_SIMPLE(uint64_t, FMTu64)
        break;
    case emitter_type_title:
        EMIT_SIMPLE(char *const, "s");
        break;
    default:
        unreachable();
    }
#undef BUF_SIZE
#undef FMT_SIZE
}

/* Internal functions.  In json mode, tracks nesting state. */
static inline void
emitter_nest_inc(emitter_t *emitter) {
    emitter->nesting_depth++;
    emitter->item_at_depth = false;
}

static inline void
emitter_nest_dec(emitter_t *emitter) {
    emitter->nesting_depth--;
    emitter->item_at_depth = true;
}

static inline void
emitter_indent(emitter_t *emitter) {
    int amount = emitter->nesting_depth;
    const char *indent_str;
    if (emitter->output == emitter_output_json) {
        indent_str = "\t";
    } else {
        amount *= 2;
        indent_str = " ";
    }
    for (int i = 0; i < amount; i++) {
        emitter_printf(emitter, "%s", indent_str);
    }
}

static inline void
emitter_json_key_prefix(emitter_t *emitter) {
    emitter_printf(emitter, "%s\n", emitter->item_at_depth ? "," : "");
    emitter_indent(emitter);
}

static inline void
emitter_begin(emitter_t *emitter) {
    if (emitter->output == emitter_output_json) {
        assert(emitter->nesting_depth == 0);
        emitter_printf(emitter, "{");
        emitter_nest_inc(emitter);
    } else {
        // tabular init
        emitter_printf(emitter, "%s", "");
    }
}

static inline void
emitter_end(emitter_t *emitter) {
    if (emitter->output == emitter_output_json) {
        assert(emitter->nesting_depth == 1);
        emitter_nest_dec(emitter);
        emitter_printf(emitter, "\n}\n");
    }
}

/*
 * Note emits a different kv pair as well, but only in table mode.  Omits the
 * note if table_note_key is NULL.
 */
static inline void
emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
    emitter_type_t value_type, const void *value,
    const char *table_note_key, emitter_type_t table_note_value_type,
    const void *table_note_value) {
    if (emitter->output == emitter_output_json) {
        assert(emitter->nesting_depth > 0);
        emitter_json_key_prefix(emitter);
        emitter_printf(emitter, "\"%s\": ", json_key);
        emitter_print_value(emitter, emitter_justify_none, -1,
            value_type, value);
    } else {
        emitter_indent(emitter);
        emitter_printf(emitter, "%s: ", table_key);
        emitter_print_value(emitter, emitter_justify_none, -1,
            value_type, value);
        if (table_note_key != NULL) {
            emitter_printf(emitter, " (%s: ", table_note_key);
            emitter_print_value(emitter, emitter_justify_none, -1,
                table_note_value_type, table_note_value);
            emitter_printf(emitter, ")");
        }
        emitter_printf(emitter, "\n");
    }
    emitter->item_at_depth = true;
}

static inline void
emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
    emitter_type_t value_type, const void *value) {
    emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL,
        emitter_type_bool, NULL);
}

static inline void
emitter_json_kv(emitter_t *emitter, const char *json_key,
    emitter_type_t value_type, const void *value) {
    if (emitter->output == emitter_output_json) {
        emitter_kv(emitter, json_key, NULL, value_type, value);
    }
}

static inline void
emitter_table_kv(emitter_t *emitter, const char *table_key,
    emitter_type_t value_type, const void *value) {
    if (emitter->output == emitter_output_table) {
        emitter_kv(emitter, NULL, table_key, value_type, value);
    }
}

static inline void
emitter_dict_begin(emitter_t *emitter, const char *json_key,
    const char *table_header) {
    if (emitter->output == emitter_output_json) {
        emitter_json_key_prefix(emitter);
        emitter_printf(emitter, "\"%s\": {", json_key);
        emitter_nest_inc(emitter);
    } else {
        emitter_indent(emitter);
        emitter_printf(emitter, "%s\n", table_header);
        emitter_nest_inc(emitter);
    }
}

static inline void
emitter_dict_end(emitter_t *emitter) {
    if (emitter->output == emitter_output_json) {
        assert(emitter->nesting_depth > 0);
        emitter_nest_dec(emitter);
        emitter_printf(emitter, "\n");
        emitter_indent(emitter);
        emitter_printf(emitter, "}");
    } else {
        emitter_nest_dec(emitter);
    }
}

static inline void
emitter_json_dict_begin(emitter_t *emitter, const char *json_key) {
    if (emitter->output == emitter_output_json) {
        emitter_dict_begin(emitter, json_key, NULL);
    }
}

static inline void
emitter_json_dict_end(emitter_t *emitter) {
    if (emitter->output == emitter_output_json) {
        emitter_dict_end(emitter);
    }
}

static inline void
emitter_table_dict_begin(emitter_t *emitter, const char *table_key) {
    if (emitter->output == emitter_output_table) {
        emitter_dict_begin(emitter, NULL, table_key);
    }
}

static inline void
emitter_table_dict_end(emitter_t *emitter) {
    if (emitter->output == emitter_output_table) {
        emitter_dict_end(emitter);
    }
}

static inline void
emitter_json_arr_begin(emitter_t *emitter, const char *json_key) {
    if (emitter->output == emitter_output_json) {
        emitter_json_key_prefix(emitter);
        emitter_printf(emitter, "\"%s\": [", json_key);
        emitter_nest_inc(emitter);
    }
}

static inline void
emitter_json_arr_end(emitter_t *emitter) {
    if (emitter->output == emitter_output_json) {
        assert(emitter->nesting_depth > 0);
        emitter_nest_dec(emitter);
        emitter_printf(emitter, "\n");
        emitter_indent(emitter);
        emitter_printf(emitter, "]");
    }
}

static inline void
emitter_json_arr_obj_begin(emitter_t *emitter) {
    if (emitter->output == emitter_output_json) {
        emitter_json_key_prefix(emitter);
        emitter_printf(emitter, "{");
        emitter_nest_inc(emitter);
    }
}

static inline void
emitter_json_arr_obj_end(emitter_t *emitter) {
    if (emitter->output == emitter_output_json) {
        assert(emitter->nesting_depth > 0);
        emitter_nest_dec(emitter);
        emitter_printf(emitter, "\n");
        emitter_indent(emitter);
        emitter_printf(emitter, "}");
    }
}

static inline void
emitter_json_arr_value(emitter_t *emitter, emitter_type_t value_type,
    const void *value) {
    if (emitter->output == emitter_output_json) {
        emitter_json_key_prefix(emitter);
        emitter_print_value(emitter, emitter_justify_none, -1,
            value_type, value);
    }
}

static inline void
emitter_table_row(emitter_t *emitter, emitter_row_t *row) {
    if (emitter->output != emitter_output_table) {
        return;
    }
    emitter_col_t *col;
    ql_foreach(col, &row->cols, link) {
        emitter_print_value(emitter, col->justify, col->width,
            col->type, (const void *)&col->bool_val);
    }
    emitter_table_printf(emitter, "\n");
}

#endif /* JEMALLOC_INTERNAL_EMITTER_H */
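The emitter is the small layer jemalloc's stats code uses to describe output once and render it as either JSON or a human-readable table, depending on emitter_output_t. A hedged usage sketch against the API above; this only compiles inside jemalloc's own source tree (where malloc_vcprintf() and the internal headers exist), and write_to_stderr() is an illustrative callback, not part of the library:

/* Sketch only: assumes jemalloc's internal include path. */
#include <stdio.h>

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/emitter.h"

static void
write_to_stderr(void *cbopaque, const char *s) {
    (void)cbopaque;
    fputs(s, stderr);
}

static void
emit_demo(emitter_output_t mode) {
    emitter_t emitter;
    emitter_init(&emitter, mode, write_to_stderr, NULL);

    size_t allocated = 12345;
    bool background_thread = false;

    /* The same calls produce JSON or a table, depending on mode. */
    emitter_begin(&emitter);
    emitter_dict_begin(&emitter, "demo", "Demo stats:");
    emitter_kv(&emitter, "allocated", "Allocated", emitter_type_size,
        &allocated);
    emitter_kv(&emitter, "background_thread", "Background thread",
        emitter_type_bool, &background_thread);
    emitter_dict_end(&emitter);
    emitter_end(&emitter);
}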
deps/jemalloc/include/jemalloc/internal/extent_dss.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
#define JEMALLOC_INTERNAL_EXTENT_DSS_H

typedef enum {
    dss_prec_disabled  = 0,
    dss_prec_primary   = 1,
    dss_prec_secondary = 2,

    dss_prec_limit     = 3
} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"

extern const char *dss_prec_names[];

extern const char *opt_dss;

dss_prec_t extent_dss_prec_get(void);
bool extent_dss_prec_set(dss_prec_t dss_prec);
void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit);
bool extent_in_dss(void *addr);
bool extent_dss_mergeable(void *addr_a, void *addr_b);
void extent_dss_boot(void);

#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */
deps/jemalloc/include/jemalloc/internal/extent_externs.h  deleted 100644 → 0

#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H

#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"

extern size_t opt_lg_extent_max_active_fit;

extern rtree_t extents_rtree;
extern const extent_hooks_t extent_hooks_default;
extern mutex_pool_t extent_mutex_pool;

extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);

extent_hooks_t *extent_hooks_get(arena_t *arena);
extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
    extent_hooks_t *extent_hooks);

#ifdef JEMALLOC_JET
size_t extent_size_quantize_floor(size_t size);
size_t extent_size_quantize_ceil(size_t size);
#endif

rb_proto(, extent_avail_, extent_tree_t, extent_t)
ph_proto(, extent_heap_, extent_heap_t, extent_t)

bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
    bool delay_coalesce);
extent_state_t extents_state_get(const extents_t *extents);
size_t extents_npages_get(extents_t *extents);
extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit);
void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
void extents_prefork(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent);
void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent);
bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length);
extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);

bool extent_boot(void);

#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/extent_inlines.h
deleted
100644 → 0
View file @
7ff7536e
#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
#define JEMALLOC_INTERNAL_EXTENT_INLINES_H

#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sz.h"

static inline void
extent_lock(tsdn_t *tsdn, extent_t *extent) {
    assert(extent != NULL);
    mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
}

static inline void
extent_unlock(tsdn_t *tsdn, extent_t *extent) {
    assert(extent != NULL);
    mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
}

static inline void
extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
    assert(extent1 != NULL && extent2 != NULL);
    mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
        (uintptr_t)extent2);
}

static inline void
extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
    assert(extent1 != NULL && extent2 != NULL);
    mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
        (uintptr_t)extent2);
}

static inline arena_t *
extent_arena_get(const extent_t *extent) {
    unsigned arena_ind = (unsigned)((extent->e_bits &
        EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
    /*
     * The following check is omitted because we should never actually read
     * a NULL arena pointer.
     */
    if (false && arena_ind >= MALLOCX_ARENA_LIMIT) {
        return NULL;
    }
    assert(arena_ind < MALLOCX_ARENA_LIMIT);
    return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
}

static inline szind_t
extent_szind_get_maybe_invalid(const extent_t *extent) {
    szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
        EXTENT_BITS_SZIND_SHIFT);
    assert(szind <= NSIZES);
    return szind;
}

static inline szind_t
extent_szind_get(const extent_t *extent) {
    szind_t szind = extent_szind_get_maybe_invalid(extent);
    assert(szind < NSIZES); /* Never call when "invalid". */
    return szind;
}

static inline size_t
extent_usize_get(const extent_t *extent) {
    return sz_index2size(extent_szind_get(extent));
}

static inline size_t
extent_sn_get(const extent_t *extent) {
    return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
        EXTENT_BITS_SN_SHIFT);
}

static inline extent_state_t
extent_state_get(const extent_t *extent) {
    return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
        EXTENT_BITS_STATE_SHIFT);
}

static inline bool
extent_zeroed_get(const extent_t *extent) {
    return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
        EXTENT_BITS_ZEROED_SHIFT);
}

static inline bool
extent_committed_get(const extent_t *extent) {
    return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
        EXTENT_BITS_COMMITTED_SHIFT);
}

static inline bool
extent_dumpable_get(const extent_t *extent) {
    return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
        EXTENT_BITS_DUMPABLE_SHIFT);
}

static inline bool
extent_slab_get(const extent_t *extent) {
    return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
        EXTENT_BITS_SLAB_SHIFT);
}

static inline unsigned
extent_nfree_get(const extent_t *extent) {
    assert(extent_slab_get(extent));
    return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
        EXTENT_BITS_NFREE_SHIFT);
}

static inline void *
extent_base_get(const extent_t *extent) {
    assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
        !extent_slab_get(extent));
    return PAGE_ADDR2BASE(extent->e_addr);
}

static inline void *
extent_addr_get(const extent_t *extent) {
    assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
        !extent_slab_get(extent));
    return extent->e_addr;
}

static inline size_t
extent_size_get(const extent_t *extent) {
    return (extent->e_size_esn & EXTENT_SIZE_MASK);
}

static inline size_t
extent_esn_get(const extent_t *extent) {
    return (extent->e_size_esn & EXTENT_ESN_MASK);
}

static inline size_t
extent_bsize_get(const extent_t *extent) {
    return extent->e_bsize;
}

static inline void *
extent_before_get(const extent_t *extent) {
    return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
}

static inline void *
extent_last_get(const extent_t *extent) {
    return (void *)((uintptr_t)extent_base_get(extent) +
        extent_size_get(extent) - PAGE);
}

static inline void *
extent_past_get(const extent_t *extent) {
    return (void *)((uintptr_t)extent_base_get(extent) +
        extent_size_get(extent));
}

static inline arena_slab_data_t *
extent_slab_data_get(extent_t *extent) {
    assert(extent_slab_get(extent));
    return &extent->e_slab_data;
}

static inline const arena_slab_data_t *
extent_slab_data_get_const(const extent_t *extent) {
    assert(extent_slab_get(extent));
    return &extent->e_slab_data;
}

static inline prof_tctx_t *
extent_prof_tctx_get(const extent_t *extent) {
    return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
        ATOMIC_ACQUIRE);
}

static inline void
extent_arena_set(extent_t *extent, arena_t *arena) {
    unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) :
        ((1U << MALLOCX_ARENA_BITS) - 1);
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
        ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
}

static inline void
extent_addr_set(extent_t *extent, void *addr) {
    extent->e_addr = addr;
}

static inline void
extent_addr_randomize(UNUSED tsdn_t *tsdn, extent_t *extent,
    size_t alignment) {
    assert(extent_base_get(extent) == extent_addr_get(extent));
    if (alignment < PAGE) {
        unsigned lg_range = LG_PAGE -
            lg_floor(CACHELINE_CEILING(alignment));
        size_t r;
        if (!tsdn_null(tsdn)) {
            tsd_t *tsd = tsdn_tsd(tsdn);
            r = (size_t)prng_lg_range_u64(
                tsd_offset_statep_get(tsd), lg_range);
        } else {
            r = prng_lg_range_zu(
                &extent_arena_get(extent)->offset_state, lg_range,
                true);
        }
        uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE - lg_range);
        extent->e_addr = (void *)((uintptr_t)extent->e_addr +
            random_offset);
        assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
            extent->e_addr);
    }
}

static inline void
extent_size_set(extent_t *extent, size_t size) {
    assert((size & ~EXTENT_SIZE_MASK) == 0);
    extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
}

static inline void
extent_esn_set(extent_t *extent, size_t esn) {
    extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) |
        (esn & EXTENT_ESN_MASK);
}

static inline void
extent_bsize_set(extent_t *extent, size_t bsize) {
    extent->e_bsize = bsize;
}

static inline void
extent_szind_set(extent_t *extent, szind_t szind) {
    assert(szind <= NSIZES); /* NSIZES means "invalid". */
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
        ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
}

static inline void
extent_nfree_set(extent_t *extent, unsigned nfree) {
    assert(extent_slab_get(extent));
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
        ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
}

static inline void
extent_nfree_inc(extent_t *extent) {
    assert(extent_slab_get(extent));
    extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
}

static inline void
extent_nfree_dec(extent_t *extent) {
    assert(extent_slab_get(extent));
    extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
}

static inline void
extent_sn_set(extent_t *extent, size_t sn) {
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
        ((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
}

static inline void
extent_state_set(extent_t *extent, extent_state_t state) {
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
        ((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
}

static inline void
extent_zeroed_set(extent_t *extent, bool zeroed) {
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
        ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
}

static inline void
extent_committed_set(extent_t *extent, bool committed) {
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
        ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
}

static inline void
extent_dumpable_set(extent_t *extent, bool dumpable) {
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
        ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
}

static inline void
extent_slab_set(extent_t *extent, bool slab) {
    extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
        ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
}

static inline void
extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
    atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
}

static inline void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
    bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
    bool committed, bool dumpable) {
    assert(addr == PAGE_ADDR2BASE(addr) || !slab);

    extent_arena_set(extent, arena);
    extent_addr_set(extent, addr);
    extent_size_set(extent, size);
    extent_slab_set(extent, slab);
    extent_szind_set(extent, szind);
    extent_sn_set(extent, sn);
    extent_state_set(extent, state);
    extent_zeroed_set(extent, zeroed);
    extent_committed_set(extent, committed);
    extent_dumpable_set(extent, dumpable);
    ql_elm_new(extent, ql_link);
    if (config_prof) {
        extent_prof_tctx_set(extent, NULL);
    }
}

static inline void
extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
    extent_arena_set(extent, NULL);
    extent_addr_set(extent, addr);
    extent_bsize_set(extent, bsize);
    extent_slab_set(extent, false);
    extent_szind_set(extent, NSIZES);
    extent_sn_set(extent, sn);
    extent_state_set(extent, extent_state_active);
    extent_zeroed_set(extent, true);
    extent_committed_set(extent, true);
    extent_dumpable_set(extent, true);
}

static inline void
extent_list_init(extent_list_t *list) {
    ql_new(list);
}

static inline extent_t *
extent_list_first(const extent_list_t *list) {
    return ql_first(list);
}

static inline extent_t *
extent_list_last(const extent_list_t *list) {
    return ql_last(list, ql_link);
}

static inline void
extent_list_append(extent_list_t *list, extent_t *extent) {
    ql_tail_insert(list, extent, ql_link);
}

static inline void
extent_list_prepend(extent_list_t *list, extent_t *extent) {
    ql_head_insert(list, extent, ql_link);
}

static inline void
extent_list_replace(extent_list_t *list, extent_t *to_remove,
    extent_t *to_insert) {
    ql_after_insert(to_remove, to_insert, ql_link);
    ql_remove(list, to_remove, ql_link);
}

static inline void
extent_list_remove(extent_list_t *list, extent_t *extent) {
    ql_remove(list, extent, ql_link);
}

static inline int
extent_sn_comp(const extent_t *a, const extent_t *b) {
    size_t a_sn = extent_sn_get(a);
    size_t b_sn = extent_sn_get(b);

    return (a_sn > b_sn) - (a_sn < b_sn);
}

static inline int
extent_esn_comp(const extent_t *a, const extent_t *b) {
    size_t a_esn = extent_esn_get(a);
    size_t b_esn = extent_esn_get(b);

    return (a_esn > b_esn) - (a_esn < b_esn);
}

static inline int
extent_ad_comp(const extent_t *a, const extent_t *b) {
    uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
    uintptr_t b_addr = (uintptr_t)extent_addr_get(b);

    return (a_addr > b_addr) - (a_addr < b_addr);
}

static inline int
extent_ead_comp(const extent_t *a, const extent_t *b) {
    uintptr_t a_eaddr = (uintptr_t)a;
    uintptr_t b_eaddr = (uintptr_t)b;

    return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
}

static inline int
extent_snad_comp(const extent_t *a, const extent_t *b) {
    int ret;

    ret = extent_sn_comp(a, b);
    if (ret != 0) {
        return ret;
    }

    ret = extent_ad_comp(a, b);
    return ret;
}

static inline int
extent_esnead_comp(const extent_t *a, const extent_t *b) {
    int ret;

    ret = extent_esn_comp(a, b);
    if (ret != 0) {
        return ret;
    }

    ret = extent_ead_comp(a, b);
    return ret;
}

#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
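A side note on the comparators above: extent_sn_comp(), extent_ad_comp(), and the rest all use the branchless (a > b) - (a < b) idiom, which yields -1, 0, or 1 without the overflow risk of computing a - b directly. A minimal standalone sketch of the idiom (size_comp is a made-up name, not from the header):

#include <stddef.h>

/* Three-way compare of two size_t values: returns -1, 0, or 1. */
static inline int
size_comp(size_t a, size_t b) {
    return (a > b) - (a < b);
}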
deps/jemalloc/include/jemalloc/internal/extent_mmap.h
deleted
100644 → 0
View file @
7ff7536e
#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H

extern bool opt_retain;

void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit);
bool extent_dalloc_mmap(void *addr, size_t size);

#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */
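For context, a minimal call sequence against the two declarations above could look like the sketch below. It is illustrative only, not part of the deleted header: mmap_roundtrip_sketch and the size/alignment values are made up, and it assumes the usual jemalloc conventions that *zero and *commit are in/out flags and that extent_dalloc_mmap() returns true when the mapping could not be released.

/* Illustrative sketch: map and unmap one aligned region. */
static void
mmap_roundtrip_sketch(void) {
    bool zero = false;
    bool commit = true;
    size_t size = (size_t)2 << 20;       /* 2 MiB, assumed page-multiple. */
    size_t alignment = (size_t)1 << 21;  /* 2 MiB alignment. */
    void *addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
    if (addr != NULL) {
        (void)extent_dalloc_mmap(addr, size);
    }
}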
deps/jemalloc/include/jemalloc/internal/extent_structs.h
deleted
100644 → 0
View file @
7ff7536e
#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/size_classes.h"

typedef enum {
    extent_state_active   = 0,
    extent_state_dirty    = 1,
    extent_state_muzzy    = 2,
    extent_state_retained = 3
} extent_state_t;

/* Extent (span of pages).  Use accessor functions for e_* fields. */
struct extent_s {
    /*
     * Bitfield containing several fields:
     *
     * a: arena_ind
     * b: slab
     * c: committed
     * d: dumpable
     * z: zeroed
     * t: state
     * i: szind
     * f: nfree
     * n: sn
     *
     * nnnnnnnn ... nnnnffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
     *
     * arena_ind: Arena from which this extent came, or all 1 bits if
     *            unassociated.
     *
     * slab: The slab flag indicates whether the extent is used for a slab
     *       of small regions.  This helps differentiate small size classes,
     *       and it indicates whether interior pointers can be looked up via
     *       iealloc().
     *
     * committed: The committed flag indicates whether physical memory is
     *            committed to the extent, whether explicitly or implicitly
     *            as on a system that overcommits and satisfies physical
     *            memory needs on demand via soft page faults.
     *
     * dumpable: The dumpable flag indicates whether or not we've set the
     *           memory in question to be dumpable.  Note that this
     *           interacts somewhat subtly with user-specified extent hooks,
     *           since we don't know if *they* are fiddling with
     *           dumpability (in which case, we don't want to undo whatever
     *           they're doing).  To deal with this scenario, we:
     *           - Make dumpable false only for memory allocated with the
     *             default hooks.
     *           - Only allow memory to go from non-dumpable to dumpable,
     *             and only once.
     *           - Never make the OS call to allow dumping when the
     *             dumpable bit is already set.
     *           These three constraints mean that we will never
     *           accidentally dump user memory that the user meant to set
     *           nondumpable with their extent hooks.
     *
     * zeroed: The zeroed flag is used by extent recycling code to track
     *         whether memory is zero-filled.
     *
     * state: The state flag is an extent_state_t.
     *
     * szind: The szind flag indicates usable size class index for
     *        allocations residing in this extent, regardless of whether the
     *        extent is a slab.  Extent size and usable size often differ
     *        even for non-slabs, either due to sz_large_pad or promotion of
     *        sampled small regions.
     *
     * nfree: Number of free regions in slab.
     *
     * sn: Serial number (potentially non-unique).
     *
     *     Serial numbers may wrap around if !opt_retain, but as long as
     *     comparison functions fall back on address comparison for equal
     *     serial numbers, stable (if imperfect) ordering is maintained.
     *
     *     Serial numbers may not be unique even in the absence of
     *     wrap-around, e.g. when splitting an extent and assigning the same
     *     serial number to both resulting adjacent extents.
     */
    uint64_t        e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))

#define EXTENT_BITS_ARENA_WIDTH  MALLOCX_ARENA_BITS
#define EXTENT_BITS_ARENA_SHIFT  0
#define EXTENT_BITS_ARENA_MASK  MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)

#define EXTENT_BITS_SLAB_WIDTH  1
#define EXTENT_BITS_SLAB_SHIFT  (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_MASK  MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)

#define EXTENT_BITS_COMMITTED_WIDTH  1
#define EXTENT_BITS_COMMITTED_SHIFT  (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_MASK  MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)

#define EXTENT_BITS_DUMPABLE_WIDTH  1
#define EXTENT_BITS_DUMPABLE_SHIFT  (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_DUMPABLE_MASK  MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)

#define EXTENT_BITS_ZEROED_WIDTH  1
#define EXTENT_BITS_ZEROED_SHIFT  (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
#define EXTENT_BITS_ZEROED_MASK  MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)

#define EXTENT_BITS_STATE_WIDTH  2
#define EXTENT_BITS_STATE_SHIFT  (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_MASK  MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)

#define EXTENT_BITS_SZIND_WIDTH  LG_CEIL_NSIZES
#define EXTENT_BITS_SZIND_SHIFT  (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_MASK  MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)

#define EXTENT_BITS_NFREE_WIDTH  (LG_SLAB_MAXREGS + 1)
#define EXTENT_BITS_NFREE_SHIFT  (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_MASK  MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)

#define EXTENT_BITS_SN_SHIFT  (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_SN_MASK  (UINT64_MAX << EXTENT_BITS_SN_SHIFT)

    /* Pointer to the extent that this structure is responsible for. */
    void            *e_addr;

    union {
        /*
         * Extent size and serial number associated with the extent
         * structure (different than the serial number for the extent at
         * e_addr).
         *
         * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
         */
        size_t          e_size_esn;
#define EXTENT_SIZE_MASK    ((size_t)~(PAGE-1))
#define EXTENT_ESN_MASK     ((size_t)PAGE-1)
        /* Base extent size, which may not be a multiple of PAGE. */
        size_t          e_bsize;
    };

    /*
     * List linkage, used by a variety of lists:
     * - bin_t's slabs_full
     * - extents_t's LRU
     * - stashed dirty extents
     * - arena's large allocations
     */
    ql_elm(extent_t)    ql_link;

    /*
     * Linkage for per size class sn/address-ordered heaps, and
     * for extent_avail
     */
    phn(extent_t)       ph_link;

    union {
        /* Small region slab metadata. */
        arena_slab_data_t   e_slab_data;

        /*
         * Profile counters, used for large objects.  Points to a
         * prof_tctx_t.
         */
        atomic_p_t      e_prof_tctx;
    };
};
typedef ql_head(extent_t) extent_list_t;
typedef ph(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;

/* Quantized collection of extents, with built-in LRU queue. */
struct extents_s {
    malloc_mutex_t      mtx;

    /*
     * Quantized per size class heaps of extents.
     *
     * Synchronization: mtx.
     */
    extent_heap_t       heaps[NPSIZES+1];

    /*
     * Bitmap for which set bits correspond to non-empty heaps.
     *
     * Synchronization: mtx.
     */
    bitmap_t        bitmap[BITMAP_GROUPS(NPSIZES+1)];

    /*
     * LRU of all extents in heaps.
     *
     * Synchronization: mtx.
     */
    extent_list_t       lru;

    /*
     * Page sum for all extents in heaps.
     *
     * The synchronization here is a little tricky.  Modifications to
     * npages must hold mtx, but reads need not (though, a reader who sees
     * npages without holding the mutex can't assume anything about the
     * rest of the state of the extents_t).
     */
    atomic_zu_t     npages;

    /* All stored extents must be in the same state. */
    extent_state_t      state;

    /*
     * If true, delay coalescing until eviction; otherwise coalesce during
     * deallocation.
     */
    bool            delay_coalesce;
};

#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
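The MASK()/width/shift scheme above packs every per-extent flag into the single e_bits word, and the accessors in extent_inlines.h read and write each field with a mask-and-shift pair. The standalone sketch below mirrors that pattern with made-up SKETCH_* names and an arbitrary 2-bit field at shift 20; the real widths and shifts come from the EXTENT_BITS_* definitions above.

#include <stdint.h>
#include <stdio.h>

/* Same shape as jemalloc's MASK(): a field of `width` bits at `shift`. */
#define SKETCH_MASK(width, shift) (((((uint64_t)0x1U) << (width)) - 1) << (shift))
#define SKETCH_STATE_WIDTH 2
#define SKETCH_STATE_SHIFT 20
#define SKETCH_STATE_MASK  SKETCH_MASK(SKETCH_STATE_WIDTH, SKETCH_STATE_SHIFT)

/* Write the field: clear its bits, then OR in the shifted value. */
static uint64_t
state_set(uint64_t bits, uint64_t state) {
    return (bits & ~SKETCH_STATE_MASK) | (state << SKETCH_STATE_SHIFT);
}

/* Read the field: mask it out, then shift it down. */
static uint64_t
state_get(uint64_t bits) {
    return (bits & SKETCH_STATE_MASK) >> SKETCH_STATE_SHIFT;
}

int
main(void) {
    uint64_t bits = 0;
    bits = state_set(bits, 3); /* e.g. extent_state_retained == 3 */
    printf("state = %llu\n", (unsigned long long)state_get(bits));
    return 0;
}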