ruanhaishen / redis · Commits · d4439bd4

Commit d4439bd4, authored May 15, 2023 by Oran Agra

    Merge remote-tracking branch 'origin/unstable' into 7.2

Parents: e26a769d, 2ffde15a
Changes: 199

Too many changes to show. To preserve performance, only 199 of 199+ files are displayed.
deps/jemalloc/include/jemalloc/internal/extent_structs.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/sc.h"

typedef enum {
	extent_state_active   = 0,
	extent_state_dirty    = 1,
	extent_state_muzzy    = 2,
	extent_state_retained = 3
} extent_state_t;

/* Extent (span of pages).  Use accessor functions for e_* fields. */
struct extent_s {
	/*
	 * Bitfield containing several fields:
	 *
	 * a: arena_ind
	 * b: slab
	 * c: committed
	 * d: dumpable
	 * z: zeroed
	 * t: state
	 * i: szind
	 * f: nfree
	 * s: bin_shard
	 * n: sn
	 *
	 * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
	 *
	 * arena_ind: Arena from which this extent came, or all 1 bits if
	 *            unassociated.
	 *
	 * slab: The slab flag indicates whether the extent is used for a slab
	 *       of small regions.  This helps differentiate small size classes,
	 *       and it indicates whether interior pointers can be looked up via
	 *       iealloc().
	 *
	 * committed: The committed flag indicates whether physical memory is
	 *            committed to the extent, whether explicitly or implicitly
	 *            as on a system that overcommits and satisfies physical
	 *            memory needs on demand via soft page faults.
	 *
	 * dumpable: The dumpable flag indicates whether or not we've set the
	 *           memory in question to be dumpable.  Note that this
	 *           interacts somewhat subtly with user-specified extent hooks,
	 *           since we don't know if *they* are fiddling with
	 *           dumpability (in which case, we don't want to undo whatever
	 *           they're doing).  To deal with this scenario, we:
	 *           - Make dumpable false only for memory allocated with the
	 *             default hooks.
	 *           - Only allow memory to go from non-dumpable to dumpable,
	 *             and only once.
	 *           - Never make the OS call to allow dumping when the
	 *             dumpable bit is already set.
	 *           These three constraints mean that we will never
	 *           accidentally dump user memory that the user meant to set
	 *           nondumpable with their extent hooks.
	 *
	 * zeroed: The zeroed flag is used by extent recycling code to track
	 *         whether memory is zero-filled.
	 *
	 * state: The state flag is an extent_state_t.
	 *
	 * szind: The szind flag indicates usable size class index for
	 *        allocations residing in this extent, regardless of whether the
	 *        extent is a slab.  Extent size and usable size often differ
	 *        even for non-slabs, either due to sz_large_pad or promotion of
	 *        sampled small regions.
	 *
	 * nfree: Number of free regions in slab.
	 *
	 * bin_shard: the shard of the bin from which this extent came.
	 *
	 * sn: Serial number (potentially non-unique).
	 *
	 *     Serial numbers may wrap around if !opt_retain, but as long as
	 *     comparison functions fall back on address comparison for equal
	 *     serial numbers, stable (if imperfect) ordering is maintained.
	 *
	 *     Serial numbers may not be unique even in the absence of
	 *     wrap-around, e.g. when splitting an extent and assigning the same
	 *     serial number to both resulting adjacent extents.
	 */
	uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))

#define EXTENT_BITS_ARENA_WIDTH  MALLOCX_ARENA_BITS
#define EXTENT_BITS_ARENA_SHIFT  0
#define EXTENT_BITS_ARENA_MASK  MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)

#define EXTENT_BITS_SLAB_WIDTH  1
#define EXTENT_BITS_SLAB_SHIFT  (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_MASK  MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)

#define EXTENT_BITS_COMMITTED_WIDTH  1
#define EXTENT_BITS_COMMITTED_SHIFT  (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_MASK  MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)

#define EXTENT_BITS_DUMPABLE_WIDTH  1
#define EXTENT_BITS_DUMPABLE_SHIFT  (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_DUMPABLE_MASK  MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)

#define EXTENT_BITS_ZEROED_WIDTH  1
#define EXTENT_BITS_ZEROED_SHIFT  (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
#define EXTENT_BITS_ZEROED_MASK  MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)

#define EXTENT_BITS_STATE_WIDTH  2
#define EXTENT_BITS_STATE_SHIFT  (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_MASK  MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)

#define EXTENT_BITS_SZIND_WIDTH  LG_CEIL(SC_NSIZES)
#define EXTENT_BITS_SZIND_SHIFT  (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_MASK  MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)

#define EXTENT_BITS_NFREE_WIDTH  (LG_SLAB_MAXREGS + 1)
#define EXTENT_BITS_NFREE_SHIFT  (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_MASK  MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)

#define EXTENT_BITS_BINSHARD_WIDTH  6
#define EXTENT_BITS_BINSHARD_SHIFT  (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_BINSHARD_MASK  MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT)

#define EXTENT_BITS_IS_HEAD_WIDTH  1
#define EXTENT_BITS_IS_HEAD_SHIFT  (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT)
#define EXTENT_BITS_IS_HEAD_MASK  MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT)

#define EXTENT_BITS_SN_SHIFT  (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT)
#define EXTENT_BITS_SN_MASK  (UINT64_MAX << EXTENT_BITS_SN_SHIFT)

	/* Pointer to the extent that this structure is responsible for. */
	void *e_addr;

	union {
		/*
		 * Extent size and serial number associated with the extent
		 * structure (different than the serial number for the extent at
		 * e_addr).
		 *
		 * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
		 */
		size_t e_size_esn;
#define EXTENT_SIZE_MASK  ((size_t)~(PAGE-1))
#define EXTENT_ESN_MASK   ((size_t)PAGE-1)
		/* Base extent size, which may not be a multiple of PAGE. */
		size_t e_bsize;
	};

	/*
	 * List linkage, used by a variety of lists:
	 * - bin_t's slabs_full
	 * - extents_t's LRU
	 * - stashed dirty extents
	 * - arena's large allocations
	 */
	ql_elm(extent_t) ql_link;

	/*
	 * Linkage for per size class sn/address-ordered heaps, and
	 * for extent_avail
	 */
	phn(extent_t) ph_link;

	union {
		/* Small region slab metadata. */
		arena_slab_data_t e_slab_data;

		/* Profiling data, used for large objects. */
		struct {
			/* Time when this was allocated. */
			nstime_t e_alloc_time;
			/* Points to a prof_tctx_t. */
			atomic_p_t e_prof_tctx;
		};
	};
};
typedef ql_head(extent_t) extent_list_t;
typedef ph(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;

/* Quantized collection of extents, with built-in LRU queue. */
struct extents_s {
	malloc_mutex_t mtx;

	/*
	 * Quantized per size class heaps of extents.
	 *
	 * Synchronization: mtx.
	 */
	extent_heap_t heaps[SC_NPSIZES + 1];
	atomic_zu_t nextents[SC_NPSIZES + 1];
	atomic_zu_t nbytes[SC_NPSIZES + 1];

	/*
	 * Bitmap for which set bits correspond to non-empty heaps.
	 *
	 * Synchronization: mtx.
	 */
	bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];

	/*
	 * LRU of all extents in heaps.
	 *
	 * Synchronization: mtx.
	 */
	extent_list_t lru;

	/*
	 * Page sum for all extents in heaps.
	 *
	 * The synchronization here is a little tricky.  Modifications to npages
	 * must hold mtx, but reads need not (though, a reader who sees npages
	 * without holding the mutex can't assume anything about the rest of the
	 * state of the extents_t).
	 */
	atomic_zu_t npages;

	/* All stored extents must be in the same state. */
	extent_state_t state;

	/*
	 * If true, delay coalescing until eviction; otherwise coalesce during
	 * deallocation.
	 */
	bool delay_coalesce;
};

/*
 * The following two structs are for experimental purposes.  See
 * experimental_utilization_query_ctl and
 * experimental_utilization_batch_query_ctl in src/ctl.c.
 */
struct extent_util_stats_s {
	size_t nfree;
	size_t nregs;
	size_t size;
};

struct extent_util_stats_verbose_s {
	void *slabcur_addr;
	size_t nfree;
	size_t nregs;
	size_t size;
	size_t bin_nfree;
	size_t bin_nregs;
};

#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
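Note: as a reading aid for the packed e_bits layout documented above, a field is extracted by masking and shifting with the WIDTH/SHIFT/MASK macros. The helpers below are hypothetical illustrations of that pattern, not jemalloc's actual accessors (which follow the same idea in its inline headers):

/* Hypothetical illustration of how a packed e_bits field is read and written. */
static inline unsigned
demo_extent_arena_ind_get(uint64_t e_bits) {
	return (unsigned)((e_bits & EXTENT_BITS_ARENA_MASK) >>
	    EXTENT_BITS_ARENA_SHIFT);
}

static inline uint64_t
demo_extent_arena_ind_set(uint64_t e_bits, unsigned arena_ind) {
	/* Clear the field, then OR in the new value at its shift. */
	return (e_bits & ~EXTENT_BITS_ARENA_MASK) |
	    ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
}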
deps/jemalloc/include/jemalloc/internal/extent_types.h  (deleted, 100644 → 0)

#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
#define JEMALLOC_INTERNAL_EXTENT_TYPES_H

typedef struct extent_s extent_t;
typedef struct extents_s extents_t;

typedef struct extent_util_stats_s extent_util_stats_t;
typedef struct extent_util_stats_verbose_s extent_util_stats_verbose_t;

#define EXTENT_HOOKS_INITIALIZER	NULL

/*
 * When reusing (and splitting) an active extent, (1U << opt_lg_extent_max_active_fit)
 * is the max ratio between the size of the active extent and the new extent.
 */
#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6

typedef enum {
	EXTENT_NOT_HEAD,
	EXTENT_IS_HEAD   /* Only relevant for Windows && opt.retain. */
} extent_head_state_t;

#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
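Note: to make the LG_EXTENT_MAX_ACTIVE_FIT comment concrete, the ratio bound reduces to a single shift-and-compare. A minimal illustrative sketch (the function name is made up; the real check lives in the extent recycling code):

/* Illustrative: is an active extent small enough to reuse for this request? */
static bool
demo_extent_fit_ok(size_t active_extent_size, size_t request_size,
    unsigned lg_max_fit) {
	/* Reuse only if active_extent_size <= request_size << lg_max_fit. */
	return (active_extent_size >> lg_max_fit) <= request_size;
}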
deps/jemalloc/include/jemalloc/internal/fb.h  (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_FB_H
#define JEMALLOC_INTERNAL_FB_H

/*
 * The flat bitmap module.  This has a larger API relative to the bitmap module
 * (supporting things like backwards searches, and searching for both set and
 * unset bits), at the cost of slower operations for very large bitmaps.
 *
 * Initialized flat bitmaps start at all-zeros (all bits unset).
 */

typedef unsigned long fb_group_t;
#define FB_GROUP_BITS (ZU(1) << (LG_SIZEOF_LONG + 3))
#define FB_NGROUPS(nbits) \
    ((nbits) / FB_GROUP_BITS + ((nbits) % FB_GROUP_BITS == 0 ? 0 : 1))

static inline void
fb_init(fb_group_t *fb, size_t nbits) {
	size_t ngroups = FB_NGROUPS(nbits);
	memset(fb, 0, ngroups * sizeof(fb_group_t));
}

static inline bool
fb_empty(fb_group_t *fb, size_t nbits) {
	size_t ngroups = FB_NGROUPS(nbits);
	for (size_t i = 0; i < ngroups; i++) {
		if (fb[i] != 0) {
			return false;
		}
	}
	return true;
}

static inline bool
fb_full(fb_group_t *fb, size_t nbits) {
	size_t ngroups = FB_NGROUPS(nbits);
	size_t trailing_bits = nbits % FB_GROUP_BITS;
	size_t limit = (trailing_bits == 0 ? ngroups : ngroups - 1);
	for (size_t i = 0; i < limit; i++) {
		if (fb[i] != ~(fb_group_t)0) {
			return false;
		}
	}
	if (trailing_bits == 0) {
		return true;
	}
	return fb[ngroups - 1] == ((fb_group_t)1 << trailing_bits) - 1;
}

static inline bool
fb_get(fb_group_t *fb, size_t nbits, size_t bit) {
	assert(bit < nbits);
	size_t group_ind = bit / FB_GROUP_BITS;
	size_t bit_ind = bit % FB_GROUP_BITS;
	return (bool)(fb[group_ind] & ((fb_group_t)1 << bit_ind));
}

static inline void
fb_set(fb_group_t *fb, size_t nbits, size_t bit) {
	assert(bit < nbits);
	size_t group_ind = bit / FB_GROUP_BITS;
	size_t bit_ind = bit % FB_GROUP_BITS;
	fb[group_ind] |= ((fb_group_t)1 << bit_ind);
}

static inline void
fb_unset(fb_group_t *fb, size_t nbits, size_t bit) {
	assert(bit < nbits);
	size_t group_ind = bit / FB_GROUP_BITS;
	size_t bit_ind = bit % FB_GROUP_BITS;
	fb[group_ind] &= ~((fb_group_t)1 << bit_ind);
}

/*
 * Some implementation details.  This visitation function lets us apply a group
 * visitor to each group in the bitmap (potentially modifying it).  The mask
 * indicates which bits are logically part of the visitation.
 */
typedef void (*fb_group_visitor_t)(void *ctx, fb_group_t *fb, fb_group_t mask);
JEMALLOC_ALWAYS_INLINE void
fb_visit_impl(fb_group_t *fb, size_t nbits, fb_group_visitor_t visit, void *ctx,
    size_t start, size_t cnt) {
	assert(cnt > 0);
	assert(start + cnt <= nbits);
	size_t group_ind = start / FB_GROUP_BITS;
	size_t start_bit_ind = start % FB_GROUP_BITS;
	/*
	 * The first group is special; it's the only one we don't start writing
	 * to from bit 0.
	 */
	size_t first_group_cnt = (start_bit_ind + cnt > FB_GROUP_BITS
	    ? FB_GROUP_BITS - start_bit_ind : cnt);
	/*
	 * We can basically split affected words into:
	 * - The first group, where we touch only the high bits
	 * - The last group, where we touch only the low bits
	 * - The middle, where we set all the bits to the same thing.
	 * We treat each case individually.  The last two could be merged, but
	 * this can lead to bad codegen for those middle words.
	 */
	/* First group */
	fb_group_t mask = ((~(fb_group_t)0)
	    >> (FB_GROUP_BITS - first_group_cnt)) << start_bit_ind;
	visit(ctx, &fb[group_ind], mask);

	cnt -= first_group_cnt;
	group_ind++;
	/* Middle groups */
	while (cnt > FB_GROUP_BITS) {
		visit(ctx, &fb[group_ind], ~(fb_group_t)0);
		cnt -= FB_GROUP_BITS;
		group_ind++;
	}
	/* Last group */
	if (cnt != 0) {
		mask = (~(fb_group_t)0) >> (FB_GROUP_BITS - cnt);
		visit(ctx, &fb[group_ind], mask);
	}
}

JEMALLOC_ALWAYS_INLINE void
fb_assign_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
	bool val = *(bool *)ctx;
	if (val) {
		*fb |= mask;
	} else {
		*fb &= ~mask;
	}
}

/* Sets the cnt bits starting at position start.  Must not have a 0 count. */
static inline void
fb_set_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
	bool val = true;
	fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt);
}

/* Unsets the cnt bits starting at position start.  Must not have a 0 count. */
static inline void
fb_unset_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
	bool val = false;
	fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt);
}

JEMALLOC_ALWAYS_INLINE void
fb_scount_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
	size_t *scount = (size_t *)ctx;
	*scount += popcount_lu(*fb & mask);
}

/* Finds the number of set bits in the range of length cnt starting at start. */
JEMALLOC_ALWAYS_INLINE size_t
fb_scount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
	size_t scount = 0;
	fb_visit_impl(fb, nbits, &fb_scount_visitor, &scount, start, cnt);
	return scount;
}

/* Finds the number of unset bits in the range of length cnt starting at start. */
JEMALLOC_ALWAYS_INLINE size_t
fb_ucount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
	size_t scount = fb_scount(fb, nbits, start, cnt);
	return cnt - scount;
}

/*
 * An implementation detail; find the first bit at position >= min_bit with the
 * value val.
 *
 * Returns the number of bits in the bitmap if no such bit exists.
 */
JEMALLOC_ALWAYS_INLINE ssize_t
fb_find_impl(fb_group_t *fb, size_t nbits, size_t start, bool val,
    bool forward) {
	assert(start < nbits);
	size_t ngroups = FB_NGROUPS(nbits);
	ssize_t group_ind = start / FB_GROUP_BITS;
	size_t bit_ind = start % FB_GROUP_BITS;

	fb_group_t maybe_invert = (val ? 0 : (fb_group_t)-1);

	fb_group_t group = fb[group_ind];
	group ^= maybe_invert;
	if (forward) {
		/* Only keep ones in bits bit_ind and above. */
		group &= ~((1LU << bit_ind) - 1);
	} else {
		/*
		 * Only keep ones in bits bit_ind and below.  You might more
		 * naturally express this as (1 << (bit_ind + 1)) - 1, but
		 * that shifts by an invalid amount if bit_ind is one less than
		 * FB_GROUP_BITS.
		 */
		group &= ((2LU << bit_ind) - 1);
	}
	ssize_t group_ind_bound = forward ? (ssize_t)ngroups : -1;
	while (group == 0) {
		group_ind += forward ? 1 : -1;
		if (group_ind == group_ind_bound) {
			return forward ? (ssize_t)nbits : (ssize_t)-1;
		}
		group = fb[group_ind];
		group ^= maybe_invert;
	}
	assert(group != 0);
	size_t bit = forward ? ffs_lu(group) : fls_lu(group);
	size_t pos = group_ind * FB_GROUP_BITS + bit;
	/*
	 * The high bits of a partially filled last group are zeros, so if we're
	 * looking for zeros we don't want to report an invalid result.
	 */
	if (forward && !val && pos > nbits) {
		return nbits;
	}
	return pos;
}

/*
 * Find the first set bit in the bitmap with an index >= min_bit.  Returns the
 * number of bits in the bitmap if no such bit exists.
 */
static inline size_t
fb_ffs(fb_group_t *fb, size_t nbits, size_t min_bit) {
	return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ true,
	    /* forward */ true);
}

/* The same, but looks for an unset bit. */
static inline size_t
fb_ffu(fb_group_t *fb, size_t nbits, size_t min_bit) {
	return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ false,
	    /* forward */ true);
}

/*
 * Find the last set bit in the bitmap with an index <= max_bit.  Returns -1 if
 * no such bit exists.
 */
static inline ssize_t
fb_fls(fb_group_t *fb, size_t nbits, size_t max_bit) {
	return fb_find_impl(fb, nbits, max_bit, /* val */ true,
	    /* forward */ false);
}

static inline ssize_t
fb_flu(fb_group_t *fb, size_t nbits, size_t max_bit) {
	return fb_find_impl(fb, nbits, max_bit, /* val */ false,
	    /* forward */ false);
}

/* Returns whether or not we found a range. */
JEMALLOC_ALWAYS_INLINE bool
fb_iter_range_impl(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
    size_t *r_len, bool val, bool forward) {
	assert(start < nbits);
	ssize_t next_range_begin = fb_find_impl(fb, nbits, start, val, forward);
	if ((forward && next_range_begin == (ssize_t)nbits)
	    || (!forward && next_range_begin == (ssize_t)-1)) {
		return false;
	}
	/* Half open range; the set bits are [begin, end). */
	ssize_t next_range_end = fb_find_impl(fb, nbits, next_range_begin, !val,
	    forward);
	if (forward) {
		*r_begin = next_range_begin;
		*r_len = next_range_end - next_range_begin;
	} else {
		*r_begin = next_range_end + 1;
		*r_len = next_range_begin - next_range_end;
	}
	return true;
}

/*
 * Used to iterate through ranges of set bits.
 *
 * Tries to find the next contiguous sequence of set bits with a first index >=
 * start.  If one exists, puts the earliest bit of the range in *r_begin, its
 * length in *r_len, and returns true.  Otherwise, returns false (without
 * touching *r_begin or *r_len).
 */
static inline bool
fb_srange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
    size_t *r_len) {
	return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
	    /* val */ true, /* forward */ true);
}

/*
 * The same as fb_srange_iter, but searches backwards from start rather than
 * forwards.  (The position returned is still the earliest bit in the range).
 */
static inline bool
fb_srange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
    size_t *r_len) {
	return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
	    /* val */ true, /* forward */ false);
}

/* Similar to fb_srange_iter, but searches for unset bits. */
static inline bool
fb_urange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
    size_t *r_len) {
	return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
	    /* val */ false, /* forward */ true);
}

/* Similar to fb_srange_riter, but searches for unset bits. */
static inline bool
fb_urange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
    size_t *r_len) {
	return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
	    /* val */ false, /* forward */ false);
}

JEMALLOC_ALWAYS_INLINE size_t
fb_range_longest_impl(fb_group_t *fb, size_t nbits, bool val) {
	size_t begin = 0;
	size_t longest_len = 0;
	size_t len = 0;
	while (begin < nbits && fb_iter_range_impl(fb, nbits, begin, &begin,
	    &len, val, /* forward */ true)) {
		if (len > longest_len) {
			longest_len = len;
		}
		begin += len;
	}
	return longest_len;
}

static inline size_t
fb_srange_longest(fb_group_t *fb, size_t nbits) {
	return fb_range_longest_impl(fb, nbits, /* val */ true);
}

static inline size_t
fb_urange_longest(fb_group_t *fb, size_t nbits) {
	return fb_range_longest_impl(fb, nbits, /* val */ false);
}

/*
 * Initializes each bit of dst with the bitwise-AND of the corresponding bits of
 * src1 and src2.  All bitmaps must be the same size.
 */
static inline void
fb_bit_and(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) {
	size_t ngroups = FB_NGROUPS(nbits);
	for (size_t i = 0; i < ngroups; i++) {
		dst[i] = src1[i] & src2[i];
	}
}

/* Like fb_bit_and, but with bitwise-OR. */
static inline void
fb_bit_or(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) {
	size_t ngroups = FB_NGROUPS(nbits);
	for (size_t i = 0; i < ngroups; i++) {
		dst[i] = src1[i] | src2[i];
	}
}

/* Initializes dst bit i to the negation of source bit i. */
static inline void
fb_bit_not(fb_group_t *dst, fb_group_t *src, size_t nbits) {
	size_t ngroups = FB_NGROUPS(nbits);
	for (size_t i = 0; i < ngroups; i++) {
		dst[i] = ~src[i];
	}
}

#endif /* JEMALLOC_INTERNAL_FB_H */
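Note: as a quick orientation to the flat bitmap API above, here is a small usage sketch. The bitmap size, bit positions, and function name are arbitrary illustrations, not part of the header:

/* Illustrative use of the flat bitmap API. */
#define DEMO_NBITS 512

static size_t
demo_fb(void) {
	fb_group_t map[FB_NGROUPS(DEMO_NBITS)];
	fb_init(map, DEMO_NBITS);		/* All bits start unset. */

	fb_set_range(map, DEMO_NBITS, 10, 20);	/* Set bits [10, 30). */
	fb_unset(map, DEMO_NBITS, 15);		/* Punch a hole at bit 15. */

	size_t begin, len;
	/* Finds the first set-bit run: begin == 10, len == 5. */
	fb_srange_iter(map, DEMO_NBITS, 0, &begin, &len);

	/* 19 bits remain set. */
	return fb_scount(map, DEMO_NBITS, 0, DEMO_NBITS);
}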
deps/jemalloc/include/jemalloc/internal/fxp.h  (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_FXP_H
#define JEMALLOC_INTERNAL_FXP_H

/*
 * A simple fixed-point math implementation, supporting only unsigned values
 * (with overflow being an error).
 *
 * It's not in general safe to use floating point in core code, because various
 * libc implementations we get linked against can assume that malloc won't touch
 * floating point state and call it with an unusual calling convention.
 */

/*
 * High 16 bits are the integer part, low 16 are the fractional part.  Or
 * equivalently, repr == 2**16 * val, where we use "val" to refer to the
 * (imaginary) fractional representation of the true value.
 *
 * We pick a uint32_t here since it's convenient in some places to
 * double the representation size (i.e. multiplication and division use
 * 64-bit integer types), and a uint64_t is the largest type we're
 * certain is available.
 */
typedef uint32_t fxp_t;
#define FXP_INIT_INT(x) ((x) << 16)
#define FXP_INIT_PERCENT(pct) (((pct) << 16) / 100)

/*
 * Amount of precision used in parsing and printing numbers.  The integer bound
 * is simply because the integer part of the number gets 16 bits, and so is
 * bounded by 65536.
 *
 * We use a lot of precision for the fractional part, even though most of it
 * gets rounded off; this lets us get exact values for the important special
 * case where the denominator is a small power of 2 (for instance,
 * 1/512 == 0.001953125 is exactly representable even with only 16 bits of
 * fractional precision).  We need to left-shift by 16 before dividing by
 * 10**precision, so we pick precision to be floor(log(2**48)) = 14.
 */
#define FXP_INTEGER_PART_DIGITS 5
#define FXP_FRACTIONAL_PART_DIGITS 14

/*
 * In addition to the integer and fractional parts of the number, we need to
 * include a null character and (possibly) a decimal point.
 */
#define FXP_BUF_SIZE (FXP_INTEGER_PART_DIGITS + FXP_FRACTIONAL_PART_DIGITS + 2)

static inline fxp_t
fxp_add(fxp_t a, fxp_t b) {
	return a + b;
}

static inline fxp_t
fxp_sub(fxp_t a, fxp_t b) {
	assert(a >= b);
	return a - b;
}

static inline fxp_t
fxp_mul(fxp_t a, fxp_t b) {
	uint64_t unshifted = (uint64_t)a * (uint64_t)b;
	/*
	 * Unshifted is (a.val * 2**16) * (b.val * 2**16)
	 * == (a.val * b.val) * 2**32, but we want
	 * (a.val * b.val) * 2 ** 16.
	 */
	return (uint32_t)(unshifted >> 16);
}

static inline fxp_t
fxp_div(fxp_t a, fxp_t b) {
	assert(b != 0);
	uint64_t unshifted = ((uint64_t)a << 32) / (uint64_t)b;
	/*
	 * Unshifted is (a.val * 2**16) * (2**32) / (b.val * 2**16)
	 * == (a.val / b.val) * (2 ** 32), which again corresponds to a right
	 * shift of 16.
	 */
	return (uint32_t)(unshifted >> 16);
}

static inline uint32_t
fxp_round_down(fxp_t a) {
	return a >> 16;
}

static inline uint32_t
fxp_round_nearest(fxp_t a) {
	uint32_t fractional_part = (a & ((1U << 16) - 1));
	uint32_t increment = (uint32_t)(fractional_part >= (1U << 15));
	return (a >> 16) + increment;
}

/*
 * Approximately computes x * frac, without the size limitations that would be
 * imposed by converting u to an fxp_t.
 */
static inline size_t
fxp_mul_frac(size_t x_orig, fxp_t frac) {
	assert(frac <= (1U << 16));
	/*
	 * Work around an over-enthusiastic warning about type limits below (on
	 * 32-bit platforms, a size_t is always less than 1ULL << 48).
	 */
	uint64_t x = (uint64_t)x_orig;
	/*
	 * If we can guarantee no overflow, multiply first before shifting, to
	 * preserve some precision.  Otherwise, shift first and then multiply.
	 * In the latter case, we only lose the low 16 bits of a 48-bit number,
	 * so we're still accurate to within 1/2**32.
	 */
	if (x < (1ULL << 48)) {
		return (size_t)((x * frac) >> 16);
	} else {
		return (size_t)((x >> 16) * (uint64_t)frac);
	}
}

/*
 * Returns true on error.  Otherwise, returns false and updates *ptr to point to
 * the first character not parsed (because it wasn't a digit).
 */
bool fxp_parse(fxp_t *a, const char *ptr, char **end);
void fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]);

#endif /* JEMALLOC_INTERNAL_FXP_H */
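Note: to make the repr == 2**16 * val convention concrete, here is a short illustrative sketch of the arithmetic. The values and function name are arbitrary; this is not part of the header:

/* Illustrative fixed-point arithmetic with fxp_t. */
static size_t
demo_fxp(void) {
	fxp_t quarter = FXP_INIT_PERCENT(25);	/* repr 0x4000, i.e. 0.25 */
	fxp_t three_halves = fxp_add(FXP_INIT_INT(1), FXP_INIT_PERCENT(50));

	/* 0.25 * 1.5 == 0.375, which rounds down to 0. */
	fxp_t product = fxp_mul(quarter, three_halves);
	assert(fxp_round_down(product) == 0);

	/* 25% of 4096 pages == 1024. */
	return fxp_mul_frac(4096, quarter);
}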
deps/jemalloc/include/jemalloc/internal/hash.h  (modified)

@@ -104,8 +104,8 @@ hash_x86_32(const void *key, int len, uint32_t seed) {
 	uint32_t k1 = 0;
 
 	switch (len & 3) {
-	case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH
-	case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH
+	case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH;
+	case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH;
 	case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
 		k1 *= c2; h1 ^= k1;
 	}

@@ -177,29 +177,29 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
 	uint32_t k4 = 0;
 
 	switch (len & 15) {
-	case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH
-	case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH
+	case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH;
+	case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH;
 	case 13: k4 ^= tail[12] << 0;
 		k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
-		JEMALLOC_FALLTHROUGH
-	case 12: k3 ^= tail[11] << 24; JEMALLOC_FALLTHROUGH
-	case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH
-	case 10: k3 ^= tail[9] << 8; JEMALLOC_FALLTHROUGH
+		JEMALLOC_FALLTHROUGH;
+	case 12: k3 ^= (uint32_t) tail[11] << 24; JEMALLOC_FALLTHROUGH;
+	case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH;
+	case 10: k3 ^= tail[9] << 8; JEMALLOC_FALLTHROUGH;
 	case  9: k3 ^= tail[8] << 0;
 		k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
-		JEMALLOC_FALLTHROUGH
-	case  8: k2 ^= tail[7] << 24; JEMALLOC_FALLTHROUGH
-	case  7: k2 ^= tail[6] << 16; JEMALLOC_FALLTHROUGH
-	case  6: k2 ^= tail[5] << 8; JEMALLOC_FALLTHROUGH
+		JEMALLOC_FALLTHROUGH;
+	case  8: k2 ^= (uint32_t) tail[7] << 24; JEMALLOC_FALLTHROUGH;
+	case  7: k2 ^= tail[6] << 16; JEMALLOC_FALLTHROUGH;
+	case  6: k2 ^= tail[5] << 8; JEMALLOC_FALLTHROUGH;
 	case  5: k2 ^= tail[4] << 0;
 		k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
-		JEMALLOC_FALLTHROUGH
-	case  4: k1 ^= tail[3] << 24; JEMALLOC_FALLTHROUGH
-	case  3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH
-	case  2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH
+		JEMALLOC_FALLTHROUGH;
+	case  4: k1 ^= (uint32_t) tail[3] << 24; JEMALLOC_FALLTHROUGH;
+	case  3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH;
+	case  2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH;
 	case  1: k1 ^= tail[0] << 0;
 		k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
-		JEMALLOC_FALLTHROUGH
+		break;
 	}
 }

@@ -261,24 +261,25 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
 	uint64_t k2 = 0;
 
 	switch (len & 15) {
-	case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH
-	case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH
-	case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH
-	case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH
-	case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH
-	case 10: k2 ^= ((uint64_t)(tail[9])) << 8; JEMALLOC_FALLTHROUGH
+	case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH;
+	case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH;
+	case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH;
+	case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH;
+	case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH;
+	case 10: k2 ^= ((uint64_t)(tail[9])) << 8; JEMALLOC_FALLTHROUGH;
 	case  9: k2 ^= ((uint64_t)(tail[8])) << 0;
 		k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
-		JEMALLOC_FALLTHROUGH
-	case  8: k1 ^= ((uint64_t)(tail[7])) << 56; JEMALLOC_FALLTHROUGH
-	case  7: k1 ^= ((uint64_t)(tail[6])) << 48; JEMALLOC_FALLTHROUGH
-	case  6: k1 ^= ((uint64_t)(tail[5])) << 40; JEMALLOC_FALLTHROUGH
-	case  5: k1 ^= ((uint64_t)(tail[4])) << 32; JEMALLOC_FALLTHROUGH
-	case  4: k1 ^= ((uint64_t)(tail[3])) << 24; JEMALLOC_FALLTHROUGH
-	case  3: k1 ^= ((uint64_t)(tail[2])) << 16; JEMALLOC_FALLTHROUGH
-	case  2: k1 ^= ((uint64_t)(tail[1])) << 8; JEMALLOC_FALLTHROUGH
+		JEMALLOC_FALLTHROUGH;
+	case  8: k1 ^= ((uint64_t)(tail[7])) << 56; JEMALLOC_FALLTHROUGH;
+	case  7: k1 ^= ((uint64_t)(tail[6])) << 48; JEMALLOC_FALLTHROUGH;
+	case  6: k1 ^= ((uint64_t)(tail[5])) << 40; JEMALLOC_FALLTHROUGH;
+	case  5: k1 ^= ((uint64_t)(tail[4])) << 32; JEMALLOC_FALLTHROUGH;
+	case  4: k1 ^= ((uint64_t)(tail[3])) << 24; JEMALLOC_FALLTHROUGH;
+	case  3: k1 ^= ((uint64_t)(tail[2])) << 16; JEMALLOC_FALLTHROUGH;
+	case  2: k1 ^= ((uint64_t)(tail[1])) << 8; JEMALLOC_FALLTHROUGH;
 	case  1: k1 ^= ((uint64_t)(tail[0])) << 0;
 		k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
+		break;
 	}
 }
deps/jemalloc/include/jemalloc/internal/hpa.h  (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_HPA_H
#define JEMALLOC_INTERNAL_HPA_H

#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/hpa_hooks.h"
#include "jemalloc/internal/hpa_opts.h"
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/psset.h"

typedef struct hpa_central_s hpa_central_t;
struct hpa_central_s {
	/*
	 * The mutex guarding most of the operations on the central data
	 * structure.
	 */
	malloc_mutex_t mtx;
	/*
	 * Guards expansion of eden.  We separate this from the regular mutex so
	 * that cheaper operations can still continue while we're doing the OS
	 * call.
	 */
	malloc_mutex_t grow_mtx;
	/*
	 * Either NULL (if empty), or some integer multiple of a
	 * hugepage-aligned number of hugepages.  We carve them off one at a
	 * time to satisfy new pageslab requests.
	 *
	 * Guarded by grow_mtx.
	 */
	void *eden;
	size_t eden_len;
	/* Source for metadata. */
	base_t *base;
	/* Number of grow operations done on this hpa_central_t. */
	uint64_t age_counter;
	/* The HPA hooks. */
	hpa_hooks_t hooks;
};

typedef struct hpa_shard_nonderived_stats_s hpa_shard_nonderived_stats_t;
struct hpa_shard_nonderived_stats_s {
	/*
	 * The number of times we've purged within a hugepage.
	 *
	 * Guarded by mtx.
	 */
	uint64_t npurge_passes;
	/*
	 * The number of individual purge calls we perform (which should always
	 * be bigger than npurge_passes, since each pass purges at least one
	 * extent within a hugepage).
	 *
	 * Guarded by mtx.
	 */
	uint64_t npurges;
	/*
	 * The number of times we've hugified a pageslab.
	 *
	 * Guarded by mtx.
	 */
	uint64_t nhugifies;
	/*
	 * The number of times we've dehugified a pageslab.
	 *
	 * Guarded by mtx.
	 */
	uint64_t ndehugifies;
};

/* Completely derived; only used by CTL. */
typedef struct hpa_shard_stats_s hpa_shard_stats_t;
struct hpa_shard_stats_s {
	psset_stats_t psset_stats;
	hpa_shard_nonderived_stats_t nonderived_stats;
};

typedef struct hpa_shard_s hpa_shard_t;
struct hpa_shard_s {
	/*
	 * pai must be the first member; we cast from a pointer to it to a
	 * pointer to the hpa_shard_t.
	 */
	pai_t pai;

	/* The central allocator we get our hugepages from. */
	hpa_central_t *central;
	/* Protects most of this shard's state. */
	malloc_mutex_t mtx;
	/*
	 * Guards the shard's access to the central allocator (preventing
	 * multiple threads operating on this shard from accessing the central
	 * allocator).
	 */
	malloc_mutex_t grow_mtx;
	/* The base metadata allocator. */
	base_t *base;
	/*
	 * This edata cache is the one we use when allocating a small extent
	 * from a pageslab.  The pageslab itself comes from the centralized
	 * allocator, and so will use its edata_cache.
	 */
	edata_cache_fast_t ecf;

	psset_t psset;

	/*
	 * How many grow operations have occurred.
	 *
	 * Guarded by grow_mtx.
	 */
	uint64_t age_counter;

	/* The arena ind we're associated with. */
	unsigned ind;

	/*
	 * Our emap.  This is just a cache of the emap pointer in the associated
	 * hpa_central.
	 */
	emap_t *emap;

	/* The configuration choices for this hpa shard. */
	hpa_shard_opts_t opts;

	/*
	 * How many pages have we started but not yet finished purging in this
	 * hpa shard.
	 */
	size_t npending_purge;

	/*
	 * Those stats which are copied directly into the CTL-centric hpa shard
	 * stats.
	 */
	hpa_shard_nonderived_stats_t stats;

	/*
	 * Last time we performed purge on this shard.
	 */
	nstime_t last_purge;
};

/*
 * Whether or not the HPA can be used given the current configuration.  This is
 * not necessarily a guarantee that it backs its allocations by hugepages, just
 * that it can function properly given the system it's running on.
 */
bool hpa_supported();
bool hpa_central_init(hpa_central_t *central, base_t *base,
    const hpa_hooks_t *hooks);
bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
    base_t *base, edata_cache_t *edata_cache, unsigned ind,
    const hpa_shard_opts_t *opts);

void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
    hpa_shard_stats_t *dst);

/*
 * Notify the shard that we won't use it for allocations much longer.  Due to
 * the possibility of races, we don't actually prevent allocations; just flush
 * and disable the embedded edata_cache_small.
 */
void hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard);

void hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
    bool deferral_allowed);
void hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard);

/*
 * We share the fork ordering with the PA and arena prefork handling; that's why
 * these are 3 and 4 rather than 0 and 1.
 */
void hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard);

#endif /* JEMALLOC_INTERNAL_HPA_H */
deps/jemalloc/include/jemalloc/internal/hpa_hooks.h  (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_HPA_HOOKS_H
#define JEMALLOC_INTERNAL_HPA_HOOKS_H

typedef struct hpa_hooks_s hpa_hooks_t;
struct hpa_hooks_s {
	void *(*map)(size_t size);
	void (*unmap)(void *ptr, size_t size);
	void (*purge)(void *ptr, size_t size);
	void (*hugify)(void *ptr, size_t size);
	void (*dehugify)(void *ptr, size_t size);
	void (*curtime)(nstime_t *r_time, bool first_reading);
	uint64_t (*ms_since)(nstime_t *r_time);
};

extern hpa_hooks_t hpa_hooks_default;

#endif /* JEMALLOC_INTERNAL_HPA_HOOKS_H */
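Note: for orientation, the first few hooks map naturally onto mmap/munmap/madvise on Linux. The sketch below is an illustration under that assumption; the function names and flag choices are not taken from jemalloc's hpa_hooks_default:

/* Illustrative only: plausible Linux-style hook implementations. */
#include <stddef.h>
#include <sys/mman.h>

static void *
demo_map(size_t size) {
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return (p == MAP_FAILED) ? NULL : p;
}

static void
demo_unmap(void *ptr, size_t size) {
	munmap(ptr, size);
}

static void
demo_purge(void *ptr, size_t size) {
	/* Give the physical pages back while keeping the mapping. */
	madvise(ptr, size, MADV_DONTNEED);
}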
deps/jemalloc/include/jemalloc/internal/hpa_opts.h  (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_HPA_OPTS_H
#define JEMALLOC_INTERNAL_HPA_OPTS_H

#include "jemalloc/internal/fxp.h"

/*
 * This file is morally part of hpa.h, but is split out for header-ordering
 * reasons.
 */

typedef struct hpa_shard_opts_s hpa_shard_opts_t;
struct hpa_shard_opts_s {
	/*
	 * The largest size we'll allocate out of the shard.  For those
	 * allocations refused, the caller (in practice, the PA module) will
	 * fall back to the more general (for now) PAC, which can always handle
	 * any allocation request.
	 */
	size_t slab_max_alloc;

	/*
	 * When the number of active bytes in a hugepage is >=
	 * hugification_threshold, we force hugify it.
	 */
	size_t hugification_threshold;

	/*
	 * The HPA purges whenever the number of pages exceeds dirty_mult *
	 * active_pages.  This may be set to (fxp_t)-1 to disable purging.
	 */
	fxp_t dirty_mult;

	/*
	 * Whether or not the PAI methods are allowed to defer work to a
	 * subsequent hpa_shard_do_deferred_work() call.  Practically, this
	 * corresponds to background threads being enabled.  We track this
	 * ourselves for encapsulation purposes.
	 */
	bool deferral_allowed;

	/*
	 * How long a hugepage has to be a hugification candidate before it will
	 * actually get hugified.
	 */
	uint64_t hugify_delay_ms;

	/*
	 * Minimum amount of time between purges.
	 */
	uint64_t min_purge_interval_ms;
};

#define HPA_SHARD_OPTS_DEFAULT {					\
	/* slab_max_alloc */						\
	64 * 1024,							\
	/* hugification_threshold */					\
	HUGEPAGE * 95 / 100,						\
	/* dirty_mult */						\
	FXP_INIT_PERCENT(25),						\
	/*								\
	 * deferral_allowed						\
	 *								\
	 * Really, this is always set by the arena during creation	\
	 * or by an hpa_shard_set_deferral_allowed call, so the value	\
	 * we put here doesn't matter.					\
	 */								\
	false,								\
	/* hugify_delay_ms */						\
	10 * 1000,							\
	/* min_purge_interval_ms */					\
	5 * 1000							\
}

#endif /* JEMALLOC_INTERNAL_HPA_OPTS_H */
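Note: since dirty_mult is an fxp_t ratio, the purge trigger described in its comment can be phrased with fxp_mul_frac from fxp.h. The helper below is a hypothetical illustration only; the name and the exact decision point are assumptions, and the real logic lives in the HPA implementation:

/* Hypothetical illustration of the purge trigger described above. */
static bool
demo_should_purge(size_t ndirty_pages, size_t nactive_pages,
    const hpa_shard_opts_t *opts) {
	if (opts->dirty_mult == (fxp_t)-1) {
		return false;	/* Purging disabled. */
	}
	/* Purge once dirty pages exceed dirty_mult * active pages. */
	return ndirty_pages > fxp_mul_frac(nactive_pages, opts->dirty_mult);
}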
deps/jemalloc/include/jemalloc/internal/hpdata.h  (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_HPDATA_H
#define JEMALLOC_INTERNAL_HPDATA_H

#include "jemalloc/internal/fb.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/typed_list.h"

/*
 * The metadata representation we use for extents in hugepages.  While the PAC
 * uses the edata_t to represent both active and inactive extents, the HP only
 * uses the edata_t for active ones; instead, inactive extent state is tracked
 * within hpdata associated with the enclosing hugepage-sized, hugepage-aligned
 * region of virtual address space.
 *
 * An hpdata need not be "truly" backed by a hugepage (which is not necessarily
 * an observable property of any given region of address space).  It's just
 * hugepage-sized and hugepage-aligned; it's *potentially* huge.
 */
typedef struct hpdata_s hpdata_t;
ph_structs(hpdata_age_heap, hpdata_t);
struct hpdata_s {
	/*
	 * We likewise follow the edata convention of mangling names and forcing
	 * the use of accessors -- this lets us add some consistency checks on
	 * access.
	 */

	/*
	 * The address of the hugepage in question.  This can't be named h_addr,
	 * since that conflicts with a macro defined in Windows headers.
	 */
	void *h_address;
	/* Its age (measured in psset operations). */
	uint64_t h_age;
	/* Whether or not we think the hugepage is mapped that way by the OS. */
	bool h_huge;

	/*
	 * For some properties, we keep parallel sets of bools; h_foo_allowed
	 * and h_in_psset_foo_container.  This is a decoupling mechanism to
	 * avoid bothering the hpa (which manages policies) from the psset
	 * (which is the mechanism used to enforce those policies).  This allows
	 * all the container management logic to live in one place, without the
	 * HPA needing to know or care how that happens.
	 */

	/*
	 * Whether or not the hpdata is allowed to be used to serve allocations,
	 * and whether or not the psset is currently tracking it as such.
	 */
	bool h_alloc_allowed;
	bool h_in_psset_alloc_container;

	/*
	 * The same, but with purging.  There's no corresponding
	 * h_in_psset_purge_container, because the psset (currently) always
	 * removes hpdatas from their containers during updates (to implement
	 * LRU for purging).
	 */
	bool h_purge_allowed;

	/* And with hugifying. */
	bool h_hugify_allowed;
	/* When we became a hugification candidate. */
	nstime_t h_time_hugify_allowed;
	bool h_in_psset_hugify_container;

	/* Whether or not a purge or hugify is currently happening. */
	bool h_mid_purge;
	bool h_mid_hugify;

	/*
	 * Whether or not the hpdata is being updated in the psset (i.e. if
	 * there has been a psset_update_begin call issued without a matching
	 * psset_update_end call).  Eventually this will expand to other types
	 * of updates.
	 */
	bool h_updating;

	/* Whether or not the hpdata is in a psset. */
	bool h_in_psset;

	union {
		/* When nonempty (and also nonfull), used by the psset bins. */
		hpdata_age_heap_link_t age_link;
		/*
		 * When empty (or not corresponding to any hugepage), list
		 * linkage.
		 */
		ql_elm(hpdata_t) ql_link_empty;
	};

	/*
	 * Linkage for the psset to track candidates for purging and hugifying.
	 */
	ql_elm(hpdata_t) ql_link_purge;
	ql_elm(hpdata_t) ql_link_hugify;

	/* The length of the largest contiguous sequence of inactive pages. */
	size_t h_longest_free_range;

	/* Number of active pages. */
	size_t h_nactive;

	/* A bitmap with bits set in the active pages. */
	fb_group_t active_pages[FB_NGROUPS(HUGEPAGE_PAGES)];

	/*
	 * Number of dirty or active pages, and a bitmap tracking them.  One
	 * way to think of this is as which pages are dirty from the OS's
	 * perspective.
	 */
	size_t h_ntouched;

	/* The touched pages (using the same definition as above). */
	fb_group_t touched_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
};

TYPED_LIST(hpdata_empty_list, hpdata_t, ql_link_empty)
TYPED_LIST(hpdata_purge_list, hpdata_t, ql_link_purge)
TYPED_LIST(hpdata_hugify_list, hpdata_t, ql_link_hugify)

ph_proto(, hpdata_age_heap, hpdata_t);

static inline void *
hpdata_addr_get(const hpdata_t *hpdata) {
	return hpdata->h_address;
}

static inline void
hpdata_addr_set(hpdata_t *hpdata, void *addr) {
	assert(HUGEPAGE_ADDR2BASE(addr) == addr);
	hpdata->h_address = addr;
}

static inline uint64_t
hpdata_age_get(const hpdata_t *hpdata) {
	return hpdata->h_age;
}

static inline void
hpdata_age_set(hpdata_t *hpdata, uint64_t age) {
	hpdata->h_age = age;
}

static inline bool
hpdata_huge_get(const hpdata_t *hpdata) {
	return hpdata->h_huge;
}

static inline bool
hpdata_alloc_allowed_get(const hpdata_t *hpdata) {
	return hpdata->h_alloc_allowed;
}

static inline void
hpdata_alloc_allowed_set(hpdata_t *hpdata, bool alloc_allowed) {
	hpdata->h_alloc_allowed = alloc_allowed;
}

static inline bool
hpdata_in_psset_alloc_container_get(const hpdata_t *hpdata) {
	return hpdata->h_in_psset_alloc_container;
}

static inline void
hpdata_in_psset_alloc_container_set(hpdata_t *hpdata, bool in_container) {
	assert(in_container != hpdata->h_in_psset_alloc_container);
	hpdata->h_in_psset_alloc_container = in_container;
}

static inline bool
hpdata_purge_allowed_get(const hpdata_t *hpdata) {
	return hpdata->h_purge_allowed;
}

static inline void
hpdata_purge_allowed_set(hpdata_t *hpdata, bool purge_allowed) {
	assert(purge_allowed == false || !hpdata->h_mid_purge);
	hpdata->h_purge_allowed = purge_allowed;
}

static inline bool
hpdata_hugify_allowed_get(const hpdata_t *hpdata) {
	return hpdata->h_hugify_allowed;
}

static inline void
hpdata_allow_hugify(hpdata_t *hpdata, nstime_t now) {
	assert(!hpdata->h_mid_hugify);
	hpdata->h_hugify_allowed = true;
	hpdata->h_time_hugify_allowed = now;
}

static inline nstime_t
hpdata_time_hugify_allowed(hpdata_t *hpdata) {
	return hpdata->h_time_hugify_allowed;
}

static inline void
hpdata_disallow_hugify(hpdata_t *hpdata) {
	hpdata->h_hugify_allowed = false;
}

static inline bool
hpdata_in_psset_hugify_container_get(const hpdata_t *hpdata) {
	return hpdata->h_in_psset_hugify_container;
}

static inline void
hpdata_in_psset_hugify_container_set(hpdata_t *hpdata, bool in_container) {
	assert(in_container != hpdata->h_in_psset_hugify_container);
	hpdata->h_in_psset_hugify_container = in_container;
}

static inline bool
hpdata_mid_purge_get(const hpdata_t *hpdata) {
	return hpdata->h_mid_purge;
}

static inline void
hpdata_mid_purge_set(hpdata_t *hpdata, bool mid_purge) {
	assert(mid_purge != hpdata->h_mid_purge);
	hpdata->h_mid_purge = mid_purge;
}

static inline bool
hpdata_mid_hugify_get(const hpdata_t *hpdata) {
	return hpdata->h_mid_hugify;
}

static inline void
hpdata_mid_hugify_set(hpdata_t *hpdata, bool mid_hugify) {
	assert(mid_hugify != hpdata->h_mid_hugify);
	hpdata->h_mid_hugify = mid_hugify;
}

static inline bool
hpdata_changing_state_get(const hpdata_t *hpdata) {
	return hpdata->h_mid_purge || hpdata->h_mid_hugify;
}

static inline bool
hpdata_updating_get(const hpdata_t *hpdata) {
	return hpdata->h_updating;
}

static inline void
hpdata_updating_set(hpdata_t *hpdata, bool updating) {
	assert(updating != hpdata->h_updating);
	hpdata->h_updating = updating;
}

static inline bool
hpdata_in_psset_get(const hpdata_t *hpdata) {
	return hpdata->h_in_psset;
}

static inline void
hpdata_in_psset_set(hpdata_t *hpdata, bool in_psset) {
	assert(in_psset != hpdata->h_in_psset);
	hpdata->h_in_psset = in_psset;
}

static inline size_t
hpdata_longest_free_range_get(const hpdata_t *hpdata) {
	return hpdata->h_longest_free_range;
}

static inline void
hpdata_longest_free_range_set(hpdata_t *hpdata, size_t longest_free_range) {
	assert(longest_free_range <= HUGEPAGE_PAGES);
	hpdata->h_longest_free_range = longest_free_range;
}

static inline size_t
hpdata_nactive_get(hpdata_t *hpdata) {
	return hpdata->h_nactive;
}

static inline size_t
hpdata_ntouched_get(hpdata_t *hpdata) {
	return hpdata->h_ntouched;
}

static inline size_t
hpdata_ndirty_get(hpdata_t *hpdata) {
	return hpdata->h_ntouched - hpdata->h_nactive;
}

static inline size_t
hpdata_nretained_get(hpdata_t *hpdata) {
	return HUGEPAGE_PAGES - hpdata->h_ntouched;
}

static inline void
hpdata_assert_empty(hpdata_t *hpdata) {
	assert(fb_empty(hpdata->active_pages, HUGEPAGE_PAGES));
	assert(hpdata->h_nactive == 0);
}

/*
 * Only used in tests, and in hpdata_assert_consistent, below.  Verifies some
 * consistency properties of the hpdata (e.g. that cached counts of page stats
 * match computed ones).
 */
static inline bool
hpdata_consistent(hpdata_t *hpdata) {
	if (fb_urange_longest(hpdata->active_pages, HUGEPAGE_PAGES)
	    != hpdata_longest_free_range_get(hpdata)) {
		return false;
	}
	if (fb_scount(hpdata->active_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)
	    != hpdata->h_nactive) {
		return false;
	}
	if (fb_scount(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)
	    != hpdata->h_ntouched) {
		return false;
	}
	if (hpdata->h_ntouched < hpdata->h_nactive) {
		return false;
	}
	if (hpdata->h_huge && hpdata->h_ntouched != HUGEPAGE_PAGES) {
		return false;
	}
	if (hpdata_changing_state_get(hpdata)
	    && ((hpdata->h_purge_allowed) || hpdata->h_hugify_allowed)) {
		return false;
	}
	if (hpdata_hugify_allowed_get(hpdata)
	    != hpdata_in_psset_hugify_container_get(hpdata)) {
		return false;
	}
	return true;
}

static inline void
hpdata_assert_consistent(hpdata_t *hpdata) {
	assert(hpdata_consistent(hpdata));
}

static inline bool
hpdata_empty(hpdata_t *hpdata) {
	return hpdata->h_nactive == 0;
}

static inline bool
hpdata_full(hpdata_t *hpdata) {
	return hpdata->h_nactive == HUGEPAGE_PAGES;
}

void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age);

/*
 * Given an hpdata which can serve an allocation request, pick and reserve an
 * offset within that allocation.
 */
void *hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz);
void hpdata_unreserve(hpdata_t *hpdata, void *begin, size_t sz);

/*
 * The hpdata_purge_state_t allows grabbing the metadata required to purge
 * subranges of a hugepage while holding a lock, drop the lock during the actual
 * purging of them, and reacquire it to update the metadata again.
 */
typedef struct hpdata_purge_state_s hpdata_purge_state_t;
struct hpdata_purge_state_s {
	size_t npurged;
	size_t ndirty_to_purge;
	fb_group_t to_purge[FB_NGROUPS(HUGEPAGE_PAGES)];
	size_t next_purge_search_begin;
};

/*
 * Initializes purge state.  The access to hpdata must be externally
 * synchronized with other hpdata_* calls.
 *
 * You can tell whether or not a thread is purging or hugifying a given hpdata
 * via hpdata_changing_state_get(hpdata).  Racing hugification or purging
 * operations aren't allowed.
 *
 * Once you begin purging, you have to follow through and call hpdata_purge_next
 * until you're done, and then end.  Allocating out of an hpdata undergoing
 * purging is not allowed.
 *
 * Returns the number of dirty pages that will be purged.
 */
size_t hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state);

/*
 * If there are more extents to purge, sets *r_purge_addr and *r_purge_size to
 * the address and size of the next range to purge, and returns true.
 * Otherwise, returns false to indicate that we're done.
 *
 * This requires exclusive access to the purge state, but *not* to the hpdata.
 * In particular, unreserve calls are allowed while purging (i.e. you can dalloc
 * into one part of the hpdata while purging a different part).
 */
bool hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
    void **r_purge_addr, size_t *r_purge_size);

/*
 * Updates the hpdata metadata after all purging is done.  Needs external
 * synchronization.
 */
void hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state);

void hpdata_hugify(hpdata_t *hpdata);
void hpdata_dehugify(hpdata_t *hpdata);

#endif /* JEMALLOC_INTERNAL_HPDATA_H */
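Note: the purge_begin/next/end protocol described above can be driven roughly as follows. This is a sketch under the assumption that the caller holds and drops the relevant mutex where indicated; the function name and hook usage are illustrative, not jemalloc's actual purge path:

/* Hypothetical driver for the hpdata purge protocol described above. */
static void
demo_purge(hpdata_t *ps, const hpa_hooks_t *hooks) {
	hpdata_purge_state_t purge_state;

	/* ...shard mutex held... */
	hpdata_purge_begin(ps, &purge_state);
	/* ...mutex dropped: the OS calls below can be slow... */

	void *addr;
	size_t size;
	while (hpdata_purge_next(ps, &purge_state, &addr, &size)) {
		hooks->purge(addr, size);	/* e.g. MADV_DONTNEED */
	}

	/* ...mutex reacquired... */
	hpdata_purge_end(ps, &purge_state);
}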
deps/jemalloc/include/jemalloc/internal/inspect.h  (new file, 0 → 100644)

#ifndef JEMALLOC_INTERNAL_INSPECT_H
#define JEMALLOC_INTERNAL_INSPECT_H

/*
 * This module contains the heap introspection capabilities.  For now they are
 * exposed purely through mallctl APIs in the experimental namespace, but this
 * may change over time.
 */

/*
 * The following two structs are for experimental purposes.  See
 * experimental_utilization_query_ctl and
 * experimental_utilization_batch_query_ctl in src/ctl.c.
 */
typedef struct inspect_extent_util_stats_s inspect_extent_util_stats_t;
struct inspect_extent_util_stats_s {
	size_t nfree;
	size_t nregs;
	size_t size;
};

typedef struct inspect_extent_util_stats_verbose_s
    inspect_extent_util_stats_verbose_t;
struct inspect_extent_util_stats_verbose_s {
	void *slabcur_addr;
	size_t nfree;
	size_t nregs;
	size_t size;
	size_t bin_nfree;
	size_t bin_nregs;
};

void inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
    size_t *nfree, size_t *nregs, size_t *size);
void inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
    size_t *nfree, size_t *nregs, size_t *size,
    size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr);

#endif /* JEMALLOC_INTERNAL_INSPECT_H */
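Note: as the comment above says, these stats are reached through the experimental mallctl namespace rather than by calling the internal prototypes directly. A rough usage sketch follows, assuming the "experimental.utilization.query" mallctl name wired up in src/ctl.c and a three-field output layout; treat both as assumptions to verify against the jemalloc version in use:

/* Illustrative query of per-extent utilization stats for a live allocation. */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
demo_query_utilization(void *ptr) {
	/* Assumed output layout: nfree, nregs, size. */
	size_t out[3];
	size_t out_sz = sizeof(out);
	const void *in = ptr;

	if (mallctl("experimental.utilization.query", out, &out_sz,
	    &in, sizeof(in)) == 0) {
		printf("nfree=%zu nregs=%zu size=%zu\n", out[0], out[1], out[2]);
	}
}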
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h  (modified)

@@ -5,6 +5,7 @@
 #ifdef _WIN32
 #  include <windows.h>
 #  include "msvc_compat/windows_extra.h"
+#  include "msvc_compat/strings.h"
 #  ifdef _WIN64
 #    if LG_VADDR <= 32
 #      error Generate the headers using x64 vcargs

@@ -31,8 +32,12 @@
 #  include <sys/uio.h>
 #  endif
 #  include <pthread.h>
-#  ifdef __FreeBSD__
+#  if defined(__FreeBSD__) || defined(__DragonFly__)
 #    include <pthread_np.h>
+#    include <sched.h>
+#    if defined(__FreeBSD__)
+#      define cpu_set_t cpuset_t
+#    endif
 #  endif
 #  include <signal.h>
 #  ifdef JEMALLOC_OS_UNFAIR_LOCK

@@ -91,4 +96,13 @@ isblank(int c) {
 #endif
 #include <fcntl.h>
 
+/*
+ * The Win32 midl compiler has #define small char; we don't use midl, but
+ * "small" is a nice identifier to have available when talking about size
+ * classes.
+ */
+#ifdef small
+#  undef small
+#endif
+
 #endif /* JEMALLOC_INTERNAL_H */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in  (modified)

@@ -85,6 +85,12 @@
 /* Defined if pthread_setname_np(3) is available. */
 #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
 
+/* Defined if pthread_getname_np(3) is available. */
+#undef JEMALLOC_HAVE_PTHREAD_GETNAME_NP
+
+/* Defined if pthread_get_name_np(3) is available. */
+#undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP
+
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */

@@ -100,6 +106,11 @@
  */
 #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
 
+/*
+ * Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
+ */
+#undef JEMALLOC_HAVE_CLOCK_REALTIME
+
 /*
  * Defined if _malloc_thread_cleanup() exists.  At least in the case of
  * FreeBSD, pthread_key_create() allocates, which if used during malloc

@@ -162,6 +173,9 @@
 /* Support utrace(2)-based tracing. */
 #undef JEMALLOC_UTRACE
 
+/* Support utrace(2)-based tracing (label based signature). */
+#undef JEMALLOC_UTRACE_LABEL
+
 /* Support optional abort() on OOM. */
 #undef JEMALLOC_XMALLOC

@@ -177,6 +191,9 @@
 /* One page is 2^LG_PAGE bytes. */
 #undef LG_PAGE
 
+/* Maximum number of regions in a slab. */
+#undef CONFIG_LG_SLAB_MAXREGS
+
 /*
  * One huge page is 2^LG_HUGEPAGE bytes.  Note that this is defined even if the
  * system does not explicitly support huge pages; system calls that require

@@ -290,12 +307,41 @@
  */
 #undef JEMALLOC_MADVISE_DONTDUMP
 
+/*
+ * Defined if MADV_[NO]CORE is supported as an argument to madvise.
+ */
+#undef JEMALLOC_MADVISE_NOCORE
+
+/* Defined if mprotect(2) is available. */
+#undef JEMALLOC_HAVE_MPROTECT
+
 /*
  * Defined if transparent huge pages (THPs) are supported via the
  * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
  */
 #undef JEMALLOC_THP
 
+/* Defined if posix_madvise is available. */
+#undef JEMALLOC_HAVE_POSIX_MADVISE
+
+/*
+ * Method for purging unused pages using posix_madvise.
+ *
+ * posix_madvise(..., POSIX_MADV_DONTNEED)
+ */
+#undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED
+#undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS
+
+/*
+ * Defined if memcntl page admin call is supported
+ */
+#undef JEMALLOC_HAVE_MEMCNTL
+
+/*
+ * Defined if malloc_size is supported
+ */
+#undef JEMALLOC_HAVE_MALLOC_SIZE
+
 /* Define if operating system has alloca.h header. */
 #undef JEMALLOC_HAS_ALLOCA_H

@@ -363,4 +409,19 @@
 /* Performs additional safety checks when defined. */
 #undef JEMALLOC_OPT_SAFETY_CHECKS
 
+/* Is C++ support being built? */
+#undef JEMALLOC_ENABLE_CXX
+
+/* Performs additional size checks when defined. */
+#undef JEMALLOC_OPT_SIZE_CHECKS
+
+/* Allows sampled junk and stash for checking use-after-free when defined. */
+#undef JEMALLOC_UAF_DETECTION
+
+/* Darwin VM_MAKE_TAG support */
+#undef JEMALLOC_HAVE_VM_MAKE_TAG
+
+/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
+#undef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
+
 #endif /* JEMALLOC_INTERNAL_DEFS_H_ */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h  (modified)

@@ -2,7 +2,10 @@
 #define JEMALLOC_INTERNAL_EXTERNS_H
 
 #include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/hpa_opts.h"
+#include "jemalloc/internal/sec_opts.h"
 #include "jemalloc/internal/tsd_types.h"
+#include "jemalloc/internal/nstime.h"
 
 /* TSD checks this to set thread local slow state accordingly. */
 extern bool malloc_slow;

@@ -10,14 +13,30 @@ extern bool malloc_slow;
 /* Run-time options. */
 extern bool opt_abort;
 extern bool opt_abort_conf;
+extern bool opt_trust_madvise;
 extern bool opt_confirm_conf;
+extern bool opt_hpa;
+extern hpa_shard_opts_t opt_hpa_opts;
+extern sec_opts_t opt_hpa_sec_opts;
 extern const char *opt_junk;
 extern bool opt_junk_alloc;
 extern bool opt_junk_free;
+extern void (*junk_free_callback)(void *ptr, size_t size);
+extern void (*junk_alloc_callback)(void *ptr, size_t size);
 extern bool opt_utrace;
 extern bool opt_xmalloc;
+extern bool opt_experimental_infallible_new;
 extern bool opt_zero;
 extern unsigned opt_narenas;
+extern zero_realloc_action_t opt_zero_realloc_action;
+extern malloc_init_t malloc_init_state;
+extern const char *zero_realloc_mode_names[];
+extern atomic_zu_t zero_realloc_count;
+extern bool opt_cache_oblivious;
+
+/* Escape free-fastpath when ptr & mask == 0 (for sanitization purpose). */
+extern uintptr_t san_cache_bin_nonfast_mask;
 
 /* Number of CPUs. */
 extern unsigned ncpus;

@@ -41,17 +60,16 @@ void *bootstrap_calloc(size_t num, size_t size);
 void bootstrap_free(void *ptr);
 void arena_set(unsigned ind, arena_t *arena);
 unsigned narenas_total_get(void);
-arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
-arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
+arena_t *arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
 arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
-void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
+void arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena);
 void iarena_cleanup(tsd_t *tsd);
 void arena_cleanup(tsd_t *tsd);
-void arenas_tdata_cleanup(tsd_t *tsd);
+size_t batch_alloc(void **ptrs, size_t num, size_t size, int flags);
 void jemalloc_prefork(void);
 void jemalloc_postfork_parent(void);
 void jemalloc_postfork_child(void);
 bool malloc_initialized(void);
 void je_sdallocx_noflags(void *ptr, size_t size);
 void *malloc_default(size_t size);
 
 #endif /* JEMALLOC_INTERNAL_EXTERNS_H */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
...
...
@@ -10,7 +10,7 @@
* structs, externs, and inlines), and included each header file multiple times
* in this file, picking out the portion we want on each pass using the
* following #defines:
* JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data
* JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
* types.
* JEMALLOC_H_STRUCTS : Data structures.
* JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
...
...
@@ -40,8 +40,6 @@
/* TYPES */
/******************************************************************************/
#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/base_types.h"
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/prof_types.h"
...
...
@@ -50,11 +48,8 @@
/* STRUCTS */
/******************************************************************************/
#include "jemalloc/internal/arena_structs_a.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/base_structs.h"
#include "jemalloc/internal/prof_structs.h"
#include "jemalloc/internal/arena_structs
_b
.h"
#include "jemalloc/internal/arena_structs.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/background_thread_structs.h"
...
...
@@ -63,8 +58,6 @@
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/extent_externs.h"
#include "jemalloc/internal/base_externs.h"
#include "jemalloc/internal/arena_externs.h"
#include "jemalloc/internal/large_externs.h"
#include "jemalloc/internal/tcache_externs.h"
...
...
@@ -76,19 +69,16 @@
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
#include "jemalloc/internal/base_inlines.h"
/*
* Include portions of arena code interleaved with tcache code in order to
* resolve circular dependencies.
*/
#include "jemalloc/internal/prof_inlines_a.h"
#include "jemalloc/internal/arena_inlines_a.h"
#include "jemalloc/internal/extent_inlines.h"
#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
#include "jemalloc/internal/tcache_inlines.h"
#include "jemalloc/internal/arena_inlines_b.h"
#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
#include "jemalloc/internal/prof_inlines
_b
.h"
#include "jemalloc/internal/prof_inlines.h"
#include "jemalloc/internal/background_thread_inlines.h"
#endif
/* JEMALLOC_INTERNAL_INCLUDES_H */
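The interleaving described in the comment above (arena code mixed with tcache code to resolve circular dependencies) is the usual split-inline-header technique. A generic sketch under assumed names (foo/bar are illustrative, not jemalloc headers): the "a" inlines of one module avoid calling into the other, so the other's inlines can be included in between, and the "b" inlines may then call back freely.

/* Shared types. */
typedef struct bar_s bar_t;
typedef struct foo_s foo_t;
struct foo_s { int id; bar_t *peer; };
struct bar_s { foo_t *owner; };

/* "foo_inlines_a.h": helpers that never call into bar. */
static inline int foo_id(const foo_t *foo) { return foo->id; }

/* "bar_inlines.h": may use foo's "a" inlines. */
static inline int bar_owner_id(const bar_t *bar) { return foo_id(bar->owner); }

/* "foo_inlines_b.h": included last, so it may call back into bar. */
static inline int foo_peer_owner_id(const foo_t *foo) { return bar_owner_id(foo->peer); }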
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
...
...
@@ -56,31 +56,6 @@ percpu_arena_ind_limit(percpu_arena_mode_t mode) {
}
}
static inline arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
    arena_tdata_t *tdata;
    arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);

    if (unlikely(arenas_tdata == NULL)) {
        /* arenas_tdata hasn't been initialized yet. */
        return arena_tdata_get_hard(tsd, ind);
    }
    if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
        /*
         * ind is invalid, cache is old (too small), or tdata to be
         * initialized.
         */
        return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) : NULL);
    }

    tdata = &arenas_tdata[ind];
    if (likely(tdata != NULL) || !refresh_if_missing) {
        return tdata;
    }
    return arena_tdata_get_hard(tsd, ind);
}

static inline arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
    arena_t *ret;
...
...
@@ -90,36 +65,12 @@ arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
    ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
    if (unlikely(ret == NULL)) {
        if (init_if_missing) {
            ret = arena_init(tsdn, ind,
                (extent_hooks_t *)&extent_hooks_default);
            ret = arena_init(tsdn, ind, &arena_config_default);
        }
    }
    return ret;
}

static inline ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind) {
    arena_tdata_t *tdata;

    tdata = arena_tdata_get(tsd, ind, true);
    if (unlikely(tdata == NULL)) {
        return NULL;
    }
    return &tdata->decay_ticker;
}

JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
    assert(binind < SC_NBINS);
    return &tcache->bins_small[binind];
}

JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
    assert(binind >= SC_NBINS && binind < nhbins);
    return &tcache->bins_large[binind - SC_NBINS];
}

JEMALLOC_ALWAYS_INLINE bool
tcache_available(tsd_t *tsd) {
    /*
...
...
@@ -129,9 +80,9 @@ tcache_available(tsd_t *tsd) {
*/
    if (likely(tsd_tcache_enabled_get(tsd))) {
        /* Associated arena == NULL implies tcache init in progress. */
        assert(tsd_tcachep_get(tsd)->arena == NULL ||
            tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail != NULL);
        if (config_debug && tsd_tcache_slowp_get(tsd)->arena != NULL) {
            tcache_assert_initialized(tsd_tcachep_get(tsd));
        }
        return true;
    }
...
...
@@ -147,28 +98,25 @@ tcache_get(tsd_t *tsd) {
    return tsd_tcachep_get(tsd);
}

JEMALLOC_ALWAYS_INLINE tcache_slow_t *
tcache_slow_get(tsd_t *tsd) {
    if (!tcache_available(tsd)) {
        return NULL;
    }
    return tsd_tcache_slowp_get(tsd);
}

static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
    /* arena is the current context. Reentry from a0 is not allowed. */
    assert(arena != arena_get(tsd_tsdn(tsd), 0, false));

    bool fast = tsd_fast(tsd);
    assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
    ++*tsd_reentrancy_levelp_get(tsd);
    if (fast) {
        /* Prepare slow path for reentrancy. */
        tsd_slow_update(tsd);
        assert(tsd_state_get(tsd) == tsd_state_nominal_slow);
    }
    tsd_pre_reentrancy_raw(tsd);
}

static inline void
post_reentrancy(tsd_t *tsd) {
    int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
    assert(*reentrancy_level > 0);
    if (--*reentrancy_level == 0) {
        tsd_slow_update(tsd);
    }
    tsd_post_reentrancy_raw(tsd);
}

#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h
#ifndef JEMALLOC_INTERNAL_INLINES_B_H
#define JEMALLOC_INTERNAL_INLINES_B_H
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/extent.h"
static inline void
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
    assert(have_percpu_arena);
    arena_t *oldarena = tsd_arena_get(tsd);
    assert(oldarena != NULL);
    unsigned oldind = arena_ind_get(oldarena);

    if (oldind != cpu) {
        unsigned newind = cpu;
        arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
        assert(newarena != NULL);

        /* Set new arena/tcache associations. */
        arena_migrate(tsd, oldarena, newarena);
        tcache_t *tcache = tcache_get(tsd);
        if (tcache != NULL) {
            tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
            tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow,
                tcache, newarena);
        }
    }
}

/* Choose an arena based on a per-thread value. */
static inline arena_t *
...
...
@@ -22,18 +46,19 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
        ret = arena_choose_hard(tsd, internal);
        assert(ret);
        if (tcache_available(tsd)) {
            tcache_t *tcache = tcache_get(tsd);
            if (tcache->arena != NULL) {
                /* See comments in tcache_data_init().*/
                assert(tcache->arena ==
            tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
            tcache_t *tcache = tsd_tcachep_get(tsd);
            if (tcache_slow->arena != NULL) {
                /* See comments in tsd_tcache_data_init().*/
                assert(tcache_slow->arena ==
                    arena_get(tsd_tsdn(tsd), 0, false));
                if (tcache->arena != ret) {
                if (tcache_slow->arena != ret) {
                    tcache_arena_reassociate(tsd_tsdn(tsd), tcache, ret);
                        tcache_slow, tcache, ret);
                }
            } else {
                tcache_arena_associate(tsd_tsdn(tsd), tcache, ret);
                tcache_arena_associate(tsd_tsdn(tsd), tcache_slow,
                    tcache, ret);
            }
        }
    }
...
...
@@ -75,13 +100,4 @@ arena_is_auto(arena_t *arena) {
    return (arena_ind_get(arena) < manual_arena_base);
}

JEMALLOC_ALWAYS_INLINE extent_t *
iealloc(tsdn_t *tsdn, const void *ptr) {
    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
    return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
        (uintptr_t)ptr, true);
}

#endif /* JEMALLOC_INTERNAL_INLINES_B_H */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
...
...
@@ -3,7 +3,9 @@
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/log.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/thread_event.h"
#include "jemalloc/internal/witness.h"
/*
...
...
@@ -101,8 +103,8 @@ ivsalloc(tsdn_t *tsdn, const void *ptr) {
}
JEMALLOC_ALWAYS_INLINE void
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
    bool is_internal, bool slow_path) {
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    emap_alloc_ctx_t *alloc_ctx, bool is_internal, bool slow_path) {
    assert(ptr != NULL);
    assert(!is_internal || tcache == NULL);
    assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
...
...
@@ -125,7 +127,7 @@ idalloc(tsd_t *tsd, void *ptr) {
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
    emap_alloc_ctx_t *alloc_ctx, bool slow_path) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);
    arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
...
...
@@ -219,25 +221,140 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
        newsize);
}

JEMALLOC_ALWAYS_INLINE void
fastpath_success_finish(tsd_t *tsd, uint64_t allocated_after,
    cache_bin_t *bin, void *ret) {
    thread_allocated_set(tsd, allocated_after);
    if (config_stats) {
        bin->tstats.nrequests++;
    }

    LOG("core.malloc.exit", "result: %p", ret);
}

JEMALLOC_ALWAYS_INLINE bool
malloc_initialized(void) {
    return (malloc_init_state == malloc_init_initialized);
}
/*
* malloc() fastpath. Included here so that we can inline it into operator new;
* function call overhead there is non-negligible as a fraction of total CPU in
* allocation-heavy C++ programs. We take the fallback alloc to allow malloc
* (which can return NULL) to differ in its behavior from operator new (which
* can't). It matches the signature of malloc / operator new so that we can
* tail-call the fallback allocator, allowing us to avoid setting up the call
* frame in the common case.
*
* Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit
* tcache. If either of these is false, we tail-call to the slowpath,
* malloc_default(). Tail-calling is used to avoid any caller-saved
* registers.
*
* fastpath supports ticker and profiling, both of which will also
* tail-call to the slowpath if they fire.
*/
JEMALLOC_ALWAYS_INLINE void *
imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
    LOG("core.malloc.entry", "size: %zu", size);
    if (tsd_get_allocates() && unlikely(!malloc_initialized())) {
        return fallback_alloc(size);
    }

    tsd_t *tsd = tsd_get(false);
    if (unlikely((size > SC_LOOKUP_MAXCLASS) || tsd == NULL)) {
        return fallback_alloc(size);
    }
/*
* The code below till the branch checking the next_event threshold may
* execute before malloc_init(), in which case the threshold is 0 to
* trigger slow path and initialization.
*
* Note that when uninitialized, only the fast-path variants of the sz /
* tsd facilities may be called.
*/
    szind_t ind;
/*
* The thread_allocated counter in tsd serves as a general purpose
* accumulator for bytes of allocation to trigger different types of
* events. usize is always needed to advance thread_allocated, though
* it's not always needed in the core allocation logic.
*/
    size_t usize;
    sz_size2index_usize_fastpath(size, &ind, &usize);
/* Fast path relies on size being a bin. */
    assert(ind < SC_NBINS);
    assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS) &&
        (size <= SC_SMALL_MAXCLASS));

    uint64_t allocated, threshold;
    te_malloc_fastpath_ctx(tsd, &allocated, &threshold);
    uint64_t allocated_after = allocated + usize;
/*
* The ind and usize might be uninitialized (or partially) before
* malloc_init(). The assertions check for: 1) full correctness (usize
* & ind) when initialized; and 2) guaranteed slow-path (threshold == 0)
* when !initialized.
*/
    if (!malloc_initialized()) {
        assert(threshold == 0);
    } else {
        assert(ind == sz_size2index(size));
        assert(usize > 0 && usize == sz_index2size(ind));
    }
/*
* Check for events and tsd non-nominal (fast_threshold will be set to
* 0) in a single branch.
*/
    if (unlikely(allocated_after >= threshold)) {
        return fallback_alloc(size);
    }
    assert(tsd_fast(tsd));

    tcache_t *tcache = tsd_tcachep_get(tsd);
    assert(tcache == tcache_get(tsd));
    cache_bin_t *bin = &tcache->bins[ind];
    bool tcache_success;
    void *ret;
/*
* We split up the code this way so that redundant low-water
* computation doesn't happen on the (more common) case in which we
* don't touch the low water mark. The compiler won't do this
* duplication on its own.
*/
    ret = cache_bin_alloc_easy(bin, &tcache_success);
    if (tcache_success) {
        fastpath_success_finish(tsd, allocated_after, bin, ret);
        return ret;
    }
    ret = cache_bin_alloc(bin, &tcache_success);
    if (tcache_success) {
        fastpath_success_finish(tsd, allocated_after, bin, ret);
        return ret;
    }

    return fallback_alloc(size);
}
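As a rough sketch of the wiring the fastpath comment above describes (the real public definitions live in jemalloc.c, not in this header, so treat this as illustrative), the exported allocation entry point simply hands imalloc_fastpath() its slow path so that every bail-out becomes a tail call:

/* Illustrative only: malloc() tail-calls malloc_default() whenever the
 * inlined fast path cannot serve the request. */
void *
je_malloc(size_t size) {
    return imalloc_fastpath(size, &malloc_default);
}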
JEMALLOC_ALWAYS_INLINE int
iget_defrag_hint(tsdn_t *tsdn, void* ptr) {
    int defrag = 0;
    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
    szind_t szind;
    bool is_slab;
    rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
        true, &szind, &is_slab);
    if (likely(is_slab)) {
    emap_alloc_ctx_t alloc_ctx;
    emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
    if (likely(alloc_ctx.slab)) {
        /* Small allocation. */
        extent_t *slab = iealloc(tsdn, ptr);
        arena_t *arena = extent_arena_get(slab);
        szind_t binind = extent_szind_get(slab);
        unsigned binshard = extent_binshard_get(slab);
        bin_t *bin = &arena->bins[binind].bin_shards[binshard];
        edata_t *slab = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
        arena_t *arena = arena_get_from_edata(slab);
        szind_t binind = edata_szind_get(slab);
        unsigned binshard = edata_binshard_get(slab);
        bin_t *bin = arena_get_bin(arena, binind, binshard);
        malloc_mutex_lock(tsdn, &bin->lock);
        arena_dalloc_bin_locked_info_t info;
        arena_dalloc_bin_locked_begin(&info, binind);
        /* Don't bother moving allocations from the slab currently used for new allocations */
        if (slab != bin->slabcur) {
            int free_in_slab = extent_nfree_get(slab);
            int free_in_slab = edata_nfree_get(slab);
            if (free_in_slab) {
                const bin_info_t *bin_info = &bin_infos[binind];
                /* Find number of non-full slabs and the number of regs in them */
...
...
@@ -245,14 +362,14 @@ iget_defrag_hint(tsdn_t *tsdn, void* ptr) {
                size_t curregs = 0;
                /* Run on all bin shards (usually just one) */
                for (uint32_t i = 0; i < bin_info->n_shards; i++) {
                    bin_t *bb = &arena->bins[binind].bin_shards[i];
                    bin_t *bb = arena_get_bin(arena, binind, i);
                    curslabs += bb->stats.nonfull_slabs;
                    /* Deduct the regs in full slabs (they're not part of the game) */
                    unsigned long full_slabs = bb->stats.curslabs - bb->stats.nonfull_slabs;
                    curregs += bb->stats.curregs - full_slabs * bin_info->nregs;
                    if (bb->slabcur) {
                        /* Remove slabcur from the overall utilization (not a candidate to move from) */
                        curregs -= bin_info->nregs - extent_nfree_get(bb->slabcur);
                        curregs -= bin_info->nregs - edata_nfree_get(bb->slabcur);
                        curslabs -= 1;
                    }
                }
...
...
@@ -265,6 +382,7 @@ iget_defrag_hint(tsdn_t *tsdn, void* ptr) {
                defrag = (bin_info->nregs - free_in_slab) * curslabs <=
                    curregs + curregs / 8;
            }
        }
        arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
        malloc_mutex_unlock(tsdn, &bin->lock);
    }
    return defrag;
...
...
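To make the defragmentation threshold in iget_defrag_hint() above concrete, a worked example with invented numbers: suppose bin_info->nregs = 64, the candidate slab has free_in_slab = 40 (so 24 regions are still live), and the bin's other non-full, non-current slabs total curslabs = 10 with curregs = 300 live regions. Then (64 - 40) * 10 = 240, while curregs + curregs / 8 = 300 + 37 = 337, so the hint is 1: the slab is no fuller than the average non-full slab (300 / 10 = 30 live regions) plus roughly 12.5% slack, and its contents can plausibly be absorbed elsewhere. If the slab instead had only 8 free regions, (64 - 8) * 10 = 560 exceeds 337 and the hint would be 0.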
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
...
...
@@ -4,7 +4,11 @@
#ifdef JEMALLOC_DEBUG
# define JEMALLOC_ALWAYS_INLINE static inline
#else
# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
# ifdef _MSC_VER
# define JEMALLOC_ALWAYS_INLINE static __forceinline
# else
# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
# endif
#endif
#ifdef _MSC_VER
# define inline _inline
...
...
@@ -40,13 +44,6 @@
#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__
#if (defined(__GNUC__) || defined(__GNUG__)) && !defined(__clang__) \
&& defined(JEMALLOC_HAVE_ATTR) && (__GNUC__ >= 7)
#define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough);
#else
#define JEMALLOC_FALLTHROUGH /* falls through */
#endif
/* Diagnostic suppression macros */
#if defined(_MSC_VER) && !defined(__clang__)
# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push))
...
...
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
...
...
@@ -3,15 +3,31 @@
#include "jemalloc/internal/quantum.h"
/* Page size index type. */
typedef unsigned pszind_t;

/* Size class index type. */
typedef unsigned szind_t;

/* Processor / core id type. */
typedef int malloc_cpuid_t;

/* When realloc(non-null-ptr, 0) is called, what happens? */
enum zero_realloc_action_e {
    /* Realloc(ptr, 0) is free(ptr); return malloc(0); */
    zero_realloc_action_alloc = 0,
    /* Realloc(ptr, 0) is free(ptr); */
    zero_realloc_action_free = 1,
    /* Realloc(ptr, 0) aborts. */
    zero_realloc_action_abort = 2
};
typedef enum zero_realloc_action_e zero_realloc_action_t;
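For illustration, a minimal sketch (handle_realloc_zero is hypothetical; jemalloc's real dispatch lives in jemalloc.c) of how each zero_realloc_action_t value maps to the behavior the comments above describe:

#include <stdlib.h>

/* Hypothetical illustration of the three modes. */
static void *
handle_realloc_zero(void *ptr, zero_realloc_action_t action) {
    switch (action) {
    case zero_realloc_action_alloc:
        free(ptr);
        return malloc(0);  /* free the old pointer, hand back a minimal allocation */
    case zero_realloc_action_free:
        free(ptr);
        return NULL;       /* free the old pointer, return NULL */
    case zero_realloc_action_abort:
    default:
        abort();           /* treat realloc(ptr, 0) as a programming error */
    }
}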
/* Signature of write callback. */
typedef void (write_cb_t)(void *, const char *);

enum malloc_init_e {
    malloc_init_uninitialized = 3,
    malloc_init_a0_initialized = 2,
    malloc_init_recursible = 1,
    malloc_init_initialized = 0 /* Common case --> jnz. */
};
typedef enum malloc_init_e malloc_init_t;
/*
* Flags bits:
*
...
...
deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in
...
...
@@ -4,8 +4,14 @@
#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"
#ifdef JEMALLOC_UTRACE
#if defined(JEMALLOC_UTRACE) || defined(JEMALLOC_UTRACE_LABEL)
#include <sys/ktrace.h>
# if defined(JEMALLOC_UTRACE)
# define UTRACE_CALL(p, l) utrace(p, l)
# else
# define UTRACE_CALL(p, l) utrace("jemalloc_process", p, l)
# define JEMALLOC_UTRACE
# endif
#endif
#define JEMALLOC_NO_DEMANGLE
...
...
@@ -180,6 +186,35 @@ static const bool config_opt_safety_checks =
#endif
;
/*
* Extra debugging of sized deallocations too onerous to be included in the
* general safety checks.
*/
static const bool config_opt_size_checks =
#if defined(JEMALLOC_OPT_SIZE_CHECKS) || defined(JEMALLOC_DEBUG)
true
#else
false
#endif
;
static const bool config_uaf_detection =
#if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG)
true
#else
false
#endif
;
/* Whether or not the C++ extensions are enabled. */
static const bool config_enable_cxx =
#ifdef JEMALLOC_ENABLE_CXX
true
#else
false
#endif
;
#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
...
...
@@ -209,5 +244,20 @@ static const bool have_background_thread =
false
#endif
;
static const bool config_high_res_timer =
#ifdef JEMALLOC_HAVE_CLOCK_REALTIME
true
#else
false
#endif
;
static const bool have_memcntl =
#ifdef JEMALLOC_HAVE_MEMCNTL
true
#else
false
#endif
;
#endif /* JEMALLOC_PREAMBLE_H */