ruanhaishen / redis · Commit 5268379e
Authored Oct 06, 2015 by antirez
Jemalloc updated to 4.0.3.
Parent: 589c41e4
136 changed files
deps/jemalloc/include/jemalloc/internal/bitmap.h
@@ -3,6 +3,7 @@
 /* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
 #define LG_BITMAP_MAXBITS	LG_RUN_MAXREGS
+#define BITMAP_MAXBITS		(ZU(1) << LG_BITMAP_MAXBITS)
 
 typedef struct bitmap_level_s bitmap_level_t;
 typedef struct bitmap_info_s bitmap_info_t;
@@ -14,6 +15,51 @@ typedef unsigned long bitmap_t;
 #define BITMAP_GROUP_NBITS		(ZU(1) << LG_BITMAP_GROUP_NBITS)
 #define BITMAP_GROUP_NBITS_MASK	(BITMAP_GROUP_NBITS-1)
 
+/* Number of groups required to store a given number of bits. */
+#define BITMAP_BITS2GROUPS(nbits)					\
+	((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
+
+/*
+ * Number of groups required at a particular level for a given number of bits.
+ */
+#define BITMAP_GROUPS_L0(nbits)						\
+	BITMAP_BITS2GROUPS(nbits)
+#define BITMAP_GROUPS_L1(nbits)						\
+	BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
+#define BITMAP_GROUPS_L2(nbits)						\
+	BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
+#define BITMAP_GROUPS_L3(nbits)						\
+	BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(	\
+	    BITMAP_BITS2GROUPS((nbits)))))
+
+/*
+ * Assuming the number of levels, number of groups required for a given number
+ * of bits.
+ */
+#define BITMAP_GROUPS_1_LEVEL(nbits)					\
+	BITMAP_GROUPS_L0(nbits)
+#define BITMAP_GROUPS_2_LEVEL(nbits)					\
+	(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
+#define BITMAP_GROUPS_3_LEVEL(nbits)					\
+	(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
+#define BITMAP_GROUPS_4_LEVEL(nbits)					\
+	(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
+
+/*
+ * Maximum number of groups required to support LG_BITMAP_MAXBITS.
+ */
+#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
+#  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
+#  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
+#  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
+#  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
+#else
+#  error "Unsupported bitmap size"
+#endif
+
 /* Maximum number of levels possible. */
 #define BITMAP_MAX_LEVELS						\
     (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP)				\
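A quick standalone illustration of the group arithmetic above (not part of the commit; it assumes the common 64-bit group, i.e. LG_BITMAP_GROUP_NBITS == 6): a 512-bit bitmap needs 8 leaf groups plus 1 summary group, which is exactly what BITMAP_GROUPS_2_LEVEL() sums.

/* Sketch of BITMAP_BITS2GROUPS()/BITMAP_GROUPS_2_LEVEL() with hypothetical
 * 64-bit groups; values here are illustrative, not taken from jemalloc. */
#include <stdio.h>

#define LG_GROUP_NBITS 6				/* 64 bits per group */
#define GROUP_NBITS_MASK ((1U << LG_GROUP_NBITS) - 1)
#define BITS2GROUPS(nbits) (((nbits) + GROUP_NBITS_MASK) >> LG_GROUP_NBITS)

int
main(void)
{
	unsigned nbits = 512;
	unsigned l0 = BITS2GROUPS(nbits);	/* 8 leaf groups */
	unsigned l1 = BITS2GROUPS(l0);		/* 1 summary group */

	printf("l0=%u l1=%u total=%u\n", l0, l1, l0 + l1);	/* 8 1 9 */
	return (0);
}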
@@ -93,7 +139,7 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 	bitmap_t g;
 
 	assert(bit < binfo->nbits);
-	assert(bitmap_get(bitmap, binfo, bit) == false);
+	assert(!bitmap_get(bitmap, binfo, bit));
 	goff = bit >> LG_BITMAP_GROUP_NBITS;
 	gp = &bitmap[goff];
 	g = *gp;
@@ -126,15 +172,15 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
 	bitmap_t g;
 	unsigned i;
 
-	assert(bitmap_full(bitmap, binfo) == false);
+	assert(!bitmap_full(bitmap, binfo));
 
 	i = binfo->nlevels - 1;
 	g = bitmap[binfo->levels[i].group_offset];
-	bit = ffsl(g) - 1;
+	bit = jemalloc_ffsl(g) - 1;
 	while (i > 0) {
 		i--;
 		g = bitmap[binfo->levels[i].group_offset + bit];
-		bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1);
+		bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1);
 	}
 
 	bitmap_set(bitmap, binfo, bit);
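For reference, the jemalloc_ffsl(g) - 1 idiom above extracts the index of the lowest set bit of a group during the tree walk; a minimal standalone sketch using POSIX ffs() (the int-sized relative of the wrapper used here):

/* "Index of lowest set bit" step, as used by bitmap_sfu(); ffs() returns the
 * 1-based position of the least significant set bit, or 0 if none is set. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int
main(void)
{
	unsigned g = 0xB0U;		/* bits 4, 5 and 7 set */
	int bit = ffs((int)g) - 1;	/* 1-based, so subtract 1 -> 4 */

	printf("first set bit: %d\n", bit);
	return (0);
}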
@@ -158,7 +204,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 	assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
 	g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
 	*gp = g;
-	assert(bitmap_get(bitmap, binfo, bit) == false);
+	assert(!bitmap_get(bitmap, binfo, bit));
 	/* Propagate group state transitions up the tree. */
 	if (propagate) {
 		unsigned i;
@@ -172,7 +218,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 		    == 0);
 		g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
 		*gp = g;
-		if (propagate == false)
+		if (!propagate)
 			break;
 	}
 }
deps/jemalloc/include/jemalloc/internal/chunk.h
@@ -5,7 +5,7 @@
  * Size and alignment of memory chunks that are allocated by the OS's virtual
  * memory system.
  */
-#define LG_CHUNK_DEFAULT	22
+#define LG_CHUNK_DEFAULT	21
 
 /* Return the chunk address for allocation address a. */
 #define CHUNK_ADDR2BASE(a)						\
@@ -19,6 +19,16 @@
 #define CHUNK_CEILING(s)						\
 	(((s) + chunksize_mask) & ~chunksize_mask)
 
+#define CHUNK_HOOKS_INITIALIZER {					\
+    NULL,								\
+    NULL,								\
+    NULL,								\
+    NULL,								\
+    NULL,								\
+    NULL,								\
+    NULL								\
+}
+
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
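The chunk address arithmetic above relies on chunksize being a power of two; a standalone sketch (with a made-up 4 MiB chunk size, not jemalloc's configured value) of the base/ceiling masking that CHUNK_ADDR2BASE() and CHUNK_CEILING() perform:

/* Power-of-two chunk masking sketch; the 4 MiB value is illustrative only. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	size_t chunksize = (size_t)1 << 22;		/* 4 MiB */
	size_t chunksize_mask = chunksize - 1;
	uintptr_t a = 0x7f12345678abU;			/* arbitrary address */

	uintptr_t base = a & ~(uintptr_t)chunksize_mask;	/* CHUNK_ADDR2BASE(a) */
	size_t s = 5 * 1024 * 1024;				/* 5 MiB request */
	size_t ceil = (s + chunksize_mask) & ~chunksize_mask;	/* CHUNK_CEILING(s) */

	printf("base=%#lx ceil=%zu\n", (unsigned long)base, ceil);	/* ceil = 8 MiB */
	return (0);
}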
@@ -30,23 +40,36 @@
 extern size_t		opt_lg_chunk;
 extern const char	*opt_dss;
 
-/* Protects stats_chunks; currently not used for any other purpose. */
-extern malloc_mutex_t	chunks_mtx;
-/* Chunk statistics. */
-extern chunk_stats_t	stats_chunks;
-
-extern rtree_t		*chunks_rtree;
+extern rtree_t		chunks_rtree;
 
 extern size_t		chunksize;
 extern size_t		chunksize_mask; /* (chunksize - 1). */
 extern size_t		chunk_npages;
-extern size_t		map_bias; /* Number of arena chunk header pages. */
-extern size_t		arena_maxclass; /* Max size class for arenas. */
 
-void	*chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
-    dss_prec_t dss_prec);
-void	chunk_unmap(void *chunk, size_t size);
-void	chunk_dealloc(void *chunk, size_t size, bool unmap);
+extern const chunk_hooks_t	chunk_hooks_default;
+
+chunk_hooks_t	chunk_hooks_get(arena_t *arena);
+chunk_hooks_t	chunk_hooks_set(arena_t *arena,
+    const chunk_hooks_t *chunk_hooks);
+
+bool	chunk_register(const void *chunk, const extent_node_t *node);
+void	chunk_deregister(const void *chunk, const extent_node_t *node);
+void	*chunk_alloc_base(size_t size);
+void	*chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool dalloc_node);
+void	*chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
+void	chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool committed);
+void	chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool zeroed, bool committed);
+void	chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool committed);
+bool	chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
+    size_t length);
+bool	chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, size_t offset, size_t length);
 bool	chunk_boot(void);
 void	chunk_prefork(void);
 void	chunk_postfork_parent(void);
@@ -56,6 +79,19 @@ void chunk_postfork_child(void);
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
+#ifndef JEMALLOC_ENABLE_INLINE
+extent_node_t	*chunk_lookup(const void *chunk, bool dependent);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
+JEMALLOC_INLINE extent_node_t *
+chunk_lookup(const void *ptr, bool dependent)
+{
+
+	return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent));
+}
+#endif
+
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
deps/jemalloc/include/jemalloc/internal/chunk_dss.h
@@ -23,7 +23,8 @@ extern const char *dss_prec_names[];
 
 dss_prec_t	chunk_dss_prec_get(void);
 bool	chunk_dss_prec_set(dss_prec_t dss_prec);
-void	*chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
+void	*chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool *commit);
 bool	chunk_in_dss(void *chunk);
 bool	chunk_dss_boot(void);
 void	chunk_dss_prefork(void);
deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
@@ -9,10 +9,9 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-bool	pages_purge(void *addr, size_t length);
-
-void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
-bool	chunk_dealloc_mmap(void *chunk, size_t size);
+void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero,
+    bool *commit);
+bool	chunk_dalloc_mmap(void *chunk, size_t size);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
deps/jemalloc/include/jemalloc/internal/ckh.h
@@ -66,13 +66,13 @@ struct ckh_s {
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-bool	ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+bool	ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
     ckh_keycomp_t *keycomp);
-void	ckh_delete(ckh_t *ckh);
+void	ckh_delete(tsd_t *tsd, ckh_t *ckh);
 size_t	ckh_count(ckh_t *ckh);
 bool	ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
-bool	ckh_insert(ckh_t *ckh, const void *key, const void *data);
-bool	ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
+bool	ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
+bool	ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
     void **data);
 bool	ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data);
 void	ckh_string_hash(const void *key, size_t r_hash[2]);
deps/jemalloc/include/jemalloc/internal/ctl.h
@@ -34,6 +34,7 @@ struct ctl_arena_stats_s {
 	bool			initialized;
 	unsigned		nthreads;
 	const char		*dss;
+	ssize_t			lg_dirty_mult;
 	size_t			pactive;
 	size_t			pdirty;
 	arena_stats_t		astats;
@@ -46,22 +47,15 @@ struct ctl_arena_stats_s {
 	malloc_bin_stats_t	bstats[NBINS];
 	malloc_large_stats_t	*lstats;	/* nlclasses elements. */
+	malloc_huge_stats_t	*hstats;	/* nhclasses elements. */
 };
 
 struct ctl_stats_s {
 	size_t			allocated;
 	size_t			active;
+	size_t			metadata;
+	size_t			resident;
 	size_t			mapped;
-	struct {
-		size_t		current;	/* stats_chunks.curchunks */
-		uint64_t	total;		/* stats_chunks.nchunks */
-		size_t		high;		/* stats_chunks.highchunks */
-	}			chunks;
-	struct {
-		size_t		allocated;	/* huge_allocated */
-		uint64_t	nmalloc;	/* huge_nmalloc */
-		uint64_t	ndalloc;	/* huge_ndalloc */
-	}			huge;
 	unsigned		narenas;
 	ctl_arena_stats_t	*arenas;	/* (narenas + 1) elements. */
 };
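These ctl structures back the mallctl() namespace; a hedged sketch of reading the corresponding public statistics through that interface (assuming the program is linked against jemalloc):

/* Read the aggregate stats that ctl_stats_s tracks, via mallctl(). */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t allocated, active, sz = sizeof(size_t);
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);

	mallctl("epoch", &epoch, &esz, &epoch, esz);	/* refresh cached stats */
	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
	mallctl("stats.active", &active, &sz, NULL, 0);
	printf("allocated=%zu active=%zu\n", allocated, active);
	return (0);
}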
deps/jemalloc/include/jemalloc/internal/extent.h
@@ -7,25 +7,53 @@ typedef struct extent_node_s extent_node_t;
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
-/* Tree of extents. */
+/* Tree of extents.  Use accessor functions for en_* fields. */
 struct extent_node_s {
-	/* Linkage for the size/address-ordered tree. */
-	rb_node(extent_node_t)	link_szad;
+	/* Arena from which this extent came, if any. */
+	arena_t			*en_arena;
 
-	/* Linkage for the address-ordered tree. */
-	rb_node(extent_node_t)	link_ad;
+	/* Pointer to the extent that this tree node is responsible for. */
+	void			*en_addr;
 
-	/* Profile counters, used for huge objects. */
-	prof_ctx_t		*prof_ctx;
+	/* Total region size. */
+	size_t			en_size;
 
-	/* Pointer to the extent that this tree node is responsible for. */
-	void			*addr;
+	/*
+	 * The zeroed flag is used by chunk recycling code to track whether
+	 * memory is zero-filled.
+	 */
+	bool			en_zeroed;
 
-	/* Total region size. */
-	size_t			size;
+	/*
+	 * True if physical memory is committed to the extent, whether
+	 * explicitly or implicitly as on a system that overcommits and
+	 * satisfies physical memory needs on demand via soft page faults.
+	 */
+	bool			en_committed;
 
-	/* True if zero-filled; used by chunk recycling code. */
-	bool			zeroed;
+	/*
+	 * The achunk flag is used to validate that huge allocation lookups
+	 * don't return arena chunks.
+	 */
+	bool			en_achunk;
+
+	/* Profile counters, used for huge objects. */
+	prof_tctx_t		*en_prof_tctx;
+
+	/* Linkage for arena's runs_dirty and chunks_cache rings. */
+	arena_runs_dirty_link_t	rd;
+	qr(extent_node_t)	cc_link;
+
+	union {
+		/* Linkage for the size/address-ordered tree. */
+		rb_node(extent_node_t)	szad_link;
+
+		/* Linkage for arena's huge and node_cache lists. */
+		ql_elm(extent_node_t)	ql_link;
+	};
+
+	/* Linkage for the address-ordered tree. */
+	rb_node(extent_node_t)	ad_link;
 };
 typedef rb_tree(extent_node_t) extent_tree_t;
@@ -41,6 +69,171 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
+#ifndef JEMALLOC_ENABLE_INLINE
+arena_t	*extent_node_arena_get(const extent_node_t *node);
+void	*extent_node_addr_get(const extent_node_t *node);
+size_t	extent_node_size_get(const extent_node_t *node);
+bool	extent_node_zeroed_get(const extent_node_t *node);
+bool	extent_node_committed_get(const extent_node_t *node);
+bool	extent_node_achunk_get(const extent_node_t *node);
+prof_tctx_t	*extent_node_prof_tctx_get(const extent_node_t *node);
+void	extent_node_arena_set(extent_node_t *node, arena_t *arena);
+void	extent_node_addr_set(extent_node_t *node, void *addr);
+void	extent_node_size_set(extent_node_t *node, size_t size);
+void	extent_node_zeroed_set(extent_node_t *node, bool zeroed);
+void	extent_node_committed_set(extent_node_t *node, bool committed);
+void	extent_node_achunk_set(extent_node_t *node, bool achunk);
+void	extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
+void	extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
+    size_t size, bool zeroed, bool committed);
+void	extent_node_dirty_linkage_init(extent_node_t *node);
+void	extent_node_dirty_insert(extent_node_t *node,
+    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
+void	extent_node_dirty_remove(extent_node_t *node);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
+JEMALLOC_INLINE arena_t *
+extent_node_arena_get(const extent_node_t *node)
+{
+
+	return (node->en_arena);
+}
+
+JEMALLOC_INLINE void *
+extent_node_addr_get(const extent_node_t *node)
+{
+
+	return (node->en_addr);
+}
+
+JEMALLOC_INLINE size_t
+extent_node_size_get(const extent_node_t *node)
+{
+
+	return (node->en_size);
+}
+
+JEMALLOC_INLINE bool
+extent_node_zeroed_get(const extent_node_t *node)
+{
+
+	return (node->en_zeroed);
+}
+
+JEMALLOC_INLINE bool
+extent_node_committed_get(const extent_node_t *node)
+{
+
+	assert(!node->en_achunk);
+	return (node->en_committed);
+}
+
+JEMALLOC_INLINE bool
+extent_node_achunk_get(const extent_node_t *node)
+{
+
+	return (node->en_achunk);
+}
+
+JEMALLOC_INLINE prof_tctx_t *
+extent_node_prof_tctx_get(const extent_node_t *node)
+{
+
+	return (node->en_prof_tctx);
+}
+
+JEMALLOC_INLINE void
+extent_node_arena_set(extent_node_t *node, arena_t *arena)
+{
+
+	node->en_arena = arena;
+}
+
+JEMALLOC_INLINE void
+extent_node_addr_set(extent_node_t *node, void *addr)
+{
+
+	node->en_addr = addr;
+}
+
+JEMALLOC_INLINE void
+extent_node_size_set(extent_node_t *node, size_t size)
+{
+
+	node->en_size = size;
+}
+
+JEMALLOC_INLINE void
+extent_node_zeroed_set(extent_node_t *node, bool zeroed)
+{
+
+	node->en_zeroed = zeroed;
+}
+
+JEMALLOC_INLINE void
+extent_node_committed_set(extent_node_t *node, bool committed)
+{
+
+	node->en_committed = committed;
+}
+
+JEMALLOC_INLINE void
+extent_node_achunk_set(extent_node_t *node, bool achunk)
+{
+
+	node->en_achunk = achunk;
+}
+
+JEMALLOC_INLINE void
+extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
+{
+
+	node->en_prof_tctx = tctx;
+}
+
+JEMALLOC_INLINE void
+extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
+    bool zeroed, bool committed)
+{
+
+	extent_node_arena_set(node, arena);
+	extent_node_addr_set(node, addr);
+	extent_node_size_set(node, size);
+	extent_node_zeroed_set(node, zeroed);
+	extent_node_committed_set(node, committed);
+	extent_node_achunk_set(node, false);
+	if (config_prof)
+		extent_node_prof_tctx_set(node, NULL);
+}
+
+JEMALLOC_INLINE void
+extent_node_dirty_linkage_init(extent_node_t *node)
+{
+
+	qr_new(&node->rd, rd_link);
+	qr_new(node, cc_link);
+}
+
+JEMALLOC_INLINE void
+extent_node_dirty_insert(extent_node_t *node,
+    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
+{
+
+	qr_meld(runs_dirty, &node->rd, rd_link);
+	qr_meld(chunks_dirty, node, cc_link);
+}
+
+JEMALLOC_INLINE void
+extent_node_dirty_remove(extent_node_t *node)
+{
+
+	qr_remove(&node->rd, rd_link);
+	qr_remove(node, cc_link);
+}
+
+#endif
 
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
deps/jemalloc/include/jemalloc/internal/hash.h
@@ -35,13 +35,14 @@ JEMALLOC_INLINE uint32_t
 hash_rotl_32(uint32_t x, int8_t r)
 {
 
-	return (x << r) | (x >> (32 - r));
+	return ((x << r) | (x >> (32 - r)));
 }
 
 JEMALLOC_INLINE uint64_t
 hash_rotl_64(uint64_t x, int8_t r)
 {
 
-	return (x << r) | (x >> (64 - r));
+	return ((x << r) | (x >> (64 - r)));
 }
 
 JEMALLOC_INLINE uint32_t
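The only change above is the extra set of parentheses around the rotate expression; the rotation itself is the usual bitwise rotate-left. A standalone sketch:

/* 32-bit rotate-left, mirroring hash_rotl_32(); r must be in 1..31. */
#include <stdint.h>
#include <stdio.h>

static uint32_t
rotl32(uint32_t x, int8_t r)
{

	return ((x << r) | (x >> (32 - r)));
}

int
main(void)
{
	printf("%#x\n", rotl32(0x80000001U, 1));	/* prints 0x3 */
	return (0);
}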
@@ -76,9 +77,9 @@ hash_fmix_64(uint64_t k)
 {
 
 	k ^= k >> 33;
-	k *= QU(0xff51afd7ed558ccdLLU);
+	k *= KQU(0xff51afd7ed558ccd);
 	k ^= k >> 33;
-	k *= QU(0xc4ceb9fe1a85ec53LLU);
+	k *= KQU(0xc4ceb9fe1a85ec53);
 	k ^= k >> 33;
 
 	return (k);
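The QU(...LLU) to KQU(...) change only swaps the constant-suffix macro; the function itself is MurmurHash3's 64-bit finalizer. A standalone sketch with plain ULL literals:

/* MurmurHash3 fmix64 finalizer, written with plain ULL constants instead of
 * jemalloc's KQU() macro. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
fmix64(uint64_t k)
{

	k ^= k >> 33;
	k *= 0xff51afd7ed558ccdULL;
	k ^= k >> 33;
	k *= 0xc4ceb9fe1a85ec53ULL;
	k ^= k >> 33;
	return (k);
}

int
main(void)
{
	printf("%" PRIx64 "\n", fmix64(1));
	return (0);
}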
@@ -247,8 +248,8 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
 	uint64_t h1 = seed;
 	uint64_t h2 = seed;
 
-	const uint64_t c1 = QU(0x87c37b91114253d5LLU);
-	const uint64_t c2 = QU(0x4cf5ad432745937fLLU);
+	const uint64_t c1 = KQU(0x87c37b91114253d5);
+	const uint64_t c2 = KQU(0x4cf5ad432745937f);
 
 	/* body */
 	{
deps/jemalloc/include/jemalloc/internal/huge.h
@@ -9,34 +9,24 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-/* Huge allocation statistics. */
-extern uint64_t		huge_nmalloc;
-extern uint64_t		huge_ndalloc;
-extern size_t		huge_allocated;
-
-/* Protects chunk-related data structures. */
-extern malloc_mutex_t	huge_mtx;
-
-void	*huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
-void	*huge_palloc(size_t size, size_t alignment, bool zero,
-    dss_prec_t dss_prec);
-bool	huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
-    size_t extra);
-void	*huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
+void	*huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+    tcache_t *tcache);
+void	*huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+    bool zero, tcache_t *tcache);
+bool	huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero);
+void	*huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
+    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
 #ifdef JEMALLOC_JET
 typedef void (huge_dalloc_junk_t)(void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
 #endif
-void	huge_dalloc(void *ptr, bool unmap);
+void	huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+arena_t	*huge_aalloc(const void *ptr);
 size_t	huge_salloc(const void *ptr);
-dss_prec_t	huge_dss_prec_get(arena_t *arena);
-prof_ctx_t	*huge_prof_ctx_get(const void *ptr);
-void	huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
-bool	huge_boot(void);
-void	huge_prefork(void);
-void	huge_postfork_parent(void);
-void	huge_postfork_child(void);
+prof_tctx_t	*huge_prof_tctx_get(const void *ptr);
+void	huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
+void	huge_prof_tctx_reset(const void *ptr);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# define ENOENT ERROR_PATH_NOT_FOUND
# define EINVAL ERROR_BAD_ARGUMENTS
# define EAGAIN ERROR_OUTOFMEMORY
# define EPERM ERROR_WRITE_FAULT
# define EFAULT ERROR_INVALID_ADDRESS
# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
# undef ERANGE
# define ERANGE ERROR_INVALID_DATA
#else
# include <sys/param.h>
# include <sys/mman.h>
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# include <sys/uio.h>
# include <pthread.h>
# include <errno.h>
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
# pragma warning(disable: 4996)
#else
# include <unistd.h>
#endif
#include <fcntl.h>
#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif
#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
@@ -85,7 +28,7 @@ static const bool config_debug =
     false
 #endif
     ;
-static const bool config_dss =
+static const bool have_dss =
 #ifdef JEMALLOC_DSS
     true
 #else
@@ -127,8 +70,8 @@ static const bool config_prof_libunwind =
     false
 #endif
     ;
-static const bool config_mremap =
-#ifdef JEMALLOC_MREMAP
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
     true
 #else
     false
@@ -190,6 +133,17 @@ static const bool config_ivsalloc =
false
#endif
;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
true
#else
false
#endif
;
#ifdef JEMALLOC_C11ATOMICS
#include <stdatomic.h>
#endif
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
...
...
@@ -229,20 +183,48 @@ static const bool config_ivsalloc =
#include "jemalloc/internal/jemalloc_internal_macros.h"
/* Size class index type. */
typedef unsigned szind_t;
/*
* Flags bits:
*
* a: arena
* t: tcache
* 0: unused
* z: zero
* n: alignment
*
* aaaaaaaa aaaatttt tttttttt 0znnnnnn
*/
#define MALLOCX_ARENA_MASK ((int)~0xfffff)
#define MALLOCX_ARENA_MAX 0xffe
#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU)
#define MALLOCX_TCACHE_MAX 0xffd
#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
(ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags) \
(MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags) \
((bool)(flags & MALLOCX_ZERO))
#define MALLOCX_TCACHE_GET(flags) \
(((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
#define MALLOCX_ARENA_GET(flags) \
(((unsigned)(((unsigned)flags) >> 20)) - 1)
/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)
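As the bit diagram above shows, the low 6 bits of a mallocx()-style flags word hold lg(alignment); a hedged sketch of decoding that field with plain shifts (the public API wraps the encoding in MALLOCX_LG_ALIGN()):

/* Decode the alignment field of a flags word laid out as documented above
 * (low 6 bits = lg alignment); illustrative only. */
#include <stddef.h>
#include <stdio.h>

#define LG_ALIGN_MASK ((int)0x3f)

int
main(void)
{
	int flags = 6;				/* lg(64) in the low bits */
	size_t alignment = (size_t)1 << (flags & LG_ALIGN_MASK);

	printf("alignment=%zu\n", alignment);	/* 64 */
	return (0);
}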
 /*
- * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
  * classes).
  */
 #ifndef LG_QUANTUM
 #  if (defined(__i386__) || defined(_M_IX86))
-#    define LG_QUANTUM		3
+#    define LG_QUANTUM		4
 #  endif
 #  ifdef __ia64__
 #    define LG_QUANTUM		4
@@ -250,11 +232,11 @@ static const bool config_ivsalloc =
 #  ifdef __alpha__
 #    define LG_QUANTUM		4
 #  endif
-#  ifdef __sparc64__
+#  if (defined(__sparc64__) || defined(__sparcv9))
 #    define LG_QUANTUM		4
 #  endif
 #  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
-#    define LG_QUANTUM		3
+#    define LG_QUANTUM		4
 #  endif
 #  ifdef __arm__
 #    define LG_QUANTUM		3
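2^LG_QUANTUM is the minimum allocation alignment, and rounding a request up to that quantum is simple mask arithmetic. A standalone sketch assuming LG_QUANTUM of 4 (the 16-byte x86-64 value above):

/* Quantum rounding sketch, mirroring jemalloc's QUANTUM_CEILING()-style
 * arithmetic; the LG_QUANTUM value here is an assumption for illustration. */
#include <stddef.h>
#include <stdio.h>

#define LG_QUANTUM	4
#define QUANTUM		((size_t)1 << LG_QUANTUM)
#define QUANTUM_MASK	(QUANTUM - 1)
#define QUANTUM_CEILING(a) (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

int
main(void)
{
	printf("%zu\n", QUANTUM_CEILING((size_t)21));	/* 32 */
	return (0);
}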
@@ -268,6 +250,9 @@ static const bool config_ivsalloc =
# ifdef __mips__
# define LG_QUANTUM 3
# endif
# ifdef __or1k__
# define LG_QUANTUM 3
# endif
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
...
...
@@ -280,8 +265,12 @@ static const bool config_ivsalloc =
 #  ifdef __tile__
 #    define LG_QUANTUM		4
 #  endif
+#  ifdef __le32__
+#    define LG_QUANTUM		4
+#  endif
 #  ifndef LG_QUANTUM
-#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
+#    error "Unknown minimum alignment for architecture; specify via "
+	 "--with-lg-quantum"
 #  endif
 #endif
@@ -321,12 +310,11 @@ static const bool config_ivsalloc =
 #define CACHELINE_CEILING(s)						\
 	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
 
-/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
+/* Page size.  LG_PAGE is determined by the configure script. */
 #ifdef PAGE_MASK
 #  undef PAGE_MASK
 #endif
-#define LG_PAGE		STATIC_PAGE_SHIFT
-#define PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
+#define PAGE		((size_t)(1U << LG_PAGE))
 #define PAGE_MASK	((size_t)(PAGE - 1))
 
 /* Return the smallest pagesize multiple that is >= s. */
@@ -345,7 +333,7 @@ static const bool config_ivsalloc =
 #define ALIGNMENT_CEILING(s, alignment)					\
 	(((s) + (alignment - 1)) & (-(alignment)))
 
-/* Declare a variable length array */
+/* Declare a variable-length array. */
 #if __STDC_VERSION__ < 199901L
 #  ifdef _MSC_VER
 #    include <malloc.h>
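The VARIABLE_ARRAY() macro changed below falls back to alloca() when C99 variable-length arrays are unavailable; a minimal standalone sketch of the two spellings (illustrative only):

/* The two VARIABLE_ARRAY() spellings: C99 VLA vs. alloca() fallback. */
#include <alloca.h>
#include <stdio.h>

static int
sum_vla(int n)
{
	int vals[n];				/* C99 variable-length array */
	int i, sum = 0;

	for (i = 0; i < n; i++)
		vals[i] = i;
	for (i = 0; i < n; i++)
		sum += vals[i];
	return (sum);
}

static int
sum_alloca(int n)
{
	int *vals = alloca(sizeof(int) * n);	/* pre-C99 fallback */
	int i, sum = 0;

	for (i = 0; i < n; i++)
		vals[i] = i;
	for (i = 0; i < n; i++)
		sum += vals[i];
	return (sum);
}

int
main(void)
{
	printf("%d %d\n", sum_vla(4), sum_alloca(4));	/* 6 6 */
	return (0);
}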
@@ -358,86 +346,12 @@ static const bool config_ivsalloc =
# endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
type *name = alloca(sizeof(type) * count)
#else
# define VARIABLE_ARRAY(type, name, count) type name[count]
#endif
#ifdef JEMALLOC_VALGRIND
/*
* The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
* so that when Valgrind reports errors, there are no extra stack frames
* in the backtraces.
*
* The size that is reported to valgrind must be consistent through a chain of
* malloc..realloc..realloc calls. Request size isn't recorded anywhere in
* jemalloc, so it is critical that all callers of these macros provide usize
* rather than request size. As a result, buffer overflow detection is
* technically weakened for the standard API, though it is generally accepted
* practice to consider any extra bytes reported by malloc_usable_size() as
* usable space.
*/
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
if (config_valgrind && opt_valgrind && cond) \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
old_rzsize, zero) do { \
if (config_valgrind && opt_valgrind) { \
size_t rzsize = p2rz(ptr); \
\
if (ptr == old_ptr) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
usize, rzsize); \
if (zero && old_usize < usize) { \
VALGRIND_MAKE_MEM_DEFINED( \
(void *)((uintptr_t)ptr + \
old_usize), usize - old_usize); \
} \
} else { \
if (old_ptr != NULL) { \
VALGRIND_FREELIKE_BLOCK(old_ptr, \
old_rzsize); \
} \
if (ptr != NULL) { \
size_t copy_size = (old_usize < usize) \
? old_usize : usize; \
size_t tail_size = usize - copy_size; \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
rzsize, false); \
if (copy_size > 0) { \
VALGRIND_MAKE_MEM_DEFINED(ptr, \
copy_size); \
} \
if (zero && tail_size > 0) { \
VALGRIND_MAKE_MEM_DEFINED( \
(void *)((uintptr_t)ptr + \
copy_size), tail_size); \
} \
} \
} \
} \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
if (config_valgrind && opt_valgrind) \
VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
type *name = alloca(sizeof(type) * (count))
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
do {} while (0)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
do {} while (0)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
old_rzsize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
...
...
@@ -452,9 +366,10 @@ static const bool config_ivsalloc =
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
...
...
@@ -464,6 +379,7 @@ static const bool config_ivsalloc =
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
...
...
@@ -472,68 +388,83 @@ static const bool config_ivsalloc =
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#define JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/extent.h"
#define JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
typedef struct {
uint64_t allocated;
uint64_t deallocated;
} thread_allocated_t;
/*
* The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
* argument.
*/
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0})
#include "jemalloc/internal/tsd.h"
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
extern bool opt_junk;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_valgrind;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;
extern bool in_valgrind;
/* Number of CPUs. */
extern unsigned ncpus;
/* Protects arenas initialization (arenas, arenas_total). */
extern malloc_mutex_t arenas_lock;
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
*
* arenas[0..narenas_auto) are used for automatic multiplexing of threads and
* arenas. arenas[narenas_auto..narenas_total) are only used if the application
* takes some action to create them and allocate from them.
* index2size_tab encodes the same information as could be computed (at
* unacceptable cost in some code paths) by index2size_compute().
*/
extern arena_t **arenas;
extern unsigned narenas_total;
extern unsigned narenas_auto; /* Read-only after initialization. */
extern size_t const index2size_tab[NSIZES];
/*
* size2index_tab is a compact lookup table that rounds request sizes up to
* size classes. In order to reduce cache footprint, the table is compressed,
* and all accesses are via size2index().
*/
extern uint8_t const size2index_tab[];
arena_t *a0get(void);
void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg);
arena_t *choose_arena_hard(void);
arena_t *arena_init(unsigned ind);
unsigned narenas_total_get(void);
arena_t *arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing);
arena_t *arena_choose_hard(tsd_t *tsd);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
unsigned arena_nbound(unsigned ind);
void thread_allocated_cleanup(tsd_t *tsd);
void thread_deallocated_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_cache_cleanup(tsd_t *tsd);
void narenas_cache_cleanup(tsd_t *tsd);
void arenas_cache_bypass_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
...
...
@@ -542,24 +473,26 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/tsd.h"
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
...
...
@@ -572,26 +505,158 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
szind_t size2index_compute(size_t size);
szind_t size2index_lookup(size_t size);
szind_t size2index(size_t size);
size_t index2size_compute(szind_t index);
size_t index2size_lookup(szind_t index);
size_t index2size(szind_t index);
size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
unsigned narenas_total_get(void);
arena_t *choose_arena(arena_t *arena);
arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
bool refresh_if_missing);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
* Map of pthread_self() --> arenas[???], used for selecting an arena to use
* for allocations.
*/
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
arenas_cleanup)
JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{
#if (NTBINS != 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
size_t lg_ceil = lg_floor(pow2_ceil(size));
return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
}
#endif
{
size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
(ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
: lg_floor((size<<1)-1);
size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
size_t grp = shift << LG_SIZE_CLASS_GROUP;
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta_inverse_mask = ZI(-1) << lg_delta;
size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
size_t index = NTBINS + grp + mod;
return (index);
}
}
JEMALLOC_ALWAYS_INLINE szind_t
size2index_lookup(size_t size)
{
assert(size <= LOOKUP_MAXCLASS);
{
size_t ret = ((size_t)(size2index_tab[(size-1) >>
LG_TINY_MIN]));
assert(ret == size2index_compute(size));
return (ret);
}
}
JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size)
{
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS))
return (size2index_lookup(size));
return (size2index_compute(size));
}
JEMALLOC_INLINE size_t
index2size_compute(szind_t index)
{
#if (NTBINS > 0)
if (index < NTBINS)
return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
#endif
{
size_t reduced_index = index - NTBINS;
size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
1);
size_t grp_size_mask = ~((!!grp)-1);
size_t grp_size = ((ZU(1) << (LG_QUANTUM +
(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
size_t shift = (grp == 0) ? 1 : grp;
size_t lg_delta = shift + (LG_QUANTUM-1);
size_t mod_size = (mod+1) << lg_delta;
size_t usize = grp_size + mod_size;
return (usize);
}
}
JEMALLOC_ALWAYS_INLINE size_t
index2size_lookup(szind_t index)
{
size_t ret = (size_t)index2size_tab[index];
assert(ret == index2size_compute(index));
return (ret);
}
JEMALLOC_ALWAYS_INLINE size_t
index2size(szind_t index)
{
assert(index < NSIZES);
return (index2size_lookup(index));
}
JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{
#if (NTBINS > 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
size_t lg_ceil = lg_floor(pow2_ceil(size));
return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
(ZU(1) << lg_ceil));
}
#endif
{
size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
(ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
: lg_floor((size<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (size + delta_mask) & ~delta_mask;
return (usize);
}
}
JEMALLOC_ALWAYS_INLINE size_t
s2u_lookup(size_t size)
{
size_t ret = index2size_lookup(size2index_lookup(size));
assert(ret == s2u_compute(size));
return (ret);
}
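To make the size-class arithmetic above concrete: with LG_QUANTUM = 4 and four classes per doubling (LG_SIZE_CLASS_GROUP = 2), a 100-byte request lands in the 112-byte class, because classes between 64 and 128 bytes are spaced 16 bytes apart. A hedged, standalone sketch of that s2u_compute()-style rounding (constants assume the common configuration, not any particular build):

/* Round a request up to its size class: 4 classes per power-of-two group,
 * quantum-spaced in the smallest groups. */
#include <stddef.h>
#include <stdio.h>

static size_t
round_to_class(size_t size)
{
	size_t x = 0, v = (size << 1) - 1;	/* x = floor(lg(2*size - 1)) */
	size_t lg_delta, delta, delta_mask;

	while (v >>= 1)
		x++;
	/* Class spacing is 1/4 of the enclosing power of two, floored at 16. */
	lg_delta = (x < 2 + 4 + 1) ? 4 : x - 2 - 1;
	delta = (size_t)1 << lg_delta;
	delta_mask = delta - 1;
	return ((size + delta_mask) & ~delta_mask);
}

int
main(void)
{
	printf("%zu %zu %zu\n", round_to_class(100), round_to_class(129),
	    round_to_class(4000));	/* 112 160 4096 */
	return (0);
}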
/*
* Compute usable size that would result from allocating an object with the
...
...
@@ -601,11 +666,10 @@ JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{
if (size <= SMALL_MAXCLASS)
return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
if (size <= arena_maxclass)
return (PAGE_CEILING(size));
return (CHUNK_CEILING(size));
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS))
return (s2u_lookup(size));
return (s2u_compute(size));
}
/*
...
...
@@ -619,13 +683,15 @@ sa2u(size_t size, size_t alignment)
assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
/* Try for a small size class. */
if (size <= SMALL_MAXCLASS && alignment < PAGE) {
 		/*
 		 * Round size up to the nearest multiple of alignment.
 		 *
-		 * This done, we can take advantage of the fact that for each small
-		 * size class, every object is aligned at the smallest power of two
-		 * that is non-zero in the base two representation of the size.  For
-		 * example:
+		 * This done, we can take advantage of the fact that for each
+		 * small size class, every object is aligned at the smallest
+		 * power of two that is non-zero in the base two representation
+		 * of the size.  For example:
 		 *
 		 *   Size |   Base 2 | Minimum alignment
 		 *   -----+----------+------------------
@@ -633,94 +699,112 @@ sa2u(size_t size, size_t alignment)
* 144 | 10100000 | 32
* 192 | 11000000 | 64
*/
usize = ALIGNMENT_CEILING(size, alignment);
/*
* (usize < size) protects against the combination of maximal
* alignment and size greater than maximal alignment.
*/
if (usize < size) {
/* size_t overflow. */
return (0);
usize = s2u(ALIGNMENT_CEILING(size, alignment));
if (usize < LARGE_MINCLASS)
return (usize);
}
if (usize <= arena_maxclass && alignment <= PAGE) {
if (usize <= SMALL_MAXCLASS)
return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
return (PAGE_CEILING(usize));
} else {
size_t run_size;
/* Try for a large size class. */
if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
 		/*
 		 * We can't achieve subpage alignment, so round up alignment
-		 * permanently; it makes later calculations simpler.
+		 * to the minimum that can actually be supported.
 		 */
alignment = PAGE_CEILING(alignment);
usize = PAGE_CEILING(size);
/*
* (usize < size) protects against very large sizes within
* PAGE of SIZE_T_MAX.
*
* (usize + alignment < usize) protects against the
* combination of maximal alignment and usize large enough
* to cause overflow. This is similar to the first overflow
* check above, but it needs to be repeated due to the new
* usize value, which may now be *equal* to maximal
* alignment, whereas before we only detected overflow if the
* original size was *greater* than maximal alignment.
*/
if (usize < size || usize + alignment < usize) {
/* size_t overflow. */
return (0);
}
/* Make sure result is a large size class. */
usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);
/*
* Calculate the size of the over-size run that arena_palloc()
* would need to allocate in order to guarantee the alignment.
* If the run wouldn't fit within a chunk, round up to a huge
* allocation size.
*/
run_size = usize + alignment - PAGE;
if (run_size <= arena_maxclass)
return (PAGE_CEILING(usize));
return (CHUNK_CEILING(usize));
if (usize + large_pad + alignment - PAGE <= arena_maxrun)
return (usize);
}
}
JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
unsigned narenas;
/* Huge size class. Beware of size_t overflow. */
/*
* We can't achieve subchunk alignment, so round up alignment to the
* minimum that can actually be supported.
*/
alignment = CHUNK_CEILING(alignment);
if (alignment == 0) {
/* size_t overflow. */
return (0);
}
malloc_mutex_lock(&arenas_lock);
narenas = narenas_total;
malloc_mutex_unlock(&arenas_lock);
/* Make sure result is a huge size class. */
if (size <= chunksize)
usize = chunksize;
else {
usize = s2u(size);
if (usize < size) {
/* size_t overflow. */
return (0);
}
}
return (narenas);
/*
* Calculate the multi-chunk mapping that huge_palloc() would need in
* order to guarantee the alignment.
*/
if (usize + alignment - PAGE < usize) {
/* size_t overflow. */
return (0);
}
return (usize);
}
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
-choose_arena(arena_t *arena)
+arena_choose(tsd_t *tsd, arena_t *arena)
{
arena_t *ret;
if (arena != NULL)
return (arena);
if ((ret = *arenas_tsd_get()) == NULL) {
ret = choose_arena_hard();
assert(ret != NULL);
}
if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
ret = arena_choose_hard(tsd);
return (ret);
}
JEMALLOC_INLINE arena_t *
arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
bool refresh_if_missing)
{
arena_t *arena;
arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
/* init_if_missing requires refresh_if_missing. */
assert(!init_if_missing || refresh_if_missing);
if (unlikely(arenas_cache == NULL)) {
/* arenas_cache hasn't been initialized yet. */
return (arena_get_hard(tsd, ind, init_if_missing));
}
if (unlikely(ind >= tsd_narenas_cache_get(tsd))) {
/*
* ind is invalid, cache is old (too small), or arena to be
* initialized.
*/
return (refresh_if_missing ? arena_get_hard(tsd, ind,
init_if_missing) : NULL);
}
arena = arenas_cache[ind];
if (likely(arena != NULL) || !refresh_if_missing)
return (arena);
return (arena_get_hard(tsd, ind, init_if_missing));
}
#endif
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
 /*
- * Include arena.h twice in order to resolve circular dependencies with
- * tcache.h.
+ * Include portions of arena.h interleaved with tcache.h in order to resolve
+ * circular dependencies.
  */
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
...
...
@@ -733,133 +817,155 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
void *imalloct(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
void *icalloct(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
arena_t *iaalloc(const void *ptr);
size_t isalloc(const void *ptr, bool demote);
void *iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache,
bool is_metadata, arena_t *arena);
void *imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
void *imalloc(tsd_t *tsd, size_t size);
void *icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
void *icalloc(tsd_t *tsd, size_t size);
void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena);
void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idalloct(void *ptr, bool try_tcache);
void idalloc(void *ptr);
void iqalloct(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata);
void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
void idalloc(tsd_t *tsd, void *ptr);
void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache,
arena_t *arena);
void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero);
bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero);
bool ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(const void *ptr)
{
assert(ptr != NULL);
return (arena_aalloc(ptr));
}
/*
* Typical usage:
* void *ptr = [...]
* size_t sz = isalloc(ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || !demote);
return (arena_salloc(ptr, demote));
}
JEMALLOC_ALWAYS_INLINE void *
imalloct(size_t size, bool try_tcache, arena_t *arena)
iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata,
arena_t *arena)
{
void *ret;
assert(size != 0);
if (size <= arena_maxclass)
return (arena_malloc(arena, size, false, try_tcache));
else
return (huge_malloc(size, false, huge_dss_prec_get(arena)));
ret = arena_malloc(tsd, arena, size, zero, tcache);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
-imalloc(size_t size)
+imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
 {
 
-	return (imalloct(size, true, NULL));
+	return (iallocztm(tsd, size, false, tcache, false, arena));
 }
JEMALLOC_ALWAYS_INLINE void *
-icalloct(size_t size, bool try_tcache, arena_t *arena)
+imalloc(tsd_t *tsd, size_t size)
 {
 
-	if (size <= arena_maxclass)
-		return (arena_malloc(arena, size, true, try_tcache));
-	else
-		return (huge_malloc(size, true, huge_dss_prec_get(arena)));
+	return (iallocztm(tsd, size, false, tcache_get(tsd, true), false, NULL));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-icalloc(size_t size)
+icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
 {
 
-	return (icalloct(size, true, NULL));
+	return (iallocztm(tsd, size, true, tcache, false, arena));
 }
JEMALLOC_ALWAYS_INLINE void *
ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
icalloc(tsd_t *tsd, size_t size)
{
return (iallocztm(tsd, size, true, tcache_get(tsd, true), false, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena)
{
void *ret;
assert(usize != 0);
assert(usize == sa2u(usize, alignment));
if (usize <= arena_maxclass && alignment <= PAGE)
ret = arena_malloc(arena, usize, zero, try_tcache);
else {
if (usize <= arena_maxclass) {
ret = arena_palloc(choose_arena(arena), usize,
alignment, zero);
} else if (alignment <= chunksize)
ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
else
ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena));
}
ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena)
 {
 
-	return (ipalloct(usize, alignment, zero, true, NULL));
+	return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
 }
/*
* Typical usage:
* void *ptr = [...]
* size_t sz = isalloc(ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{
size_t ret;
arena_chunk_t *chunk;
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || demote == false);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
ret = arena_salloc(ptr, demote);
else
ret = huge_salloc(ptr);
return (ret);
return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd,
NULL), false, NULL));
}
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{
extent_node_t *node;
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
node = chunk_lookup(ptr, false);
if (node == NULL)
return (0);
/* Only arena chunks should be looked up via interior pointers. */
assert(extent_node_addr_get(node) == ptr ||
extent_node_achunk_get(node));
return (isalloc(ptr, demote));
}
...
...
@@ -870,7 +976,7 @@ u2rz(size_t usize)
 	size_t ret;
 
 	if (usize <= SMALL_MAXCLASS) {
-		size_t binind = SMALL_SIZE2BIN(usize);
+		szind_t binind = size2index(usize);
 		ret = arena_bin_info[binind].redzone_size;
 	} else
 		ret = 0;
@@ -887,47 +993,62 @@ p2rz(const void *ptr)
 }
 
 JEMALLOC_ALWAYS_INLINE void
-idalloct(void *ptr, bool try_tcache)
+idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata)
 {
-	arena_chunk_t *chunk;
 
 	assert(ptr != NULL);
+	if (config_stats && is_metadata) {
+		arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
+		    config_prof));
+	}
 
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (chunk != ptr)
-		arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
-	else
-		huge_dalloc(ptr, true);
+	arena_dalloc(tsd, ptr, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
+{
+
+	idalloctm(tsd, ptr, tcache, false);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-idalloc(void *ptr)
+idalloc(tsd_t *tsd, void *ptr)
 {
 
-	idalloct(ptr, true);
+	idalloctm(tsd, ptr, tcache_get(tsd, false), false);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-iqalloct(void *ptr, bool try_tcache)
+iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 {
 
-	if (config_fill && opt_quarantine)
-		quarantine(ptr);
+	if (config_fill && unlikely(opt_quarantine))
+		quarantine(tsd, ptr);
 	else
-		idalloct(ptr, try_tcache);
+		idalloctm(tsd, ptr, tcache, false);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
+{
+
+	arena_sdalloc(tsd, ptr, size, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-iqalloc(void *ptr)
+isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 {
 
-	iqalloct(ptr, true);
+	if (config_fill && unlikely(opt_quarantine))
+		quarantine(tsd, ptr);
+	else
+		isdalloct(tsd, ptr, size, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
-    arena_t *arena)
+iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+    size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
 {
 	void *p;
 	size_t usize, copysize;
@@ -935,7 +1056,7 @@ iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
-	p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
+	p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
if (p == NULL) {
if (extra == 0)
return (NULL);
...
...
@@ -943,7 +1064,7 @@ iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
-		p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
+		p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
if (p == NULL)
return (NULL);
}
...
...
@@ -953,72 +1074,57 @@ iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
 	 */
 	copysize = (size < oldsize) ? size : oldsize;
 	memcpy(p, ptr, copysize);
-	iqalloct(ptr, try_tcache_dalloc);
+	isqalloc(tsd, ptr, oldsize, tcache);
 	return (p);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
-    bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
+iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
+    bool zero, tcache_t *tcache, arena_t *arena)
 {
-	size_t oldsize;
 
 	assert(ptr != NULL);
 	assert(size != 0);
 
-	oldsize = isalloc(ptr, config_prof);
-
 	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
 	    != 0) {
 		/*
 		 * Existing object alignment is inadequate; allocate new space
 		 * and copy.
 		 */
-		return (iralloct_realign(ptr, oldsize, size, extra, alignment,
-		    zero, try_tcache_alloc, try_tcache_dalloc, arena));
+		return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
+		    zero, tcache, arena));
 	}
 
-	if (size + extra <= arena_maxclass) {
-		return (arena_ralloc(arena, ptr, oldsize, size, extra,
-		    alignment, zero, try_tcache_alloc, try_tcache_dalloc));
-	} else {
-		return (huge_ralloc(ptr, oldsize, size, extra,
-		    alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
-	}
+	return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
+	    tcache));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
+iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
+    bool zero)
 {
 
-	return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
+	return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
+	    tcache_get(tsd, true), NULL));
 }
 
 JEMALLOC_ALWAYS_INLINE bool
-ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
+ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
+    bool zero)
 {
-	size_t oldsize;
 
 	assert(ptr != NULL);
 	assert(size != 0);
 
-	oldsize = isalloc(ptr, config_prof);
 	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
 	    != 0) {
 		/* Existing object alignment is inadequate. */
 		return (true);
 	}
 
-	if (size <= arena_maxclass)
-		return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
-	else
-		return (huge_ralloc_no_move(ptr, oldsize, size, extra));
+	return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
 }
-
-malloc_tsd_externs(thread_allocated, thread_allocated_t)
-malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
-    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
 #endif
 
 #include "jemalloc/internal/prof.h"
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
0 → 100644
#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
#else
# include <sys/param.h>
# include <sys/mman.h>
# if !defined(__pnacl__) && !defined(__native_client__)
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
# include <errno.h>
#endif
#include <sys/types.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
# ifdef JEMALLOC_HAS_RESTRICT
# define restrict __restrict
# endif
/* Disable warnings about deprecated system functions. */
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
isblank(int c)
{

    return (c == '\t' || c == ' ');
}
#endif
#else
# include <unistd.h>
#endif
#include <fcntl.h>
#endif
/* JEMALLOC_INTERNAL_H */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
...
...
@@ -22,6 +22,9 @@
*/
#undef CPU_SPINWAIT
/* Defined if C11 atomics are available. */
#undef JEMALLOC_C11ATOMICS
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
#undef JEMALLOC_ATOMIC9
...
...
@@ -35,7 +38,7 @@
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
* functions are defined in libgcc instead of being inlines)
.
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
...
...
@@ -43,16 +46,36 @@
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
* functions are defined in libgcc instead of being inlines)
.
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
#undef JEMALLOC_HAVE_BUILTIN_CLZ
/*
* Defined if madvise(2) is available.
*/
#undef JEMALLOC_HAVE_MADVISE
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
#undef JEMALLOC_OSSPIN
/*
* Defined if secure_getenv(3) is available.
*/
#undef JEMALLOC_HAVE_SECURE_GETENV
/*
* Defined if issetugid(2) is available.
*/
#undef JEMALLOC_HAVE_ISSETUGID
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
...
...
@@ -76,9 +99,6 @@
*/
#undef JEMALLOC_MUTEX_INIT_CB
/* Defined if sbrk() is supported. */
#undef JEMALLOC_HAVE_SBRK
/* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL
...
...
@@ -137,8 +157,26 @@
/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK
/* One page is 2^STATIC_PAGE_SHIFT bytes. */
#undef STATIC_PAGE_SHIFT
/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
#undef LG_TINY_MIN
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#undef LG_QUANTUM
/* One page is 2^LG_PAGE bytes. */
#undef LG_PAGE
/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
* This is the normal order of things for mmap()/munmap(), but on Windows
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
* mappings do *not* coalesce/fragment.
*/
#undef JEMALLOC_MAPS_COALESCE
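As a hedged illustration of the coalescing difference described above (not jemalloc code): POSIX allows unmapping any page-aligned subrange of a mapping, while Windows requires releasing a VirtualAlloc() reservation exactly as it was created.

#include <stddef.h>
#ifdef _WIN32
#include <windows.h>

static void
reserve_and_release(size_t size)
{
    void *p = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);

    if (p != NULL) {
        /* Must pass the original base address and dwSize == 0. */
        VirtualFree(p, 0, MEM_RELEASE);
    }
}
#else
#include <sys/mman.h>

static void
reserve_and_release(size_t size)
{
    /* Assumes size is a multiple of the page size. */
    void *p = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p != MAP_FAILED) {
        /* munmap() may also release any page-aligned subrange. */
        munmap(p, size);
    }
}
#endif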
/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
...
...
@@ -147,22 +185,28 @@
*/
#undef JEMALLOC_MUNMAP
/*
* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is
* disabled by default because it is Linux-specific and it will cause virtual
* memory map holes, much like munmap(2) does.
*/
#undef JEMALLOC_MREMAP
/* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS
/*
* ffs()/ffsl() functions to use for bitmapping. Don't use these directly;
* instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h.
*/
#undef JEMALLOC_INTERNAL_FFSL
#undef JEMALLOC_INTERNAL_FFS
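The wrappers mentioned in the comment live in util.h; a hypothetical sketch of how such a configure-substituted setting might be consumed (the real macro value and wrapper may differ) is:

/* Hypothetical: configure would normally substitute the builtin here. */
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl

static inline int
jemalloc_ffsl(long bitmap)
{
    /* One plus the index of the least significant set bit, or 0 if none. */
    return (JEMALLOC_INTERNAL_FFSL(bitmap));
}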
/*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them.
*/
#undef JEMALLOC_IVSALLOC
/*
* If defined, explicitly attempt to more uniformly distribute large allocation
* pointer alignments across all cache indices.
*/
#undef JEMALLOC_CACHE_OBLIVIOUS
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
...
...
@@ -182,9 +226,7 @@
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_FREE
/*
* Define if operating system has alloca.h header.
*/
/* Define if operating system has alloca.h header. */
#undef JEMALLOC_HAS_ALLOCA_H
/* C99 restrict keyword supported. */
...
...
@@ -202,4 +244,19 @@
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
#undef JEMALLOC_GLIBC_MALLOC_HOOK
/* glibc memalign hook. */
#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
/* Adaptive mutex support in pthreads. */
#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
*/
#undef JEMALLOC_EXPORT
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
...
...
@@ -39,9 +39,15 @@
#endif
#define ZU(z) ((size_t)z)
#define ZI(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q)
#define QI(q) ((int64_t)q)
#define KZU(z) ZU(z##ULL)
#define KZI(z) ZI(z##LL)
#define KQU(q) QU(q##ULL)
#define KQI(q) QI(q##LL)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
...
...
deps/jemalloc/include/jemalloc/internal/mutex.h
...
...
@@ -10,7 +10,7 @@ typedef struct malloc_mutex_s malloc_mutex_t;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
#else
#  if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) &&				\
#  if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) &&		\
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
...
...
@@ -26,7 +26,11 @@ typedef struct malloc_mutex_s malloc_mutex_t;
struct malloc_mutex_s {
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
    SRWLOCK		lock;
#  else
    CRITICAL_SECTION	lock;
#  endif
#elif (defined(JEMALLOC_OSSPIN))
    OSSpinLock		lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
...
...
@@ -70,7 +74,11 @@ malloc_mutex_lock(malloc_mutex_t *mutex)
    if (isthreaded) {
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
        AcquireSRWLockExclusive(&mutex->lock);
#  else
        EnterCriticalSection(&mutex->lock);
#  endif
#elif (defined(JEMALLOC_OSSPIN))
        OSSpinLockLock(&mutex->lock);
#else
...
...
@@ -85,7 +93,11 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
    if (isthreaded) {
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
        ReleaseSRWLockExclusive(&mutex->lock);
#  else
        LeaveCriticalSection(&mutex->lock);
#  endif
#elif (defined(JEMALLOC_OSSPIN))
        OSSpinLockUnlock(&mutex->lock);
#else
...
...
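The if (isthreaded) guard in the lock/unlock wrappers above is the lazy-locking scheme referred to by JEMALLOC_LAZY_LOCK: locking is skipped entirely until a second thread exists. A standalone analogue with plain pthreads (names are illustrative, not jemalloc's):

#include <pthread.h>
#include <stdbool.h>

static bool is_threaded = false;	/* flipped to true once a thread is spawned */
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void
demo_lock_acquire(void)
{
    if (is_threaded)
        pthread_mutex_lock(&demo_lock);
}

static void
demo_lock_release(void)
{
    if (is_threaded)
        pthread_mutex_unlock(&demo_lock);
}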
deps/jemalloc/include/jemalloc/internal/pages.h
0 → 100644
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif
/* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void	*pages_map(void *addr, size_t size);
void	pages_unmap(void *addr, size_t size);
void	*pages_trim(void *addr, size_t alloc_size, size_t leadsize,
    size_t size);
bool	pages_commit(void *addr, size_t size);
bool	pages_decommit(void *addr, size_t size);
bool	pages_purge(void *addr, size_t size);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif
/* JEMALLOC_H_INLINES */
/******************************************************************************/
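A hedged sketch of what the first two prototypes might look like on a POSIX system; the real deps/jemalloc/src/pages.c additionally handles Windows, overcommit, and decommit semantics, so this is illustrative only:

#include <stddef.h>
#include <sys/mman.h>

static void *
pages_map_sketch(void *addr, size_t size)
{
    void *ret = mmap(addr, size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    return (ret == MAP_FAILED ? NULL : ret);
}

static void
pages_unmap_sketch(void *addr, size_t size)
{
    munmap(addr, size);
}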
deps/jemalloc/include/jemalloc/internal/private_symbols.txt
a0calloc
a0free
a0dalloc
a0get
a0malloc
arena_aalloc
arena_alloc_junk_small
arena_bin_index
arena_bin_info
arena_bitselm_get
arena_boot
arena_choose
arena_choose_hard
arena_chunk_alloc_huge
arena_chunk_cache_maybe_insert
arena_chunk_cache_maybe_remove
arena_chunk_dalloc_huge
arena_chunk_ralloc_huge_expand
arena_chunk_ralloc_huge_shrink
arena_chunk_ralloc_huge_similar
arena_cleanup
arena_dalloc
arena_dalloc_bin
arena_dalloc_bin_locked
arena_dalloc_bin_junked_locked
arena_dalloc_junk_large
arena_dalloc_junk_small
arena_dalloc_large
arena_dalloc_large_locked
arena_dalloc_large_junked_locked
arena_dalloc_small
arena_dss_prec_get
arena_dss_prec_set
arena_get
arena_get_hard
arena_init
arena_lg_dirty_mult_default_get
arena_lg_dirty_mult_default_set
arena_lg_dirty_mult_get
arena_lg_dirty_mult_set
arena_malloc
arena_malloc_large
arena_malloc_small
arena_mapbits_allocated_get
arena_mapbits_binind_get
arena_mapbits_decommitted_get
arena_mapbits_dirty_get
arena_mapbits_get
arena_mapbits_internal_set
arena_mapbits_large_binind_set
arena_mapbits_large_get
arena_mapbits_large_set
arena_mapbits_large_size_get
arena_mapbitsp_get
arena_mapbitsp_read
arena_mapbitsp_write
arena_mapbits_size_decode
arena_mapbits_size_encode
arena_mapbits_small_runind_get
arena_mapbits_small_set
arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
arena_mapbits_unzeroed_set
arena_mapbitsp_get
arena_mapbitsp_read
arena_mapbitsp_write
arena_mapp_get
arena_maxclass
arena_maxrun
arena_maybe_purge
arena_metadata_allocated_add
arena_metadata_allocated_get
arena_metadata_allocated_sub
arena_migrate
arena_miscelm_get
arena_miscelm_to_pageind
arena_miscelm_to_rpages
arena_nbound
arena_new
arena_node_alloc
arena_node_dalloc
arena_palloc
arena_postfork_child
arena_postfork_parent
...
...
@@ -46,50 +78,47 @@ arena_prefork
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
arena_prof_ctx_get
arena_prof_ctx_set
arena_prof_promoted
arena_prof_tctx_get
arena_prof_tctx_reset
arena_prof_tctx_set
arena_ptr_small_binind_get
arena_purge_all
arena_quarantine_junk_small
arena_ralloc
arena_ralloc_junk_large
arena_ralloc_no_move
arena_rd_to_miscelm
arena_redzone_corruption
arena_run_regind
arena_run_to_miscelm
arena_salloc
arenas_cache_bypass_cleanup
arenas_cache_cleanup
arena_sdalloc
arena_stats_merge
arena_tcache_fill_small
arenas
arenas_booted
arenas_cleanup
arenas_extend
arenas_initialized
arenas_lock
arenas_tls
arenas_tsd
arenas_tsd_boot
arenas_tsd_cleanup_wrapper
arenas_tsd_get
arenas_tsd_get_wrapper
arenas_tsd_init_head
arenas_tsd_set
atomic_add_p
atomic_add_u
atomic_add_uint32
atomic_add_uint64
atomic_add_z
atomic_cas_p
atomic_cas_u
atomic_cas_uint32
atomic_cas_uint64
atomic_cas_z
atomic_sub_p
atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
base_alloc
base_boot
base_calloc
base_node_alloc
base_node_dealloc
base_postfork_child
base_postfork_parent
base_prefork
base_stats_get
bitmap_full
bitmap_get
bitmap_info_init
...
...
@@ -99,49 +128,54 @@ bitmap_set
bitmap_sfu
bitmap_size
bitmap_unset
bootstrap_calloc
bootstrap_free
bootstrap_malloc
bt_init
buferror
choose_arena
choose_arena_hard
chunk_alloc
chunk_alloc_base
chunk_alloc_cache
chunk_alloc_dss
chunk_alloc_mmap
chunk_alloc_wrapper
chunk_boot
chunk_dealloc
chunk_dealloc_mmap
chunk_dalloc_arena
chunk_dalloc_cache
chunk_dalloc_mmap
chunk_dalloc_wrapper
chunk_deregister
chunk_dss_boot
chunk_dss_postfork_child
chunk_dss_postfork_parent
chunk_dss_prec_get
chunk_dss_prec_set
chunk_dss_prefork
chunk_hooks_default
chunk_hooks_get
chunk_hooks_set
chunk_in_dss
chunk_lookup
chunk_npages
chunk_postfork_child
chunk_postfork_parent
chunk_prefork
chunk_unmap
chunks_mtx
chunks_rtree
chunk_purge_arena
chunk_purge_wrapper
chunk_register
chunksize
chunksize_mask
ckh_bucket_search
chunks_rtree
ckh_count
ckh_delete
ckh_evict_reloc_insert
ckh_insert
ckh_isearch
ckh_iter
ckh_new
ckh_pointer_hash
ckh_pointer_keycomp
ckh_rebuild
ckh_remove
ckh_search
ckh_string_hash
ckh_string_keycomp
ckh_try_bucket_insert
ckh_try_insert
ctl_boot
ctl_bymib
ctl_byname
...
...
@@ -150,6 +184,23 @@ ctl_postfork_child
ctl_postfork_parent
ctl_prefork
dss_prec_names
extent_node_achunk_get
extent_node_achunk_set
extent_node_addr_get
extent_node_addr_set
extent_node_arena_get
extent_node_arena_set
extent_node_dirty_insert
extent_node_dirty_linkage_init
extent_node_dirty_remove
extent_node_init
extent_node_prof_tctx_get
extent_node_prof_tctx_set
extent_node_size_get
extent_node_size_set
extent_node_zeroed_get
extent_node_zeroed_set
extent_tree_ad_empty
extent_tree_ad_first
extent_tree_ad_insert
extent_tree_ad_iter
...
...
@@ -166,6 +217,7 @@ extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
extent_tree_szad_empty
extent_tree_szad_first
extent_tree_szad_insert
extent_tree_szad_iter
...
...
@@ -193,45 +245,49 @@ hash_rotl_64
hash_x64_128
hash_x86_128
hash_x86_32
huge_allocated
huge_boot
huge_aalloc
huge_dalloc
huge_dalloc_junk
huge_dss_prec_get
huge_malloc
huge_mtx
huge_ndalloc
huge_nmalloc
huge_palloc
huge_postfork_child
huge_postfork_parent
huge_prefork
huge_prof_ctx_get
huge_prof_ctx_set
huge_prof_tctx_get
huge_prof_tctx_reset
huge_prof_tctx_set
huge_ralloc
huge_ralloc_no_move
huge_salloc
iallocm
iaalloc
iallocztm
icalloc
icalloct
idalloc
idalloct
idalloctm
imalloc
imalloct
index2size
index2size_compute
index2size_lookup
index2size_tab
in_valgrind
ipalloc
ipalloct
ipallocztm
iqalloc
iqalloct
iralloc
iralloct
iralloct_realign
isalloc
isdalloct
isqalloc
isthreaded
ivsalloc
ixalloc
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
large_maxclass
lg_floor
malloc_cprintf
malloc_mutex_init
malloc_mutex_lock
...
...
@@ -242,7 +298,8 @@ malloc_mutex_unlock
malloc_printf
malloc_snprintf
malloc_strtoumax
malloc_tsd_boot
malloc_tsd_boot0
malloc_tsd_boot1
malloc_tsd_cleanup_register
malloc_tsd_dalloc
malloc_tsd_malloc
...
...
@@ -251,16 +308,18 @@ malloc_vcprintf
malloc_vsnprintf
malloc_write
map_bias
map_misc_offset
mb_write
mutex_boot
narenas_auto
narenas_total
narenas_cache_cleanup
narenas_total_get
ncpus
nhbins
opt_abort
opt_dss
opt_junk
opt_junk_alloc
opt_junk_free
opt_lg_chunk
opt_lg_dirty_mult
opt_lg_prof_interval
...
...
@@ -274,84 +333,99 @@ opt_prof_final
opt_prof_gdump
opt_prof_leak
opt_prof_prefix
opt_prof_thread_active_init
opt_quarantine
opt_redzone
opt_stats_print
opt_tcache
opt_utrace
opt_valgrind
opt_xmalloc
opt_zero
p2rz
pages_commit
pages_decommit
pages_map
pages_purge
pages_trim
pages_unmap
pow2_ceil
prof_active_get
prof_active_get_unlocked
prof_active_set
prof_alloc_prep
prof_alloc_rollback
prof_backtrace
prof_boot0
prof_boot1
prof_boot2
prof_bt_count
prof_ctx_get
prof_ctx_set
prof_dump_header
prof_dump_open
prof_free
prof_free_sampled_object
prof_gdump
prof_gdump_get
prof_gdump_get_unlocked
prof_gdump_set
prof_gdump_val
prof_idump
prof_interval
prof_lookup
prof_malloc
prof_malloc_sample_object
prof_mdump
prof_postfork_child
prof_postfork_parent
prof_prefork
prof_promote
prof_realloc
prof_reset
prof_sample_accum_update
prof_sample_threshold_update
prof_tdata_booted
prof_tctx_get
prof_tctx_reset
prof_tctx_set
prof_tdata_cleanup
prof_tdata_get
prof_tdata_init
prof_tdata_initialized
prof_tdata_tls
prof_tdata_tsd
prof_tdata_tsd_boot
prof_tdata_tsd_cleanup_wrapper
prof_tdata_tsd_get
prof_tdata_tsd_get_wrapper
prof_tdata_tsd_init_head
prof_tdata_tsd_set
prof_tdata_reinit
prof_thread_active_get
prof_thread_active_init_get
prof_thread_active_init_set
prof_thread_active_set
prof_thread_name_get
prof_thread_name_set
quarantine
quarantine_alloc_hook
quarantine_boot
quarantine_booted
quarantine_alloc_hook_work
quarantine_cleanup
quarantine_init
quarantine_tls
quarantine_tsd
quarantine_tsd_boot
quarantine_tsd_cleanup_wrapper
quarantine_tsd_get
quarantine_tsd_get_wrapper
quarantine_tsd_init_head
quarantine_tsd_set
register_zone
rtree_child_read
rtree_child_read_hard
rtree_child_tryread
rtree_delete
rtree_get
rtree_get_locked
rtree_new
rtree_postfork_child
rtree_postfork_parent
rtree_prefork
rtree_node_valid
rtree_set
rtree_start_level
rtree_subkey
rtree_subtree_read
rtree_subtree_read_hard
rtree_subtree_tryread
rtree_val_read
rtree_val_write
s2u
s2u_compute
s2u_lookup
sa2u
set_errno
small_size2bin
size2index
size2index_compute
size2index_lookup
size2index_tab
stats_cactive
stats_cactive_add
stats_cactive_get
stats_cactive_sub
stats_chunks
stats_print
tcache_alloc_easy
tcache_alloc_large
...
...
@@ -359,55 +433,67 @@ tcache_alloc_small
tcache_alloc_small_hard
tcache_arena_associate
tcache_arena_dissociate
tcache_arena_reassociate
tcache_bin_flush_large
tcache_bin_flush_small
tcache_bin_info
tcache_boot0
tcache_boot1
tcache_booted
tcache_boot
tcache_cleanup
tcache_create
tcache_dalloc_large
tcache_dalloc_small
tcache_destroy
tcache_enabled_booted
tcache_enabled_cleanup
tcache_enabled_get
tcache_enabled_initialized
tcache_enabled_set
tcache_enabled_tls
tcache_enabled_tsd
tcache_enabled_tsd_boot
tcache_enabled_tsd_cleanup_wrapper
tcache_enabled_tsd_get
tcache_enabled_tsd_get_wrapper
tcache_enabled_tsd_init_head
tcache_enabled_tsd_set
tcache_event
tcache_event_hard
tcache_flush
tcache_get
tcache_initialized
tcache_get_hard
tcache_maxclass
tcaches
tcache_salloc
tcaches_create
tcaches_destroy
tcaches_flush
tcaches_get
tcache_stats_merge
tcache_thread_cleanup
tcache_tls
tcache_tsd
tcache_tsd_boot
tcache_tsd_cleanup_wrapper
tcache_tsd_get
tcache_tsd_get_wrapper
tcache_tsd_init_head
tcache_tsd_set
thread_allocated_booted
thread_allocated_initialized
thread_allocated_tls
thread_allocated_tsd
thread_allocated_tsd_boot
thread_allocated_tsd_cleanup_wrapper
thread_allocated_tsd_get
thread_allocated_tsd_get_wrapper
thread_allocated_tsd_init_head
thread_allocated_tsd_set
thread_allocated_cleanup
thread_deallocated_cleanup
tsd_arena_get
tsd_arena_set
tsd_boot
tsd_boot0
tsd_boot1
tsd_booted
tsd_cleanup
tsd_cleanup_wrapper
tsd_fetch
tsd_get
tsd_wrapper_get
tsd_wrapper_set
tsd_initialized
tsd_init_check_recursion
tsd_init_finish
tsd_init_head
tsd_nominal
tsd_quarantine_get
tsd_quarantine_set
tsd_set
tsd_tcache_enabled_get
tsd_tcache_enabled_set
tsd_tcache_get
tsd_tcache_set
tsd_tls
tsd_tsd
tsd_prof_tdata_get
tsd_prof_tdata_set
tsd_thread_allocated_get
tsd_thread_allocated_set
tsd_thread_deallocated_get
tsd_thread_deallocated_set
u2rz
valgrind_freelike_block
valgrind_make_mem_defined
valgrind_make_mem_noaccess
valgrind_make_mem_undefined
deps/jemalloc/include/jemalloc/internal/prng.h
...
...
@@ -15,7 +15,7 @@
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
 * proportional to bit position.  For example. the lowest bit has a cycle of 2,
 * proportional to bit position.  For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*
...
...
@@ -26,22 +26,22 @@
* const uint32_t a, c : See above discussion.
*/
#define	prng32(r, lg_range, state, a, c) do {			\
	assert(lg_range > 0);					\
	assert(lg_range <= 32);					\
	assert((lg_range) > 0);					\
	assert((lg_range) <= 32);				\
								\
	r = (state * (a)) + (c);				\
	state = r;						\
	r >>= (32 - lg_range);					\
	r >>= (32 - (lg_range));				\
} while (false)

/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define	prng64(r, lg_range, state, a, c) do {			\
	assert(lg_range > 0);					\
	assert(lg_range <= 64);					\
	assert((lg_range) > 0);					\
	assert((lg_range) <= 64);				\
								\
	r = (state * (a)) + (c);				\
	state = r;						\
	r >>= (64 - lg_range);					\
	r >>= (64 - (lg_range));				\
} while (false)
#endif
/* JEMALLOC_H_TYPES */
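Illustrative use of prng64() above (the LCG constants are the ones jemalloc's profiling code uses; the function name and includes are mine): keep only the upper bits, which have the longest cycles.

#include <assert.h>
#include <stdint.h>

static uint64_t
next_random_16(uint64_t *state)
{
    uint64_t r;

    /* Expands to: r = *state * a + c; *state = r; r >>= (64 - 16); */
    prng64(r, 16, *state, UINT64_C(6364136223846793005),
        UINT64_C(1442695040888963407));
    return (r);
}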
...
...
deps/jemalloc/include/jemalloc/internal/prof.h
...
...
@@ -3,8 +3,8 @@
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_thr_cnt_s prof_thr_cnt_t;
typedef struct prof_ctx_s prof_ctx_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
...
...
@@ -23,9 +23,6 @@ typedef struct prof_tdata_s prof_tdata_t;
*/
#define PROF_BT_MAX 128
/* Maximum number of backtraces to store in each per thread LRU cache. */
#define PROF_TCMAX 1024
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
...
...
@@ -36,11 +33,17 @@ typedef struct prof_tdata_s prof_tdata_t;
#define PROF_PRINTF_BUFSIZE 128
/*
* Number of mutexes shared among all ctx's. No space is allocated for these
 * Number of mutexes shared among all gctx's.  No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
* Number of mutexes shared among all tdata's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NTDATA_LOCKS 256
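PROF_NCTX_LOCKS and PROF_NTDATA_LOCKS size shared lock pools: many gctx/tdata objects hash onto a small fixed array of mutexes instead of each owning one. A standalone sketch of that idea (array size, names, and hash are illustrative, not jemalloc's internals):

#include <pthread.h>
#include <stdint.h>

#define DEMO_NLOCKS 1024	/* must be a power of two for the mask below */

/* Initialize each element with pthread_mutex_init() before use. */
static pthread_mutex_t demo_locks[DEMO_NLOCKS];

static pthread_mutex_t *
demo_lock_for(const void *obj)
{
    /* Drop low bits that alignment makes identical, then index the pool. */
    return (&demo_locks[((uintptr_t)obj >> 3) & (DEMO_NLOCKS - 1)]);
}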
/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
...
...
@@ -63,141 +66,186 @@ struct prof_bt_s {
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
	prof_bt_t	*bt;
	unsigned	nignore;
	unsigned	max;
} prof_unwind_data_t;
#endif
struct prof_cnt_s {
	/*
	 * Profiling counters.  An allocation/deallocation pair can operate on
	 * different prof_thr_cnt_t objects that are linked into the same
	 * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
	 * negative.  In principle it is possible for the *bytes counters to
	 * overflow/underflow, but a general solution would require something
	 * like 128-bit counters; this implementation doesn't bother to solve
	 * that problem.
	 */
	int64_t		curobjs;
	int64_t		curbytes;
	/* Profiling counters. */
	uint64_t	curobjs;
	uint64_t	curbytes;
	uint64_t	accumobjs;
	uint64_t	accumbytes;
};
struct
prof_thr_cnt_s
{
/* Linkage into prof_ctx_t's cnts_ql. */
ql_elm
(
prof_thr_cnt_t
)
cnts_link
;
typedef
enum
{
prof_tctx_state_initializing
,
prof_tctx_state_nominal
,
prof_tctx_state_dumping
,
prof_tctx_state_purgatory
/* Dumper must finish destroying. */
}
prof_tctx_state_t
;
/* Linkage into thread's LRU. */
ql_elm
(
prof_thr_cnt_t
)
lru_link
;
struct
prof_tctx_s
{
/* Thread data for thread that performed the allocation. */
prof_tdata_t
*
tdata
;
/*
* Associated context. If a thread frees an object that it did not
* allocate, it is possible that the context is not cached in the
* thread's hash table, in which case it must be able to look up the
* context, insert a new prof_thr_cnt_t into the thread's hash table,
* and link it into the prof_ctx_t's cnts_ql.
* Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
* defunct during teardown.
*/
prof_ctx_t
*
ctx
;
uint64_t
thr_uid
;
uint64_t
thr_discrim
;
/* Profiling counters, protected by tdata->lock. */
prof_cnt_t
cnts
;
/* Associated global context. */
prof_gctx_t
*
gctx
;
/*
* Threads use memory barriers to update the counters. Since there is
* only ever one writer, the only challenge is for the reader to get a
* consistent read of the counters.
*
* The writer uses this series of operations:
*
* 1) Increment epoch to an odd number.
* 2) Update counters.
* 3) Increment epoch to an even number.
*
* The reader must assure 1) that the epoch is even while it reads the
* counters, and 2) that the epoch doesn't change between the time it
* starts and finishes reading the counters.
* UID that distinguishes multiple tctx's created by the same thread,
* but coexisting in gctx->tctxs. There are two ways that such
* coexistence can occur:
* - A dumper thread can cause a tctx to be retained in the purgatory
* state.
* - Although a single "producer" thread must create all tctx's which
* share the same thr_uid, multiple "consumers" can each concurrently
* execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
* gets called once each time cnts.cur{objs,bytes} drop to 0, but this
* threshold can be hit again before the first consumer finishes
* executing prof_tctx_destroy().
*/
u
nsigned
epoch
;
u
int64_t
tctx_uid
;
/* Profiling counters. */
prof_cnt_t
cnts
;
};
/* Linkage into gctx's tctxs. */
rb_node
(
prof_tctx_t
)
tctx_link
;
struct
prof_ctx_s
{
/* Associated backtrace. */
prof_bt_t
*
bt
;
/*
* True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
* sample vs destroy race.
*/
bool
prepared
;
/* Current dump-related state, protected by gctx->lock. */
prof_tctx_state_t
state
;
/*
* Copy of cnts snapshotted during early dump phase, protected by
* dump_mtx.
*/
prof_cnt_t
dump_cnts
;
};
typedef
rb_tree
(
prof_tctx_t
)
prof_tctx_tree_t
;
/* Protects nlimbo, cnt_merged, and cnts_ql. */
struct
prof_gctx_s
{
/* Protects nlimbo, cnt_summed, and tctxs. */
malloc_mutex_t
*
lock
;
/*
* Number of threads that currently cause this ctx to be in a state of
* Number of threads that currently cause this
g
ctx to be in a state of
* limbo due to one of:
* - Initializing per thread counters associated with this ctx.
* - Preparing to destroy this ctx.
* - Dumping a heap profile that includes this ctx.
* - Initializing this gctx.
* - Initializing per thread counters associated with this gctx.
* - Preparing to destroy this gctx.
* - Dumping a heap profile that includes this gctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* ctx.
*
g
ctx.
*/
unsigned
nlimbo
;
/* Temporary storage for summation during dump. */
prof_cnt_t
cnt_summed
;
/* When threads exit, they merge their stats into cnt_merged. */
prof_cnt_t
cnt_merged
;
/*
*
List
of profile counters, one for each thread that has allocated in
*
Tree
of profile counters, one for each thread that has allocated in
* this context.
*/
ql_head
(
prof_thr_cnt_t
)
cnts_ql
;
prof_tctx_tree_t
tctxs
;
/* Linkage for tree of contexts to be dumped. */
rb_node
(
prof_gctx_t
)
dump_link
;
/* Temporary storage for summation during dump. */
prof_cnt_t
cnt_summed
;
/* Associated backtrace. */
prof_bt_t
bt
;
/*
Linkage for list of contexts to be dumped
. */
ql_elm
(
prof_ctx_t
)
dump_link
;
/*
Backtrace vector, variable size, referred to by bt
. */
void
*
vec
[
1
]
;
};
typedef
ql_head
(
prof_ctx_t
)
prof_ctx_
list
_t
;
typedef
rb_tree
(
prof_
g
ctx_t
)
prof_
g
ctx_
tree
_t
;
struct
prof_tdata_s
{
malloc_mutex_t
*
lock
;
/* Monotonically increasing unique thread identifier. */
uint64_t
thr_uid
;
/*
* Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a
* cache of backtraces, with associated thread-specific prof_thr_cnt_t
* objects. Other threads may read the prof_thr_cnt_t contents, but no
* others will ever write them.
*
* Upon thread exit, the thread must merge all the prof_thr_cnt_t
* counter data into the associated prof_ctx_t objects, and unlink/free
* the prof_thr_cnt_t objects.
* Monotonically increasing discriminator among tdata structures
* associated with the same thr_uid.
*/
ckh_t
bt2cnt
;
uint64_t
thr_discrim
;
/*
LRU for content
s
o
f
bt2cnt
. */
ql_head
(
prof_thr_cnt_t
)
lru_ql
;
/*
Included in heap profile dump
s
i
f
non-NULL
. */
char
*
thread_name
;
/* Backtrace vector, used for calls to prof_backtrace(). */
void
**
vec
;
bool
attached
;
bool
expired
;
rb_node
(
prof_tdata_t
)
tdata_link
;
/*
* Counter used to initialize prof_tctx_t's tctx_uid. No locking is
* necessary when incrementing this field, because only one thread ever
* does so.
*/
uint64_t
tctx_uid_next
;
/*
* Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
* backtraces for which it has non-zero allocation/deallocation counters
* associated with thread-specific prof_tctx_t objects. Other threads
* may write to prof_tctx_t contents when freeing associated objects.
*/
ckh_t
bt2tctx
;
/* Sampling state. */
uint64_t
prng_state
;
uint64_t
threshold
;
uint64_t
accum
;
uint64_t
bytes_until_sample
;
/* State used to avoid dumping while operating on prof internals. */
bool
enq
;
bool
enq_idump
;
bool
enq_gdump
;
/*
* Set to true during an early dump phase for tdata's which are
* currently being dumped. New threads' tdata's have this initialized
* to false so that they aren't accidentally included in later dump
* phases.
*/
bool
dumping
;
/*
* True if profiling is active for this tdata's thread
* (thread.prof.active mallctl).
*/
bool
active
;
/* Temporary storage for summation during dump. */
prof_cnt_t
cnt_summed
;
/* Backtrace vector, used for calls to prof_backtrace(). */
void
*
vec
[
PROF_BT_MAX
];
};
typedef
rb_tree
(
prof_tdata_t
)
prof_tdata_tree_t
;
#endif
/* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool	opt_prof;
/*
 * Even if opt_prof is true, sampling can be temporarily disabled by setting
 * opt_prof_active to false.  No locking is used when updating opt_prof_active,
 * so there are no guarantees regarding how long it will take for all threads
 * to notice state changes.
 */
extern bool	opt_prof_active;
extern bool	opt_prof_thread_active_init;
extern size_t	opt_lg_prof_sample;   /* Mean bytes between samples. */
extern ssize_t	opt_lg_prof_interval; /* lg(prof_interval). */
extern bool	opt_prof_gdump;       /* High-water memory dumping. */
...
...
@@ -211,6 +259,12 @@ extern char opt_prof_prefix[
#endif
1
];
/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool	prof_active;

/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool	prof_gdump_val;
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
* profile dump when it reaches this threshold. The effect is that the
...
...
@@ -221,391 +275,269 @@ extern char opt_prof_prefix[
extern uint64_t	prof_interval;

/*
 * If true, promote small sampled objects to large objects, since small run
 * headers do not have embedded profile context pointers.
 * Initialized as opt_lg_prof_sample, and potentially modified during profiling
 * resets.
 */
extern bool	prof_promote;
extern size_t	lg_prof_sample;
void	prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void	prof_malloc_sample_object(const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void	prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void	bt_init(prof_bt_t *bt, void **vec);
void	prof_backtrace(prof_bt_t *bt, unsigned nignore);
prof_thr_cnt_t	*prof_lookup(prof_bt_t *bt);
void	prof_backtrace(prof_bt_t *bt);
prof_tctx_t	*prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t	prof_tdata_count(void);
size_t	prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
void	prof_idump(void);
bool	prof_mdump(const char *filename);
void	prof_gdump(void);
prof_tdata_t	*prof_tdata_init(void);
void	prof_tdata_cleanup(void *arg);
prof_tdata_t	*prof_tdata_init(tsd_t *tsd);
prof_tdata_t	*prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void	prof_reset(tsd_t *tsd, size_t lg_sample);
void	prof_tdata_cleanup(tsd_t *tsd);
const char	*prof_thread_name_get(void);
bool	prof_active_get(void);
bool	prof_active_set(bool active);
int	prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool	prof_thread_active_get(void);
bool	prof_thread_active_set(bool active);
bool	prof_thread_active_init_get(void);
bool	prof_thread_active_init_set(bool active_init);
bool	prof_gdump_get(void);
bool	prof_gdump_set(bool active);
void	prof_boot0(void);
void	prof_boot1(void);
bool	prof_boot2(void);
void	prof_prefork(void);
void	prof_postfork_parent(void);
void	prof_postfork_child(void);
void	prof_sample_threshold_update(prof_tdata_t *tdata);
#endif
/* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#define PROF_ALLOC_PREP(nignore, size, ret) do { \
prof_tdata_t *prof_tdata; \
prof_bt_t bt; \
\
assert(size == s2u(size)); \
\
prof_tdata = prof_tdata_get(true); \
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
if (prof_tdata != NULL) \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
else \
ret = NULL; \
break; \
} \
\
if (opt_prof_active == false) { \
/* Sampling is currently inactive, so avoid sampling. */
\
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
} else if (opt_lg_prof_sample == 0) { \
/* Don't bother with sampling logic, since sampling */
\
/* interval is 1. */
\
bt_init(&bt, prof_tdata->vec); \
prof_backtrace(&bt, nignore); \
ret = prof_lookup(&bt); \
} else { \
if (prof_tdata->threshold == 0) { \
/* Initialize. Seed the prng differently for */
\
/* each thread. */
\
prof_tdata->prng_state = \
(uint64_t)(uintptr_t)&size; \
prof_sample_threshold_update(prof_tdata); \
} \
\
/* Determine whether to capture a backtrace based on */
\
/* whether size is enough for prof_accum to reach */
\
/* prof_tdata->threshold. However, delay updating */
\
/* these variables until prof_{m,re}alloc(), because */
\
/* we don't know for sure that the allocation will */
\
/* succeed. */
\
/* */
\
/* Use subtraction rather than addition to avoid */
\
/* potential integer overflow. */
\
if (size >= prof_tdata->threshold - \
prof_tdata->accum) { \
bt_init(&bt, prof_tdata->vec); \
prof_backtrace(&bt, nignore); \
ret = prof_lookup(&bt); \
} else \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
} \
} while (0)
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos
(
JEMALLOC_ATTR
(
unused
),
prof_tdata
,
prof_tdata_t
*
)
prof_tdata_t
*
prof_tdata_get
(
bool
create
);
void
prof_sample_threshold_update
(
prof_tdata_t
*
prof_tdata
);
prof_ctx_t
*
prof_ctx_get
(
const
void
*
ptr
);
void
prof_ctx_set
(
const
void
*
ptr
,
size_t
usize
,
prof_ctx_t
*
ctx
);
bool
prof_sample_accum_update
(
size_t
size
);
void
prof_malloc
(
const
void
*
ptr
,
size_t
usize
,
prof_thr_cnt_t
*
cnt
);
void
prof_realloc
(
const
void
*
ptr
,
size_t
usize
,
prof_thr_cnt_t
*
cnt
,
size_t
old_usize
,
prof_ctx_t
*
old_ctx
);
void
prof_free
(
const
void
*
ptr
,
size_t
size
);
bool
prof_active_get_unlocked
(
void
);
bool
prof_gdump_get_unlocked
(
void
);
prof_tdata_t
*
prof_tdata_get
(
tsd_t
*
tsd
,
bool
create
);
bool
prof_sample_accum_update
(
tsd_t
*
tsd
,
size_t
usize
,
bool
commit
,
prof_tdata_t
**
tdata_out
);
prof_tctx_t
*
prof_alloc_prep
(
tsd_t
*
tsd
,
size_t
usize
,
bool
prof_active
,
bool
update
);
prof_tctx_t
*
prof_tctx_get
(
const
void
*
ptr
);
void
prof_tctx_set
(
const
void
*
ptr
,
size_t
usize
,
prof_tctx_t
*
tctx
);
void
prof_tctx_reset
(
const
void
*
ptr
,
size_t
usize
,
const
void
*
old_ptr
,
prof_tctx_t
*
tctx
);
void
prof_malloc_sample_object
(
const
void
*
ptr
,
size_t
usize
,
prof_tctx_t
*
tctx
);
void
prof_malloc
(
const
void
*
ptr
,
size_t
usize
,
prof_tctx_t
*
tctx
);
void
prof_realloc
(
tsd_t
*
tsd
,
const
void
*
ptr
,
size_t
usize
,
prof_tctx_t
*
tctx
,
bool
prof_active
,
bool
updated
,
const
void
*
old_ptr
,
size_t
old_usize
,
prof_tctx_t
*
old_tctx
);
void
prof_free
(
tsd_t
*
tsd
,
const
void
*
ptr
,
size_t
usize
);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
malloc_tsd_externs
(
prof_tdata
,
prof_tdata_t
*
)
malloc_tsd_funcs
(
JEMALLOC_INLINE
,
prof_tdata
,
prof_tdata_t
*
,
NULL
,
prof_tdata_cleanup
)
JEMALLOC_ALWAYS_INLINE
bool
prof_active_get_unlocked
(
void
)
{
/*
* Even if opt_prof is true, sampling can be temporarily disabled by
* setting prof_active to false. No locking is used when reading
* prof_active in the fast path, so there are no guarantees regarding
* how long it will take for all threads to notice state changes.
*/
return
(
prof_active
);
}
JEMALLOC_
INLINE
prof_tdata_t
*
prof_
tdata_get
(
bool
create
)
JEMALLOC_
ALWAYS_INLINE
bool
prof_
gdump_get_unlocked
(
void
)
{
prof_tdata_t
*
prof_tdata
;
/*
* No locking is used when reading prof_gdump_val in the fast path, so
* there are no guarantees regarding how long it will take for all
* threads to notice state changes.
*/
return
(
prof_gdump_val
);
}
JEMALLOC_ALWAYS_INLINE
prof_tdata_t
*
prof_tdata_get
(
tsd_t
*
tsd
,
bool
create
)
{
prof_tdata_t
*
tdata
;
cassert
(
config_prof
);
prof_tdata
=
*
prof_tdata_tsd_get
();
if
(
create
&&
prof_tdata
==
NULL
)
prof_tdata
=
prof_tdata_init
();
tdata
=
tsd_prof_tdata_get
(
tsd
);
if
(
create
)
{
if
(
unlikely
(
tdata
==
NULL
))
{
if
(
tsd_nominal
(
tsd
))
{
tdata
=
prof_tdata_init
(
tsd
);
tsd_prof_tdata_set
(
tsd
,
tdata
);
}
}
else
if
(
unlikely
(
tdata
->
expired
))
{
tdata
=
prof_tdata_reinit
(
tsd
,
tdata
);
tsd_prof_tdata_set
(
tsd
,
tdata
);
}
assert
(
tdata
==
NULL
||
tdata
->
attached
);
}
return
(
prof_
tdata
);
return
(
tdata
);
}
JEMALLOC_
INLINE
void
prof_
sample_threshold_update
(
prof_tdata_t
*
prof_tdata
)
JEMALLOC_
ALWAYS_INLINE
prof_tctx_t
*
prof_
tctx_get
(
const
void
*
ptr
)
{
/*
* The body of this function is compiled out unless heap profiling is
* enabled, so that it is possible to compile jemalloc with floating
* point support completely disabled. Avoiding floating point code is
* important on memory-constrained systems, but it also enables a
* workaround for versions of glibc that don't properly save/restore
* floating point registers during dynamic lazy symbol loading (which
* internally calls into whatever malloc implementation happens to be
* integrated into the application). Note that some compilers (e.g.
* gcc 4.8) may use floating point registers for fast memory moves, so
* jemalloc must be compiled with such optimizations disabled (e.g.
* -mno-sse) in order for the workaround to be complete.
*/
#ifdef JEMALLOC_PROF
	uint64_t r;
	double u;

	cassert(config_prof);
	assert(ptr != NULL);

	/*
	 * Compute sample threshold as a geometrically distributed random
	 * variable with mean (2^opt_lg_prof_sample).
	 *
	 *   prof_tdata->threshold = ceil(log(u) / log(1 - p)),
	 *   where p = 1 / 2^opt_lg_prof_sample
	 *
	 * For more information on the math, see:
	 *
	 *   Non-Uniform Random Variate Generation
	 *   Luc Devroye
	 *   Springer-Verlag, New York, 1986
	 *   pp 500
	 *   (http://luc.devroye.org/rnbookindex.html)
	 */
	prng64(r, 53, prof_tdata->prng_state,
	    UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
	u = (double)r * (1.0/9007199254740992.0L);
	prof_tdata->threshold = (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
	    + (uint64_t)1U;
#endif
	return (arena_prof_tctx_get(ptr));
}
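Restating the sampling math from the comment above as a hedged, standalone helper: draw u uniformly in (0, 1), then invert the geometric CDF so the expected gap between samples is 2^lg_prof_sample bytes. The in-tree code does this with prng64() and per-thread state; the function below is illustrative only.

#include <math.h>
#include <stdint.h>

static uint64_t
sample_threshold(double u, unsigned lg_prof_sample)
{
    double p = 1.0 / (double)((uint64_t)1U << lg_prof_sample);

    /* Geometric variate with mean 2^lg_prof_sample (requires 0 < u < 1). */
    return ((uint64_t)(log(u) / log(1.0 - p)) + 1);
}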
JEMALLOC_
INLINE
prof_ctx_t
*
prof_ctx_
g
et
(
const
void
*
ptr
)
JEMALLOC_
ALWAYS_INLINE
void
prof_
t
ctx_
s
et
(
const
void
*
ptr
,
size_t
usize
,
prof_tctx_t
*
tctx
)
{
prof_ctx_t
*
ret
;
arena_chunk_t
*
chunk
;
cassert
(
config_prof
);
assert
(
ptr
!=
NULL
);
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
ptr
);
if
(
chunk
!=
ptr
)
{
/* Region. */
ret
=
arena_prof_ctx_get
(
ptr
);
}
else
ret
=
huge_prof_ctx_get
(
ptr
);
return
(
ret
);
arena_prof_tctx_set
(
ptr
,
usize
,
tctx
);
}
JEMALLOC_INLINE
void
prof_ctx_set
(
const
void
*
ptr
,
size_t
usize
,
prof_ctx_t
*
ctx
)
JEMALLOC_ALWAYS_INLINE
void
prof_tctx_reset
(
const
void
*
ptr
,
size_t
usize
,
const
void
*
old_ptr
,
prof_tctx_t
*
old_tctx
)
{
arena_chunk_t
*
chunk
;
cassert
(
config_prof
);
assert
(
ptr
!=
NULL
);
chunk
=
(
arena_chunk_t
*
)
CHUNK_ADDR2BASE
(
ptr
);
if
(
chunk
!=
ptr
)
{
/* Region. */
arena_prof_ctx_set
(
ptr
,
usize
,
ctx
);
}
else
huge_prof_ctx_set
(
ptr
,
ctx
);
arena_prof_tctx_reset
(
ptr
,
usize
,
old_ptr
,
old_tctx
);
}
JEMALLOC_INLINE
bool
prof_sample_accum_update
(
size_t
size
)
JEMALLOC_ALWAYS_INLINE
bool
prof_sample_accum_update
(
tsd_t
*
tsd
,
size_t
usize
,
bool
update
,
prof_tdata_t
**
tdata_out
)
{
prof_tdata_t
*
prof_
tdata
;
prof_tdata_t
*
tdata
;
cassert
(
config_prof
);
/* Sampling logic is unnecessary if the interval is 1. */
assert
(
opt_lg_prof_sample
!=
0
);
prof_tdata
=
prof_tdata_get
(
false
);
if
((
uintptr_t
)
prof_tdata
<=
(
uintptr_t
)
PROF_TDATA_STATE_MAX
)
tdata
=
prof_tdata_get
(
tsd
,
true
);
if
((
uintptr_t
)
tdata
<=
(
uintptr_t
)
PROF_TDATA_STATE_MAX
)
tdata
=
NULL
;
if
(
tdata_out
!=
NULL
)
*
tdata_out
=
tdata
;
if
(
tdata
==
NULL
)
return
(
true
);
/* Take care to avoid integer overflow. */
if
(
size
>=
prof_tdata
->
threshold
-
prof_tdata
->
accum
)
{
prof_tdata
->
accum
-=
(
prof_tdata
->
threshold
-
size
);
if
(
tdata
->
bytes_until_sample
>=
usize
)
{
if
(
update
)
tdata
->
bytes_until_sample
-=
usize
;
return
(
true
);
}
else
{
/* Compute new sample threshold. */
prof_sample_threshold_update
(
prof_tdata
);
while
(
prof_tdata
->
accum
>=
prof_tdata
->
threshold
)
{
prof_tdata
->
accum
-=
prof_tdata
->
threshold
;
prof_sample_threshold_update
(
prof_tdata
);
if
(
update
)
prof_sample_threshold_update
(
tdata
);
return
(
!
tdata
->
active
);
}
return
(
false
);
}
else
{
prof_tdata
->
accum
+=
size
;
return
(
true
);
}
JEMALLOC_ALWAYS_INLINE
prof_tctx_t
*
prof_alloc_prep
(
tsd_t
*
tsd
,
size_t
usize
,
bool
prof_active
,
bool
update
)
{
prof_tctx_t
*
ret
;
prof_tdata_t
*
tdata
;
prof_bt_t
bt
;
assert
(
usize
==
s2u
(
usize
));
if
(
!
prof_active
||
likely
(
prof_sample_accum_update
(
tsd
,
usize
,
update
,
&
tdata
)))
ret
=
(
prof_tctx_t
*
)(
uintptr_t
)
1U
;
else
{
bt_init
(
&
bt
,
tdata
->
vec
);
prof_backtrace
(
&
bt
);
ret
=
prof_lookup
(
tsd
,
&
bt
);
}
return
(
ret
);
}
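Callers interpret prof_alloc_prep()'s return value through the (prof_tctx_t *)1U sentinel: anything greater than 1U is a real context for a sampled allocation. A sketch of the expected caller-side check, assuming the surrounding header's declarations (the function name is illustrative):

static void
record_alloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
    if ((uintptr_t)tctx > (uintptr_t)1U) {
        /* Sampled: attach the real profiling context. */
        prof_malloc_sample_object(ptr, usize, tctx);
    } else {
        /* Not sampled: mark the allocation with the sentinel. */
        prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
    }
}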
JEMALLOC_INLINE
void
prof_malloc
(
const
void
*
ptr
,
size_t
usize
,
prof_t
hr_cnt_t
*
cnt
)
JEMALLOC_
ALWAYS_
INLINE
void
prof_malloc
(
const
void
*
ptr
,
size_t
usize
,
prof_t
ctx_t
*
tctx
)
{
cassert
(
config_prof
);
assert
(
ptr
!=
NULL
);
assert
(
usize
==
isalloc
(
ptr
,
true
));
if
(
opt_lg_prof_sample
!=
0
)
{
if
(
prof_sample_accum_update
(
usize
))
{
/*
* Don't sample. For malloc()-like allocation, it is
* always possible to tell in advance how large an
* object's usable size will be, so there should never
* be a difference between the usize passed to
* PROF_ALLOC_PREP() and prof_malloc().
*/
assert
((
uintptr_t
)
cnt
==
(
uintptr_t
)
1U
);
}
}
if
((
uintptr_t
)
cnt
>
(
uintptr_t
)
1U
)
{
prof_ctx_set
(
ptr
,
usize
,
cnt
->
ctx
);
cnt
->
epoch
++
;
/*********/
mb_write
();
/*********/
cnt
->
cnts
.
curobjs
++
;
cnt
->
cnts
.
curbytes
+=
usize
;
if
(
opt_prof_accum
)
{
cnt
->
cnts
.
accumobjs
++
;
cnt
->
cnts
.
accumbytes
+=
usize
;
}
/*********/
mb_write
();
/*********/
cnt
->
epoch
++
;
/*********/
mb_write
();
/*********/
}
else
prof_ctx_set
(
ptr
,
usize
,
(
prof_ctx_t
*
)(
uintptr_t
)
1U
);
if
(
unlikely
((
uintptr_t
)
tctx
>
(
uintptr_t
)
1U
))
prof_malloc_sample_object
(
ptr
,
usize
,
tctx
);
else
prof_tctx_set
(
ptr
,
usize
,
(
prof_tctx_t
*
)(
uintptr_t
)
1U
);
}
JEMALLOC_INLINE
void
prof_realloc
(
const
void
*
ptr
,
size_t
usize
,
prof_thr_cnt_t
*
cnt
,
size_t
old_usize
,
prof_ctx_t
*
old_ctx
)
JEMALLOC_ALWAYS_INLINE
void
prof_realloc
(
tsd_t
*
tsd
,
const
void
*
ptr
,
size_t
usize
,
prof_tctx_t
*
tctx
,
bool
prof_active
,
bool
updated
,
const
void
*
old_ptr
,
size_t
old_usize
,
prof_tctx_t
*
old_tctx
)
{
prof_thr_cnt_t
*
told_cnt
;
bool
sampled
,
old_sampled
;
cassert
(
config_prof
);
assert
(
ptr
!=
NULL
||
(
uintptr_t
)
cnt
<=
(
uintptr_t
)
1U
);
assert
(
ptr
!=
NULL
||
(
uintptr_t
)
tctx
<=
(
uintptr_t
)
1U
);
if
(
ptr
!=
NULL
)
{
if
(
prof_active
&&
!
updated
&&
ptr
!=
NULL
)
{
assert
(
usize
==
isalloc
(
ptr
,
true
));
if
(
opt_lg_prof_sample
!=
0
)
{
if
(
prof_sample_accum_update
(
usize
))
{
if
(
prof_sample_accum_update
(
tsd
,
usize
,
true
,
NULL
))
{
/*
* Don't sample. The usize passed to
* PROF_ALLOC_PREP() was larger than what
* actually got allocated, so a backtrace was
* captured for this allocation, even though
* its actual usize was insufficient to cross
* the sample threshold.
* Don't sample. The usize passed to prof_alloc_prep()
* was larger than what actually got allocated, so a
* backtrace was captured for this allocation, even
* though its actual usize was insufficient to cross the
* sample threshold.
*/
cnt
=
(
prof_thr_cnt_t
*
)(
uintptr_t
)
1U
;
}
tctx
=
(
prof_tctx_t
*
)(
uintptr_t
)
1U
;
}
}
if
((
uintptr_t
)
old_ctx
>
(
uintptr_t
)
1U
)
{
told_cnt
=
prof_lookup
(
old_ctx
->
bt
);
if
(
told_cnt
==
NULL
)
{
/*
* It's too late to propagate OOM for this realloc(),
* so operate directly on old_cnt->ctx->cnt_merged.
*/
malloc_mutex_lock
(
old_ctx
->
lock
);
old_ctx
->
cnt_merged
.
curobjs
--
;
old_ctx
->
cnt_merged
.
curbytes
-=
old_usize
;
malloc_mutex_unlock
(
old_ctx
->
lock
);
told_cnt
=
(
prof_thr_cnt_t
*
)(
uintptr_t
)
1U
;
}
}
else
told_cnt
=
(
prof_thr_cnt_t
*
)(
uintptr_t
)
1U
;
if
((
uintptr_t
)
told_cnt
>
(
uintptr_t
)
1U
)
told_cnt
->
epoch
++
;
if
((
uintptr_t
)
cnt
>
(
uintptr_t
)
1U
)
{
prof_ctx_set
(
ptr
,
usize
,
cnt
->
ctx
);
cnt
->
epoch
++
;
}
else
if
(
ptr
!=
NULL
)
prof_ctx_set
(
ptr
,
usize
,
(
prof_ctx_t
*
)(
uintptr_t
)
1U
);
/*********/
mb_write
();
/*********/
if
((
uintptr_t
)
told_cnt
>
(
uintptr_t
)
1U
)
{
told_cnt
->
cnts
.
curobjs
--
;
told_cnt
->
cnts
.
curbytes
-=
old_usize
;
}
if
((
uintptr_t
)
cnt
>
(
uintptr_t
)
1U
)
{
cnt
->
cnts
.
curobjs
++
;
cnt
->
cnts
.
curbytes
+=
usize
;
if
(
opt_prof_accum
)
{
cnt
->
cnts
.
accumobjs
++
;
cnt
->
cnts
.
accumbytes
+=
usize
;
}
}
/*********/
mb_write
();
/*********/
if
((
uintptr_t
)
told_cnt
>
(
uintptr_t
)
1U
)
told_cnt
->
epoch
++
;
if
((
uintptr_t
)
cnt
>
(
uintptr_t
)
1U
)
cnt
->
epoch
++
;
/*********/
mb_write
();
/* Not strictly necessary. */
sampled
=
((
uintptr_t
)
tctx
>
(
uintptr_t
)
1U
);
old_sampled
=
((
uintptr_t
)
old_tctx
>
(
uintptr_t
)
1U
);
if
(
unlikely
(
sampled
))
prof_malloc_sample_object
(
ptr
,
usize
,
tctx
);
else
prof_tctx_reset
(
ptr
,
usize
,
old_ptr
,
old_tctx
);
if
(
unlikely
(
old_sampled
))
prof_free_sampled_object
(
tsd
,
old_usize
,
old_tctx
);
}
JEMALLOC_INLINE
void
prof_free
(
const
void
*
ptr
,
size_t
size
)
JEMALLOC_
ALWAYS_
INLINE
void
prof_free
(
tsd_t
*
tsd
,
const
void
*
ptr
,
size_t
u
size
)
{
prof_ctx_t
*
ctx
=
prof_ctx_get
(
ptr
);
prof_
t
ctx_t
*
t
ctx
=
prof_
t
ctx_get
(
ptr
);
cassert
(
config_prof
);
assert
(
usize
==
isalloc
(
ptr
,
true
));
if
((
uintptr_t
)
ctx
>
(
uintptr_t
)
1
)
{
prof_thr_cnt_t
*
tcnt
;
assert
(
size
==
isalloc
(
ptr
,
true
));
tcnt
=
prof_lookup
(
ctx
->
bt
);
if
(
tcnt
!=
NULL
)
{
tcnt
->
epoch
++
;
/*********/
mb_write
();
/*********/
tcnt
->
cnts
.
curobjs
--
;
tcnt
->
cnts
.
curbytes
-=
size
;
/*********/
mb_write
();
/*********/
tcnt
->
epoch
++
;
/*********/
mb_write
();
/*********/
}
else
{
/*
* OOM during free() cannot be propagated, so operate
* directly on cnt->ctx->cnt_merged.
*/
malloc_mutex_lock
(
ctx
->
lock
);
ctx
->
cnt_merged
.
curobjs
--
;
ctx
->
cnt_merged
.
curbytes
-=
size
;
malloc_mutex_unlock
(
ctx
->
lock
);
}
}
if
(
unlikely
((
uintptr_t
)
tctx
>
(
uintptr_t
)
1U
))
prof_free_sampled_object
(
tsd
,
usize
,
tctx
);
}
#endif
...
...
deps/jemalloc/include/jemalloc/internal/ql.h
/*
* List definitions.
*/
/* List definitions. */
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
...
...
deps/jemalloc/include/jemalloc/internal/qr.h
...
...
@@ -40,8 +40,10 @@ struct { \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code. */
/*
* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code.
*/
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
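qr_meld() and qr_split() coincide because, on circular doubly linked rings, swapping the predecessors of two elements either joins two rings or splits one; applying the same swap twice restores the original state. A standalone illustration (plain structs, not the qr.h macros):

struct ring {
    struct ring *next;
    struct ring *prev;
};

static void
ring_meld_or_split(struct ring *a, struct ring *b)
{
    struct ring *t;

    /* Swap the predecessors of a and b, then repair their next links. */
    t = a->prev;
    a->prev = b->prev;
    b->prev = t;
    a->prev->next = a;
    b->prev->next = b;
}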
...
...