Commit b85cb4ce
authored Nov 28, 2012 by antirez

    Jemalloc updated to version 3.2.0.

parent 3756e141
40 changed files
deps/jemalloc/include/jemalloc/jemalloc.h.in

@@ -25,6 +25,8 @@ extern "C" {
 #endif
 #define ALLOCM_ZERO	((int)0x40)
 #define ALLOCM_NO_MOVE	((int)0x80)
+/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
+#define ALLOCM_ARENA(a)	((int)(((a)+1) << 8))
 #define ALLOCM_SUCCESS		0
 #define ALLOCM_ERR_OOM		1
@@ -59,7 +61,8 @@ JEMALLOC_EXPORT void *	je_memalign(size_t alignment, size_t size)
 JEMALLOC_EXPORT void *	je_valloc(size_t size) JEMALLOC_ATTR(malloc);
 #endif
-JEMALLOC_EXPORT size_t	je_malloc_usable_size(const void *ptr);
+JEMALLOC_EXPORT size_t	je_malloc_usable_size(
+    JEMALLOC_USABLE_SIZE_CONST void *ptr);
 JEMALLOC_EXPORT void	je_malloc_stats_print(void (*write_cb)(void *,
     const char *), void *je_cbopaque, const char *opts);
 JEMALLOC_EXPORT int	je_mallctl(const char *name, void *oldp,
deps/jemalloc/include/jemalloc/jemalloc_defs.h.in

@@ -221,6 +221,15 @@
 #undef JEMALLOC_OVERRIDE_MEMALIGN
 #undef JEMALLOC_OVERRIDE_VALLOC
+/*
+ * At least Linux omits the "const" in:
+ *
+ *   size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#undef JEMALLOC_USABLE_SIZE_CONST
+
 /*
  * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
  */
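The new JEMALLOC_USABLE_SIZE_CONST macro exists so that je_malloc_usable_size() can mirror whatever prototype the host libc uses for malloc_usable_size(). A minimal sketch of the idea, not taken from this commit (the real definition is substituted by jemalloc's configure script):

/* Hypothetical stand-in for what configure generates into jemalloc_defs.h. */
#if defined(__GLIBC__)
#  define JEMALLOC_USABLE_SIZE_CONST        /* glibc: size_t malloc_usable_size(void *ptr); */
#else
#  define JEMALLOC_USABLE_SIZE_CONST const  /* elsewhere the argument is const-qualified */
#endif

size_t je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr);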
deps/jemalloc/src/arena.c

(diff collapsed in this capture; not shown)
deps/jemalloc/src/base.c

@@ -32,7 +32,8 @@ base_pages_alloc(size_t minsize)
 	assert(minsize != 0);
 	csize = CHUNK_CEILING(minsize);
 	zero = false;
-	base_pages = chunk_alloc(csize, chunksize, true, &zero);
+	base_pages = chunk_alloc(csize, chunksize, true, &zero,
+	    chunk_dss_prec_get());
 	if (base_pages == NULL)
 		return (true);
 	base_next_addr = base_pages;
deps/jemalloc/src/chunk.c

@@ -4,7 +4,8 @@
 /******************************************************************************/
 /* Data. */
 
-size_t	opt_lg_chunk = LG_CHUNK_DEFAULT;
+const char	*opt_dss = DSS_DEFAULT;
+size_t		opt_lg_chunk = LG_CHUNK_DEFAULT;
 
 malloc_mutex_t	chunks_mtx;
 chunk_stats_t	stats_chunks;
@@ -15,8 +16,10 @@ chunk_stats_t stats_chunks;
  * address space.  Depending on function, different tree orderings are needed,
  * which is why there are two trees with the same contents.
  */
-static extent_tree_t	chunks_szad;
-static extent_tree_t	chunks_ad;
+static extent_tree_t	chunks_szad_mmap;
+static extent_tree_t	chunks_ad_mmap;
+static extent_tree_t	chunks_szad_dss;
+static extent_tree_t	chunks_ad_dss;
 
 rtree_t		*chunks_rtree;
@@ -30,19 +33,23 @@ size_t arena_maxclass; /* Max size class for arenas. */
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
-static void	*chunk_recycle(size_t size, size_t alignment, bool base,
+static void	*chunk_recycle(extent_tree_t *chunks_szad,
+    extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
     bool *zero);
-static void	chunk_record(void *chunk, size_t size);
+static void	chunk_record(extent_tree_t *chunks_szad,
+    extent_tree_t *chunks_ad, void *chunk, size_t size);
 
 /******************************************************************************/
 
 static void *
-chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
+chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
+    size_t alignment, bool base, bool *zero)
 {
 	void *ret;
 	extent_node_t *node;
 	extent_node_t key;
 	size_t alloc_size, leadsize, trailsize;
+	bool zeroed;
 
 	if (base) {
 		/*
@@ -61,7 +68,7 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
 	key.addr = NULL;
 	key.size = alloc_size;
 	malloc_mutex_lock(&chunks_mtx);
-	node = extent_tree_szad_nsearch(&chunks_szad, &key);
+	node = extent_tree_szad_nsearch(chunks_szad, &key);
 	if (node == NULL) {
 		malloc_mutex_unlock(&chunks_mtx);
 		return (NULL);
@@ -72,13 +79,13 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
 	trailsize = node->size - leadsize - size;
 	ret = (void *)((uintptr_t)node->addr + leadsize);
 	/* Remove node from the tree. */
-	extent_tree_szad_remove(&chunks_szad, node);
-	extent_tree_ad_remove(&chunks_ad, node);
+	extent_tree_szad_remove(chunks_szad, node);
+	extent_tree_ad_remove(chunks_ad, node);
 	if (leadsize != 0) {
 		/* Insert the leading space as a smaller chunk. */
 		node->size = leadsize;
-		extent_tree_szad_insert(&chunks_szad, node);
-		extent_tree_ad_insert(&chunks_ad, node);
+		extent_tree_szad_insert(chunks_szad, node);
+		extent_tree_ad_insert(chunks_ad, node);
 		node = NULL;
 	}
 	if (trailsize != 0) {
@@ -101,23 +108,24 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
 		}
 		node->addr = (void *)((uintptr_t)(ret) + size);
 		node->size = trailsize;
-		extent_tree_szad_insert(&chunks_szad, node);
-		extent_tree_ad_insert(&chunks_ad, node);
+		extent_tree_szad_insert(chunks_szad, node);
+		extent_tree_ad_insert(chunks_ad, node);
 		node = NULL;
 	}
 	malloc_mutex_unlock(&chunks_mtx);
 
-	if (node != NULL)
+	zeroed = false;
+	if (node != NULL) {
+		if (node->zeroed) {
+			zeroed = true;
+			*zero = true;
+		}
 		base_node_dealloc(node);
-#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
-	/* Pages are zeroed as a side effect of pages_purge(). */
-	*zero = true;
-#else
-	if (*zero) {
+	}
+	if (zeroed == false && *zero) {
 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		memset(ret, 0, size);
 	}
-#endif
 	return (ret);
 }
@@ -128,7 +136,8 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
  * advantage of them if they are returned.
  */
 void *
-chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
+chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
+    dss_prec_t dss_prec)
 {
 	void *ret;
@@ -137,17 +146,26 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
 	assert(alignment != 0);
 	assert((alignment & chunksize_mask) == 0);
 
-	ret = chunk_recycle(size, alignment, base, zero);
-	if (ret != NULL)
+	/* "primary" dss. */
+	if (config_dss && dss_prec == dss_prec_primary) {
+		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
+		    alignment, base, zero)) != NULL)
+			goto label_return;
+		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
+			goto label_return;
+	}
+	/* mmap. */
+	if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
+	    alignment, base, zero)) != NULL)
 		goto label_return;
-	ret = chunk_alloc_mmap(size, alignment, zero);
-	if (ret != NULL)
+	if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
 		goto label_return;
-	if (config_dss) {
-		ret = chunk_alloc_dss(size, alignment, zero);
-		if (ret != NULL)
+	/* "secondary" dss. */
+	if (config_dss && dss_prec == dss_prec_secondary) {
+		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
+		    alignment, base, zero)) != NULL)
+			goto label_return;
+		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
 			goto label_return;
 	}
@@ -189,11 +207,13 @@ label_return:
 }
 
 static void
-chunk_record(void *chunk, size_t size)
+chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
+    size_t size)
 {
+	bool unzeroed;
 	extent_node_t *xnode, *node, *prev, key;
 
-	pages_purge(chunk, size);
+	unzeroed = pages_purge(chunk, size);
 
 	/*
 	 * Allocate a node before acquiring chunks_mtx even though it might not
@@ -205,7 +225,7 @@ chunk_record(void *chunk, size_t size)
 	malloc_mutex_lock(&chunks_mtx);
 	key.addr = (void *)((uintptr_t)chunk + size);
-	node = extent_tree_ad_nsearch(&chunks_ad, &key);
+	node = extent_tree_ad_nsearch(chunks_ad, &key);
 	/* Try to coalesce forward. */
 	if (node != NULL && node->addr == key.addr) {
 		/*
@@ -213,10 +233,11 @@ chunk_record(void *chunk, size_t size)
 		 * not change the position within chunks_ad, so only
 		 * remove/insert from/into chunks_szad.
 		 */
-		extent_tree_szad_remove(&chunks_szad, node);
+		extent_tree_szad_remove(chunks_szad, node);
 		node->addr = chunk;
 		node->size += size;
-		extent_tree_szad_insert(&chunks_szad, node);
+		node->zeroed = (node->zeroed && (unzeroed == false));
+		extent_tree_szad_insert(chunks_szad, node);
 		if (xnode != NULL)
 			base_node_dealloc(xnode);
 	} else {
@@ -234,12 +255,13 @@ chunk_record(void *chunk, size_t size)
 		node = xnode;
 		node->addr = chunk;
 		node->size = size;
-		extent_tree_ad_insert(&chunks_ad, node);
-		extent_tree_szad_insert(&chunks_szad, node);
+		node->zeroed = (unzeroed == false);
+		extent_tree_ad_insert(chunks_ad, node);
+		extent_tree_szad_insert(chunks_szad, node);
 	}
 
 	/* Try to coalesce backward. */
-	prev = extent_tree_ad_prev(&chunks_ad, node);
+	prev = extent_tree_ad_prev(chunks_ad, node);
 	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
 	    chunk) {
 		/*
@@ -247,19 +269,34 @@ chunk_record(void *chunk, size_t size)
 		 * not change the position within chunks_ad, so only
 		 * remove/insert node from/into chunks_szad.
 		 */
-		extent_tree_szad_remove(&chunks_szad, prev);
-		extent_tree_ad_remove(&chunks_ad, prev);
+		extent_tree_szad_remove(chunks_szad, prev);
+		extent_tree_ad_remove(chunks_ad, prev);
 
-		extent_tree_szad_remove(&chunks_szad, node);
+		extent_tree_szad_remove(chunks_szad, node);
 		node->addr = prev->addr;
 		node->size += prev->size;
-		extent_tree_szad_insert(&chunks_szad, node);
+		node->zeroed = (node->zeroed && prev->zeroed);
+		extent_tree_szad_insert(chunks_szad, node);
 
 		base_node_dealloc(prev);
 	}
 	malloc_mutex_unlock(&chunks_mtx);
 }
 
+void
+chunk_unmap(void *chunk, size_t size)
+{
+	assert(chunk != NULL);
+	assert(CHUNK_ADDR2BASE(chunk) == chunk);
+	assert(size != 0);
+	assert((size & chunksize_mask) == 0);
+
+	if (config_dss && chunk_in_dss(chunk))
+		chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
+	else if (chunk_dealloc_mmap(chunk, size))
+		chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
+}
+
 void
 chunk_dealloc(void *chunk, size_t size, bool unmap)
 {
@@ -273,15 +310,13 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
 	rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
 	if (config_stats || config_prof) {
 		malloc_mutex_lock(&chunks_mtx);
+		assert(stats_chunks.curchunks >= (size / chunksize));
 		stats_chunks.curchunks -= (size / chunksize);
 		malloc_mutex_unlock(&chunks_mtx);
 	}
 
-	if (unmap) {
-		if ((config_dss && chunk_in_dss(chunk)) ||
-		    chunk_dealloc_mmap(chunk, size))
-			chunk_record(chunk, size);
-	}
+	if (unmap)
+		chunk_unmap(chunk, size);
 }
 
 bool
@@ -301,8 +336,10 @@ chunk_boot(void)
 	}
 	if (config_dss && chunk_dss_boot())
 		return (true);
-	extent_tree_szad_new(&chunks_szad);
-	extent_tree_ad_new(&chunks_ad);
+	extent_tree_szad_new(&chunks_szad_mmap);
+	extent_tree_ad_new(&chunks_ad_mmap);
+	extent_tree_szad_new(&chunks_szad_dss);
+	extent_tree_ad_new(&chunks_ad_dss);
 	if (config_ivsalloc) {
 		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
 		    opt_lg_chunk);
@@ -312,3 +349,33 @@ chunk_boot(void)
 	return (false);
 }
 
+void
+chunk_prefork(void)
+{
+
+	malloc_mutex_lock(&chunks_mtx);
+	if (config_ivsalloc)
+		rtree_prefork(chunks_rtree);
+	chunk_dss_prefork();
+}
+
+void
+chunk_postfork_parent(void)
+{
+
+	chunk_dss_postfork_parent();
+	if (config_ivsalloc)
+		rtree_postfork_parent(chunks_rtree);
+	malloc_mutex_postfork_parent(&chunks_mtx);
+}
+
+void
+chunk_postfork_child(void)
+{
+
+	chunk_dss_postfork_child();
+	if (config_ivsalloc)
+		rtree_postfork_child(chunks_rtree);
+	malloc_mutex_postfork_child(&chunks_mtx);
+}
deps/jemalloc/src/chunk_dss.c

@@ -3,6 +3,16 @@
 /******************************************************************************/
 /* Data. */
 
+const char	*dss_prec_names[] = {
+	"disabled",
+	"primary",
+	"secondary",
+	"N/A"
+};
+
+/* Current dss precedence default, used when creating new arenas. */
+static dss_prec_t	dss_prec_default = DSS_PREC_DEFAULT;
+
 /*
  * Protects sbrk() calls.  This avoids malloc races among threads, though it
  * does not protect against races with threads that call sbrk() directly.
@@ -29,6 +39,31 @@ sbrk(intptr_t increment)
 }
 #endif
 
+dss_prec_t
+chunk_dss_prec_get(void)
+{
+	dss_prec_t ret;
+
+	if (config_dss == false)
+		return (dss_prec_disabled);
+	malloc_mutex_lock(&dss_mtx);
+	ret = dss_prec_default;
+	malloc_mutex_unlock(&dss_mtx);
+	return (ret);
+}
+
+bool
+chunk_dss_prec_set(dss_prec_t dss_prec)
+{
+
+	if (config_dss == false)
+		return (true);
+	malloc_mutex_lock(&dss_mtx);
+	dss_prec_default = dss_prec;
+	malloc_mutex_unlock(&dss_mtx);
+	return (false);
+}
+
 void *
 chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 {
@@ -88,7 +123,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 				dss_max = dss_next;
 				malloc_mutex_unlock(&dss_mtx);
 				if (cpad_size != 0)
-					chunk_dealloc(cpad, cpad_size, true);
+					chunk_unmap(cpad, cpad_size);
 				if (*zero) {
 					VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 					memset(ret, 0, size);
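Together with the conf and mallctl plumbing added later in this commit, the precedence table above ("disabled", "primary", "secondary") becomes user-visible. A minimal usage sketch, not part of the commit, assuming jemalloc's standard MALLOC_CONF environment variable and the je_ symbol prefix used by this Redis bundle:

/* Build against the bundled header; run as: MALLOC_CONF="dss:primary" ./a.out */
#include <stdio.h>
#include "jemalloc/jemalloc.h"

int
main(void)
{
	const char *dss;
	size_t sz = sizeof(dss);

	/* "opt.dss" is the read-only mallctl added in ctl.c below. */
	if (je_mallctl("opt.dss", &dss, &sz, NULL, 0) == 0)
		printf("dss precedence: %s\n", dss); /* disabled, primary, or secondary */
	return (0);
}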
deps/jemalloc/src/chunk_mmap.c

@@ -113,22 +113,30 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
 #endif
 }
 
-void
+bool
 pages_purge(void *addr, size_t length)
 {
+	bool unzeroed;
 
 #ifdef _WIN32
 	VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
+	unzeroed = true;
 #else
 #  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
 #    define JEMALLOC_MADV_PURGE MADV_DONTNEED
+#    define JEMALLOC_MADV_ZEROS true
 #  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
 #    define JEMALLOC_MADV_PURGE MADV_FREE
+#    define JEMALLOC_MADV_ZEROS false
 #  else
 #    error "No method defined for purging unused dirty pages."
 #  endif
-	madvise(addr, length, JEMALLOC_MADV_PURGE);
+	int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
+	unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
+#  undef JEMALLOC_MADV_PURGE
+#  undef JEMALLOC_MADV_ZEROS
 #endif
+	return (unzeroed);
 }
 
 static void *
deps/jemalloc/src/ctl.c

@@ -48,8 +48,8 @@ static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
     size_t *oldlenp, void *newp, size_t newlen);
 #define	INDEX_PROTO(n)							\
-const ctl_named_node_t	*n##_index(const size_t *mib, size_t miblen,	\
-    size_t i);
+static const ctl_named_node_t	*n##_index(const size_t *mib,		\
+    size_t miblen, size_t i);
 
 static bool	ctl_arena_init(ctl_arena_stats_t *astats);
 static void	ctl_arena_clear(ctl_arena_stats_t *astats);
@@ -58,6 +58,7 @@ static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
 static void	ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
     ctl_arena_stats_t *astats);
 static void	ctl_arena_refresh(arena_t *arena, unsigned i);
+static bool	ctl_grow(void);
 static void	ctl_refresh(void);
 static bool	ctl_init(void);
 static int	ctl_lookup(const char *name, ctl_node_t const **nodesp,
@@ -88,6 +89,7 @@ CTL_PROTO(config_utrace)
 CTL_PROTO(config_valgrind)
 CTL_PROTO(config_xmalloc)
 CTL_PROTO(opt_abort)
+CTL_PROTO(opt_dss)
 CTL_PROTO(opt_lg_chunk)
 CTL_PROTO(opt_narenas)
 CTL_PROTO(opt_lg_dirty_mult)
@@ -110,6 +112,10 @@ CTL_PROTO(opt_prof_gdump)
 CTL_PROTO(opt_prof_final)
 CTL_PROTO(opt_prof_leak)
 CTL_PROTO(opt_prof_accum)
+CTL_PROTO(arena_i_purge)
+static void	arena_purge(unsigned arena_ind);
+CTL_PROTO(arena_i_dss)
+INDEX_PROTO(arena_i)
 CTL_PROTO(arenas_bin_i_size)
 CTL_PROTO(arenas_bin_i_nregs)
 CTL_PROTO(arenas_bin_i_run_size)
@@ -125,6 +131,7 @@ CTL_PROTO(arenas_nbins)
 CTL_PROTO(arenas_nhbins)
 CTL_PROTO(arenas_nlruns)
 CTL_PROTO(arenas_purge)
+CTL_PROTO(arenas_extend)
 CTL_PROTO(prof_active)
 CTL_PROTO(prof_dump)
 CTL_PROTO(prof_interval)
@@ -158,6 +165,7 @@ CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
 CTL_PROTO(stats_arenas_i_lruns_j_curruns)
 INDEX_PROTO(stats_arenas_i_lruns_j)
 CTL_PROTO(stats_arenas_i_nthreads)
+CTL_PROTO(stats_arenas_i_dss)
 CTL_PROTO(stats_arenas_i_pactive)
 CTL_PROTO(stats_arenas_i_pdirty)
 CTL_PROTO(stats_arenas_i_mapped)
@@ -223,6 +231,7 @@ static const ctl_named_node_t config_node[] = {
 static const ctl_named_node_t opt_node[] = {
 	{NAME("abort"),			CTL(opt_abort)},
+	{NAME("dss"),			CTL(opt_dss)},
 	{NAME("lg_chunk"),		CTL(opt_lg_chunk)},
 	{NAME("narenas"),		CTL(opt_narenas)},
 	{NAME("lg_dirty_mult"),		CTL(opt_lg_dirty_mult)},
@@ -247,6 +256,18 @@ static const ctl_named_node_t opt_node[] = {
 	{NAME("prof_accum"),		CTL(opt_prof_accum)}
 };
 
+static const ctl_named_node_t arena_i_node[] = {
+	{NAME("purge"),			CTL(arena_i_purge)},
+	{NAME("dss"),			CTL(arena_i_dss)}
+};
+static const ctl_named_node_t super_arena_i_node[] = {
+	{NAME(""),			CHILD(named, arena_i)}
+};
+
+static const ctl_indexed_node_t arena_node[] = {
+	{INDEX(arena_i)}
+};
+
 static const ctl_named_node_t arenas_bin_i_node[] = {
 	{NAME("size"),			CTL(arenas_bin_i_size)},
 	{NAME("nregs"),			CTL(arenas_bin_i_nregs)},
@@ -282,7 +303,8 @@ static const ctl_named_node_t arenas_node[] = {
 	{NAME("bin"),			CHILD(indexed, arenas_bin)},
 	{NAME("nlruns"),		CTL(arenas_nlruns)},
 	{NAME("lrun"),			CHILD(indexed, arenas_lrun)},
-	{NAME("purge"),			CTL(arenas_purge)}
+	{NAME("purge"),			CTL(arenas_purge)},
+	{NAME("extend"),		CTL(arenas_extend)}
 };
 
 static const ctl_named_node_t prof_node[] = {
@@ -352,6 +374,7 @@ static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
 static const ctl_named_node_t stats_arenas_i_node[] = {
 	{NAME("nthreads"),		CTL(stats_arenas_i_nthreads)},
+	{NAME("dss"),			CTL(stats_arenas_i_dss)},
 	{NAME("pactive"),		CTL(stats_arenas_i_pactive)},
 	{NAME("pdirty"),		CTL(stats_arenas_i_pdirty)},
 	{NAME("mapped"),		CTL(stats_arenas_i_mapped)},
@@ -387,6 +410,7 @@ static const ctl_named_node_t root_node[] = {
 	{NAME("thread"),		CHILD(named, thread)},
 	{NAME("config"),		CHILD(named, config)},
 	{NAME("opt"),			CHILD(named, opt)},
+	{NAME("arena"),			CHILD(indexed, arena)},
 	{NAME("arenas"),		CHILD(named, arenas)},
 	{NAME("prof"),			CHILD(named, prof)},
 	{NAME("stats"),			CHILD(named, stats)}
@@ -420,6 +444,7 @@ static void
 ctl_arena_clear(ctl_arena_stats_t *astats)
 {
 
+	astats->dss = dss_prec_names[dss_prec_limit];
 	astats->pactive = 0;
 	astats->pdirty = 0;
 	if (config_stats) {
@@ -439,8 +464,8 @@ ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
 {
 	unsigned i;
 
-	arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty,
-	    &cstats->astats, cstats->bstats, cstats->lstats);
+	arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
+	    &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);
 
 	for (i = 0; i < NBINS; i++) {
 		cstats->allocated_small += cstats->bstats[i].allocated;
@@ -500,7 +525,7 @@ static void
 ctl_arena_refresh(arena_t *arena, unsigned i)
 {
 	ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
-	ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas];
+	ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
 
 	ctl_arena_clear(astats);
@@ -518,11 +543,72 @@ ctl_arena_refresh(arena_t *arena, unsigned i)
 	}
 }
 
+static bool
+ctl_grow(void)
+{
+	size_t astats_size;
+	ctl_arena_stats_t *astats;
+	arena_t **tarenas;
+
+	/* Extend arena stats and arenas arrays. */
+	astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t);
+	if (ctl_stats.narenas == narenas_auto) {
+		/* ctl_stats.arenas and arenas came from base_alloc(). */
+		astats = (ctl_arena_stats_t *)imalloc(astats_size);
+		if (astats == NULL)
+			return (true);
+		memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
+		    sizeof(ctl_arena_stats_t));
+
+		tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
+		    sizeof(arena_t *));
+		if (tarenas == NULL) {
+			idalloc(astats);
+			return (true);
+		}
+		memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *));
+	} else {
+		astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas,
+		    astats_size, 0, 0, false, false);
+		if (astats == NULL)
+			return (true);
+
+		tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) *
+		    sizeof(arena_t *), 0, 0, false, false);
+		if (tarenas == NULL)
+			return (true);
+	}
+	/* Initialize the new astats and arenas elements. */
+	memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
+	if (ctl_arena_init(&astats[ctl_stats.narenas + 1]))
+		return (true);
+	tarenas[ctl_stats.narenas] = NULL;
+	/* Swap merged stats to their new location. */
+	{
+		ctl_arena_stats_t tstats;
+		memcpy(&tstats, &astats[ctl_stats.narenas],
+		    sizeof(ctl_arena_stats_t));
+		memcpy(&astats[ctl_stats.narenas],
+		    &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
+		memcpy(&astats[ctl_stats.narenas + 1], &tstats,
+		    sizeof(ctl_arena_stats_t));
+	}
+	ctl_stats.arenas = astats;
+	ctl_stats.narenas++;
+	malloc_mutex_lock(&arenas_lock);
+	arenas = tarenas;
+	narenas_total++;
+	arenas_extend(narenas_total - 1);
+	malloc_mutex_unlock(&arenas_lock);
+
+	return (false);
+}
+
 static void
 ctl_refresh(void)
 {
 	unsigned i;
-	VARIABLE_ARRAY(arena_t *, tarenas, narenas);
+	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
 
 	if (config_stats) {
 		malloc_mutex_lock(&chunks_mtx);
@@ -542,19 +628,19 @@ ctl_refresh(void)
 	 * Clear sum stats, since they will be merged into by
 	 * ctl_arena_refresh().
 	 */
-	ctl_stats.arenas[narenas].nthreads = 0;
-	ctl_arena_clear(&ctl_stats.arenas[narenas]);
+	ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
+	ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
 
 	malloc_mutex_lock(&arenas_lock);
-	memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
-	for (i = 0; i < narenas; i++) {
+	memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
+	for (i = 0; i < ctl_stats.narenas; i++) {
 		if (arenas[i] != NULL)
 			ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
 		else
 			ctl_stats.arenas[i].nthreads = 0;
 	}
 	malloc_mutex_unlock(&arenas_lock);
-	for (i = 0; i < narenas; i++) {
+	for (i = 0; i < ctl_stats.narenas; i++) {
 		bool initialized = (tarenas[i] != NULL);
 
 		ctl_stats.arenas[i].initialized = initialized;
@@ -563,11 +649,13 @@ ctl_refresh(void)
 	}
 
 	if (config_stats) {
-		ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
-		    + ctl_stats.arenas[narenas].astats.allocated_large
+		ctl_stats.allocated =
+		    ctl_stats.arenas[ctl_stats.narenas].allocated_small
+		    + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
 		    + ctl_stats.huge.allocated;
-		ctl_stats.active = (ctl_stats.arenas[narenas].pactive <<
-		    LG_PAGE) + ctl_stats.huge.allocated;
+		ctl_stats.active =
+		    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
+		    + ctl_stats.huge.allocated;
 		ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
 	}
@@ -585,13 +673,15 @@ ctl_init(void)
 		 * Allocate space for one extra arena stats element, which
 		 * contains summed stats across all arenas.
 		 */
+		assert(narenas_auto == narenas_total_get());
+		ctl_stats.narenas = narenas_auto;
 		ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
-		    (narenas + 1) * sizeof(ctl_arena_stats_t));
+		    (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
 		if (ctl_stats.arenas == NULL) {
 			ret = true;
 			goto label_return;
 		}
-		memset(ctl_stats.arenas, 0, (narenas + 1) *
+		memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
 		    sizeof(ctl_arena_stats_t));
 
 		/*
@@ -601,14 +691,14 @@ ctl_init(void)
 		 */
 		if (config_stats) {
 			unsigned i;
-			for (i = 0; i <= narenas; i++) {
+			for (i = 0; i <= ctl_stats.narenas; i++) {
 				if (ctl_arena_init(&ctl_stats.arenas[i])) {
 					ret = true;
 					goto label_return;
 				}
 			}
 		}
-		ctl_stats.arenas[narenas].initialized = true;
+		ctl_stats.arenas[ctl_stats.narenas].initialized = true;
 
 		ctl_epoch = 0;
 		ctl_refresh();
@@ -827,6 +917,27 @@ ctl_boot(void)
 	return (false);
 }
 
+void
+ctl_prefork(void)
+{
+
+	malloc_mutex_lock(&ctl_mtx);
+}
+
+void
+ctl_postfork_parent(void)
+{
+
+	malloc_mutex_postfork_parent(&ctl_mtx);
+}
+
+void
+ctl_postfork_child(void)
+{
+
+	malloc_mutex_postfork_child(&ctl_mtx);
+}
+
 /******************************************************************************/
 /* *_ctl() functions. */
@@ -1032,8 +1143,8 @@ thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
 	}
 	READ(oldval, bool);
 
-label_return:
 	ret = 0;
+label_return:
 	return (ret);
 }
@@ -1063,13 +1174,14 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 	int ret;
 	unsigned newind, oldind;
 
+	malloc_mutex_lock(&ctl_mtx);
 	newind = oldind = choose_arena(NULL)->ind;
 	WRITE(newind, unsigned);
 	READ(oldind, unsigned);
 	if (newind != oldind) {
 		arena_t *arena;
 
-		if (newind >= narenas) {
+		if (newind >= ctl_stats.narenas) {
 			/* New arena index is out of range. */
 			ret = EFAULT;
 			goto label_return;
@@ -1102,6 +1214,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 	ret = 0;
 label_return:
+	malloc_mutex_unlock(&ctl_mtx);
 	return (ret);
 }
@@ -1135,6 +1248,7 @@ CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
 /******************************************************************************/
 
 CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
 CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
 CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
 CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
@@ -1158,12 +1272,123 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
 
+/******************************************************************************/
+
+/* ctl_mutex must be held during execution of this function. */
+static void
+arena_purge(unsigned arena_ind)
+{
+	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
+
+	malloc_mutex_lock(&arenas_lock);
+	memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
+	malloc_mutex_unlock(&arenas_lock);
+
+	if (arena_ind == ctl_stats.narenas) {
+		unsigned i;
+		for (i = 0; i < ctl_stats.narenas; i++) {
+			if (tarenas[i] != NULL)
+				arena_purge_all(tarenas[i]);
+		}
+	} else {
+		assert(arena_ind < ctl_stats.narenas);
+		if (tarenas[arena_ind] != NULL)
+			arena_purge_all(tarenas[arena_ind]);
+	}
+}
+
+static int
+arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret;
+
+	READONLY();
+	WRITEONLY();
+	malloc_mutex_lock(&ctl_mtx);
+	arena_purge(mib[1]);
+	malloc_mutex_unlock(&ctl_mtx);
+
+	ret = 0;
+label_return:
+	return (ret);
+}
+
+static int
+arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret, i;
+	bool match, err;
+	const char *dss;
+	unsigned arena_ind = mib[1];
+	dss_prec_t dss_prec_old = dss_prec_limit;
+	dss_prec_t dss_prec = dss_prec_limit;
+
+	malloc_mutex_lock(&ctl_mtx);
+	WRITE(dss, const char *);
+	match = false;
+	for (i = 0; i < dss_prec_limit; i++) {
+		if (strcmp(dss_prec_names[i], dss) == 0) {
+			dss_prec = i;
+			match = true;
+			break;
+		}
+	}
+	if (match == false) {
+		ret = EINVAL;
+		goto label_return;
+	}
+
+	if (arena_ind < ctl_stats.narenas) {
+		arena_t *arena = arenas[arena_ind];
+		if (arena != NULL) {
+			dss_prec_old = arena_dss_prec_get(arena);
+			arena_dss_prec_set(arena, dss_prec);
+			err = false;
+		} else
+			err = true;
+	} else {
+		dss_prec_old = chunk_dss_prec_get();
+		err = chunk_dss_prec_set(dss_prec);
+	}
+	dss = dss_prec_names[dss_prec_old];
+	READ(dss, const char *);
+	if (err) {
+		ret = EFAULT;
+		goto label_return;
+	}
+
+	ret = 0;
+label_return:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
+}
+
+static const ctl_named_node_t *
+arena_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+	const ctl_named_node_t *ret;
+
+	malloc_mutex_lock(&ctl_mtx);
+	if (i > ctl_stats.narenas) {
+		ret = NULL;
+		goto label_return;
+	}
+
+	ret = super_arena_i_node;
+label_return:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
+}
+
 /******************************************************************************/
 
 CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
 CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
 CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
-const ctl_named_node_t *
+static const ctl_named_node_t *
 arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
 {
@@ -1173,7 +1398,7 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
 }
 
 CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
-const ctl_named_node_t *
+static const ctl_named_node_t *
 arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
 {
@@ -1182,7 +1407,27 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
 	return (super_arenas_lrun_i_node);
 }
 
-CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned)
+static int
+arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+	int ret;
+	unsigned narenas;
+
+	malloc_mutex_lock(&ctl_mtx);
+	READONLY();
+	if (*oldlenp != sizeof(unsigned)) {
+		ret = EINVAL;
+		goto label_return;
+	}
+	narenas = ctl_stats.narenas;
+	READ(narenas, unsigned);
+
+	ret = 0;
+label_return:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
+}
 
 static int
 arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
@@ -1193,13 +1438,13 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
 	malloc_mutex_lock(&ctl_mtx);
 	READONLY();
-	if (*oldlenp != narenas * sizeof(bool)) {
+	if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
 		ret = EINVAL;
-		nread = (*oldlenp < narenas * sizeof(bool))
-		    ? (*oldlenp / sizeof(bool)) : narenas;
+		nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
+		    ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
 	} else {
 		ret = 0;
-		nread = narenas;
+		nread = ctl_stats.narenas;
 	}
 
 	for (i = 0; i < nread; i++)
@@ -1222,36 +1467,43 @@ arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen)
 {
 	int ret;
-	unsigned arena;
+	unsigned arena_ind;
 
+	malloc_mutex_lock(&ctl_mtx);
 	WRITEONLY();
-	arena = UINT_MAX;
-	WRITE(arena, unsigned);
-	if (newp != NULL && arena >= narenas) {
+	arena_ind = UINT_MAX;
+	WRITE(arena_ind, unsigned);
+	if (newp != NULL && arena_ind >= ctl_stats.narenas)
 		ret = EFAULT;
-		goto label_return;
-	} else {
-		VARIABLE_ARRAY(arena_t *, tarenas, narenas);
-
-		malloc_mutex_lock(&arenas_lock);
-		memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
-		malloc_mutex_unlock(&arenas_lock);
-
-		if (arena == UINT_MAX) {
-			unsigned i;
-			for (i = 0; i < narenas; i++) {
-				if (tarenas[i] != NULL)
-					arena_purge_all(tarenas[i]);
-			}
-		} else {
-			assert(arena < narenas);
-			if (tarenas[arena] != NULL)
-				arena_purge_all(tarenas[arena]);
-		}
+	else {
+		if (arena_ind == UINT_MAX)
+			arena_ind = ctl_stats.narenas;
+		arena_purge(arena_ind);
+		ret = 0;
 	}
 
-	ret = 0;
+label_return:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
+}
+
+static int
+arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret;
+
+	malloc_mutex_lock(&ctl_mtx);
+	READONLY();
+	if (ctl_grow()) {
+		ret = EAGAIN;
+		goto label_return;
+	}
+	READ(ctl_stats.narenas - 1, unsigned);
+
+	ret = 0;
 label_return:
+	malloc_mutex_unlock(&ctl_mtx);
 	return (ret);
 }
@@ -1356,7 +1608,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
 
-const ctl_named_node_t *
+static const ctl_named_node_t *
 stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
 {
@@ -1374,7 +1626,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
 CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
     ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
 
-const ctl_named_node_t *
+static const ctl_named_node_t *
 stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
 {
@@ -1384,6 +1636,7 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
 }
 
 CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
+CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
 CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
 CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
@@ -1395,13 +1648,13 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
 CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
     ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
 
-const ctl_named_node_t *
+static const ctl_named_node_t *
 stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
 {
 	const ctl_named_node_t *ret;
 
 	malloc_mutex_lock(&ctl_mtx);
-	if (ctl_stats.arenas[i].initialized == false) {
+	if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
 		ret = NULL;
 		goto label_return;
 	}
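A short sketch of how the new control entries above could be exercised from application code; this is illustrative only and not part of the commit. It assumes the je_-prefixed API exported by the bundled jemalloc, with "arenas.extend" returning the index of a freshly created arena and "arena.<i>.dss" reading or writing that arena's dss precedence:

#include <stdio.h>
#include "jemalloc/jemalloc.h"

int
main(void)
{
	unsigned narenas, new_ind;
	size_t usz = sizeof(unsigned);
	const char *dss = "primary";

	/* Total number of arenas, now served by arenas_narenas_ctl(). */
	je_mallctl("arenas.narenas", &narenas, &usz, NULL, 0);

	/* Create an additional arena; its index comes back through oldp. */
	if (je_mallctl("arenas.extend", &new_ind, &usz, NULL, 0) == 0) {
		char name[64];

		snprintf(name, sizeof(name), "arena.%u.dss", new_ind);
		/* Ask only this arena to prefer sbrk()-backed chunks. */
		je_mallctl(name, NULL, NULL, &dss, sizeof(dss));
		printf("arena %u created (had %u); dss=%s\n", new_ind, narenas, dss);
	}
	return (0);
}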
deps/jemalloc/src/huge.c

@@ -48,7 +48,8 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	 * it is possible to make correct junk/zero fill decisions below.
 	 */
 	is_zeroed = zero;
-	ret = chunk_alloc(csize, alignment, false, &is_zeroed);
+	ret = chunk_alloc(csize, alignment, false, &is_zeroed,
+	    chunk_dss_prec_get());
 	if (ret == NULL) {
 		base_node_dealloc(node);
 		return (NULL);
@@ -101,7 +102,7 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
 void *
 huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero)
+    size_t alignment, bool zero, bool try_tcache_dalloc)
 {
 	void *ret;
 	size_t copysize;
@@ -180,7 +181,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 #endif
 	{
 		memcpy(ret, ptr, copysize);
-		iqalloc(ptr);
+		iqallocx(ptr, try_tcache_dalloc);
 	}
 	return (ret);
 }
deps/jemalloc/src/jemalloc.c
View file @
b85cb4ce
...
@@ -33,7 +33,8 @@ unsigned ncpus;
...
@@ -33,7 +33,8 @@ unsigned ncpus;
malloc_mutex_t
arenas_lock
;
malloc_mutex_t
arenas_lock
;
arena_t
**
arenas
;
arena_t
**
arenas
;
unsigned
narenas
;
unsigned
narenas_total
;
unsigned
narenas_auto
;
/* Set to true once the allocator has been initialized. */
/* Set to true once the allocator has been initialized. */
static
bool
malloc_initialized
=
false
;
static
bool
malloc_initialized
=
false
;
...
@@ -144,14 +145,14 @@ choose_arena_hard(void)
...
@@ -144,14 +145,14 @@ choose_arena_hard(void)
{
{
arena_t
*
ret
;
arena_t
*
ret
;
if
(
narenas
>
1
)
{
if
(
narenas
_auto
>
1
)
{
unsigned
i
,
choose
,
first_null
;
unsigned
i
,
choose
,
first_null
;
choose
=
0
;
choose
=
0
;
first_null
=
narenas
;
first_null
=
narenas
_auto
;
malloc_mutex_lock
(
&
arenas_lock
);
malloc_mutex_lock
(
&
arenas_lock
);
assert
(
arenas
[
0
]
!=
NULL
);
assert
(
arenas
[
0
]
!=
NULL
);
for
(
i
=
1
;
i
<
narenas
;
i
++
)
{
for
(
i
=
1
;
i
<
narenas
_auto
;
i
++
)
{
if
(
arenas
[
i
]
!=
NULL
)
{
if
(
arenas
[
i
]
!=
NULL
)
{
/*
/*
* Choose the first arena that has the lowest
* Choose the first arena that has the lowest
...
@@ -160,7 +161,7 @@ choose_arena_hard(void)
...
@@ -160,7 +161,7 @@ choose_arena_hard(void)
if
(
arenas
[
i
]
->
nthreads
<
if
(
arenas
[
i
]
->
nthreads
<
arenas
[
choose
]
->
nthreads
)
arenas
[
choose
]
->
nthreads
)
choose
=
i
;
choose
=
i
;
}
else
if
(
first_null
==
narenas
)
{
}
else
if
(
first_null
==
narenas
_auto
)
{
/*
/*
* Record the index of the first uninitialized
* Record the index of the first uninitialized
* arena, in case all extant arenas are in use.
* arena, in case all extant arenas are in use.
...
@@ -174,7 +175,8 @@ choose_arena_hard(void)
...
@@ -174,7 +175,8 @@ choose_arena_hard(void)
}
}
}
}
if
(
arenas
[
choose
]
->
nthreads
==
0
||
first_null
==
narenas
)
{
if
(
arenas
[
choose
]
->
nthreads
==
0
||
first_null
==
narenas_auto
)
{
/*
/*
* Use an unloaded arena, or the least loaded arena if
* Use an unloaded arena, or the least loaded arena if
* all arenas are already initialized.
* all arenas are already initialized.
...
@@ -203,7 +205,7 @@ stats_print_atexit(void)
...
@@ -203,7 +205,7 @@ stats_print_atexit(void)
{
{
if
(
config_tcache
&&
config_stats
)
{
if
(
config_tcache
&&
config_stats
)
{
unsigned
i
;
unsigned
narenas
,
i
;
/*
/*
* Merge stats from extant threads. This is racy, since
* Merge stats from extant threads. This is racy, since
...
@@ -212,7 +214,7 @@ stats_print_atexit(void)
...
@@ -212,7 +214,7 @@ stats_print_atexit(void)
* out of date by the time they are reported, if other threads
* out of date by the time they are reported, if other threads
* continue to allocate.
* continue to allocate.
*/
*/
for
(
i
=
0
;
i
<
narenas
;
i
++
)
{
for
(
i
=
0
,
narenas
=
narenas_total_get
()
;
i
<
narenas
;
i
++
)
{
arena_t
*
arena
=
arenas
[
i
];
arena_t
*
arena
=
arenas
[
i
];
if
(
arena
!=
NULL
)
{
if
(
arena
!=
NULL
)
{
tcache_t
*
tcache
;
tcache_t
*
tcache
;
...
@@ -254,12 +256,13 @@ malloc_ncpus(void)
...
@@ -254,12 +256,13 @@ malloc_ncpus(void)
result
=
si
.
dwNumberOfProcessors
;
result
=
si
.
dwNumberOfProcessors
;
#else
#else
result
=
sysconf
(
_SC_NPROCESSORS_ONLN
);
result
=
sysconf
(
_SC_NPROCESSORS_ONLN
);
#endif
if
(
result
==
-
1
)
{
if
(
result
==
-
1
)
{
/* Error. */
/* Error. */
ret
=
1
;
ret
=
1
;
}
}
else
{
#endif
ret
=
(
unsigned
)
result
;
ret
=
(
unsigned
)
result
;
}
return
(
ret
);
return
(
ret
);
}
}
...
@@ -377,6 +380,22 @@ malloc_conf_init(void)
...
@@ -377,6 +380,22 @@ malloc_conf_init(void)
const
char
*
opts
,
*
k
,
*
v
;
const
char
*
opts
,
*
k
,
*
v
;
size_t
klen
,
vlen
;
size_t
klen
,
vlen
;
/*
* Automatically configure valgrind before processing options. The
* valgrind option remains in jemalloc 3.x for compatibility reasons.
*/
if
(
config_valgrind
)
{
opt_valgrind
=
(
RUNNING_ON_VALGRIND
!=
0
)
?
true
:
false
;
if
(
config_fill
&&
opt_valgrind
)
{
opt_junk
=
false
;
assert
(
opt_zero
==
false
);
opt_quarantine
=
JEMALLOC_VALGRIND_QUARANTINE_DEFAULT
;
opt_redzone
=
true
;
}
if
(
config_tcache
&&
opt_valgrind
)
opt_tcache
=
false
;
}
for
(
i
=
0
;
i
<
3
;
i
++
)
{
for
(
i
=
0
;
i
<
3
;
i
++
)
{
/* Get runtime configuration. */
/* Get runtime configuration. */
switch
(
i
)
{
switch
(
i
)
{
...
@@ -537,6 +556,30 @@ malloc_conf_init(void)
...
@@ -537,6 +556,30 @@ malloc_conf_init(void)
*/
*/
CONF_HANDLE_SIZE_T
(
opt_lg_chunk
,
"lg_chunk"
,
LG_PAGE
+
CONF_HANDLE_SIZE_T
(
opt_lg_chunk
,
"lg_chunk"
,
LG_PAGE
+
(
config_fill
?
2
:
1
),
(
sizeof
(
size_t
)
<<
3
)
-
1
)
(
config_fill
?
2
:
1
),
(
sizeof
(
size_t
)
<<
3
)
-
1
)
if
(
strncmp
(
"dss"
,
k
,
klen
)
==
0
)
{
int
i
;
bool
match
=
false
;
for
(
i
=
0
;
i
<
dss_prec_limit
;
i
++
)
{
if
(
strncmp
(
dss_prec_names
[
i
],
v
,
vlen
)
==
0
)
{
if
(
chunk_dss_prec_set
(
i
))
{
malloc_conf_error
(
"Error setting dss"
,
k
,
klen
,
v
,
vlen
);
}
else
{
opt_dss
=
dss_prec_names
[
i
];
match
=
true
;
break
;
}
}
}
if
(
match
==
false
)
{
malloc_conf_error
(
"Invalid conf value"
,
k
,
klen
,
v
,
vlen
);
}
continue
;
}
CONF_HANDLE_SIZE_T
(
opt_narenas
,
"narenas"
,
1
,
CONF_HANDLE_SIZE_T
(
opt_narenas
,
"narenas"
,
1
,
SIZE_T_MAX
)
SIZE_T_MAX
)
CONF_HANDLE_SSIZE_T
(
opt_lg_dirty_mult
,
"lg_dirty_mult"
,
CONF_HANDLE_SSIZE_T
(
opt_lg_dirty_mult
,
"lg_dirty_mult"
,
...
@@ -553,20 +596,7 @@ malloc_conf_init(void)
CONF_HANDLE_BOOL(opt_utrace, "utrace")
}
if (config_valgrind) {
bool hit;
CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
CONF_HANDLE_BOOL_HIT(opt_valgrind, "valgrind", hit)
if (config_fill && opt_valgrind && hit) {
opt_junk = false;
opt_zero = false;
if (opt_quarantine == 0) {
opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
}
opt_redzone = true;
}
if (hit)
continue;
}
}
if (config_xmalloc) {
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
...
@@ -695,9 +725,9 @@ malloc_init_hard(void)
* Create enough scaffolding to allow recursive allocation in
* malloc_ncpus().
*/
narenas = 1;
narenas_total = narenas_auto = 1;
arenas = init_arenas;
memset(arenas, 0, sizeof(arena_t *) * narenas);
memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
/*
* Initialize one arena here. The rest are lazily created in
...
@@ -755,20 +785,21 @@ malloc_init_hard(void)
else
opt_narenas = 1;
}
narenas = opt_narenas;
narenas_auto = opt_narenas;
/*
* Make sure that the arenas array can be allocated. In practice, this
* limit is enough to allow the allocator to function, but the ctl
* machinery will fail to allocate memory at far lower limits.
*/
if (narenas > chunksize / sizeof(arena_t *)) {
if (narenas_auto > chunksize / sizeof(arena_t *)) {
narenas = chunksize / sizeof(arena_t *);
narenas_auto = chunksize / sizeof(arena_t *);
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
narenas);
narenas_auto);
}
narenas_total = narenas_auto;
/* Allocate and initialize arenas. */
arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
if (arenas == NULL) {
malloc_mutex_unlock(&init_lock);
return (true);
...
@@ -777,7 +808,7 @@ malloc_init_hard(void)
* Zero the array. In practice, this should always be pre-zeroed,
* since it was just mmap()ed, but let's be sure.
*/
memset(arenas, 0, sizeof(arena_t *) * narenas);
memset(arenas, 0, sizeof(arena_t *) * narenas_total);
/* Copy the pointer to the one arena that was already initialized. */
arenas[0] = init_arenas[0];
...
@@ -1262,11 +1293,10 @@ je_valloc(size_t size)
* passed an extra argument for the caller return address, which will be
* ignored.
*/
JEMALLOC_EXPORT void (* const __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* const __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* const __realloc_hook)(void *ptr, size_t size) =
JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) =
je_realloc;
JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
je_memalign;
#endif
...
@@ -1279,7 +1309,7 @@ JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
*/
size_t
je_malloc_usable_size(const void *ptr)
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
size_t ret;
...
@@ -1343,18 +1373,19 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache, arena_t *arena)
{
assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment)));
if (alignment != 0)
return (ipalloc(usize, alignment, zero));
return (ipallocx(usize, alignment, zero, try_tcache, arena));
else if (zero)
return (icalloc(usize));
return (icallocx(usize, try_tcache, arena));
else
return (imalloc(usize));
return (imallocx(usize, try_tcache, arena));
}
int
...
@@ -1365,6 +1396,9 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
& (SIZE_T_MAX - 1));
bool zero = flags & ALLOCM_ZERO;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
arena_t *arena;
bool try_tcache;
assert(ptr != NULL);
assert(size != 0);
...
@@ -1372,6 +1406,14 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
if (malloc_init())
goto label_oom;
if (arena_ind != UINT_MAX) {
arena = arenas[arena_ind];
try_tcache = false;
} else {
arena = NULL;
try_tcache = true;
}
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
if (usize == 0)
goto label_oom;
...
@@ -1388,18 +1430,19 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
alignment);
assert(usize_promoted != 0);
p = iallocm(usize_promoted, alignment, zero);
p = iallocm(usize_promoted, alignment, zero, try_tcache, arena);
if (p == NULL)
goto label_oom;
arena_prof_promoted(p, usize);
} else {
p = iallocm(usize, alignment, zero);
p = iallocm(usize, alignment, zero, try_tcache, arena);
if (p == NULL)
goto label_oom;
}
prof_malloc(p, usize, cnt);
} else {
p = iallocm(usize, alignment, zero);
p = iallocm(usize, alignment, zero, try_tcache, arena);
if (p == NULL)
goto label_oom;
}
...
@@ -1436,6 +1479,9 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
& (SIZE_T_MAX - 1));
bool zero = flags & ALLOCM_ZERO;
bool no_move = flags & ALLOCM_NO_MOVE;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
bool try_tcache_alloc, try_tcache_dalloc;
arena_t *arena;
assert(ptr != NULL);
assert(*ptr != NULL);
...
@@ -1443,6 +1489,19 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || IS_INITIALIZER);
if (arena_ind != UINT_MAX) {
arena_chunk_t *chunk;
try_tcache_alloc = true;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
try_tcache_dalloc = (chunk == *ptr || chunk->arena != arenas[arena_ind]);
arena = arenas[arena_ind];
} else {
try_tcache_alloc = true;
try_tcache_dalloc = true;
arena = NULL;
}
p = *ptr;
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
...
@@ -1469,9 +1528,10 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
&& ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
<= SMALL_MAXCLASS) {
q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
alignment, zero, no_move);
alignment, zero, no_move, try_tcache_alloc, try_tcache_dalloc, arena);
if (q == NULL)
goto label_err;
if (max_usize < PAGE) {
...
@@ -1480,7 +1540,8 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
} else
usize = isalloc(q, config_prof);
} else {
q = iralloc(p, size, extra, alignment, zero, no_move);
q = irallocx(p, size, extra, alignment, zero, no_move, try_tcache_alloc, try_tcache_dalloc, arena);
if (q == NULL)
goto label_err;
usize = isalloc(q, config_prof);
...
@@ -1497,7 +1558,8 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
old_size = isalloc(p, false);
old_rzsize = u2rz(old_size);
}
q = iralloc(p, size, extra, alignment, zero, no_move);
q = irallocx(p, size, extra, alignment, zero, no_move, try_tcache_alloc, try_tcache_dalloc, arena);
if (q == NULL)
goto label_err;
if (config_stats)
...
@@ -1558,10 +1620,19 @@ je_dallocm(void *ptr, int flags)
{
size_t usize;
size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
bool try_tcache;
assert(ptr != NULL);
assert(malloc_initialized || IS_INITIALIZER);
if (arena_ind != UINT_MAX) {
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
try_tcache = (chunk == ptr || chunk->arena != arenas[arena_ind]);
} else
try_tcache = true;
UTRACE(ptr, 0, 0);
if (config_stats || config_valgrind)
usize = isalloc(ptr, config_prof);
...
@@ -1574,7 +1645,7 @@ je_dallocm(void *ptr, int flags)
thread_allocated_tsd_get()->deallocated += usize;
if (config_valgrind && opt_valgrind)
rzsize = p2rz(ptr);
iqalloc(ptr);
iqallocx(ptr, try_tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
return (ALLOCM_SUCCESS);
...
@@ -1611,6 +1682,27 @@ je_nallocm(size_t *rsize, size_t size, int flags)
* malloc during fork().
*/
/*
* If an application creates a thread before doing any allocation in the main
* thread, then calls fork(2) in the main thread followed by memory allocation
* in the child process, a race can occur that results in deadlock within the
* child: the main thread may have forked while the created thread had
* partially initialized the allocator. Ordinarily jemalloc prevents
* fork/malloc races via the following functions it registers during
* initialization using pthread_atfork(), but of course that does no good if
* the allocator isn't fully initialized at fork time. The following library
* constructor is a partial solution to this problem. It may still possible to
* trigger the deadlock described above, but doing so would involve forking via
* a library constructor that runs before jemalloc's runs.
*/
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{
malloc_init();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
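To make the scenario described in the new comment above concrete, here is a hedged, purely illustrative sketch of the pattern the constructor guards against: a thread is spawned before the main thread ever allocates, the main thread forks, and the child then allocates. None of this code is part of the commit; with the constructor in place, malloc_init() has already run by the time fork() can be reached, so the atfork handlers registered during initialization can do their job.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

/* Illustrative worker: its first malloc() may be what triggers lazy allocator init. */
static void *
worker(void *arg)
{
	free(malloc(64));
	return (NULL);
}

int
main(void)
{
	pthread_t t;
	pid_t pid;

	/* Thread created before the main thread has allocated anything. */
	pthread_create(&t, NULL, worker, NULL);

	pid = fork();	/* Without eager init, fork() can land mid-initialization. */
	if (pid == 0) {
		free(malloc(64));	/* Child allocation that could otherwise deadlock. */
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	pthread_join(t, NULL);
	puts("done");
	return (0);
}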
...
@@ -1628,14 +1720,16 @@ _malloc_prefork(void)
assert(malloc_initialized);
/* Acquire all mutexes in a safe order. */
ctl_prefork();
malloc_mutex_prefork(&arenas_lock);
for (i = 0; i < narenas; i++) {
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_prefork(arenas[i]);
}
prof_prefork();
chunk_prefork();
base_prefork();
huge_prefork();
chunk_dss_prefork();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
...
@@ -1655,14 +1749,16 @@ _malloc_postfork(void)
assert(malloc_initialized);
/* Release all mutexes, now that fork() has completed. */
chunk_dss_postfork_parent();
huge_postfork_parent();
base_postfork_parent();
for (i = 0; i < narenas; i++) {
chunk_postfork_parent();
prof_postfork_parent();
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_parent(arenas[i]);
}
malloc_mutex_postfork_parent(&arenas_lock);
ctl_postfork_parent();
}
void
...
@@ -1673,14 +1769,16 @@ jemalloc_postfork_child(void)
assert(malloc_initialized);
/* Release all mutexes, now that fork() has completed. */
chunk_dss_postfork_child();
huge_postfork_child();
base_postfork_child();
for (i = 0; i < narenas; i++) {
chunk_postfork_child();
prof_postfork_child();
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_child(arenas[i]);
}
malloc_mutex_postfork_child(&arenas_lock);
ctl_postfork_child();
}
/******************************************************************************/
...
...
deps/jemalloc/src/mutex.c
View file @
b85cb4ce
...
@@ -64,7 +64,7 @@ pthread_create(pthread_t *__restrict thread,
/******************************************************************************/
#ifdef JEMALLOC_MUTEX_INIT_CB
int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
#endif
...
deps/jemalloc/src/prof.c
View file @
b85cb4ce
...
@@ -1270,4 +1270,46 @@ prof_boot2(void)
return (false);
}
void
prof_prefork(void)
{
if (opt_prof) {
unsigned i;
malloc_mutex_lock(&bt2ctx_mtx);
malloc_mutex_lock(&prof_dump_seq_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_lock(&ctx_locks[i]);
}
}
void
prof_postfork_parent(void)
{
if (opt_prof) {
unsigned i;
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_parent(&ctx_locks[i]);
malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
malloc_mutex_postfork_parent(&bt2ctx_mtx);
}
}
void
prof_postfork_child(void)
{
if (opt_prof) {
unsigned i;
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_child(&ctx_locks[i]);
malloc_mutex_postfork_child(&prof_dump_seq_mtx);
malloc_mutex_postfork_child(&bt2ctx_mtx);
}
}
/******************************************************************************/
deps/jemalloc/src/rtree.c
View file @
b85cb4ce
...
@@ -44,3 +44,24 @@ rtree_new(unsigned bits)
return (ret);
}
void
rtree_prefork(rtree_t *rtree)
{
malloc_mutex_prefork(&rtree->mutex);
}
void
rtree_postfork_parent(rtree_t *rtree)
{
malloc_mutex_postfork_parent(&rtree->mutex);
}
void
rtree_postfork_child(rtree_t *rtree)
{
malloc_mutex_postfork_child(&rtree->mutex);
}
deps/jemalloc/src/stats.c
View file @
b85cb4ce
...
@@ -206,6 +206,7 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i, bool bins, bool large)
{
unsigned nthreads;
const char *dss;
size_t page, pactive, pdirty, mapped;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
...
@@ -218,6 +219,9 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
malloc_cprintf(write_cb, cbopaque,
"assigned threads: %u\n", nthreads);
CTL_I_GET("stats.arenas.0.dss", &dss, const char *);
malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n", dss);
CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
...
@@ -370,6 +374,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
"Run-time option settings:\n");
OPT_WRITE_BOOL(abort)
OPT_WRITE_SIZE_T(lg_chunk)
OPT_WRITE_CHAR_P(dss)
OPT_WRITE_SIZE_T(narenas)
OPT_WRITE_SSIZE_T(lg_dirty_mult)
OPT_WRITE_BOOL(stats_print)
...
@@ -400,7 +405,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
CTL_GET("arenas.narenas", &uv, unsigned);
malloc_cprintf(write_cb, cbopaque, "Max arenas: %u\n", uv);
malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
sizeof(void *));
...
@@ -472,7 +477,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("stats.chunks.current", &chunks_current, size_t);
malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
"highchunks curchunks\n");
malloc_cprintf(write_cb, cbopaque, " %13"PRIu64"%13zu%13zu\n",
malloc_cprintf(write_cb, cbopaque, " %13"PRIu64" %12zu %12zu\n",
chunks_total, chunks_high, chunks_current);
/* Print huge stats. */
...
...
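A quick way to see the two reporting changes above (the per-arena "dss allocation precedence" line and the renamed "Arenas:" label) is to dump statistics from a small test program; malloc_stats_print() is the public entry point behind this code. The sketch below is not part of the commit and assumes an unprefixed jemalloc build with the standard header installed:

#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* Allocate a little so the arena stats are non-trivial. */
	void *p = malloc(4096);

	/* NULL write callback means "write to stderr"; NULL opts prints everything. */
	malloc_stats_print(NULL, NULL, NULL);

	free(p);
	return (0);
}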
deps/jemalloc/src/tcache.c
View file @
b85cb4ce
...
@@ -288,7 +288,7 @@ tcache_create(arena_t *arena)
else if (size <= tcache_maxclass)
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
else
tcache = (tcache_t *)icalloc(size);
tcache = (tcache_t *)icallocx(size, false, arena);
if (tcache == NULL)
return (NULL);
...
@@ -364,7 +364,7 @@ tcache_destroy(tcache_t *tcache)
arena_dalloc_large(arena, chunk, tcache);
} else
idalloc(tcache);
idallocx(tcache, false);
}
void
...
...
deps/jemalloc/src/util.c
View file @
b85cb4ce
...
@@ -377,7 +377,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
case '\0': goto label_out;
case '%': {
bool alt_form = false;
bool zero_pad = false;
bool left_justify = false;
bool plus_space = false;
bool plus_plus = false;
...
@@ -398,10 +397,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
assert(alt_form == false);
alt_form = true;
break;
case '0':
assert(zero_pad == false);
zero_pad = true;
break;
case '-':
assert(left_justify == false);
left_justify = true;
...
...
deps/jemalloc/src/zone.c
View file @
b85cb4ce
...
@@ -171,6 +171,16 @@ void
register_zone(void)
{
/*
* If something else replaced the system default zone allocator, don't
* register jemalloc's.
*/
malloc_zone_t *default_zone = malloc_default_zone();
if (!default_zone->zone_name ||
strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
return;
}
zone.size = (void *)zone_size;
zone.malloc = (void *)zone_malloc;
zone.calloc = (void *)zone_calloc;
...
@@ -241,7 +251,7 @@ register_zone(void)
* then becomes the default.
*/
do {
malloc_zone_t *default_zone = malloc_default_zone();
default_zone = malloc_default_zone();
malloc_zone_unregister(default_zone);
malloc_zone_register(default_zone);
} while (malloc_default_zone() != &zone);
...
...
deps/jemalloc/test/ALLOCM_ARENA.c
0 → 100644
View file @
b85cb4ce
#define JEMALLOC_MANGLE
#include "jemalloc_test.h"
#define NTHREADS 10
void *
je_thread_start(void *arg)
{
unsigned thread_ind = (unsigned)(uintptr_t)arg;
unsigned arena_ind;
int r;
void *p;
size_t rsz, sz;
sz = sizeof(arena_ind);
if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0) {
malloc_printf("Error in arenas.extend\n");
abort();
}
if (thread_ind % 4 != 3) {
size_t mib[3];
size_t miblen = sizeof(mib) / sizeof(size_t);
const char *dss_precs[] = {"disabled", "primary", "secondary"};
const char *dss = dss_precs[thread_ind % 4];
if (mallctlnametomib("arena.0.dss", mib, &miblen) != 0) {
malloc_printf("Error in mallctlnametomib()\n");
abort();
}
mib[1] = arena_ind;
if (mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
sizeof(const char *))) {
malloc_printf("Error in mallctlbymib()\n");
abort();
}
}
r = allocm(&p, &rsz, 1, ALLOCM_ARENA(arena_ind));
if (r != ALLOCM_SUCCESS) {
malloc_printf("Unexpected allocm() error\n");
abort();
}
return (NULL);
}
int
main(void)
{
je_thread_t threads[NTHREADS];
unsigned i;
malloc_printf("Test begin\n");
for (i = 0; i < NTHREADS; i++) {
je_thread_create(&threads[i], je_thread_start,
(void *)(uintptr_t)i);
}
for (i = 0; i < NTHREADS; i++)
je_thread_join(threads[i], NULL);
malloc_printf("Test end\n");
return (0);
}
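As a usage note, the arena encoded with ALLOCM_ARENA() is not limited to allocm(): the je_rallocm() and je_dallocm() changes earlier in this commit decode the same flag bits, so a caller can keep an object pinned to one explicit arena for its whole lifetime. The following hedged sketch is not part of the test suite; it assumes the same jemalloc_test.h harness as the test above and elides detailed error reporting:

#define JEMALLOC_MANGLE
#include "jemalloc_test.h"

int
main(void)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	void *p;
	size_t rsz;

	/* Create a fresh arena, exactly as the test above does. */
	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
		return (1);

	/* Allocation, reallocation, and deallocation all honor the same flag. */
	if (allocm(&p, &rsz, 4096, ALLOCM_ARENA(arena_ind)) != ALLOCM_SUCCESS)
		return (1);
	if (rallocm(&p, &rsz, 8192, 0, ALLOCM_ARENA(arena_ind)) != ALLOCM_SUCCESS)
		return (1);
	if (dallocm(p, ALLOCM_ARENA(arena_ind)) != ALLOCM_SUCCESS)
		return (1);
	return (0);
}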
deps/jemalloc/test/ALLOCM_ARENA.exp
0 → 100644
View file @
b85cb4ce
Test begin
Test end
deps/jemalloc/test/thread_arena.c
View file @
b85cb4ce
#define JEMALLOC_MANGLE
#include "jemalloc_test.h"
#define NTHREADS 10
void *
je_thread_start(void *arg)
...
@@ -66,8 +66,10 @@ main(void)
goto label_return;
}
for (i = 0; i < NTHREADS; i++)
for (i = 0; i < NTHREADS; i++) {
je_thread_create(&threads[i], je_thread_start, (void *)&arena_ind);
}
for (i = 0; i < NTHREADS; i++)
je_thread_join(threads[i], (void *)&ret);
...
...