ruanhaishen / redis · Commits

Commit fb1f4f4e (Unverified)
Authored Oct 25, 2019 by Wander Hillen; committed by GitHub on Oct 25, 2019

    Merge branch 'unstable' into minor-typos

Parents: dda8cc18, 6e98214f
Changes: 205 files
deps/jemalloc/src/chunk_dss.c  (deleted, 100644 → 0)

#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char *dss_prec_names[] = {
    "disabled",
    "primary",
    "secondary",
    "N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t dss_mtx;

/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;

/******************************************************************************/

static void *
chunk_dss_sbrk(intptr_t increment)
{
#ifdef JEMALLOC_DSS
    return (sbrk(increment));
#else
    not_implemented();
    return (NULL);
#endif
}

dss_prec_t
chunk_dss_prec_get(void)
{
    dss_prec_t ret;

    if (!have_dss)
        return (dss_prec_disabled);
    malloc_mutex_lock(&dss_mtx);
    ret = dss_prec_default;
    malloc_mutex_unlock(&dss_mtx);
    return (ret);
}

bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{
    if (!have_dss)
        return (dss_prec != dss_prec_disabled);
    malloc_mutex_lock(&dss_mtx);
    dss_prec_default = dss_prec;
    malloc_mutex_unlock(&dss_mtx);
    return (false);
}

void *
chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit)
{
    cassert(have_dss);
    assert(size > 0 && (size & chunksize_mask) == 0);
    assert(alignment > 0 && (alignment & chunksize_mask) == 0);

    /*
     * sbrk() uses a signed increment argument, so take care not to
     * interpret a huge allocation request as a negative increment.
     */
    if ((intptr_t)size < 0)
        return (NULL);

    malloc_mutex_lock(&dss_mtx);
    if (dss_prev != (void *)-1) {
        /*
         * The loop is necessary to recover from races with other
         * threads that are using the DSS for something other than
         * malloc.
         */
        do {
            void *ret, *cpad, *dss_next;
            size_t gap_size, cpad_size;
            intptr_t incr;

            /* Avoid an unnecessary system call. */
            if (new_addr != NULL && dss_max != new_addr)
                break;

            /* Get the current end of the DSS. */
            dss_max = chunk_dss_sbrk(0);

            /* Make sure the earlier condition still holds. */
            if (new_addr != NULL && dss_max != new_addr)
                break;

            /*
             * Calculate how much padding is necessary to
             * chunk-align the end of the DSS.
             */
            gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
                chunksize_mask;
            /*
             * Compute how much chunk-aligned pad space (if any) is
             * necessary to satisfy alignment.  This space can be
             * recycled for later use.
             */
            cpad = (void *)((uintptr_t)dss_max + gap_size);
            ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
                alignment);
            cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
            dss_next = (void *)((uintptr_t)ret + size);
            if ((uintptr_t)ret < (uintptr_t)dss_max ||
                (uintptr_t)dss_next < (uintptr_t)dss_max) {
                /* Wrap-around. */
                malloc_mutex_unlock(&dss_mtx);
                return (NULL);
            }
            incr = gap_size + cpad_size + size;
            dss_prev = chunk_dss_sbrk(incr);
            if (dss_prev == dss_max) {
                /* Success. */
                dss_max = dss_next;
                malloc_mutex_unlock(&dss_mtx);
                if (cpad_size != 0) {
                    chunk_hooks_t chunk_hooks =
                        CHUNK_HOOKS_INITIALIZER;
                    chunk_dalloc_wrapper(arena, &chunk_hooks,
                        cpad, cpad_size, true);
                }
                if (*zero) {
                    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
                        ret, size);
                    memset(ret, 0, size);
                }
                if (!*commit)
                    *commit = pages_decommit(ret, size);
                return (ret);
            }
        } while (dss_prev != (void *)-1);
    }
    malloc_mutex_unlock(&dss_mtx);

    return (NULL);
}

bool
chunk_in_dss(void *chunk)
{
    bool ret;

    cassert(have_dss);

    malloc_mutex_lock(&dss_mtx);
    if ((uintptr_t)chunk >= (uintptr_t)dss_base
        && (uintptr_t)chunk < (uintptr_t)dss_max)
        ret = true;
    else
        ret = false;
    malloc_mutex_unlock(&dss_mtx);

    return (ret);
}

bool
chunk_dss_boot(void)
{
    cassert(have_dss);

    if (malloc_mutex_init(&dss_mtx))
        return (true);
    dss_base = chunk_dss_sbrk(0);
    dss_prev = dss_base;
    dss_max = dss_base;

    return (false);
}

void
chunk_dss_prefork(void)
{
    if (have_dss)
        malloc_mutex_prefork(&dss_mtx);
}

void
chunk_dss_postfork_parent(void)
{
    if (have_dss)
        malloc_mutex_postfork_parent(&dss_mtx);
}

void
chunk_dss_postfork_child(void)
{
    if (have_dss)
        malloc_mutex_postfork_child(&dss_mtx);
}

/******************************************************************************/
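The gap_size/cpad/incr arithmetic in chunk_alloc_dss is easier to follow in isolation. The following stand-alone sketch uses hypothetical constants in place of jemalloc's chunksize, chunksize_mask and ALIGNMENT_CEILING (it is not the library's code); it walks one extension of the break and checks that the returned block is aligned while the chunk-aligned padding in front of it remains a recyclable chunk.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for jemalloc's chunk constants/macros. */
#define CHUNK      ((uintptr_t)(2 << 20))                      /* 2 MiB chunks */
#define CEIL(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1)) /* align up */

int main(void) {
    uintptr_t dss_max   = 0x12345678;   /* pretend current program break */
    uintptr_t alignment = 4 * CHUNK;    /* request a 4-chunk alignment */
    uintptr_t size      = 8 * CHUNK;

    /* Padding needed to chunk-align the current end of the DSS. */
    uintptr_t gap_size = (CHUNK - (dss_max & (CHUNK - 1))) & (CHUNK - 1);
    /* Chunk-aligned pad space in front of the block; recyclable later. */
    uintptr_t cpad = dss_max + gap_size;
    /* Aligned start of the block handed back to the caller. */
    uintptr_t ret = CEIL(dss_max, alignment);
    uintptr_t cpad_size = ret - cpad;
    /* Total increment a single sbrk()-style call would have to cover. */
    uintptr_t incr = gap_size + cpad_size + size;

    printf("gap=%zu cpad=%zu incr=%zu aligned=%d\n",
        (size_t)gap_size, (size_t)cpad_size, (size_t)incr,
        (int)(ret % alignment == 0));
    return 0;
}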
deps/jemalloc/src/chunk_mmap.c  (deleted, 100644 → 0)

#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;
    size_t alloc_size;

    alloc_size = size + alignment - PAGE;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    do {
        void *pages;
        size_t leadsize;
        pages = pages_map(NULL, alloc_size);
        if (pages == NULL)
            return (NULL);
        leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
            (uintptr_t)pages;
        ret = pages_trim(pages, alloc_size, leadsize, size);
    } while (ret == NULL);

    assert(ret != NULL);
    *zero = true;
    if (!*commit)
        *commit = pages_decommit(ret, size);
    return (ret);
}

void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;
    size_t offset;

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings.  The reliable, but
     * slow method is to create a mapping that is over-sized, then trim the
     * excess.  However, that always results in one or two calls to
     * pages_unmap().
     *
     * Optimistically try mapping precisely the right amount before falling
     * back to the slow method, with the expectation that the optimistic
     * approach works most of the time.
     */

    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = pages_map(NULL, size);
    if (ret == NULL)
        return (NULL);
    offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
    if (offset != 0) {
        pages_unmap(ret, size);
        return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
    }

    assert(ret != NULL);
    *zero = true;
    if (!*commit)
        *commit = pages_decommit(ret, size);
    return (ret);
}

bool
chunk_dalloc_mmap(void *chunk, size_t size)
{
    if (config_munmap)
        pages_unmap(chunk, size);
    return (!config_munmap);
}
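The comment in chunk_alloc_mmap describes a two-step strategy: map exactly the requested size and hope it comes back suitably aligned, otherwise over-allocate and trim the excess. A minimal POSIX sketch of that fallback, using plain mmap/munmap instead of jemalloc's pages_* wrappers and assuming 4 KiB pages and MAP_ANONYMOUS (spelled MAP_ANON on some systems), looks like this; it is an illustration under those assumptions, not the jemalloc implementation.

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

/* Map `size` bytes aligned to `alignment` (a power of two that is a multiple
 * of the page size) by over-allocating, then unmapping the unaligned head and
 * the unused tail.  Returns MAP_FAILED on error. */
static void *
map_aligned_slow(size_t size, size_t alignment) {
    size_t alloc_size = size + alignment - 4096;   /* assume 4 KiB pages */
    if (alloc_size < size)                         /* size_t wrap-around */
        return MAP_FAILED;

    char *pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (pages == MAP_FAILED)
        return MAP_FAILED;

    uintptr_t addr = (uintptr_t)pages;
    size_t leadsize = ((addr + alignment - 1) & ~(alignment - 1)) - addr;

    /* Trim the unaligned head and the unused tail of the mapping. */
    if (leadsize != 0)
        munmap(pages, leadsize);
    if (alloc_size - leadsize - size != 0)
        munmap(pages + leadsize + size, alloc_size - leadsize - size);
    return pages + leadsize;
}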
deps/jemalloc/src/ckh.c

@@ -35,7 +35,17 @@
  *
  ******************************************************************************/

 #define JEMALLOC_CKH_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/ckh.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/util.h"

 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */

@@ -49,27 +59,26 @@ static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
 /*
  * Search bucket for key and return the cell number if found; SIZE_T_MAX
  * otherwise.
  */
-JEMALLOC_INLINE_C size_t
-ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
-{
+static size_t
+ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
     ckhc_t *cell;
     unsigned i;

     for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
         cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
-        if (cell->key != NULL && ckh->keycomp(key, cell->key))
-            return ((bucket << LG_CKH_BUCKET_CELLS) + i);
+        if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
+            return (bucket << LG_CKH_BUCKET_CELLS) + i;
+        }
     }

-    return (SIZE_T_MAX);
+    return SIZE_T_MAX;
 }

 /*
  * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
  */
-JEMALLOC_INLINE_C size_t
-ckh_isearch(ckh_t *ckh, const void *key)
-{
+static size_t
+ckh_isearch(ckh_t *ckh, const void *key) {
     size_t hashes[2], bucket, cell;

     assert(ckh != NULL);

@@ -79,19 +88,19 @@ ckh_isearch(ckh_t *ckh, const void *key)
     /* Search primary bucket. */
     bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
     cell = ckh_bucket_search(ckh, bucket, key);
-    if (cell != SIZE_T_MAX)
-        return (cell);
+    if (cell != SIZE_T_MAX) {
+        return cell;
+    }

     /* Search secondary bucket. */
     bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
     cell = ckh_bucket_search(ckh, bucket, key);
-    return (cell);
+    return cell;
 }

-JEMALLOC_INLINE_C bool
+static bool
 ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
-    const void *data)
-{
+    const void *data) {
     ckhc_t *cell;
     unsigned offset, i;

@@ -99,7 +108,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
      * Cycle through the cells in the bucket, starting at a random position.
      * The randomness avoids worst-case search overhead as buckets fill up.
      */
-    prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
+    offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
+        LG_CKH_BUCKET_CELLS);
     for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
         cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
             ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];

@@ -107,11 +117,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
             cell->key = key;
             cell->data = data;
             ckh->count++;
-            return (false);
+            return false;
         }
     }

-    return (true);
+    return true;
 }

 /*
@@ -120,10 +130,9 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
  * eviction/relocation procedure until either success or detection of an
  * eviction/relocation bucket cycle.
  */
-JEMALLOC_INLINE_C bool
+static bool
 ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
-    void const **argdata)
-{
+    void const **argdata) {
     const void *key, *data, *tkey, *tdata;
     ckhc_t *cell;
     size_t hashes[2], bucket, tbucket;

@@ -141,7 +150,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
          * were an item for which both hashes indicated the same
          * bucket.
          */
-        prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
+        i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
+            LG_CKH_BUCKET_CELLS);
         cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
         assert(cell->key != NULL);

@@ -181,18 +191,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
         if (tbucket == argbucket) {
             *argkey = key;
             *argdata = data;
-            return (true);
+            return true;
         }

         bucket = tbucket;
-        if (!ckh_try_bucket_insert(ckh, bucket, key, data))
-            return (false);
+        if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+            return false;
+        }
     }
 }

-JEMALLOC_INLINE_C bool
-ckh_try_insert(ckh_t *ckh, void const **argkey, void const **argdata)
-{
+static bool
+ckh_try_insert(ckh_t *ckh, void const **argkey, void const **argdata) {
     size_t hashes[2], bucket;
     const void *key = *argkey;
     const void *data = *argdata;

@@ -201,27 +211,28 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
     /* Try to insert in primary bucket. */
     bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
-    if (!ckh_try_bucket_insert(ckh, bucket, key, data))
-        return (false);
+    if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+        return false;
+    }

     /* Try to insert in secondary bucket. */
     bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
-    if (!ckh_try_bucket_insert(ckh, bucket, key, data))
-        return (false);
+    if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+        return false;
+    }

     /*
      * Try to find a place for this item via iterative eviction/relocation.
      */
-    return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
+    return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
 }

 /*
  * Try to rebuild the hash table from scratch by inserting all items from the
  * old table into the new.
  */
-JEMALLOC_INLINE_C bool
-ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
-{
+static bool
+ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
     size_t count, i, nins;
     const void *key, *data;

@@ -233,22 +244,20 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
             data = aTab[i].data;
             if (ckh_try_insert(ckh, &key, &data)) {
                 ckh->count = count;
-                return (true);
+                return true;
             }
             nins++;
         }
     }

-    return (false);
+    return false;
 }

 static bool
-ckh_grow(tsd_t *tsd, ckh_t *ckh)
-{
+ckh_grow(tsd_t *tsd, ckh_t *ckh) {
     bool ret;
     ckhc_t *tab, *ttab;
-    size_t lg_curcells;
-    unsigned lg_prevbuckets;
+    unsigned lg_prevbuckets, lg_curcells;

 #ifdef CKH_COUNT
     ckh->ngrows++;

@@ -265,13 +274,13 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
         size_t usize;

         lg_curcells++;
-        usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-        if (usize == 0) {
+        usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+        if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
             ret = true;
             goto label_return;
         }
-        tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
-            true, NULL);
+        tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
+            true, NULL, true, arena_ichoose(tsd, NULL));
         if (tab == NULL) {
             ret = true;
             goto label_return;

@@ -283,27 +292,26 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
         ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

         if (!ckh_rebuild(ckh, tab)) {
-            idalloctm(tsd, tab, tcache_get(tsd, false), true);
+            idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
             break;
         }

         /* Rebuilding failed, so back out partially rebuilt table. */
-        idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
+        idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
         ckh->tab = tab;
         ckh->lg_curbuckets = lg_prevbuckets;
     }

     ret = false;
 label_return:
-    return (ret);
+    return ret;
 }

 static void
-ckh_shrink(tsd_t *tsd, ckh_t *ckh)
-{
+ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
     ckhc_t *tab, *ttab;
-    size_t lg_curcells, usize;
-    unsigned lg_prevbuckets;
+    size_t usize;
+    unsigned lg_prevbuckets, lg_curcells;

     /*
      * It is possible (though unlikely, given well behaved hashes) that the

@@ -311,11 +319,12 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
      */
     lg_prevbuckets = ckh->lg_curbuckets;
     lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
-    usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-    if (usize == 0)
+    usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+    if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
         return;
-    tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
-        NULL);
+    }
+    tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
+        true, arena_ichoose(tsd, NULL));
     if (tab == NULL) {
         /*
          * An OOM error isn't worth propagating, since it doesn't

@@ -330,7 +339,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
     ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

     if (!ckh_rebuild(ckh, tab)) {
-        idalloctm(tsd, tab, tcache_get(tsd, false), true);
+        idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
 #ifdef CKH_COUNT
         ckh->nshrinks++;
 #endif

@@ -338,7 +347,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
     }

     /* Rebuilding failed, so back out partially rebuilt table. */
-    idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
+    idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
     ckh->tab = tab;
     ckh->lg_curbuckets = lg_prevbuckets;
 #ifdef CKH_COUNT

@@ -348,8 +357,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
 bool
 ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
-    ckh_keycomp_t *keycomp)
-{
+    ckh_keycomp_t *keycomp) {
     bool ret;
     size_t mincells, usize;
     unsigned lg_mincells;

@@ -379,20 +387,21 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
     mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
     for (lg_mincells = LG_CKH_BUCKET_CELLS;
         (ZU(1) << lg_mincells) < mincells;
-        lg_mincells++)
-        ; /* Do nothing. */
+        lg_mincells++) {
+        /* Do nothing. */
+    }
     ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
     ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
     ckh->hash = hash;
     ckh->keycomp = keycomp;

-    usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
-    if (usize == 0) {
+    usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
+    if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
         ret = true;
         goto label_return;
     }
-    ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
-        NULL);
+    ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
+        NULL, true, arena_ichoose(tsd, NULL));
     if (ckh->tab == NULL) {
         ret = true;
         goto label_return;

@@ -400,13 +409,11 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
     ret = false;
 label_return:
-    return (ret);
+    return ret;
 }

 void
-ckh_delete(tsd_t *tsd, ckh_t *ckh)
-{
+ckh_delete(tsd_t *tsd, ckh_t *ckh) {
     assert(ckh != NULL);

 #ifdef CKH_VERBOSE

@@ -421,43 +428,42 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
         (unsigned long long)ckh->nrelocs);
 #endif

-    idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
-    if (config_debug)
-        memset(ckh, 0x5a, sizeof(ckh_t));
+    idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
+    if (config_debug) {
+        memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
+    }
 }

 size_t
-ckh_count(ckh_t *ckh)
-{
+ckh_count(ckh_t *ckh) {
     assert(ckh != NULL);

-    return (ckh->count);
+    return ckh->count;
 }

 bool
-ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
-{
+ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
     size_t i, ncells;

     for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
         LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
         if (ckh->tab[i].key != NULL) {
-            if (key != NULL)
+            if (key != NULL) {
                 *key = (void *)ckh->tab[i].key;
-            if (data != NULL)
+            }
+            if (data != NULL) {
                 *data = (void *)ckh->tab[i].data;
+            }
             *tabind = i + 1;
-            return (false);
+            return false;
         }
     }

-    return (true);
+    return true;
 }

 bool
-ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
-{
+ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
     bool ret;

     assert(ckh != NULL);

@@ -476,23 +482,24 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
     ret = false;
 label_return:
-    return (ret);
+    return ret;
 }

 bool
 ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
-    void **data)
-{
+    void **data) {
     size_t cell;

     assert(ckh != NULL);

     cell = ckh_isearch(ckh, searchkey);
     if (cell != SIZE_T_MAX) {
-        if (key != NULL)
+        if (key != NULL) {
             *key = (void *)ckh->tab[cell].key;
-        if (data != NULL)
+        }
+        if (data != NULL) {
             *data = (void *)ckh->tab[cell].data;
+        }
         ckh->tab[cell].key = NULL;
         ckh->tab[cell].data = NULL; /* Not necessary. */

@@ -505,51 +512,47 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
             ckh_shrink(tsd, ckh);
         }

-        return (false);
+        return false;
     }

-    return (true);
+    return true;
 }

 bool
-ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
-{
+ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
     size_t cell;

     assert(ckh != NULL);

     cell = ckh_isearch(ckh, searchkey);
     if (cell != SIZE_T_MAX) {
-        if (key != NULL)
+        if (key != NULL) {
             *key = (void *)ckh->tab[cell].key;
-        if (data != NULL)
+        }
+        if (data != NULL) {
             *data = (void *)ckh->tab[cell].data;
-        return (false);
+        }
+        return false;
     }

-    return (true);
+    return true;
 }

 void
-ckh_string_hash(const void *key, size_t r_hash[2])
-{
+ckh_string_hash(const void *key, size_t r_hash[2]) {
     hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
 }

 bool
-ckh_string_keycomp(const void *k1, const void *k2)
-{
+ckh_string_keycomp(const void *k1, const void *k2) {
     assert(k1 != NULL);
     assert(k2 != NULL);

-    return (strcmp((char *)k1, (char *)k2) ? false : true);
+    return !strcmp((char *)k1, (char *)k2);
 }

 void
-ckh_pointer_hash(const void *key, size_t r_hash[2])
-{
+ckh_pointer_hash(const void *key, size_t r_hash[2]) {
     union {
         const void *v;
         size_t i;

@@ -561,8 +564,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2])
 }

 bool
-ckh_pointer_keycomp(const void *k1, const void *k2)
-{
-    return ((k1 == k2) ? true : false);
+ckh_pointer_keycomp(const void *k1, const void *k2) {
+    return (k1 == k2);
 }
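The ckh changes above keep the underlying scheme intact: every key hashes to two candidate buckets, each bucket holds a small power-of-two number of cells, and an insert that finds both buckets full evicts a resident item and relocates it to that item's other bucket. A compact, self-contained sketch of that two-choice insert, simplified to one cell per bucket and a bounded relocation count, with hypothetical hash functions standing in for the real ones:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define NBUCKETS   64        /* must be a power of two */
#define MAX_RELOCS 16        /* give up (and normally grow) after this many */

typedef struct { const void *key; const void *data; } cell_t;
static cell_t tab[NBUCKETS];

/* Hypothetical stand-ins for the two independent hash functions. */
static size_t hash1(const void *key) { return ((uintptr_t)key * 2654435761u) & (NBUCKETS - 1); }
static size_t hash2(const void *key) { return ((uintptr_t)key * 40503u + 17) & (NBUCKETS - 1); }

/* Returns false on success, true if the relocation limit was hit (a real
 * table would then grow and rebuild, as ckh_grow()/ckh_rebuild() do). */
static bool
cuckoo_insert(const void *key, const void *data) {
    size_t bucket = hash1(key);

    for (int n = 0; n < MAX_RELOCS; n++) {
        if (tab[bucket].key == NULL) {
            tab[bucket].key = key;
            tab[bucket].data = data;
            return false;
        }
        /* Bucket occupied: evict the resident item and take its place... */
        cell_t evicted = tab[bucket];
        tab[bucket].key = key;
        tab[bucket].data = data;
        /* ...then try to re-home the evicted item in its other bucket. */
        key = evicted.key;
        data = evicted.data;
        bucket = (bucket == hash1(key)) ? hash2(key) : hash1(key);
    }
    return true;
}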
deps/jemalloc/src/ctl.c

 #define JEMALLOC_CTL_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/util.h"

 /******************************************************************************/
 /* Data. */

 /*
  * ctl_mtx protects the following:
- * - ctl_stats.
+ * - ctl_stats->*
  */
 static malloc_mutex_t ctl_mtx;
 static bool           ctl_initialized;
-static uint64_t       ctl_epoch;
-static ctl_stats_t    ctl_stats;
+static ctl_stats_t    *ctl_stats;
+static ctl_arenas_t   *ctl_arenas;

/******************************************************************************/
/* Helpers for named and indexed nodes. */

-JEMALLOC_INLINE_C const ctl_named_node_t *
-ctl_named_node(const ctl_node_t *node)
-{
+static const ctl_named_node_t *
+ctl_named_node(const ctl_node_t *node) {
     return ((node->named) ? (const ctl_named_node_t *)node : NULL);
 }

-JEMALLOC_INLINE_C const ctl_named_node_t *
-ctl_named_children(const ctl_named_node_t *node, int index)
-{
+static const ctl_named_node_t *
+ctl_named_children(const ctl_named_node_t *node, size_t index) {
     const ctl_named_node_t *children = ctl_named_node(node->children);

     return (children ? &children[index] : NULL);
 }

-JEMALLOC_INLINE_C const ctl_indexed_node_t *
-ctl_indexed_node(const ctl_node_t *node)
-{
+static const ctl_indexed_node_t *
+ctl_indexed_node(const ctl_node_t *node) {
     return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
 }
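ctl_named_node and ctl_indexed_node above are safe downcasts over a tagged union: every node in the mallctl tree begins with a boolean "named" flag, and the helpers return a typed pointer or NULL depending on that tag. A stripped-down illustration of the same pattern, with types invented here rather than jemalloc's actual definitions:

#include <stddef.h>
#include <stdbool.h>

/* Common header shared by both node flavors. */
typedef struct { bool named; } node_t;

typedef struct {
    node_t base;            /* base.named == true */
    const char *name;       /* fixed child, looked up by name */
} named_node_t;

typedef struct {
    node_t base;            /* base.named == false */
    const void *(*index)(size_t i);   /* child computed from an index */
} indexed_node_t;

static const named_node_t *
as_named(const node_t *node) {
    return node->named ? (const named_node_t *)node : NULL;
}

static const indexed_node_t *
as_indexed(const node_t *node) {
    return !node->named ? (const indexed_node_t *)node : NULL;
}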
@@ -42,28 +47,17 @@ ctl_indexed_node(const ctl_node_t *node)
 /* Function prototypes for non-inline static functions. */

 #define CTL_PROTO(n)							\
-static int	n##_ctl(const size_t *mib, size_t miblen, void *oldp,	\
-    size_t *oldlenp, void *newp, size_t newlen);
+static int	n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,	\
+    void *oldp, size_t *oldlenp, void *newp, size_t newlen);

 #define INDEX_PROTO(n)							\
-static const ctl_named_node_t	*n##_index(const size_t *mib,		\
-    size_t miblen, size_t i);
-
-static bool	ctl_arena_init(ctl_arena_stats_t *astats);
-static void	ctl_arena_clear(ctl_arena_stats_t *astats);
-static void	ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
-    arena_t *arena);
-static void	ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
-    ctl_arena_stats_t *astats);
-static void	ctl_arena_refresh(arena_t *arena, unsigned i);
-static bool	ctl_grow(void);
-static void	ctl_refresh(void);
-static bool	ctl_init(void);
-static int	ctl_lookup(const char *name, ctl_node_t const **nodesp,
-    size_t *mibp, size_t *depthp);
+static const ctl_named_node_t	*n##_index(tsdn_t *tsdn,		\
+    const size_t *mib, size_t miblen, size_t i);
 CTL_PROTO(version)
 CTL_PROTO(epoch)
+CTL_PROTO(background_thread)
+CTL_PROTO(max_background_threads)
 CTL_PROTO(thread_tcache_enabled)
 CTL_PROTO(thread_tcache_flush)
 CTL_PROTO(thread_prof_name)

@@ -77,29 +71,33 @@ CTL_PROTO(config_cache_oblivious)
 CTL_PROTO(config_debug)
 CTL_PROTO(config_fill)
 CTL_PROTO(config_lazy_lock)
-CTL_PROTO(config_munmap)
+CTL_PROTO(config_malloc_conf)
 CTL_PROTO(config_prof)
 CTL_PROTO(config_prof_libgcc)
 CTL_PROTO(config_prof_libunwind)
 CTL_PROTO(config_stats)
-CTL_PROTO(config_tcache)
-CTL_PROTO(config_tls)
 CTL_PROTO(config_utrace)
-CTL_PROTO(config_valgrind)
 CTL_PROTO(config_xmalloc)
 CTL_PROTO(opt_abort)
+CTL_PROTO(opt_abort_conf)
+CTL_PROTO(opt_metadata_thp)
+CTL_PROTO(opt_retain)
 CTL_PROTO(opt_dss)
-CTL_PROTO(opt_lg_chunk)
 CTL_PROTO(opt_narenas)
-CTL_PROTO(opt_lg_dirty_mult)
+CTL_PROTO(opt_percpu_arena)
+CTL_PROTO(opt_background_thread)
+CTL_PROTO(opt_max_background_threads)
+CTL_PROTO(opt_dirty_decay_ms)
+CTL_PROTO(opt_muzzy_decay_ms)
 CTL_PROTO(opt_stats_print)
+CTL_PROTO(opt_stats_print_opts)
 CTL_PROTO(opt_junk)
 CTL_PROTO(opt_zero)
-CTL_PROTO(opt_quarantine)
-CTL_PROTO(opt_redzone)
 CTL_PROTO(opt_utrace)
 CTL_PROTO(opt_xmalloc)
 CTL_PROTO(opt_tcache)
+CTL_PROTO(opt_thp)
+CTL_PROTO(opt_lg_extent_max_active_fit)
 CTL_PROTO(opt_lg_tcache_max)
 CTL_PROTO(opt_prof)
 CTL_PROTO(opt_prof_prefix)

@@ -114,31 +112,34 @@ CTL_PROTO(opt_prof_accum)
 CTL_PROTO(tcache_create)
 CTL_PROTO(tcache_flush)
 CTL_PROTO(tcache_destroy)
+CTL_PROTO(arena_i_initialized)
+CTL_PROTO(arena_i_decay)
 CTL_PROTO(arena_i_purge)
-static void	arena_purge(unsigned arena_ind);
+CTL_PROTO(arena_i_reset)
+CTL_PROTO(arena_i_destroy)
 CTL_PROTO(arena_i_dss)
-CTL_PROTO(arena_i_lg_dirty_mult)
-CTL_PROTO(arena_i_chunk_hooks)
+CTL_PROTO(arena_i_dirty_decay_ms)
+CTL_PROTO(arena_i_muzzy_decay_ms)
+CTL_PROTO(arena_i_extent_hooks)
+CTL_PROTO(arena_i_retain_grow_limit)
 INDEX_PROTO(arena_i)
 CTL_PROTO(arenas_bin_i_size)
 CTL_PROTO(arenas_bin_i_nregs)
-CTL_PROTO(arenas_bin_i_run_size)
+CTL_PROTO(arenas_bin_i_slab_size)
 INDEX_PROTO(arenas_bin_i)
-CTL_PROTO(arenas_lrun_i_size)
-INDEX_PROTO(arenas_lrun_i)
-CTL_PROTO(arenas_hchunk_i_size)
-INDEX_PROTO(arenas_hchunk_i)
+CTL_PROTO(arenas_lextent_i_size)
+INDEX_PROTO(arenas_lextent_i)
 CTL_PROTO(arenas_narenas)
-CTL_PROTO(arenas_initialized)
-CTL_PROTO(arenas_lg_dirty_mult)
+CTL_PROTO(arenas_dirty_decay_ms)
+CTL_PROTO(arenas_muzzy_decay_ms)
 CTL_PROTO(arenas_quantum)
 CTL_PROTO(arenas_page)
 CTL_PROTO(arenas_tcache_max)
 CTL_PROTO(arenas_nbins)
 CTL_PROTO(arenas_nhbins)
-CTL_PROTO(arenas_nlruns)
-CTL_PROTO(arenas_nhchunks)
-CTL_PROTO(arenas_extend)
+CTL_PROTO(arenas_nlextents)
+CTL_PROTO(arenas_create)
+CTL_PROTO(arenas_lookup)
 CTL_PROTO(prof_thread_active_init)
 CTL_PROTO(prof_active)
 CTL_PROTO(prof_dump)
@@ -154,55 +155,82 @@ CTL_PROTO(stats_arenas_i_large_allocated)
 CTL_PROTO(stats_arenas_i_large_nmalloc)
 CTL_PROTO(stats_arenas_i_large_ndalloc)
 CTL_PROTO(stats_arenas_i_large_nrequests)
-CTL_PROTO(stats_arenas_i_huge_allocated)
-CTL_PROTO(stats_arenas_i_huge_nmalloc)
-CTL_PROTO(stats_arenas_i_huge_ndalloc)
-CTL_PROTO(stats_arenas_i_huge_nrequests)
 CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
 CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
 CTL_PROTO(stats_arenas_i_bins_j_nrequests)
 CTL_PROTO(stats_arenas_i_bins_j_curregs)
 CTL_PROTO(stats_arenas_i_bins_j_nfills)
 CTL_PROTO(stats_arenas_i_bins_j_nflushes)
-CTL_PROTO(stats_arenas_i_bins_j_nruns)
-CTL_PROTO(stats_arenas_i_bins_j_nreruns)
-CTL_PROTO(stats_arenas_i_bins_j_curruns)
+CTL_PROTO(stats_arenas_i_bins_j_nslabs)
+CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
+CTL_PROTO(stats_arenas_i_bins_j_curslabs)
 INDEX_PROTO(stats_arenas_i_bins_j)
-CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
-CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
-CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
-CTL_PROTO(stats_arenas_i_lruns_j_curruns)
-INDEX_PROTO(stats_arenas_i_lruns_j)
-CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
-CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
-CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
-CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks)
-INDEX_PROTO(stats_arenas_i_hchunks_j)
+CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
+CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
+CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
+CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
+INDEX_PROTO(stats_arenas_i_lextents_j)
 CTL_PROTO(stats_arenas_i_nthreads)
+CTL_PROTO(stats_arenas_i_uptime)
 CTL_PROTO(stats_arenas_i_dss)
-CTL_PROTO(stats_arenas_i_lg_dirty_mult)
+CTL_PROTO(stats_arenas_i_dirty_decay_ms)
+CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
 CTL_PROTO(stats_arenas_i_pactive)
 CTL_PROTO(stats_arenas_i_pdirty)
+CTL_PROTO(stats_arenas_i_pmuzzy)
 CTL_PROTO(stats_arenas_i_mapped)
-CTL_PROTO(stats_arenas_i_npurge)
-CTL_PROTO(stats_arenas_i_nmadvise)
-CTL_PROTO(stats_arenas_i_purged)
-CTL_PROTO(stats_arenas_i_metadata_mapped)
-CTL_PROTO(stats_arenas_i_metadata_allocated)
+CTL_PROTO(stats_arenas_i_retained)
+CTL_PROTO(stats_arenas_i_dirty_npurge)
+CTL_PROTO(stats_arenas_i_dirty_nmadvise)
+CTL_PROTO(stats_arenas_i_dirty_purged)
+CTL_PROTO(stats_arenas_i_muzzy_npurge)
+CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
+CTL_PROTO(stats_arenas_i_muzzy_purged)
+CTL_PROTO(stats_arenas_i_base)
+CTL_PROTO(stats_arenas_i_internal)
+CTL_PROTO(stats_arenas_i_metadata_thp)
+CTL_PROTO(stats_arenas_i_tcache_bytes)
+CTL_PROTO(stats_arenas_i_resident)
 INDEX_PROTO(stats_arenas_i)
-CTL_PROTO(stats_cactive)
 CTL_PROTO(stats_allocated)
 CTL_PROTO(stats_active)
+CTL_PROTO(stats_background_thread_num_threads)
+CTL_PROTO(stats_background_thread_num_runs)
+CTL_PROTO(stats_background_thread_run_interval)
 CTL_PROTO(stats_metadata)
+CTL_PROTO(stats_metadata_thp)
 CTL_PROTO(stats_resident)
 CTL_PROTO(stats_mapped)
+CTL_PROTO(stats_retained)
+
+#define MUTEX_STATS_CTL_PROTO_GEN(n)					\
+CTL_PROTO(stats_##n##_num_ops)						\
+CTL_PROTO(stats_##n##_num_wait)						\
+CTL_PROTO(stats_##n##_num_spin_acq)					\
+CTL_PROTO(stats_##n##_num_owner_switch)					\
+CTL_PROTO(stats_##n##_total_wait_time)					\
+CTL_PROTO(stats_##n##_max_wait_time)					\
+CTL_PROTO(stats_##n##_max_num_thds)
+
+/* Global mutexes. */
+#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
+MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+
+/* Per arena mutexes. */
+#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+
+/* Arena bin mutexes. */
+MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
+#undef MUTEX_STATS_CTL_PROTO_GEN
+
+CTL_PROTO(stats_mutexes_reset)

 /******************************************************************************/
 /* mallctl tree. */

 /* Maximum tree depth. */
 #define CTL_MAX_DEPTH	6

 #define NAME(n)	{true},	n
 #define CHILD(t, c)							\
	sizeof(c##_node) / sizeof(ctl_##t##_node_t),			\
@@ -241,32 +269,36 @@ static const ctl_named_node_t config_node[] = {
 	{NAME("debug"),		CTL(config_debug)},
 	{NAME("fill"),		CTL(config_fill)},
 	{NAME("lazy_lock"),	CTL(config_lazy_lock)},
-	{NAME("munmap"),	CTL(config_munmap)},
+	{NAME("malloc_conf"),	CTL(config_malloc_conf)},
 	{NAME("prof"),		CTL(config_prof)},
 	{NAME("prof_libgcc"),	CTL(config_prof_libgcc)},
 	{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
 	{NAME("stats"),		CTL(config_stats)},
-	{NAME("tcache"),	CTL(config_tcache)},
-	{NAME("tls"),		CTL(config_tls)},
 	{NAME("utrace"),	CTL(config_utrace)},
-	{NAME("valgrind"),	CTL(config_valgrind)},
 	{NAME("xmalloc"),	CTL(config_xmalloc)}
 };
 static const ctl_named_node_t opt_node[] = {
 	{NAME("abort"),		CTL(opt_abort)},
+	{NAME("abort_conf"),	CTL(opt_abort_conf)},
+	{NAME("metadata_thp"),	CTL(opt_metadata_thp)},
+	{NAME("retain"),	CTL(opt_retain)},
 	{NAME("dss"),		CTL(opt_dss)},
-	{NAME("lg_chunk"),	CTL(opt_lg_chunk)},
 	{NAME("narenas"),	CTL(opt_narenas)},
-	{NAME("lg_dirty_mult"),	CTL(opt_lg_dirty_mult)},
+	{NAME("percpu_arena"),	CTL(opt_percpu_arena)},
+	{NAME("background_thread"),	CTL(opt_background_thread)},
+	{NAME("max_background_threads"),	CTL(opt_max_background_threads)},
+	{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
+	{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
 	{NAME("stats_print"),	CTL(opt_stats_print)},
+	{NAME("stats_print_opts"),	CTL(opt_stats_print_opts)},
 	{NAME("junk"),		CTL(opt_junk)},
 	{NAME("zero"),		CTL(opt_zero)},
-	{NAME("quarantine"),	CTL(opt_quarantine)},
-	{NAME("redzone"),	CTL(opt_redzone)},
 	{NAME("utrace"),	CTL(opt_utrace)},
 	{NAME("xmalloc"),	CTL(opt_xmalloc)},
 	{NAME("tcache"),	CTL(opt_tcache)},
+	{NAME("thp"),		CTL(opt_thp)},
+	{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
 	{NAME("lg_tcache_max"),	CTL(opt_lg_tcache_max)},
 	{NAME("prof"),		CTL(opt_prof)},
 	{NAME("prof_prefix"),	CTL(opt_prof_prefix)},

@@ -287,10 +319,16 @@ static const ctl_named_node_t tcache_node[] = {
 };
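Each leaf registered in the opt_node table above becomes reachable through jemalloc's public mallctl() interface as "opt.<name>". As a usage sketch (error handling elided; depending on how jemalloc is built the symbol may carry a je_ prefix), a caller could read two of these options like so:

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    /* opt.dss is documented as a read-only const char * value. */
    const char *dss;
    size_t sz = sizeof(dss);
    if (mallctl("opt.dss", &dss, &sz, NULL, 0) == 0)
        printf("opt.dss = %s\n", dss);

    /* opt.abort is documented as a read-only bool value. */
    bool abort_on_err;
    sz = sizeof(abort_on_err);
    if (mallctl("opt.abort", &abort_on_err, &sz, NULL, 0) == 0)
        printf("opt.abort = %d\n", (int)abort_on_err);
    return 0;
}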
static
const
ctl_named_node_t
arena_i_node
[]
=
{
{
NAME
(
"initialized"
),
CTL
(
arena_i_initialized
)},
{
NAME
(
"decay"
),
CTL
(
arena_i_decay
)},
{
NAME
(
"purge"
),
CTL
(
arena_i_purge
)},
{
NAME
(
"reset"
),
CTL
(
arena_i_reset
)},
{
NAME
(
"destroy"
),
CTL
(
arena_i_destroy
)},
{
NAME
(
"dss"
),
CTL
(
arena_i_dss
)},
{
NAME
(
"lg_dirty_mult"
),
CTL
(
arena_i_lg_dirty_mult
)},
{
NAME
(
"chunk_hooks"
),
CTL
(
arena_i_chunk_hooks
)}
{
NAME
(
"dirty_decay_ms"
),
CTL
(
arena_i_dirty_decay_ms
)},
{
NAME
(
"muzzy_decay_ms"
),
CTL
(
arena_i_muzzy_decay_ms
)},
{
NAME
(
"extent_hooks"
),
CTL
(
arena_i_extent_hooks
)},
{
NAME
(
"retain_grow_limit"
),
CTL
(
arena_i_retain_grow_limit
)}
};
static
const
ctl_named_node_t
super_arena_i_node
[]
=
{
{
NAME
(
""
),
CHILD
(
named
,
arena_i
)}
...
...
@@ -303,7 +341,7 @@ static const ctl_indexed_node_t arena_node[] = {
static
const
ctl_named_node_t
arenas_bin_i_node
[]
=
{
{
NAME
(
"size"
),
CTL
(
arenas_bin_i_size
)},
{
NAME
(
"nregs"
),
CTL
(
arenas_bin_i_nregs
)},
{
NAME
(
"
run
_size"
),
CTL
(
arenas_bin_i_
run
_size
)}
{
NAME
(
"
slab
_size"
),
CTL
(
arenas_bin_i_
slab
_size
)}
};
static
const
ctl_named_node_t
super_arenas_bin_i_node
[]
=
{
{
NAME
(
""
),
CHILD
(
named
,
arenas_bin_i
)}
...
...
@@ -313,43 +351,31 @@ static const ctl_indexed_node_t arenas_bin_node[] = {
{
INDEX
(
arenas_bin_i
)}
};
static
const
ctl_named_node_t
arenas_lrun_i_node
[]
=
{
{
NAME
(
"size"
),
CTL
(
arenas_lrun_i_size
)}
};
static
const
ctl_named_node_t
super_arenas_lrun_i_node
[]
=
{
{
NAME
(
""
),
CHILD
(
named
,
arenas_lrun_i
)}
};
static
const
ctl_indexed_node_t
arenas_lrun_node
[]
=
{
{
INDEX
(
arenas_lrun_i
)}
};
static
const
ctl_named_node_t
arenas_hchunk_i_node
[]
=
{
{
NAME
(
"size"
),
CTL
(
arenas_hchunk_i_size
)}
static
const
ctl_named_node_t
arenas_lextent_i_node
[]
=
{
{
NAME
(
"size"
),
CTL
(
arenas_lextent_i_size
)}
};
static
const
ctl_named_node_t
super_arenas_
hchunk
_i_node
[]
=
{
{
NAME
(
""
),
CHILD
(
named
,
arenas_
hchunk
_i
)}
static
const
ctl_named_node_t
super_arenas_
lextent
_i_node
[]
=
{
{
NAME
(
""
),
CHILD
(
named
,
arenas_
lextent
_i
)}
};
static
const
ctl_indexed_node_t
arenas_
hchunk
_node
[]
=
{
{
INDEX
(
arenas_
hchunk
_i
)}
static
const
ctl_indexed_node_t
arenas_
lextent
_node
[]
=
{
{
INDEX
(
arenas_
lextent
_i
)}
};
static
const
ctl_named_node_t
arenas_node
[]
=
{
{
NAME
(
"narenas"
),
CTL
(
arenas_narenas
)},
{
NAME
(
"
initialized
"
),
CTL
(
arenas_
initialized
)},
{
NAME
(
"
lg_dirty_mult
"
),
CTL
(
arenas_
lg_dirty_mult
)},
{
NAME
(
"
dirty_decay_ms
"
),
CTL
(
arenas_
dirty_decay_ms
)},
{
NAME
(
"
muzzy_decay_ms
"
),
CTL
(
arenas_
muzzy_decay_ms
)},
{
NAME
(
"quantum"
),
CTL
(
arenas_quantum
)},
{
NAME
(
"page"
),
CTL
(
arenas_page
)},
{
NAME
(
"tcache_max"
),
CTL
(
arenas_tcache_max
)},
{
NAME
(
"nbins"
),
CTL
(
arenas_nbins
)},
{
NAME
(
"nhbins"
),
CTL
(
arenas_nhbins
)},
{
NAME
(
"bin"
),
CHILD
(
indexed
,
arenas_bin
)},
{
NAME
(
"nlruns"
),
CTL
(
arenas_nlruns
)},
{
NAME
(
"lrun"
),
CHILD
(
indexed
,
arenas_lrun
)},
{
NAME
(
"nhchunks"
),
CTL
(
arenas_nhchunks
)},
{
NAME
(
"hchunk"
),
CHILD
(
indexed
,
arenas_hchunk
)},
{
NAME
(
"extend"
),
CTL
(
arenas_extend
)}
{
NAME
(
"nlextents"
),
CTL
(
arenas_nlextents
)},
{
NAME
(
"lextent"
),
CHILD
(
indexed
,
arenas_lextent
)},
{
NAME
(
"create"
),
CTL
(
arenas_create
)},
{
NAME
(
"lookup"
),
CTL
(
arenas_lookup
)}
};
static
const
ctl_named_node_t
prof_node
[]
=
{
...
...
@@ -362,11 +388,6 @@ static const ctl_named_node_t prof_node[] = {
{
NAME
(
"lg_sample"
),
CTL
(
lg_prof_sample
)}
};
static
const
ctl_named_node_t
stats_arenas_i_metadata_node
[]
=
{
{
NAME
(
"mapped"
),
CTL
(
stats_arenas_i_metadata_mapped
)},
{
NAME
(
"allocated"
),
CTL
(
stats_arenas_i_metadata_allocated
)}
};
static
const
ctl_named_node_t
stats_arenas_i_small_node
[]
=
{
{
NAME
(
"allocated"
),
CTL
(
stats_arenas_i_small_allocated
)},
{
NAME
(
"nmalloc"
),
CTL
(
stats_arenas_i_small_nmalloc
)},
...
...
@@ -381,13 +402,27 @@ static const ctl_named_node_t stats_arenas_i_large_node[] = {
{
NAME
(
"nrequests"
),
CTL
(
stats_arenas_i_large_nrequests
)}
};
static
const
ctl_named_node_t
stats_arenas_i_huge_node
[]
=
{
{
NAME
(
"allocated"
),
CTL
(
stats_arenas_i_huge_allocated
)},
{
NAME
(
"nmalloc"
),
CTL
(
stats_arenas_i_huge_nmalloc
)},
{
NAME
(
"ndalloc"
),
CTL
(
stats_arenas_i_huge_ndalloc
)},
{
NAME
(
"nrequests"
),
CTL
(
stats_arenas_i_huge_nrequests
)}
#define MUTEX_PROF_DATA_NODE(prefix) \
static const ctl_named_node_t stats_##prefix##_node[] = { \
{NAME("num_ops"), \
CTL(stats_##prefix##_num_ops)}, \
{NAME("num_wait"), \
CTL(stats_##prefix##_num_wait)}, \
{NAME("num_spin_acq"), \
CTL(stats_##prefix##_num_spin_acq)}, \
{NAME("num_owner_switch"), \
CTL(stats_##prefix##_num_owner_switch)}, \
{NAME("total_wait_time"), \
CTL(stats_##prefix##_total_wait_time)}, \
{NAME("max_wait_time"), \
CTL(stats_##prefix##_max_wait_time)}, \
{NAME("max_num_thds"), \
CTL(stats_##prefix##_max_num_thds)} \
/* Note that # of current waiting thread not provided. */
\
};
MUTEX_PROF_DATA_NODE
(
arenas_i_bins_j_mutex
)
static
const
ctl_named_node_t
stats_arenas_i_bins_j_node
[]
=
{
{
NAME
(
"nmalloc"
),
CTL
(
stats_arenas_i_bins_j_nmalloc
)},
{
NAME
(
"ndalloc"
),
CTL
(
stats_arenas_i_bins_j_ndalloc
)},
...
...
@@ -395,10 +430,12 @@ static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
{
NAME
(
"curregs"
),
CTL
(
stats_arenas_i_bins_j_curregs
)},
{
NAME
(
"nfills"
),
CTL
(
stats_arenas_i_bins_j_nfills
)},
{
NAME
(
"nflushes"
),
CTL
(
stats_arenas_i_bins_j_nflushes
)},
{
NAME
(
"nruns"
),
CTL
(
stats_arenas_i_bins_j_nruns
)},
{
NAME
(
"nreruns"
),
CTL
(
stats_arenas_i_bins_j_nreruns
)},
{
NAME
(
"curruns"
),
CTL
(
stats_arenas_i_bins_j_curruns
)}
{
NAME
(
"nslabs"
),
CTL
(
stats_arenas_i_bins_j_nslabs
)},
{
NAME
(
"nreslabs"
),
CTL
(
stats_arenas_i_bins_j_nreslabs
)},
{
NAME
(
"curslabs"
),
CTL
(
stats_arenas_i_bins_j_curslabs
)},
{
NAME
(
"mutex"
),
CHILD
(
named
,
stats_arenas_i_bins_j_mutex
)}
};
static
const
ctl_named_node_t
super_stats_arenas_i_bins_j_node
[]
=
{
{
NAME
(
""
),
CHILD
(
named
,
stats_arenas_i_bins_j
)}
};
...
...
@@ -407,51 +444,57 @@ static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
{
INDEX
(
stats_arenas_i_bins_j
)}
};
static
const
ctl_named_node_t
stats_arenas_i_l
run
s_j_node
[]
=
{
{
NAME
(
"nmalloc"
),
CTL
(
stats_arenas_i_l
run
s_j_nmalloc
)},
{
NAME
(
"ndalloc"
),
CTL
(
stats_arenas_i_l
run
s_j_ndalloc
)},
{
NAME
(
"nrequests"
),
CTL
(
stats_arenas_i_l
run
s_j_nrequests
)},
{
NAME
(
"cur
run
s"
),
CTL
(
stats_arenas_i_l
run
s_j_cur
run
s
)}
static
const
ctl_named_node_t
stats_arenas_i_l
extent
s_j_node
[]
=
{
{
NAME
(
"nmalloc"
),
CTL
(
stats_arenas_i_l
extent
s_j_nmalloc
)},
{
NAME
(
"ndalloc"
),
CTL
(
stats_arenas_i_l
extent
s_j_ndalloc
)},
{
NAME
(
"nrequests"
),
CTL
(
stats_arenas_i_l
extent
s_j_nrequests
)},
{
NAME
(
"cur
lextent
s"
),
CTL
(
stats_arenas_i_l
extent
s_j_cur
lextent
s
)}
};
static
const
ctl_named_node_t
super_stats_arenas_i_l
run
s_j_node
[]
=
{
{
NAME
(
""
),
CHILD
(
named
,
stats_arenas_i_l
run
s_j
)}
static
const
ctl_named_node_t
super_stats_arenas_i_l
extent
s_j_node
[]
=
{
{
NAME
(
""
),
CHILD
(
named
,
stats_arenas_i_l
extent
s_j
)}
};
static
const
ctl_indexed_node_t
stats_arenas_i_l
run
s_node
[]
=
{
{
INDEX
(
stats_arenas_i_l
run
s_j
)}
static
const
ctl_indexed_node_t
stats_arenas_i_l
extent
s_node
[]
=
{
{
INDEX
(
stats_arenas_i_l
extent
s_j
)}
};
static
const
ctl_named_node_t
stats_arenas_i_hchunks_j_node
[]
=
{
{
NAME
(
"nmalloc"
),
CTL
(
stats_arenas_i_hchunks_j_nmalloc
)},
{
NAME
(
"ndalloc"
),
CTL
(
stats_arenas_i_hchunks_j_ndalloc
)},
{
NAME
(
"nrequests"
),
CTL
(
stats_arenas_i_hchunks_j_nrequests
)},
{
NAME
(
"curhchunks"
),
CTL
(
stats_arenas_i_hchunks_j_curhchunks
)}
};
static
const
ctl_named_node_t
super_stats_arenas_i_hchunks_j_node
[]
=
{
{
NAME
(
""
),
CHILD
(
named
,
stats_arenas_i_hchunks_j
)}
};
#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP
static
const
ctl_indexed_node_t
stats_arenas_i_hchunks_node
[]
=
{
{
INDEX
(
stats_arenas_i_hchunks_j
)}
static
const
ctl_named_node_t
stats_arenas_i_mutexes_node
[]
=
{
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
MUTEX_PROF_ARENA_MUTEXES
#undef OP
};
static
const
ctl_named_node_t
stats_arenas_i_node
[]
=
{
{
NAME
(
"nthreads"
),
CTL
(
stats_arenas_i_nthreads
)},
{
NAME
(
"uptime"
),
CTL
(
stats_arenas_i_uptime
)},
{
NAME
(
"dss"
),
CTL
(
stats_arenas_i_dss
)},
{
NAME
(
"lg_dirty_mult"
),
CTL
(
stats_arenas_i_lg_dirty_mult
)},
{
NAME
(
"dirty_decay_ms"
),
CTL
(
stats_arenas_i_dirty_decay_ms
)},
{
NAME
(
"muzzy_decay_ms"
),
CTL
(
stats_arenas_i_muzzy_decay_ms
)},
{
NAME
(
"pactive"
),
CTL
(
stats_arenas_i_pactive
)},
{
NAME
(
"pdirty"
),
CTL
(
stats_arenas_i_pdirty
)},
{
NAME
(
"pmuzzy"
),
CTL
(
stats_arenas_i_pmuzzy
)},
{
NAME
(
"mapped"
),
CTL
(
stats_arenas_i_mapped
)},
{
NAME
(
"npurge"
),
CTL
(
stats_arenas_i_npurge
)},
{
NAME
(
"nmadvise"
),
CTL
(
stats_arenas_i_nmadvise
)},
{
NAME
(
"purged"
),
CTL
(
stats_arenas_i_purged
)},
{
NAME
(
"metadata"
),
CHILD
(
named
,
stats_arenas_i_metadata
)},
{
NAME
(
"retained"
),
CTL
(
stats_arenas_i_retained
)},
{
NAME
(
"dirty_npurge"
),
CTL
(
stats_arenas_i_dirty_npurge
)},
{
NAME
(
"dirty_nmadvise"
),
CTL
(
stats_arenas_i_dirty_nmadvise
)},
{
NAME
(
"dirty_purged"
),
CTL
(
stats_arenas_i_dirty_purged
)},
{
NAME
(
"muzzy_npurge"
),
CTL
(
stats_arenas_i_muzzy_npurge
)},
{
NAME
(
"muzzy_nmadvise"
),
CTL
(
stats_arenas_i_muzzy_nmadvise
)},
{
NAME
(
"muzzy_purged"
),
CTL
(
stats_arenas_i_muzzy_purged
)},
{
NAME
(
"base"
),
CTL
(
stats_arenas_i_base
)},
{
NAME
(
"internal"
),
CTL
(
stats_arenas_i_internal
)},
{
NAME
(
"metadata_thp"
),
CTL
(
stats_arenas_i_metadata_thp
)},
{
NAME
(
"tcache_bytes"
),
CTL
(
stats_arenas_i_tcache_bytes
)},
{
NAME
(
"resident"
),
CTL
(
stats_arenas_i_resident
)},
{
NAME
(
"small"
),
CHILD
(
named
,
stats_arenas_i_small
)},
{
NAME
(
"large"
),
CHILD
(
named
,
stats_arenas_i_large
)},
{
NAME
(
"huge"
),
CHILD
(
named
,
stats_arenas_i_huge
)},
{
NAME
(
"bins"
),
CHILD
(
indexed
,
stats_arenas_i_bins
)},
{
NAME
(
"l
run
s"
),
CHILD
(
indexed
,
stats_arenas_i_l
run
s
)},
{
NAME
(
"
hchunk
s"
),
CHILD
(
index
ed
,
stats_arenas_i_
hchunk
s
)}
{
NAME
(
"l
extent
s"
),
CHILD
(
indexed
,
stats_arenas_i_l
extent
s
)},
{
NAME
(
"
mutexe
s"
),
CHILD
(
nam
ed
,
stats_arenas_i_
mutexe
s
)}
};
static
const
ctl_named_node_t
super_stats_arenas_i_node
[]
=
{
{
NAME
(
""
),
CHILD
(
named
,
stats_arenas_i
)}
...
...
@@ -461,19 +504,43 @@ static const ctl_indexed_node_t stats_arenas_node[] = {
{
INDEX
(
stats_arenas_i
)}
};
static
const
ctl_named_node_t
stats_background_thread_node
[]
=
{
{
NAME
(
"num_threads"
),
CTL
(
stats_background_thread_num_threads
)},
{
NAME
(
"num_runs"
),
CTL
(
stats_background_thread_num_runs
)},
{
NAME
(
"run_interval"
),
CTL
(
stats_background_thread_run_interval
)}
};
#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
static
const
ctl_named_node_t
stats_mutexes_node
[]
=
{
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
{
NAME
(
"reset"
),
CTL
(
stats_mutexes_reset
)}
};
#undef MUTEX_PROF_DATA_NODE
static
const
ctl_named_node_t
stats_node
[]
=
{
{
NAME
(
"cactive"
),
CTL
(
stats_cactive
)},
{
NAME
(
"allocated"
),
CTL
(
stats_allocated
)},
{
NAME
(
"active"
),
CTL
(
stats_active
)},
{
NAME
(
"metadata"
),
CTL
(
stats_metadata
)},
{
NAME
(
"metadata_thp"
),
CTL
(
stats_metadata_thp
)},
{
NAME
(
"resident"
),
CTL
(
stats_resident
)},
{
NAME
(
"mapped"
),
CTL
(
stats_mapped
)},
{
NAME
(
"retained"
),
CTL
(
stats_retained
)},
{
NAME
(
"background_thread"
),
CHILD
(
named
,
stats_background_thread
)},
{
NAME
(
"mutexes"
),
CHILD
(
named
,
stats_mutexes
)},
{
NAME
(
"arenas"
),
CHILD
(
indexed
,
stats_arenas
)}
};
static
const
ctl_named_node_t
root_node
[]
=
{
{
NAME
(
"version"
),
CTL
(
version
)},
{
NAME
(
"epoch"
),
CTL
(
epoch
)},
{
NAME
(
"background_thread"
),
CTL
(
background_thread
)},
{
NAME
(
"max_background_threads"
),
CTL
(
max_background_threads
)},
{
NAME
(
"thread"
),
CHILD
(
named
,
thread
)},
{
NAME
(
"config"
),
CHILD
(
named
,
config
)},
{
NAME
(
"opt"
),
CHILD
(
named
,
opt
)},
...
...
@@ -494,312 +561,519 @@ static const ctl_named_node_t super_root_node[] = {
/******************************************************************************/
static
bool
ctl_arena_init
(
ctl_arena_stats_t
*
astats
)
{
/*
* Sets *dst + *src non-atomically. This is safe, since everything is
* synchronized by the ctl mutex.
*/
static
void
ctl_accum_arena_stats_u64
(
arena_stats_u64_t
*
dst
,
arena_stats_u64_t
*
src
)
{
#ifdef JEMALLOC_ATOMIC_U64
uint64_t
cur_dst
=
atomic_load_u64
(
dst
,
ATOMIC_RELAXED
);
uint64_t
cur_src
=
atomic_load_u64
(
src
,
ATOMIC_RELAXED
);
atomic_store_u64
(
dst
,
cur_dst
+
cur_src
,
ATOMIC_RELAXED
);
#else
*
dst
+=
*
src
;
#endif
}
/* Likewise: with ctl mutex synchronization, reading is simple. */
static
uint64_t
ctl_arena_stats_read_u64
(
arena_stats_u64_t
*
p
)
{
#ifdef JEMALLOC_ATOMIC_U64
return
atomic_load_u64
(
p
,
ATOMIC_RELAXED
);
#else
return
*
p
;
#endif
}
if
(
astats
->
lstats
==
NULL
)
{
astats
->
lstats
=
(
malloc_large_stats_t
*
)
a0malloc
(
nlclasses
*
sizeof
(
malloc_large_stats_t
));
if
(
astats
->
lstats
==
NULL
)
return
(
true
);
static
void
accum_atomic_zu
(
atomic_zu_t
*
dst
,
atomic_zu_t
*
src
)
{
size_t
cur_dst
=
atomic_load_zu
(
dst
,
ATOMIC_RELAXED
);
size_t
cur_src
=
atomic_load_zu
(
src
,
ATOMIC_RELAXED
);
atomic_store_zu
(
dst
,
cur_dst
+
cur_src
,
ATOMIC_RELAXED
);
}
/******************************************************************************/
static
unsigned
arenas_i2a_impl
(
size_t
i
,
bool
compat
,
bool
validate
)
{
unsigned
a
;
switch
(
i
)
{
case
MALLCTL_ARENAS_ALL
:
a
=
0
;
break
;
case
MALLCTL_ARENAS_DESTROYED
:
a
=
1
;
break
;
default:
if
(
compat
&&
i
==
ctl_arenas
->
narenas
)
{
/*
* Provide deprecated backward compatibility for
* accessing the merged stats at index narenas rather
* than via MALLCTL_ARENAS_ALL. This is scheduled for
* removal in 6.0.0.
*/
a
=
0
;
}
else
if
(
validate
&&
i
>=
ctl_arenas
->
narenas
)
{
a
=
UINT_MAX
;
}
else
{
/*
* This function should never be called for an index
* more than one past the range of indices that have
* initialized ctl data.
*/
assert
(
i
<
ctl_arenas
->
narenas
||
(
!
validate
&&
i
==
ctl_arenas
->
narenas
));
a
=
(
unsigned
)
i
+
2
;
}
break
;
}
if
(
astats
->
hstats
==
NULL
)
{
astats
->
hstats
=
(
malloc_huge_stats_t
*
)
a0malloc
(
nhclasses
*
sizeof
(
malloc_huge_stats_t
));
if
(
astats
->
hstats
==
NULL
)
return
(
true
);
return
a
;
}
static
unsigned
arenas_i2a
(
size_t
i
)
{
return
arenas_i2a_impl
(
i
,
true
,
false
);
}
static
ctl_arena_t
*
arenas_i_impl
(
tsd_t
*
tsd
,
size_t
i
,
bool
compat
,
bool
init
)
{
ctl_arena_t
*
ret
;
assert
(
!
compat
||
!
init
);
ret
=
ctl_arenas
->
arenas
[
arenas_i2a_impl
(
i
,
compat
,
false
)];
if
(
init
&&
ret
==
NULL
)
{
if
(
config_stats
)
{
struct
container_s
{
ctl_arena_t
ctl_arena
;
ctl_arena_stats_t
astats
;
};
struct
container_s
*
cont
=
(
struct
container_s
*
)
base_alloc
(
tsd_tsdn
(
tsd
),
b0get
(),
sizeof
(
struct
container_s
),
QUANTUM
);
if
(
cont
==
NULL
)
{
return
NULL
;
}
ret
=
&
cont
->
ctl_arena
;
ret
->
astats
=
&
cont
->
astats
;
}
else
{
ret
=
(
ctl_arena_t
*
)
base_alloc
(
tsd_tsdn
(
tsd
),
b0get
(),
sizeof
(
ctl_arena_t
),
QUANTUM
);
if
(
ret
==
NULL
)
{
return
NULL
;
}
}
ret
->
arena_ind
=
(
unsigned
)
i
;
ctl_arenas
->
arenas
[
arenas_i2a_impl
(
i
,
compat
,
false
)]
=
ret
;
}
return
(
false
);
assert
(
ret
==
NULL
||
arenas_i2a
(
ret
->
arena_ind
)
==
arenas_i2a
(
i
));
return
ret
;
}
static
void
ctl_arena_clear
(
ctl_arena_stats_t
*
astats
)
{
static
ctl_arena_t
*
arenas_i
(
size_t
i
)
{
ctl_arena_t
*
ret
=
arenas_i_impl
(
tsd_fetch
(),
i
,
true
,
false
);
assert
(
ret
!=
NULL
);
return
ret
;
}
astats
->
dss
=
dss_prec_names
[
dss_prec_limit
];
astats
->
lg_dirty_mult
=
-
1
;
astats
->
pactive
=
0
;
astats
->
pdirty
=
0
;
static
void
ctl_arena_clear
(
ctl_arena_t
*
ctl_arena
)
{
ctl_arena
->
nthreads
=
0
;
ctl_arena
->
dss
=
dss_prec_names
[
dss_prec_limit
];
ctl_arena
->
dirty_decay_ms
=
-
1
;
ctl_arena
->
muzzy_decay_ms
=
-
1
;
ctl_arena
->
pactive
=
0
;
ctl_arena
->
pdirty
=
0
;
ctl_arena
->
pmuzzy
=
0
;
if
(
config_stats
)
{
memset
(
&
astats
->
astats
,
0
,
sizeof
(
arena_stats_t
));
astats
->
allocated_small
=
0
;
astats
->
nmalloc_small
=
0
;
astats
->
ndalloc_small
=
0
;
astats
->
nrequests_small
=
0
;
memset
(
astats
->
bstats
,
0
,
NBINS
*
sizeof
(
malloc_bin_stats_t
));
memset
(
astats
->
lstats
,
0
,
nlclasses
*
sizeof
(
malloc_large_stats_t
));
memset
(
astats
->
hstats
,
0
,
nhclasses
*
sizeof
(
malloc_huge_stats_t
));
memset
(
&
ctl_arena
->
astats
->
astats
,
0
,
sizeof
(
arena_stats_t
));
ctl_arena
->
astats
->
allocated_small
=
0
;
ctl_arena
->
astats
->
nmalloc_small
=
0
;
ctl_arena
->
astats
->
ndalloc_small
=
0
;
ctl_arena
->
astats
->
nrequests_small
=
0
;
memset
(
ctl_arena
->
astats
->
bstats
,
0
,
NBINS
*
sizeof
(
bin_stats_t
));
memset
(
ctl_arena
->
astats
->
lstats
,
0
,
(
NSIZES
-
NBINS
)
*
sizeof
(
arena_stats_large_t
));
}
}
static
void
ctl_arena_stats_amerge
(
ctl_arena_stats_t
*
cstats
,
arena_t
*
arena
)
{
ctl_arena_stats_amerge
(
tsdn_t
*
tsdn
,
ctl_arena_t
*
ctl_arena
,
arena_t
*
arena
)
{
unsigned
i
;
arena_stats_merge
(
arena
,
&
cstats
->
dss
,
&
cstats
->
lg_dirty_mult
,
&
cstats
->
pactive
,
&
cstats
->
pdirty
,
&
cstats
->
astats
,
cstats
->
bstats
,
cstats
->
lstats
,
cstats
->
hstats
);
if
(
config_stats
)
{
arena_stats_merge
(
tsdn
,
arena
,
&
ctl_arena
->
nthreads
,
&
ctl_arena
->
dss
,
&
ctl_arena
->
dirty_decay_ms
,
&
ctl_arena
->
muzzy_decay_ms
,
&
ctl_arena
->
pactive
,
&
ctl_arena
->
pdirty
,
&
ctl_arena
->
pmuzzy
,
&
ctl_arena
->
astats
->
astats
,
ctl_arena
->
astats
->
bstats
,
ctl_arena
->
astats
->
lstats
);
for
(
i
=
0
;
i
<
NBINS
;
i
++
)
{
cstats
->
allocated_small
+=
cstats
->
bstats
[
i
].
curregs
*
index2size
(
i
);
cstats
->
nmalloc_small
+=
cstats
->
bstats
[
i
].
nmalloc
;
cstats
->
ndalloc_small
+=
cstats
->
bstats
[
i
].
ndalloc
;
cstats
->
nrequests_small
+=
cstats
->
bstats
[
i
].
nrequests
;
ctl_arena
->
astats
->
allocated_small
+=
ctl_arena
->
astats
->
bstats
[
i
].
curregs
*
sz_index2size
(
i
);
ctl_arena
->
astats
->
nmalloc_small
+=
ctl_arena
->
astats
->
bstats
[
i
].
nmalloc
;
ctl_arena
->
astats
->
ndalloc_small
+=
ctl_arena
->
astats
->
bstats
[
i
].
ndalloc
;
ctl_arena
->
astats
->
nrequests_small
+=
ctl_arena
->
astats
->
bstats
[
i
].
nrequests
;
}
}
else
{
arena_basic_stats_merge
(
tsdn
,
arena
,
&
ctl_arena
->
nthreads
,
&
ctl_arena
->
dss
,
&
ctl_arena
->
dirty_decay_ms
,
&
ctl_arena
->
muzzy_decay_ms
,
&
ctl_arena
->
pactive
,
&
ctl_arena
->
pdirty
,
&
ctl_arena
->
pmuzzy
);
}
}
static
void
ctl_arena_stats_smerge
(
ctl_arena_
stats_t
*
sstats
,
ctl_arena_stats_t
*
astats
)
{
ctl_arena_stats_s
d
merge
(
ctl_arena_
t
*
ctl_sdarena
,
ctl_arena_t
*
ctl_arena
,
bool
destroyed
)
{
unsigned
i
;
sstats
->
pactive
+=
astats
->
pactive
;
sstats
->
pdirty
+=
astats
->
pdirty
;
if
(
!
destroyed
)
{
ctl_sdarena
->
nthreads
+=
ctl_arena
->
nthreads
;
ctl_sdarena
->
pactive
+=
ctl_arena
->
pactive
;
ctl_sdarena
->
pdirty
+=
ctl_arena
->
pdirty
;
ctl_sdarena
->
pmuzzy
+=
ctl_arena
->
pmuzzy
;
}
else
{
assert
(
ctl_arena
->
nthreads
==
0
);
assert
(
ctl_arena
->
pactive
==
0
);
assert
(
ctl_arena
->
pdirty
==
0
);
assert
(
ctl_arena
->
pmuzzy
==
0
);
}
sstats
->
astats
.
mapped
+=
astats
->
astats
.
mapped
;
sstats
->
astats
.
npurge
+=
astats
->
astats
.
npurge
;
sstats
->
astats
.
nmadvise
+=
astats
->
astats
.
nmadvise
;
sstats
->
astats
.
purged
+=
astats
->
astats
.
purged
;
if
(
config_stats
)
{
ctl_arena_stats_t
*
sdstats
=
ctl_sdarena
->
astats
;
ctl_arena_stats_t
*
astats
=
ctl_arena
->
astats
;
if
(
!
destroyed
)
{
accum_atomic_zu
(
&
sdstats
->
astats
.
mapped
,
&
astats
->
astats
.
mapped
);
accum_atomic_zu
(
&
sdstats
->
astats
.
retained
,
&
astats
->
astats
.
retained
);
}
ctl_accum_arena_stats_u64
(
&
sdstats
->
astats
.
decay_dirty
.
npurge
,
&
astats
->
astats
.
decay_dirty
.
npurge
);
ctl_accum_arena_stats_u64
(
&
sdstats
->
astats
.
decay_dirty
.
nmadvise
,
&
astats
->
astats
.
decay_dirty
.
nmadvise
);
ctl_accum_arena_stats_u64
(
&
sdstats
->
astats
.
decay_dirty
.
purged
,
&
astats
->
astats
.
decay_dirty
.
purged
);
ctl_accum_arena_stats_u64
(
&
sdstats
->
astats
.
decay_muzzy
.
npurge
,
&
astats
->
astats
.
decay_muzzy
.
npurge
);
ctl_accum_arena_stats_u64
(
&
sdstats
->
astats
.
decay_muzzy
.
nmadvise
,
&
astats
->
astats
.
decay_muzzy
.
nmadvise
);
ctl_accum_arena_stats_u64
(
&
sdstats
->
astats
.
decay_muzzy
.
purged
,
&
astats
->
astats
.
decay_muzzy
.
purged
);
#define OP(mtx) malloc_mutex_prof_merge( \
&(sdstats->astats.mutex_prof_data[ \
arena_prof_mutex_##mtx]), \
&(astats->astats.mutex_prof_data[ \
arena_prof_mutex_##mtx]));
MUTEX_PROF_ARENA_MUTEXES
#undef OP
if
(
!
destroyed
)
{
accum_atomic_zu
(
&
sdstats
->
astats
.
base
,
&
astats
->
astats
.
base
);
accum_atomic_zu
(
&
sdstats
->
astats
.
internal
,
&
astats
->
astats
.
internal
);
accum_atomic_zu
(
&
sdstats
->
astats
.
resident
,
&
astats
->
astats
.
resident
);
accum_atomic_zu
(
&
sdstats
->
astats
.
metadata_thp
,
&
astats
->
astats
.
metadata_thp
);
}
else
{
assert
(
atomic_load_zu
(
&
astats
->
astats
.
internal
,
ATOMIC_RELAXED
)
==
0
);
}
sstats
->
astats
.
metadata_mapped
+=
astats
->
astats
.
metadata_mapped
;
sstats
->
astats
.
metadata_allocated
+=
astats
->
astats
.
metadata_allocated
;
if
(
!
destroyed
)
{
sdstats
->
allocated_small
+=
astats
->
allocated_small
;
}
else
{
assert
(
astats
->
allocated_small
==
0
);
}
sdstats
->
nmalloc_small
+=
astats
->
nmalloc_small
;
sdstats
->
ndalloc_small
+=
astats
->
ndalloc_small
;
sdstats
->
nrequests_small
+=
astats
->
nrequests_small
;
sstats
->
allocated_small
+=
astats
->
allocated_small
;
sstats
->
nmalloc_small
+=
astats
->
nmalloc_small
;
sstats
->
ndalloc_small
+=
astats
->
ndalloc_small
;
sstats
->
nrequests_small
+=
astats
->
nrequests_small
;
		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.allocated_large,
			    &astats->astats.allocated_large);
		} else {
			assert(atomic_load_zu(&astats->astats.allocated_large,
			    ATOMIC_RELAXED) == 0);
		}
		ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
		    &astats->astats.nmalloc_large);
		ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
		    &astats->astats.ndalloc_large);
		ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
		    &astats->astats.nrequests_large);
		sstats->astats.allocated_large += astats->astats.allocated_large;
		sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
		sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
		sstats->astats.nrequests_large += astats->astats.nrequests_large;
		accum_atomic_zu(&sdstats->astats.tcache_bytes,
		    &astats->astats.tcache_bytes);
		sstats->astats.allocated_huge += astats->astats.allocated_huge;
		sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
		sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
		if (ctl_arena->arena_ind == 0) {
			sdstats->astats.uptime = astats->astats.uptime;
		}
		for (i = 0; i < NBINS; i++) {
			sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
			sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
			sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
			sstats->bstats[i].curregs += astats->bstats[i].curregs;
			if (config_tcache) {
				sstats->bstats[i].nfills += astats->bstats[i].nfills;
				sstats->bstats[i].nflushes += astats->bstats[i].nflushes;
				sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
				sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
				sdstats->bstats[i].nrequests += astats->bstats[i].nrequests;
				if (!destroyed) {
					sdstats->bstats[i].curregs += astats->bstats[i].curregs;
				} else {
					assert(astats->bstats[i].curregs == 0);
				}
				sstats->bstats[i].nruns += astats->bstats[i].nruns;
				sstats->bstats[i].reruns += astats->bstats[i].reruns;
				sstats->bstats[i].curruns += astats->bstats[i].curruns;
				sdstats->bstats[i].nfills += astats->bstats[i].nfills;
				sdstats->bstats[i].nflushes += astats->bstats[i].nflushes;
				sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
				sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
				if (!destroyed) {
					sdstats->bstats[i].curslabs += astats->bstats[i].curslabs;
				} else {
					assert(astats->bstats[i].curslabs == 0);
				}
				malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
				    &astats->bstats[i].mutex_data);
		}
		for (i = 0; i < NSIZES - NBINS; i++) {
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
			    &astats->lstats[i].nmalloc);
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
			    &astats->lstats[i].ndalloc);
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
			    &astats->lstats[i].nrequests);
			if (!destroyed) {
				sdstats->lstats[i].curlextents += astats->lstats[i].curlextents;
			} else {
				assert(astats->lstats[i].curlextents == 0);
			}
		for (i = 0; i < nlclasses; i++) {
			sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
			sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
			sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
			sstats->lstats[i].curruns += astats->lstats[i].curruns;
		}
		for (i = 0; i < nhclasses; i++) {
			sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
			sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
			sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks;
		}
	}
}
static void
ctl_arena_refresh(arena_t *arena, unsigned i)
{
	ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
	ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];

	ctl_arena_clear(astats);
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
    unsigned i, bool destroyed) {
	ctl_arena_t *ctl_arena = arenas_i(i);

	sstats->nthreads += astats->nthreads;
	if (config_stats) {
		ctl_arena_stats_amerge(astats, arena);
		ctl_arena_clear(ctl_arena);
		ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
		/* Merge into sum stats as well. */
		ctl_arena_stats_smerge(sstats, astats);
		ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
	}
static unsigned
ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
	unsigned arena_ind;
	ctl_arena_t *ctl_arena;

	if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
	    NULL) {
		ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
		arena_ind = ctl_arena->arena_ind;
	} else {
		astats->pactive += arena->nactive;
		astats->pdirty += arena->ndirty;
		/* Merge into sum stats as well. */
		sstats->pactive += arena->nactive;
		sstats->pdirty += arena->ndirty;
		arena_ind = ctl_arenas->narenas;
	}
}
static bool
ctl_grow(void)
{
	ctl_arena_stats_t *astats;

	/* Trigger stats allocation. */
	if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
		return UINT_MAX;
	}

	/* Initialize new arena. */
	if (arena_init(ctl_stats.narenas) == NULL)
		return (true);

	/* Allocate extended arena stats. */
	astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) *
	    sizeof(ctl_arena_stats_t));
	if (astats == NULL)
		return (true);

	/* Initialize the new astats element. */
	memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
	    sizeof(ctl_arena_stats_t));
	memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
	if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
		a0dalloc(astats);
		return (true);
	}
	/* Swap merged stats to their new location. */
	{
		ctl_arena_stats_t tstats;
		memcpy(&tstats, &astats[ctl_stats.narenas],
		    sizeof(ctl_arena_stats_t));
		memcpy(&astats[ctl_stats.narenas],
		    &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
		memcpy(&astats[ctl_stats.narenas + 1], &tstats,
		    sizeof(ctl_arena_stats_t));
	if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
		return UINT_MAX;
	}
	a0dalloc(ctl_stats.arenas);
	ctl_stats.arenas = astats;
	ctl_stats.narenas++;
	return (false);
	if (arena_ind == ctl_arenas->narenas) {
		ctl_arenas->narenas++;
	}
	return arena_ind;
}
static void
ctl_refresh(void)
{
	tsd_t *tsd;
ctl_background_thread_stats_read(tsdn_t *tsdn) {
	background_thread_stats_t *stats = &ctl_stats->background_thread;
	if (!have_background_thread ||
	    background_thread_stats_read(tsdn, stats)) {
		memset(stats, 0, sizeof(background_thread_stats_t));
		nstime_init(&stats->run_interval, 0);
	}
}
static void
ctl_refresh(tsdn_t *tsdn) {
	unsigned i;
	bool refreshed;
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
	ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);

	/*
	 * Clear sum stats, since they will be merged into by
	 * ctl_arena_refresh().
	 */
	ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
	ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
	ctl_arena_clear(ctl_sarena);

	tsd = tsd_fetch();
	for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
		tarenas[i] = arena_get(tsd, i, false, false);
		if (tarenas[i] == NULL && !refreshed) {
			tarenas[i] = arena_get(tsd, i, false, true);
			refreshed = true;
		}
	for (i = 0; i < ctl_arenas->narenas; i++) {
		tarenas[i] = arena_get(tsdn, i, false);
	}

	for (i = 0; i < ctl_stats.narenas; i++) {
		if (tarenas[i] != NULL)
			ctl_stats.arenas[i].nthreads = arena_nbound(i);
		else
			ctl_stats.arenas[i].nthreads = 0;
	}

	for (i = 0; i < ctl_stats.narenas; i++) {
	for (i = 0; i < ctl_arenas->narenas; i++) {
		ctl_arena_t *ctl_arena = arenas_i(i);
		bool initialized = (tarenas[i] != NULL);

		ctl_stats.arenas[i].initialized = initialized;
		if (initialized)
			ctl_arena_refresh(tarenas[i], i);
		ctl_arena->initialized = initialized;
		if (initialized) {
			ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
			    false);
		}
	}
	if (config_stats) {
		size_t base_allocated, base_resident, base_mapped;

		base_stats_get(&base_allocated, &base_resident, &base_mapped);
		ctl_stats.allocated =
		    ctl_stats.arenas[ctl_stats.narenas].allocated_small +
		    ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
		    ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
		ctl_stats.active =
		    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
		ctl_stats.metadata = base_allocated +
		    ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
		    ctl_stats.arenas[ctl_stats.narenas].astats.metadata_allocated;
		ctl_stats.resident = base_resident +
		    ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
		    ((ctl_stats.arenas[ctl_stats.narenas].pactive +
		    ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
		ctl_stats.mapped = base_mapped +
		    ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
	}
	ctl_epoch++;
		ctl_stats->allocated = ctl_sarena->astats->allocated_small +
		    atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
		    ATOMIC_RELAXED);
		ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
		ctl_stats->metadata = atomic_load_zu(
		    &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
		    atomic_load_zu(&ctl_sarena->astats->astats.internal,
		    ATOMIC_RELAXED);
		ctl_stats->metadata_thp = atomic_load_zu(
		    &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
		ctl_stats->resident = atomic_load_zu(
		    &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
		ctl_stats->mapped = atomic_load_zu(
		    &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
		ctl_stats->retained = atomic_load_zu(
		    &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);

		ctl_background_thread_stats_read(tsdn);

#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \
    malloc_mutex_lock(tsdn, &mtx); \
    malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \
    malloc_mutex_unlock(tsdn, &mtx);
		if (config_prof && opt_prof) {
			READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
			    bt2gctx_mtx);
		}
		if (have_background_thread) {
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_background_thread,
			    background_thread_lock);
		} else {
			memset(&ctl_stats->mutex_prof_data[
			    global_prof_mutex_background_thread], 0,
			    sizeof(mutex_prof_data_t));
		}
		/* We own ctl mutex already. */
		malloc_mutex_prof_read(tsdn,
		    &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
		    &ctl_mtx);
#undef READ_GLOBAL_MUTEX_PROF_DATA
	}
	ctl_arenas->epoch++;
}
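For reference, the refresh path above is what a mallctl consumer drives indirectly: writing the "epoch" control triggers ctl_refresh(), after which the merged totals computed above can be read back. A minimal caller-side sketch, not part of this diff; the entry points may carry a je_ prefix depending on how this bundled jemalloc is configured.

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    static void print_totals(void) {
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        /* Writing the epoch refreshes the cached stats snapshot. */
        mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

        size_t allocated, active, resident, mapped;
        sz = sizeof(size_t);
        mallctl("stats.allocated", &allocated, &sz, NULL, 0);
        mallctl("stats.active", &active, &sz, NULL, 0);
        mallctl("stats.resident", &resident, &sz, NULL, 0);
        mallctl("stats.mapped", &mapped, &sz, NULL, 0);
        printf("allocated=%zu active=%zu resident=%zu mapped=%zu\n",
            allocated, active, resident, mapped);
    }

Without the epoch write, repeated stats.* reads keep returning the same cached snapshot, which is why the epoch counter is incremented at the end of ctl_refresh().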
static bool
ctl_init(void)
{
ctl_init(tsd_t *tsd) {
	bool ret;
	tsdn_t *tsdn = tsd_tsdn(tsd);

	malloc_mutex_lock(&ctl_mtx);
	malloc_mutex_lock(tsdn, &ctl_mtx);
	if (!ctl_initialized) {
		ctl_arena_t *ctl_sarena, *ctl_darena;
		unsigned i;

		/*
		 * Allocate space for one extra arena stats element, which
		 * contains summed stats across all arenas.
		 * Allocate demand-zeroed space for pointers to the full
		 * range of supported arena indices.
		 */
		ctl_stats.narenas = narenas_total_get();
		ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc(
		    (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
		if (ctl_stats.arenas == NULL) {
		if (ctl_arenas == NULL) {
			ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn, b0get(),
			    sizeof(ctl_arenas_t), QUANTUM);
			if (ctl_arenas == NULL) {
				ret = true;
				goto label_return;
			}
		}
		if (config_stats && ctl_stats == NULL) {
			ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
			    sizeof(ctl_stats_t), QUANTUM);
			if (ctl_stats == NULL) {
				ret = true;
				goto label_return;
			}
		memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
		    sizeof(ctl_arena_stats_t));
		}

		/*
		 * Initialize all stats structures, regardless of whether they
		 * ever get used. Lazy initialization would allow errors to
		 * cause inconsistent state to be viewable by the application.
		 * Allocate space for the current full range of arenas
		 * here rather than doing it lazily elsewhere, in order
		 * to limit when OOM-caused errors can occur.
		 */
		if (config_stats) {
			unsigned i;
			for (i = 0; i <= ctl_stats.narenas; i++) {
				if (ctl_arena_init(&ctl_stats.arenas[i])) {
					unsigned j;
					for (j = 0; j < i; j++) {
						a0dalloc(ctl_stats.arenas[j].lstats);
						a0dalloc(ctl_stats.arenas[j].hstats);
					}
					a0dalloc(ctl_stats.arenas);
					ctl_stats.arenas = NULL;
		if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
		    true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_sarena->initialized = true;

		if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
		    false, true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_arena_clear(ctl_darena);
		/*
		 * Don't toggle ctl_darena to initialized until an arena is
		 * actually destroyed, so that arena.<i>.initialized can be used
		 * to query whether the stats are relevant.
		 */

		ctl_arenas->narenas = narenas_total_get();
		for (i = 0; i < ctl_arenas->narenas; i++) {
			if (arenas_i_impl(tsd, i, false, true) == NULL) {
				ret = true;
				goto label_return;
			}
		}
		ctl_stats.arenas[ctl_stats.narenas].initialized = true;

		ctl_epoch = 0;
		ctl_refresh();
		ql_new(&ctl_arenas->destroyed);
		ctl_refresh(tsdn);

		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}
static int
ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
    size_t *depthp)
{
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
    size_t *mibp, size_t *depthp) {
	int ret;
	const char *elm, *tdot, *dot;
	size_t elen, i, j;
...
...
@@ -827,9 +1101,10 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
			if (strlen(child->name) == elen &&
			    strncmp(elm, child->name, elen) == 0) {
				node = child;
				if (nodesp != NULL)
				if (nodesp != NULL) {
					nodesp[i] = (const ctl_node_t *)node;
				}
				mibp[i] = j;
				break;
			}
...
...
@@ -850,14 +1125,15 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
			}

			inode = ctl_indexed_node(node->children);
			node = inode->index(mibp, *depthp, (size_t)index);
			node = inode->index(tsdn, mibp, *depthp, (size_t)index);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}

			if (nodesp != NULL)
			if (nodesp != NULL) {
				nodesp[i] = (const ctl_node_t *)node;
			}
			mibp[i] = (size_t)index;
		}
...
...
@@ -890,33 +1166,33 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
	ret = 0;
label_return:
	return (ret);
	return ret;
}
int
ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
	int ret;
	size_t depth;
	ctl_node_t const *nodes[CTL_MAX_DEPTH];
	size_t mib[CTL_MAX_DEPTH];
	const ctl_named_node_t *node;

	if (!ctl_initialized && ctl_init()) {
	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	depth = CTL_MAX_DEPTH;
	ret = ctl_lookup(name, nodes, mib, &depth);
	if (ret != 0)
	ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
	if (ret != 0) {
		goto label_return;
	}

	node = ctl_named_node(nodes[depth - 1]);
	if (node != NULL && node->ctl)
		ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
	else {
	if (node != NULL && node->ctl) {
		ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
	} else {
		/* The name refers to a partial path through the ctl tree. */
		ret = ENOENT;
	}
...
...
@@ -926,29 +1202,27 @@ label_return:
}

int
ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{
ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
	int ret;

	if (!ctl_initialized && ctl_init()) {
	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	ret = ctl_lookup(name, NULL, mibp, miblenp);
	ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
label_return:
	return (ret);
}
int
ctl_bymib
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
ctl_bymib
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
const
ctl_named_node_t
*
node
;
size_t
i
;
if
(
!
ctl_initialized
&&
ctl_init
())
{
if
(
!
ctl_initialized
&&
ctl_init
(
tsd
))
{
ret
=
EAGAIN
;
goto
label_return
;
}
...
...
@@ -970,7 +1244,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
/* Indexed element. */
inode
=
ctl_indexed_node
(
node
->
children
);
node
=
inode
->
index
(
mib
,
miblen
,
mib
[
i
]);
node
=
inode
->
index
(
tsd_tsdn
(
tsd
),
mib
,
miblen
,
mib
[
i
]);
if
(
node
==
NULL
)
{
ret
=
ENOENT
;
goto
label_return
;
...
...
@@ -979,9 +1253,9 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
}
/* Call the ctl function. */
if
(
node
&&
node
->
ctl
)
ret
=
node
->
ctl
(
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
);
else
{
if
(
node
&&
node
->
ctl
)
{
ret
=
node
->
ctl
(
tsd
,
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
);
}
else
{
/* Partial MIB. */
ret
=
ENOENT
;
}
...
...
@@ -991,36 +1265,30 @@ label_return:
}

bool
ctl_boot(void)
{
	if (malloc_mutex_init(&ctl_mtx))
		return (true);
ctl_boot(void) {
	if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	ctl_initialized = false;

	return (false);
	return false;
}

void
ctl_prefork(void)
{
	malloc_mutex_prefork(&ctl_mtx);
ctl_prefork(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &ctl_mtx);
}

void
ctl_postfork_parent(void)
{
	malloc_mutex_postfork_parent(&ctl_mtx);
ctl_postfork_parent(tsdn_t *tsdn) {
	malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}

void
ctl_postfork_child(void)
{
	malloc_mutex_postfork_child(&ctl_mtx);
ctl_postfork_child(tsdn_t *tsdn) {
	malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
/******************************************************************************/
...
...
@@ -1071,71 +1339,80 @@ ctl_postfork_child(void)
} \
} while (0)
#define MIB_UNSIGNED(v, i) do { \
if (mib[i] > UINT_MAX) { \
ret = EFAULT; \
goto label_return; \
} \
v = (unsigned)mib[i]; \
} while (0)
/*
* There's a lot of code duplication in the following macros due to limitations
* in how nested cpp macros are expanded.
*/
#define CTL_RO_CLGEN(c, l, n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
if (!(c)) \
return (ENOENT); \
if (l) \
malloc_mutex_lock(&ctl_mtx); \
if (!(c)) { \
return ENOENT; \
} \
if (l) { \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
} \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
if (l) \
malloc_mutex_unlock(&ctl_mtx); \
return (ret); \
if (l) { \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
} \
return ret; \
}
#define CTL_RO_CGEN(c, n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
if (!(c)) \
return (ENOENT); \
malloc_mutex_lock(&ctl_mtx); \
if (!(c)) { \
return ENOENT; \
} \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
	malloc_mutex_unlock(&ctl_mtx); \
	return (ret); \
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
	return ret; \
}
#define CTL_RO_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
	malloc_mutex_lock(&ctl_mtx); \
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
	READONLY(); \
	oldval = (v); \
	READ(oldval, t); \
	\
	ret = 0; \
label_return: \
	malloc_mutex_unlock(&ctl_mtx); \
	return (ret); \
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
	return ret; \
}
/*
...
...
@@ -1144,28 +1421,27 @@ label_return: \
*/
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
if (!(c)) \
return (ENOENT); \
if (!(c)) { \
return ENOENT; \
} \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
	return (ret); \
	return ret; \
}
#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
...
...
@@ -1175,45 +1451,42 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
\
ret = 0; \
label_return: \
	return (ret); \
	return ret; \
}
#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
tsd_t *tsd; \
\
if (!(c)) \
return (ENOENT); \
if (!(c)) { \
return ENOENT; \
} \
READONLY(); \
tsd = tsd_fetch(); \
oldval = (m(tsd)); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
	return (ret); \
	return ret; \
}
#define CTL_RO_BOOL_CONFIG_GEN(n) \
#define CTL_RO_CONFIG_GEN(n, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
    void *newp, size_t newlen) \
{ \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
    size_t *oldlenp, void *newp, size_t newlen) { \
	int ret; \
	bool oldval; \
	t oldval; \
	\
	READONLY(); \
	oldval = n; \
	READ(oldval, bool); \
	READ(oldval, t); \
	\
	ret = 0; \
label_return: \
	return (ret); \
	return ret; \
}
/******************************************************************************/
...
...
@@ -1221,57 +1494,187 @@ label_return: \
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)

static int
epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	UNUSED uint64_t newval;

	malloc_mutex_lock(&ctl_mtx);
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(newval, uint64_t);
	if (newp != NULL)
		ctl_refresh();
	READ(ctl_epoch, uint64_t);
	if (newp != NULL) {
		ctl_refresh(tsd_tsdn(tsd));
	}
	READ(ctl_arenas->epoch, uint64_t);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
static
int
background_thread_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
bool
oldval
;
if
(
!
have_background_thread
)
{
return
ENOENT
;
}
background_thread_ctl_init
(
tsd_tsdn
(
tsd
));
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
background_thread_lock
);
if
(
newp
==
NULL
)
{
oldval
=
background_thread_enabled
();
READ
(
oldval
,
bool
);
}
else
{
if
(
newlen
!=
sizeof
(
bool
))
{
ret
=
EINVAL
;
goto
label_return
;
}
oldval
=
background_thread_enabled
();
READ
(
oldval
,
bool
);
bool
newval
=
*
(
bool
*
)
newp
;
if
(
newval
==
oldval
)
{
ret
=
0
;
goto
label_return
;
}
background_thread_enabled_set
(
tsd_tsdn
(
tsd
),
newval
);
if
(
newval
)
{
if
(
!
can_enable_background_thread
)
{
malloc_printf
(
"<jemalloc>: Error in dlsym("
"RTLD_NEXT,
\"
pthread_create
\"
). Cannot "
"enable background_thread
\n
"
);
ret
=
EFAULT
;
goto
label_return
;
}
if
(
background_threads_enable
(
tsd
))
{
ret
=
EFAULT
;
goto
label_return
;
}
}
else
{
if
(
background_threads_disable
(
tsd
))
{
ret
=
EFAULT
;
goto
label_return
;
}
}
}
ret
=
0
;
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
background_thread_lock
);
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
static
int
max_background_threads_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
size_t
oldval
;
if
(
!
have_background_thread
)
{
return
ENOENT
;
}
background_thread_ctl_init
(
tsd_tsdn
(
tsd
));
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
background_thread_lock
);
if
(
newp
==
NULL
)
{
oldval
=
max_background_threads
;
READ
(
oldval
,
size_t
);
}
else
{
if
(
newlen
!=
sizeof
(
size_t
))
{
ret
=
EINVAL
;
goto
label_return
;
}
oldval
=
max_background_threads
;
READ
(
oldval
,
size_t
);
size_t
newval
=
*
(
size_t
*
)
newp
;
if
(
newval
==
oldval
)
{
ret
=
0
;
goto
label_return
;
}
if
(
newval
>
opt_max_background_threads
)
{
ret
=
EINVAL
;
goto
label_return
;
}
if
(
background_thread_enabled
())
{
if
(
!
can_enable_background_thread
)
{
malloc_printf
(
"<jemalloc>: Error in dlsym("
"RTLD_NEXT,
\"
pthread_create
\"
). Cannot "
"enable background_thread
\n
"
);
ret
=
EFAULT
;
goto
label_return
;
}
background_thread_enabled_set
(
tsd_tsdn
(
tsd
),
false
);
if
(
background_threads_disable
(
tsd
))
{
ret
=
EFAULT
;
goto
label_return
;
}
max_background_threads
=
newval
;
background_thread_enabled_set
(
tsd_tsdn
(
tsd
),
true
);
if
(
background_threads_enable
(
tsd
))
{
ret
=
EFAULT
;
goto
label_return
;
}
}
else
{
max_background_threads
=
newval
;
}
}
ret
=
0
;
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
background_thread_lock
);
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
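Both handlers above fail with ENOENT when jemalloc is built without background threads, validate newlen before accepting a write, and take ctl_mtx and background_thread_lock in that order. From the application side they correspond to the "max_background_threads" and "background_thread" controls; a hedged sketch of typical use (the helper name is ours, and the symbols may be je_-prefixed in this bundled copy):

    #include <stdbool.h>
    #include <jemalloc/jemalloc.h>

    /* Cap the worker count first, then turn background purging on. */
    static int enable_background_purging(size_t max_threads) {
        int err = mallctl("max_background_threads", NULL, NULL,
            &max_threads, sizeof(max_threads));
        if (err != 0) {
            return err;   /* e.g. ENOENT if built without pthread support */
        }
        bool enable = true;
        return mallctl("background_thread", NULL, NULL, &enable,
            sizeof(enable));
    }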
/******************************************************************************/
CTL_RO_BOOL_CONFIG_GEN
(
config_cache_oblivious
)
CTL_RO_BOOL_CONFIG_GEN
(
config_debug
)
CTL_RO_BOOL_CONFIG_GEN
(
config_fill
)
CTL_RO_BOOL_CONFIG_GEN
(
config_lazy_lock
)
CTL_RO_BOOL_CONFIG_GEN
(
config_munmap
)
CTL_RO_BOOL_CONFIG_GEN
(
config_prof
)
CTL_RO_BOOL_CONFIG_GEN
(
config_prof_libgcc
)
CTL_RO_BOOL_CONFIG_GEN
(
config_prof_libunwind
)
CTL_RO_BOOL_CONFIG_GEN
(
config_stats
)
CTL_RO_BOOL_CONFIG_GEN
(
config_tcache
)
CTL_RO_BOOL_CONFIG_GEN
(
config_tls
)
CTL_RO_BOOL_CONFIG_GEN
(
config_utrace
)
CTL_RO_BOOL_CONFIG_GEN
(
config_valgrind
)
CTL_RO_BOOL_CONFIG_GEN
(
config_xmalloc
)
CTL_RO_CONFIG_GEN
(
config_cache_oblivious
,
bool
)
CTL_RO_CONFIG_GEN
(
config_debug
,
bool
)
CTL_RO_CONFIG_GEN
(
config_fill
,
bool
)
CTL_RO_CONFIG_GEN
(
config_lazy_lock
,
bool
)
CTL_RO_CONFIG_GEN
(
config_malloc_conf
,
const
char
*
)
CTL_RO_CONFIG_GEN
(
config_prof
,
bool
)
CTL_RO_CONFIG_GEN
(
config_prof_libgcc
,
bool
)
CTL_RO_CONFIG_GEN
(
config_prof_libunwind
,
bool
)
CTL_RO_CONFIG_GEN
(
config_stats
,
bool
)
CTL_RO_CONFIG_GEN
(
config_utrace
,
bool
)
CTL_RO_CONFIG_GEN
(
config_xmalloc
,
bool
)
/******************************************************************************/
CTL_RO_NL_GEN
(
opt_abort
,
opt_abort
,
bool
)
CTL_RO_NL_GEN
(
opt_abort_conf
,
opt_abort_conf
,
bool
)
CTL_RO_NL_GEN
(
opt_metadata_thp
,
metadata_thp_mode_names
[
opt_metadata_thp
],
const
char
*
)
CTL_RO_NL_GEN
(
opt_retain
,
opt_retain
,
bool
)
CTL_RO_NL_GEN
(
opt_dss
,
opt_dss
,
const
char
*
)
CTL_RO_NL_GEN
(
opt_lg_chunk
,
opt_lg_chunk
,
size_t
)
CTL_RO_NL_GEN
(
opt_narenas
,
opt_narenas
,
size_t
)
CTL_RO_NL_GEN
(
opt_lg_dirty_mult
,
opt_lg_dirty_mult
,
ssize_t
)
CTL_RO_NL_GEN
(
opt_narenas
,
opt_narenas
,
unsigned
)
CTL_RO_NL_GEN
(
opt_percpu_arena
,
percpu_arena_mode_names
[
opt_percpu_arena
],
const
char
*
)
CTL_RO_NL_GEN
(
opt_background_thread
,
opt_background_thread
,
bool
)
CTL_RO_NL_GEN
(
opt_max_background_threads
,
opt_max_background_threads
,
size_t
)
CTL_RO_NL_GEN
(
opt_dirty_decay_ms
,
opt_dirty_decay_ms
,
ssize_t
)
CTL_RO_NL_GEN
(
opt_muzzy_decay_ms
,
opt_muzzy_decay_ms
,
ssize_t
)
CTL_RO_NL_GEN
(
opt_stats_print
,
opt_stats_print
,
bool
)
CTL_RO_NL_GEN
(
opt_stats_print_opts
,
opt_stats_print_opts
,
const
char
*
)
CTL_RO_NL_CGEN
(
config_fill
,
opt_junk
,
opt_junk
,
const
char
*
)
CTL_RO_NL_CGEN
(
config_fill
,
opt_quarantine
,
opt_quarantine
,
size_t
)
CTL_RO_NL_CGEN
(
config_fill
,
opt_redzone
,
opt_redzone
,
bool
)
CTL_RO_NL_CGEN
(
config_fill
,
opt_zero
,
opt_zero
,
bool
)
CTL_RO_NL_CGEN
(
config_utrace
,
opt_utrace
,
opt_utrace
,
bool
)
CTL_RO_NL_CGEN
(
config_xmalloc
,
opt_xmalloc
,
opt_xmalloc
,
bool
)
CTL_RO_NL_CGEN
(
config_tcache
,
opt_tcache
,
opt_tcache
,
bool
)
CTL_RO_NL_CGEN
(
config_tcache
,
opt_lg_tcache_max
,
opt_lg_tcache_max
,
ssize_t
)
CTL_RO_NL_GEN
(
opt_tcache
,
opt_tcache
,
bool
)
CTL_RO_NL_GEN
(
opt_thp
,
thp_mode_names
[
opt_thp
],
const
char
*
)
CTL_RO_NL_GEN
(
opt_lg_extent_max_active_fit
,
opt_lg_extent_max_active_fit
,
size_t
)
CTL_RO_NL_GEN
(
opt_lg_tcache_max
,
opt_lg_tcache_max
,
ssize_t
)
CTL_RO_NL_CGEN
(
config_prof
,
opt_prof
,
opt_prof
,
bool
)
CTL_RO_NL_CGEN
(
config_prof
,
opt_prof_prefix
,
opt_prof_prefix
,
const
char
*
)
CTL_RO_NL_CGEN
(
config_prof
,
opt_prof_active
,
opt_prof_active
,
bool
)
...
...
@@ -1287,53 +1690,59 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
/******************************************************************************/
static
int
thread_arena_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
thread_arena_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
tsd_t
*
tsd
;
arena_t
*
oldarena
;
unsigned
newind
,
oldind
;
tsd
=
tsd_fetch
();
oldarena
=
arena_choose
(
tsd
,
NULL
);
if
(
oldarena
==
NULL
)
return
(
EAGAIN
);
malloc_mutex_lock
(
&
ctl_mtx
);
newind
=
oldind
=
oldarena
->
ind
;
if
(
oldarena
==
NULL
)
{
return
EAGAIN
;
}
newind
=
oldind
=
arena_ind_get
(
oldarena
);
WRITE
(
newind
,
unsigned
);
READ
(
oldind
,
unsigned
);
if
(
newind
!=
oldind
)
{
arena_t
*
newarena
;
if
(
newind
>=
ctl_stats
.
narenas
)
{
if
(
newind
>=
narenas_total_get
()
)
{
/* New arena index is out of range. */
ret
=
EFAULT
;
goto
label_return
;
}
if
(
have_percpu_arena
&&
PERCPU_ARENA_ENABLED
(
opt_percpu_arena
))
{
if
(
newind
<
percpu_arena_ind_limit
(
opt_percpu_arena
))
{
/*
* If perCPU arena is enabled, thread_arena
* control is not allowed for the auto arena
* range.
*/
ret
=
EPERM
;
goto
label_return
;
}
}
/* Initialize arena if necessary. */
newarena
=
arena_get
(
tsd
,
newind
,
true
,
true
);
newarena
=
arena_get
(
tsd
_tsdn
(
tsd
)
,
newind
,
true
);
if
(
newarena
==
NULL
)
{
ret
=
EAGAIN
;
goto
label_return
;
}
/* Set new arena/tcache associations. */
arena_migrate
(
tsd
,
oldind
,
newind
);
if
(
config_tcache
)
{
tcache_t
*
tcache
=
tsd_tcache_get
(
tsd
);
if
(
tcache
!=
NULL
)
{
tcache_arena_reassociate
(
tcache
,
oldarena
,
newarena
);
}
if
(
tcache_available
(
tsd
))
{
tcache_arena_reassociate
(
tsd_tsdn
(
tsd
),
tsd_tcachep_get
(
tsd
),
newarena
);
}
}
ret
=
0
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
return
(
ret
);
return
ret
;
}
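The handler above reads the calling thread's arena binding and optionally migrates it, rejecting out-of-range indices and, when per-CPU arenas are enabled, the automatic arena range (EPERM). An illustrative caller-side sketch of the "thread.arena" control, under the same assumptions as the earlier examples:

    #include <limits.h>
    #include <jemalloc/jemalloc.h>

    /* Rebind the calling thread to arena new_ind; return the previous index. */
    static unsigned rebind_thread_arena(unsigned new_ind) {
        unsigned old_ind;
        size_t sz = sizeof(old_ind);
        /* Passing oldp and newp together reads the old binding and installs the new one. */
        if (mallctl("thread.arena", &old_ind, &sz, &new_ind,
            sizeof(new_ind)) != 0) {
            return UINT_MAX;   /* EFAULT, EPERM or EAGAIN per the checks above */
        }
        return old_ind;
    }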
CTL_TSD_RO_NL_CGEN
(
config_stats
,
thread_allocated
,
tsd_thread_allocated_get
,
...
...
@@ -1346,100 +1755,94 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
tsd_thread_deallocatedp_get
,
uint64_t
*
)
static
int
thread_tcache_enabled_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
thread_tcache_enabled_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
bool
oldval
;
if
(
!
config_tcache
)
return
(
ENOENT
);
oldval
=
tcache_enabled_get
();
oldval
=
tcache_enabled_get
(
tsd
);
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
bool
))
{
ret
=
EINVAL
;
goto
label_return
;
}
tcache_enabled_set
(
*
(
bool
*
)
newp
);
tcache_enabled_set
(
tsd
,
*
(
bool
*
)
newp
);
}
READ
(
oldval
,
bool
);
ret
=
0
;
label_return:
return
(
ret
)
;
return
ret
;
}
static
int
thread_tcache_flush_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
thread_tcache_flush_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
if
(
!
config_tcache
)
return
(
ENOENT
);
if
(
!
tcache_available
(
tsd
))
{
ret
=
EFAULT
;
goto
label_return
;
}
READONLY
();
WRITEONLY
();
tcache_flush
();
tcache_flush
(
tsd
);
ret
=
0
;
label_return:
return
(
ret
)
;
return
ret
;
}
static
int
thread_prof_name_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
thread_prof_name_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
if
(
!
config_prof
)
return
(
ENOENT
);
if
(
!
config_prof
)
{
return
ENOENT
;
}
READ_XOR_WRITE
();
if
(
newp
!=
NULL
)
{
tsd_t
*
tsd
;
if
(
newlen
!=
sizeof
(
const
char
*
))
{
ret
=
EINVAL
;
goto
label_return
;
}
tsd
=
tsd_fetch
();
if
((
ret
=
prof_thread_name_set
(
tsd
,
*
(
const
char
**
)
newp
))
!=
0
)
0
)
{
goto
label_return
;
}
}
else
{
const
char
*
oldname
=
prof_thread_name_get
();
const
char
*
oldname
=
prof_thread_name_get
(
tsd
);
READ
(
oldname
,
const
char
*
);
}
ret
=
0
;
label_return:
return
(
ret
)
;
return
ret
;
}
static
int
thread_prof_active_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
thread_prof_active_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
bool
oldval
;
if
(
!
config_prof
)
return
(
ENOENT
);
if
(
!
config_prof
)
{
return
ENOENT
;
}
oldval
=
prof_thread_active_get
();
oldval
=
prof_thread_active_get
(
tsd
);
if
(
newp
!=
NULL
)
{
if
(
newlen
!=
sizeof
(
bool
))
{
ret
=
EINVAL
;
goto
label_return
;
}
if
(
prof_thread_active_set
(
*
(
bool
*
)
newp
))
{
if
(
prof_thread_active_set
(
tsd
,
*
(
bool
*
)
newp
))
{
ret
=
EAGAIN
;
goto
label_return
;
}
...
...
@@ -1448,25 +1851,17 @@ thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp,
ret
=
0
;
label_return:
return
(
ret
)
;
return
ret
;
}
/******************************************************************************/
static
int
tcache_create_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
tcache_create_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
tsd_t
*
tsd
;
unsigned
tcache_ind
;
if
(
!
config_tcache
)
return
(
ENOENT
);
tsd
=
tsd_fetch
();
malloc_mutex_lock
(
&
ctl_mtx
);
READONLY
();
if
(
tcaches_create
(
tsd
,
&
tcache_ind
))
{
ret
=
EFAULT
;
...
...
@@ -1476,23 +1871,15 @@ tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret
=
0
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
return
(
ret
);
return
ret
;
}
static
int
tcache_flush_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
tcache_flush_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
tsd_t
*
tsd
;
unsigned
tcache_ind
;
if
(
!
config_tcache
)
return
(
ENOENT
);
tsd
=
tsd_fetch
();
WRITEONLY
();
tcache_ind
=
UINT_MAX
;
WRITE
(
tcache_ind
,
unsigned
);
...
...
@@ -1504,22 +1891,15 @@ tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret
=
0
;
label_return:
return
(
ret
)
;
return
ret
;
}
static
int
tcache_destroy_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
tcache_destroy_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
tsd_t
*
tsd
;
unsigned
tcache_ind
;
if
(
!
config_tcache
)
return
(
ENOENT
);
tsd
=
tsd_fetch
();
WRITEONLY
();
tcache_ind
=
UINT_MAX
;
WRITE
(
tcache_ind
,
unsigned
);
...
...
@@ -1531,71 +1911,239 @@ tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp,
ret
=
0
;
label_return:
return
(
ret
)
;
return
ret
;
}
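The tcache.create, tcache.flush and tcache.destroy handlers above manage explicit thread caches identified by an unsigned index. A sketch of how an application might drive them through jemalloc's non-standard mallocx/dallocx entry points; illustrative only:

    #include <jemalloc/jemalloc.h>

    /* Create a manually managed tcache, allocate through it, then tear it down. */
    static void explicit_tcache_demo(void) {
        unsigned tc;
        size_t sz = sizeof(tc);
        if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0) {
            return;
        }
        void *p = mallocx(128, MALLOCX_TCACHE(tc));
        if (p != NULL) {
            dallocx(p, MALLOCX_TCACHE(tc));
        }
        /* tcache.flush and tcache.destroy are write-only: pass the index as newp. */
        mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));
        mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
    }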
/******************************************************************************/
/* ctl_mutex must be held during execution of this function. */
static
int
arena_i_initialized_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
tsdn_t
*
tsdn
=
tsd_tsdn
(
tsd
);
unsigned
arena_ind
;
bool
initialized
;
READONLY
();
MIB_UNSIGNED
(
arena_ind
,
1
);
malloc_mutex_lock
(
tsdn
,
&
ctl_mtx
);
initialized
=
arenas_i
(
arena_ind
)
->
initialized
;
malloc_mutex_unlock
(
tsdn
,
&
ctl_mtx
);
READ
(
initialized
,
bool
);
ret
=
0
;
label_return:
return
ret
;
}
static
void
arena_purge
(
unsigned
arena_ind
)
{
tsd_t
*
tsd
;
arena_i_decay
(
tsdn_t
*
tsdn
,
unsigned
arena_ind
,
bool
all
)
{
malloc_mutex_lock
(
tsdn
,
&
ctl_mtx
);
{
unsigned
narenas
=
ctl_arenas
->
narenas
;
/*
* Access via index narenas is deprecated, and scheduled for
* removal in 6.0.0.
*/
if
(
arena_ind
==
MALLCTL_ARENAS_ALL
||
arena_ind
==
narenas
)
{
unsigned
i
;
bool
refreshed
;
VARIABLE_ARRAY
(
arena_t
*
,
tarenas
,
ctl_stats
.
narenas
);
VARIABLE_ARRAY
(
arena_t
*
,
tarenas
,
narenas
);
tsd
=
tsd_fetch
();
for
(
i
=
0
,
refreshed
=
false
;
i
<
ctl_stats
.
narenas
;
i
++
)
{
tarenas
[
i
]
=
arena_get
(
tsd
,
i
,
false
,
false
);
if
(
tarenas
[
i
]
==
NULL
&&
!
refreshed
)
{
tarenas
[
i
]
=
arena_get
(
tsd
,
i
,
false
,
true
);
refreshed
=
true
;
}
for
(
i
=
0
;
i
<
narenas
;
i
++
)
{
tarenas
[
i
]
=
arena_get
(
tsdn
,
i
,
false
);
}
if
(
arena_ind
==
ctl_stats
.
narenas
)
{
unsigned
i
;
for
(
i
=
0
;
i
<
ctl_stats
.
narenas
;
i
++
)
{
if
(
tarenas
[
i
]
!=
NULL
)
arena_purge_all
(
tarenas
[
i
]);
/*
* No further need to hold ctl_mtx, since narenas and
* tarenas contain everything needed below.
*/
malloc_mutex_unlock
(
tsdn
,
&
ctl_mtx
);
for
(
i
=
0
;
i
<
narenas
;
i
++
)
{
if
(
tarenas
[
i
]
!=
NULL
)
{
arena_decay
(
tsdn
,
tarenas
[
i
],
false
,
all
);
}
}
}
else
{
assert
(
arena_ind
<
ctl_stats
.
narenas
);
if
(
tarenas
[
arena_ind
]
!=
NULL
)
arena_purge_all
(
tarenas
[
arena_ind
]);
arena_t
*
tarena
;
assert
(
arena_ind
<
narenas
);
tarena
=
arena_get
(
tsdn
,
arena_ind
,
false
);
/* No further need to hold ctl_mtx. */
malloc_mutex_unlock
(
tsdn
,
&
ctl_mtx
);
if
(
tarena
!=
NULL
)
{
arena_decay
(
tsdn
,
tarena
,
false
,
all
);
}
}
}
}
static
int
arena_i_purge_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
arena_i_decay_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
;
READONLY
();
WRITEONLY
();
malloc_mutex_lock
(
&
ctl_mtx
);
arena_purge
(
mib
[
1
]);
malloc_mutex_unlock
(
&
ctl_mtx
);
MIB_UNSIGNED
(
arena_ind
,
1
);
arena_i_decay
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
);
ret
=
0
;
label_return:
return
(
ret
)
;
return
ret
;
}
static
int
arena_i_dss_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
arena_i_purge_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
;
READONLY
();
WRITEONLY
();
MIB_UNSIGNED
(
arena_ind
,
1
);
arena_i_decay
(
tsd_tsdn
(
tsd
),
arena_ind
,
true
);
ret
=
0
;
label_return:
return
ret
;
}
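After this change arena.<i>.purge and arena.<i>.decay both funnel into arena_i_decay(), with the arena index taken from the MIB rather than read under ctl_mtx for the whole operation. Because the index is embedded in the control name, callers typically translate the name once and patch the MIB, roughly as in this sketch:

    #include <jemalloc/jemalloc.h>

    /* Force a purge of one arena; "arena.<i>.purge" carries the index inside the MIB. */
    static void purge_arena(unsigned arena_ind) {
        size_t mib[3];
        size_t miblen = sizeof(mib) / sizeof(mib[0]);
        if (mallctlnametomib("arena.0.purge", mib, &miblen) != 0) {
            return;
        }
        mib[1] = arena_ind;   /* overwrite the index element */
        mallctlbymib(mib, miblen, NULL, NULL, NULL, 0);
    }

Passing MALLCTL_ARENAS_ALL as the index (or, while still supported, the deprecated narenas value) purges every initialized arena, matching the branch at the top of arena_i_decay().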
static
int
arena_i_reset_destroy_helper
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
,
unsigned
*
arena_ind
,
arena_t
**
arena
)
{
int
ret
;
READONLY
();
WRITEONLY
();
MIB_UNSIGNED
(
*
arena_ind
,
1
);
*
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
*
arena_ind
,
false
);
if
(
*
arena
==
NULL
||
arena_is_auto
(
*
arena
))
{
ret
=
EFAULT
;
goto
label_return
;
}
ret
=
0
;
label_return:
return
ret
;
}
static
void
arena_reset_prepare_background_thread
(
tsd_t
*
tsd
,
unsigned
arena_ind
)
{
/* Temporarily disable the background thread during arena reset. */
if
(
have_background_thread
)
{
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
background_thread_lock
);
if
(
background_thread_enabled
())
{
unsigned
ind
=
arena_ind
%
ncpus
;
background_thread_info_t
*
info
=
&
background_thread_info
[
ind
];
assert
(
info
->
state
==
background_thread_started
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
info
->
mtx
);
info
->
state
=
background_thread_paused
;
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
info
->
mtx
);
}
}
}
static
void
arena_reset_finish_background_thread
(
tsd_t
*
tsd
,
unsigned
arena_ind
)
{
if
(
have_background_thread
)
{
if
(
background_thread_enabled
())
{
unsigned
ind
=
arena_ind
%
ncpus
;
background_thread_info_t
*
info
=
&
background_thread_info
[
ind
];
assert
(
info
->
state
==
background_thread_paused
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
info
->
mtx
);
info
->
state
=
background_thread_started
;
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
info
->
mtx
);
}
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
background_thread_lock
);
}
}
static
int
arena_i_reset_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
;
arena_t
*
arena
;
ret
=
arena_i_reset_destroy_helper
(
tsd
,
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
,
&
arena_ind
,
&
arena
);
if
(
ret
!=
0
)
{
return
ret
;
}
arena_reset_prepare_background_thread
(
tsd
,
arena_ind
);
arena_reset
(
tsd
,
arena
);
arena_reset_finish_background_thread
(
tsd
,
arena_ind
);
return
ret
;
}
static
int
arena_i_destroy_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
;
arena_t
*
arena
;
ctl_arena_t
*
ctl_darena
,
*
ctl_arena
;
ret
=
arena_i_reset_destroy_helper
(
tsd
,
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
,
&
arena_ind
,
&
arena
);
if
(
ret
!=
0
)
{
goto
label_return
;
}
if
(
arena_nthreads_get
(
arena
,
false
)
!=
0
||
arena_nthreads_get
(
arena
,
true
)
!=
0
)
{
ret
=
EFAULT
;
goto
label_return
;
}
arena_reset_prepare_background_thread
(
tsd
,
arena_ind
);
/* Merge stats after resetting and purging arena. */
arena_reset
(
tsd
,
arena
);
arena_decay
(
tsd_tsdn
(
tsd
),
arena
,
false
,
true
);
ctl_darena
=
arenas_i
(
MALLCTL_ARENAS_DESTROYED
);
ctl_darena
->
initialized
=
true
;
ctl_arena_refresh
(
tsd_tsdn
(
tsd
),
arena
,
ctl_darena
,
arena_ind
,
true
);
/* Destroy arena. */
arena_destroy
(
tsd
,
arena
);
ctl_arena
=
arenas_i
(
arena_ind
);
ctl_arena
->
initialized
=
false
;
/* Record arena index for later recycling via arenas.create. */
ql_elm_new
(
ctl_arena
,
destroyed_link
);
ql_tail_insert
(
&
ctl_arenas
->
destroyed
,
ctl_arena
,
destroyed_link
);
arena_reset_finish_background_thread
(
tsd
,
arena_ind
);
assert
(
ret
==
0
);
label_return:
return
ret
;
}
static
int
arena_i_dss_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
const
char
*
dss
=
NULL
;
unsigned
arena_ind
=
mib
[
1
]
;
unsigned
arena_ind
;
dss_prec_t
dss_prec_old
=
dss_prec_limit
;
dss_prec_t
dss_prec
=
dss_prec_limit
;
malloc_mutex_lock
(
&
ctl_mtx
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
WRITE
(
dss
,
const
char
*
);
MIB_UNSIGNED
(
arena_ind
,
1
);
if
(
dss
!=
NULL
)
{
int
i
;
bool
match
=
false
;
...
...
@@ -1614,21 +2162,26 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
}
}
if
(
arena_ind
<
ctl_stats
.
narenas
)
{
arena_t
*
arena
=
arena_get
(
tsd_fetch
(),
arena_ind
,
false
,
true
);
if
(
arena
==
NULL
||
(
dss_prec
!=
dss_prec_limit
&&
arena_dss_prec_set
(
arena
,
dss_prec
)))
{
/*
* Access via index narenas is deprecated, and scheduled for removal in
* 6.0.0.
*/
if
(
arena_ind
==
MALLCTL_ARENAS_ALL
||
arena_ind
==
ctl_arenas
->
narenas
)
{
if
(
dss_prec
!=
dss_prec_limit
&&
extent_dss_prec_set
(
dss_prec
))
{
ret
=
EFAULT
;
goto
label_return
;
}
dss_prec_old
=
ar
en
a
_dss_prec_get
(
arena
);
dss_prec_old
=
ext
en
t
_dss_prec_get
();
}
else
{
if
(
dss_prec
!=
dss_prec_limit
&&
chunk_dss_prec_set
(
dss_prec
))
{
arena_t
*
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
);
if
(
arena
==
NULL
||
(
dss_prec
!=
dss_prec_limit
&&
arena_dss_prec_set
(
arena
,
dss_prec
)))
{
ret
=
EFAULT
;
goto
label_return
;
}
dss_prec_old
=
chunk
_dss_prec_get
();
dss_prec_old
=
arena
_dss_prec_get
(
arena
);
}
dss
=
dss_prec_names
[
dss_prec_old
];
...
...
@@ -1636,26 +2189,27 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret
=
0
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
return
(
ret
)
;
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
static
int
arena_i_lg_dirty_mult_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
arena_i_decay_ms_ctl_impl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
,
bool
dirty
)
{
int
ret
;
unsigned
arena_ind
=
mib
[
1
]
;
unsigned
arena_ind
;
arena_t
*
arena
;
arena
=
arena_get
(
tsd_fetch
(),
arena_ind
,
false
,
true
);
MIB_UNSIGNED
(
arena_ind
,
1
);
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
);
if
(
arena
==
NULL
)
{
ret
=
EFAULT
;
goto
label_return
;
}
if
(
oldp
!=
NULL
&&
oldlenp
!=
NULL
)
{
size_t
oldval
=
arena_lg_dirty_mult_get
(
arena
);
size_t
oldval
=
dirty
?
arena_dirty_decay_ms_get
(
arena
)
:
arena_muzzy_decay_ms_get
(
arena
);
READ
(
oldval
,
ssize_t
);
}
if
(
newp
!=
NULL
)
{
...
...
@@ -1663,7 +2217,9 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
ret
=
EINVAL
;
goto
label_return
;
}
if
(
arena_lg_dirty_mult_set
(
arena
,
*
(
ssize_t
*
)
newp
))
{
if
(
dirty
?
arena_dirty_decay_ms_set
(
tsd_tsdn
(
tsd
),
arena
,
*
(
ssize_t
*
)
newp
)
:
arena_muzzy_decay_ms_set
(
tsd_tsdn
(
tsd
),
arena
,
*
(
ssize_t
*
)
newp
))
{
ret
=
EFAULT
;
goto
label_return
;
}
...
...
@@ -1671,29 +2227,67 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
ret
=
0
;
label_return:
return
(
ret
);
return
ret
;
}
static
int
arena_i_dirty_decay_ms_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
return
arena_i_decay_ms_ctl_impl
(
tsd
,
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
,
true
);
}
static
int
arena_i_muzzy_decay_ms_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
return
arena_i_decay_ms_ctl_impl
(
tsd
,
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
,
false
);
}
static
int
arena_i_chunk_hooks_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
arena_i_extent_hooks_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
=
mib
[
1
]
;
unsigned
arena_ind
;
arena_t
*
arena
;
malloc_mutex_lock
(
&
ctl_mtx
);
if
(
arena_ind
<
narenas_total_get
()
&&
(
arena
=
arena_get
(
tsd_fetch
(),
arena_ind
,
false
,
true
))
!=
NULL
)
{
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
MIB_UNSIGNED
(
arena_ind
,
1
);
if
(
arena_ind
<
narenas_total_get
())
{
extent_hooks_t
*
old_extent_hooks
;
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
);
if
(
arena
==
NULL
)
{
if
(
arena_ind
>=
narenas_auto
)
{
ret
=
EFAULT
;
goto
label_return
;
}
old_extent_hooks
=
(
extent_hooks_t
*
)
&
extent_hooks_default
;
READ
(
old_extent_hooks
,
extent_hooks_t
*
);
if
(
newp
!=
NULL
)
{
chunk_hooks_t
old_chunk_hooks
,
new_chunk_hooks
;
WRITE
(
new_chunk_hooks
,
chunk_hooks_t
);
old_chunk_hooks
=
chunk_hooks_set
(
arena
,
&
new_chunk_hooks
);
READ
(
old_chunk_hooks
,
chunk_hooks_t
);
/* Initialize a new arena as a side effect. */
extent_hooks_t
*
new_extent_hooks
JEMALLOC_CC_SILENCE_INIT
(
NULL
);
WRITE
(
new_extent_hooks
,
extent_hooks_t
*
);
arena
=
arena_init
(
tsd_tsdn
(
tsd
),
arena_ind
,
new_extent_hooks
);
if
(
arena
==
NULL
)
{
ret
=
EFAULT
;
goto
label_return
;
}
}
}
else
{
if
(
newp
!=
NULL
)
{
extent_hooks_t
*
new_extent_hooks
JEMALLOC_CC_SILENCE_INIT
(
NULL
);
WRITE
(
new_extent_hooks
,
extent_hooks_t
*
);
old_extent_hooks
=
extent_hooks_set
(
tsd
,
arena
,
new_extent_hooks
);
READ
(
old_extent_hooks
,
extent_hooks_t
*
);
}
else
{
chunk_hooks_t
old_chunk_hooks
=
chunk_hooks_get
(
arena
);
READ
(
old_chunk_hooks
,
chunk_hooks_t
);
old_extent_hooks
=
extent_hooks_get
(
arena
);
READ
(
old_extent_hooks
,
extent_hooks_t
*
);
}
}
}
else
{
ret
=
EFAULT
;
...
...
@@ -1701,85 +2295,100 @@ arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
}
ret
=
0
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
return
(
ret
);
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
static
int
arena_i_retain_grow_limit_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
arena_ind
;
arena_t
*
arena
;
if
(
!
opt_retain
)
{
/* Only relevant when retain is enabled. */
return
ENOENT
;
}
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
MIB_UNSIGNED
(
arena_ind
,
1
);
if
(
arena_ind
<
narenas_total_get
()
&&
(
arena
=
arena_get
(
tsd_tsdn
(
tsd
),
arena_ind
,
false
))
!=
NULL
)
{
size_t
old_limit
,
new_limit
;
if
(
newp
!=
NULL
)
{
WRITE
(
new_limit
,
size_t
);
}
bool
err
=
arena_retain_grow_limit_get_set
(
tsd
,
arena
,
&
old_limit
,
newp
!=
NULL
?
&
new_limit
:
NULL
);
if
(
!
err
)
{
READ
(
old_limit
,
size_t
);
ret
=
0
;
}
else
{
ret
=
EFAULT
;
}
}
else
{
ret
=
EFAULT
;
}
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
static
const
ctl_named_node_t
*
arena_i_index
(
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
const
ctl_named_node_t
*
ret
;
arena_i_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
const
ctl_named_node_t
*
ret
;
malloc_mutex_lock
(
&
ctl_mtx
);
if
(
i
>
ctl_stats
.
narenas
)
{
malloc_mutex_lock
(
tsdn
,
&
ctl_mtx
);
switch
(
i
)
{
case
MALLCTL_ARENAS_ALL
:
case
MALLCTL_ARENAS_DESTROYED
:
break
;
default:
if
(
i
>
ctl_arenas
->
narenas
)
{
ret
=
NULL
;
goto
label_return
;
}
break
;
}
ret
=
super_arena_i_node
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
return
(
ret
)
;
malloc_mutex_unlock
(
tsdn
,
&
ctl_mtx
);
return
ret
;
}
/******************************************************************************/
static
int
arenas_narenas_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
arenas_narenas_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
narenas
;
malloc_mutex_lock
(
&
ctl_mtx
);
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
READONLY
();
if
(
*
oldlenp
!=
sizeof
(
unsigned
))
{
ret
=
EINVAL
;
goto
label_return
;
}
narenas
=
ctl_
stats
.
narenas
;
narenas
=
ctl_
arenas
->
narenas
;
READ
(
narenas
,
unsigned
);
ret
=
0
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
return
(
ret
)
;
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
static
int
arenas_initialized_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
unsigned
nread
,
i
;
malloc_mutex_lock
(
&
ctl_mtx
);
READONLY
();
if
(
*
oldlenp
!=
ctl_stats
.
narenas
*
sizeof
(
bool
))
{
ret
=
EINVAL
;
nread
=
(
*
oldlenp
<
ctl_stats
.
narenas
*
sizeof
(
bool
))
?
(
*
oldlenp
/
sizeof
(
bool
))
:
ctl_stats
.
narenas
;
}
else
{
ret
=
0
;
nread
=
ctl_stats
.
narenas
;
}
for
(
i
=
0
;
i
<
nread
;
i
++
)
((
bool
*
)
oldp
)[
i
]
=
ctl_stats
.
arenas
[
i
].
initialized
;
label_return:
malloc_mutex_unlock
(
&
ctl_mtx
);
return
(
ret
);
}
static
int
arenas_lg_dirty_mult_ctl
(
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
arenas_decay_ms_ctl_impl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
,
bool
dirty
)
{
int
ret
;
if
(
oldp
!=
NULL
&&
oldlenp
!=
NULL
)
{
size_t
oldval
=
arena_lg_dirty_mult_default_get
();
size_t
oldval
=
(
dirty
?
arena_dirty_decay_ms_default_get
()
:
arena_muzzy_decay_ms_default_get
());
READ
(
oldval
,
ssize_t
);
}
if
(
newp
!=
NULL
)
{
...
...
@@ -1787,7 +2396,8 @@ arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
ret
=
EINVAL
;
goto
label_return
;
}
if
(
arena_lg_dirty_mult_default_set
(
*
(
ssize_t
*
)
newp
))
{
if
(
dirty
?
arena_dirty_decay_ms_default_set
(
*
(
ssize_t
*
)
newp
)
:
arena_muzzy_decay_ms_default_set
(
*
(
ssize_t
*
)
newp
))
{
ret
=
EFAULT
;
goto
label_return
;
}
...
...
@@ -1795,193 +2405,229 @@ arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
ret
=
0
;
label_return:
return
(
ret
);
return
ret
;
}
static
int
arenas_dirty_decay_ms_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
return
arenas_decay_ms_ctl_impl
(
tsd
,
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
,
true
);
}
static
int
arenas_muzzy_decay_ms_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
return
arenas_decay_ms_ctl_impl
(
tsd
,
mib
,
miblen
,
oldp
,
oldlenp
,
newp
,
newlen
,
false
);
}
CTL_RO_NL_GEN
(
arenas_quantum
,
QUANTUM
,
size_t
)
CTL_RO_NL_GEN
(
arenas_page
,
PAGE
,
size_t
)
CTL_RO_NL_
C
GEN
(
config_tcache
,
arenas_tcache_max
,
tcache_maxclass
,
size_t
)
CTL_RO_NL_GEN
(
arenas_tcache_max
,
tcache_maxclass
,
size_t
)
CTL_RO_NL_GEN
(
arenas_nbins
,
NBINS
,
unsigned
)
CTL_RO_NL_
C
GEN
(
config_tcache
,
arenas_nhbins
,
nhbins
,
unsigned
)
CTL_RO_NL_GEN
(
arenas_bin_i_size
,
arena_
bin_info
[
mib
[
2
]].
reg_size
,
size_t
)
CTL_RO_NL_GEN
(
arenas_bin_i_nregs
,
arena_
bin_info
[
mib
[
2
]].
nregs
,
uint32_t
)
CTL_RO_NL_GEN
(
arenas_bin_i_
run
_size
,
arena_
bin_info
[
mib
[
2
]].
run
_size
,
size_t
)
CTL_RO_NL_GEN
(
arenas_nhbins
,
nhbins
,
unsigned
)
CTL_RO_NL_GEN
(
arenas_bin_i_size
,
bin_info
s
[
mib
[
2
]].
reg_size
,
size_t
)
CTL_RO_NL_GEN
(
arenas_bin_i_nregs
,
bin_info
s
[
mib
[
2
]].
nregs
,
uint32_t
)
CTL_RO_NL_GEN
(
arenas_bin_i_
slab
_size
,
bin_info
s
[
mib
[
2
]].
slab
_size
,
size_t
)
static
const
ctl_named_node_t
*
arenas_bin_i_index
(
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
if
(
i
>
NBINS
)
return
(
NULL
);
return
(
super_arenas_bin_i_node
);
arenas_bin_i_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
if
(
i
>
NBINS
)
{
return
NULL
;
}
return
super_arenas_bin_i_node
;
}
CTL_RO_NL_GEN
(
arenas_nlruns
,
nlclasses
,
unsigned
)
CTL_RO_NL_GEN
(
arenas_lrun_i_size
,
index2size
(
NBINS
+
mib
[
2
]),
size_t
)
CTL_RO_NL_GEN
(
arenas_nlextents
,
NSIZES
-
NBINS
,
unsigned
)
CTL_RO_NL_GEN
(
arenas_lextent_i_size
,
sz_index2size
(
NBINS
+
(
szind_t
)
mib
[
2
]),
size_t
)
static
const
ctl_named_node_t
*
arenas_l
run
_i_index
(
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
if
(
i
>
nlclasses
)
return
(
NULL
);
return
(
super_arenas_l
run
_i_node
)
;
arenas_l
extent
_i_index
(
tsdn_t
*
tsdn
,
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
if
(
i
>
NSIZES
-
NBINS
)
{
return
NULL
;
}
return
super_arenas_l
extent
_i_node
;
}
CTL_RO_NL_GEN
(
arenas_nhchunks
,
nhclasses
,
unsigned
)
CTL_RO_NL_GEN
(
arenas_hchunk_i_size
,
index2size
(
NBINS
+
nlclasses
+
mib
[
2
]),
size_t
)
static
const
ctl_named_node_t
*
arenas_hchunk_i_index
(
const
size_t
*
mib
,
size_t
miblen
,
size_t
i
)
{
static
int
arenas_create_ctl
(
tsd_t
*
tsd
,
const
size_t
*
mib
,
size_t
miblen
,
void
*
oldp
,
size_t
*
oldlenp
,
void
*
newp
,
size_t
newlen
)
{
int
ret
;
extent_hooks_t
*
extent_hooks
;
unsigned
arena_ind
;
malloc_mutex_lock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
if
(
i
>
nhclasses
)
return
(
NULL
);
return
(
super_arenas_hchunk_i_node
);
extent_hooks
=
(
extent_hooks_t
*
)
&
extent_hooks_default
;
WRITE
(
extent_hooks
,
extent_hooks_t
*
);
if
((
arena_ind
=
ctl_arena_init
(
tsd
,
extent_hooks
))
==
UINT_MAX
)
{
ret
=
EAGAIN
;
goto
label_return
;
}
READ
(
arena_ind
,
unsigned
);
ret
=
0
;
label_return:
malloc_mutex_unlock
(
tsd_tsdn
(
tsd
),
&
ctl_mtx
);
return
ret
;
}
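arenas.create, which this diff introduces alongside the older arenas.extend path, returns the index of a freshly initialized arena and optionally accepts caller-supplied extent hooks through newp. A minimal sketch of creating an arena with the default hooks and allocating from it; the helper is ours, not part of the source:

    #include <jemalloc/jemalloc.h>

    /* Ask for a fresh arena (default extent hooks) and allocate from it. */
    static void *alloc_in_new_arena(size_t size) {
        unsigned arena_ind;
        size_t sz = sizeof(arena_ind);
        if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
            return NULL;
        }
        /* MALLOCX_TCACHE_NONE keeps the allocation pinned to that arena. */
        return mallocx(size, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
    }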
static int
arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
        int ret;
        unsigned narenas;
        unsigned arena_ind;
        void *ptr;
        extent_t *extent;
        arena_t *arena;

        malloc_mutex_lock(&ctl_mtx);
        READONLY();
        if (ctl_grow()) {
                ret = EAGAIN;
        ptr = NULL;
        ret = EINVAL;
        malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
        WRITE(ptr, void *);
        extent = iealloc(tsd_tsdn(tsd), ptr);
        if (extent == NULL)
                goto label_return;
        }
        narenas = ctl_stats.narenas - 1;
        READ(narenas, unsigned);
        arena = extent_arena_get(extent);
        if (arena == NULL)
                goto label_return;
        arena_ind = arena_ind_get(arena);
        READ(arena_ind, unsigned);

        ret = 0;
label_return:
        malloc_mutex_unlock(&ctl_mtx);
        return (ret);
        malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
        return ret;
}
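The replacement arenas_lookup_ctl takes a pointer as the written value and reads back the owning arena's index. A small sketch, assuming the control is exposed under the name arenas.lookup:

#include <jemalloc/jemalloc.h>
#include <limits.h>

/* Map an allocation back to the arena that owns it. */
static unsigned arena_of(void *ptr) {
        unsigned arena_ind;
        size_t sz = sizeof(arena_ind);
        /* The pointer goes in as the "new" value; the index comes back as "old". */
        if (mallctl("arenas.lookup", &arena_ind, &sz, &ptr, sizeof(ptr)) != 0) {
                return UINT_MAX;
        }
        return arena_ind;
}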
/******************************************************************************/
static int
prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
        int ret;
        bool oldval;

        if (!config_prof)
                return (ENOENT);
        if (!config_prof) {
                return ENOENT;
        }

        if (newp != NULL) {
                if (newlen != sizeof(bool)) {
                        ret = EINVAL;
                        goto label_return;
                }
                oldval = prof_thread_active_init_set(*(bool *)newp);
        } else
                oldval = prof_thread_active_init_get();
                oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
                    *(bool *)newp);
        } else {
                oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
        }
        READ(oldval, bool);

        ret = 0;
label_return:
        return (ret);
        return ret;
}
static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
        int ret;
        bool oldval;

        if (!config_prof)
                return (ENOENT);
        if (!config_prof) {
                return ENOENT;
        }

        if (newp != NULL) {
                if (newlen != sizeof(bool)) {
                        ret = EINVAL;
                        goto label_return;
                }
                oldval = prof_active_set(*(bool *)newp);
        } else
                oldval = prof_active_get();
                oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
        } else {
                oldval = prof_active_get(tsd_tsdn(tsd));
        }
        READ(oldval, bool);

        ret = 0;
label_return:
        return (ret);
        return ret;
}
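prof_active_ctl follows the usual read/write pattern for boolean controls: an optional write installs the new value, and the previous value is read back. A sketch for the documented prof.active control (it returns ENOENT unless jemalloc was built with --enable-prof):

#include <jemalloc/jemalloc.h>
#include <stdbool.h>

/* Flip heap-profiling activation and report the previous setting. */
static bool set_prof_active(bool active) {
        bool was_active = false;
        size_t sz = sizeof(was_active);
        /* Fails with ENOENT in builds without profiling support. */
        mallctl("prof.active", &was_active, &sz, &active, sizeof(active));
        return was_active;
}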
static int
prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
        int ret;
        const char *filename = NULL;

        if (!config_prof)
                return (ENOENT);
        if (!config_prof) {
                return ENOENT;
        }

        WRITEONLY();
        WRITE(filename, const char *);

        if (prof_mdump(filename)) {
        if (prof_mdump(tsd, filename)) {
                ret = EFAULT;
                goto label_return;
        }

        ret = 0;
label_return:
        return (ret);
        return ret;
}
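prof_dump_ctl is write-only: the written value is an optional filename. A sketch for the documented prof.dump control:

#include <jemalloc/jemalloc.h>

/* Write a heap profile; filename == NULL lets jemalloc pick a name. */
static int dump_heap_profile(const char *filename) {
        /* Write-only control: oldp/oldlenp are unused. */
        return mallctl("prof.dump", NULL, NULL, &filename, sizeof(filename));
}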
static int
prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
        int ret;
        bool oldval;

        if (!config_prof)
                return (ENOENT);
        if (!config_prof) {
                return ENOENT;
        }

        if (newp != NULL) {
                if (newlen != sizeof(bool)) {
                        ret = EINVAL;
                        goto label_return;
                }
                oldval = prof_gdump_set(*(bool *)newp);
        } else
                oldval = prof_gdump_get();
                oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
        } else {
                oldval = prof_gdump_get(tsd_tsdn(tsd));
        }
        READ(oldval, bool);

        ret = 0;
label_return:
        return (ret);
        return ret;
}
static int
prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
        int ret;
        size_t lg_sample = lg_prof_sample;
        tsd_t *tsd;

        if (!config_prof)
                return (ENOENT);
        if (!config_prof) {
                return ENOENT;
        }

        WRITEONLY();
        WRITE(lg_sample, size_t);
        if (lg_sample >= (sizeof(uint64_t) << 3))
        if (lg_sample >= (sizeof(uint64_t) << 3)) {
                lg_sample = (sizeof(uint64_t) << 3) - 1;
        tsd = tsd_fetch();
        }

        prof_reset(tsd, lg_sample);

        ret = 0;
label_return:
        return (ret);
        return ret;
}
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
...
@@ -1989,135 +2635,249 @@ CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
/******************************************************************************/
CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
    ctl_stats->background_thread.num_threads, size_t)
CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
    ctl_stats->background_thread.num_runs, uint64_t)
CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
    nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
    ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
    ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_uptime,
    nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
    ctl_stats.arenas[mib[2]].astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
    ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
    ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
    ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped,
    ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated,
    ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t)
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_base,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    ctl_stats.arenas[mib[2]].allocated_small, size_t)
    arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
    ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
    arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
    ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
    arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
    ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
    arenas_i(mib[2])->astats->nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
    ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
    ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
    ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
    ctl_arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
    ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
    ctl_arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.ndalloc_large),
    uint64_t)
/*
 * Note: "nmalloc" here instead of "nrequests" in the read. This is intentional.
 */
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
    ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
    ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
    ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
    ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
    ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */
    ctl_arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large),
    uint64_t) /* Intentional. */
/* Lock profiling related APIs below. */
#define RO_MUTEX_CTL_GEN(n, l) \
CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \
l.n_lock_ops, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \
l.n_wait_times, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \
l.n_spin_acquired, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \
l.n_owner_switches, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \
nstime_ns(&l.tot_wait_time), uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \
nstime_ns(&l.max_wait_time), uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \
l.max_n_thds, uint32_t)
/* Global mutexes. */
#define OP(mtx) \
RO_MUTEX_CTL_GEN(mutexes_##mtx, \
ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
/* Per arena mutexes */
#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \
arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
MUTEX_PROF_ARENA_MUTEXES
#undef OP
/* tcache bin mutex */
RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
    arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
#undef RO_MUTEX_CTL_GEN
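RO_MUTEX_CTL_GEN stamps out one read-only leaf per counter (num_ops, num_wait, wait times, and so on) for every profiled mutex. A hedged read of one such counter, assuming the generated name stats.mutexes.ctl.num_ops and refreshing the stats snapshot via the documented epoch control first:

#include <jemalloc/jemalloc.h>
#include <inttypes.h>
#include <stdio.h>

/* Read one of the generated lock-profiling counters (ctl_mtx acquisitions). */
static void report_ctl_mutex(void) {
        uint64_t epoch = 1, num_ops = 0;
        size_t sz = sizeof(num_ops);

        /* Refresh the cached stats snapshot first. */
        mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
        if (mallctl("stats.mutexes.ctl.num_ops", &num_ops, &sz, NULL, 0) == 0) {
                printf("ctl_mtx lock ops: %" PRIu64 "\n", num_ops);
        }
}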
/* Resets all mutex stats, including global, arena and bin mutexes. */
static int
stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
        if (!config_stats) {
                return ENOENT;
        }
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
        tsdn_t *tsdn = tsd_tsdn(tsd);
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
{
#define MUTEX_PROF_RESET(mtx) \
malloc_mutex_lock(tsdn, &mtx); \
malloc_mutex_prof_data_reset(tsdn, &mtx); \
malloc_mutex_unlock(tsdn, &mtx);
        if (j > NBINS)
                return (NULL);
        return (super_stats_arenas_i_bins_j_node);
}
/* Global mutexes: ctl and prof. */
        MUTEX_PROF_RESET(ctl_mtx);
        if (have_background_thread) {
                MUTEX_PROF_RESET(background_thread_lock);
        }
        if (config_prof && opt_prof) {
                MUTEX_PROF_RESET(bt2gctx_mtx);
        }
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
static const ctl_named_node_t *
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
{
        /* Per arena mutexes. */
        unsigned n = narenas_total_get();

        if (j > nlclasses)
                return (NULL);
        return (super_stats_arenas_i_lruns_j_node);
        for (unsigned i = 0; i < n; i++) {
                arena_t *arena = arena_get(tsdn, i, false);
                if (!arena) {
                        continue;
                }
                MUTEX_PROF_RESET(arena->large_mtx);
                MUTEX_PROF_RESET(arena->extent_avail_mtx);
                MUTEX_PROF_RESET(arena->extents_dirty.mtx);
                MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
                MUTEX_PROF_RESET(arena->extents_retained.mtx);
                MUTEX_PROF_RESET(arena->decay_dirty.mtx);
                MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
                MUTEX_PROF_RESET(arena->tcache_ql_mtx);
                MUTEX_PROF_RESET(arena->base->mtx);

                for (szind_t i = 0; i < NBINS; i++) {
                        bin_t *bin = &arena->bins[i];
                        MUTEX_PROF_RESET(bin->lock);
                }
        }
#undef MUTEX_PROF_RESET
        return 0;
}
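The reset control walks every global, per-arena and per-bin mutex and zeroes its profiling data. Triggering it from application code uses the documented write-only stats.mutexes.reset control:

#include <jemalloc/jemalloc.h>

/* Zero all mutex profiling counters (global, per-arena and per-bin). */
static int reset_mutex_stats(void) {
        /* Write-only control; any write triggers the reset. */
        return mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
}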
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc,
    ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc,
    ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests,
    ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
    ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
    arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
    arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
    arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
static const ctl_named_node_t *
stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j)
{
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t j) {
        if (j > NBINS) {
                return NULL;
        }
        return super_stats_arenas_i_bins_j_node;
}
        if (j > nhclasses)
                return (NULL);
        return (super_stats_arenas_i_hchunks_j_node);
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
    ctl_arena_stats_read_u64(
    &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
    arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
static const ctl_named_node_t *
stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t j) {
        if (j > NSIZES - NBINS) {
                return NULL;
        }
        return super_stats_arenas_i_lextents_j_node;
}
static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
        const ctl_named_node_t *ret;
stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t i) {
        const ctl_named_node_t *ret;
        size_t a;

        malloc_mutex_lock(&ctl_mtx);
        if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
        malloc_mutex_lock(tsdn, &ctl_mtx);
        a = arenas_i2a_impl(i, true, true);
        if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
                ret = NULL;
                goto label_return;
        }

        ret = super_stats_arenas_i_node;
label_return:
        malloc_mutex_unlock(&ctl_mtx);
        return (ret);
        malloc_mutex_unlock(tsdn, &ctl_mtx);
        return ret;
}
deps/jemalloc/src/div.c
0 → 100644
View file @
fb1f4f4e
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/assert.h"
/*
* Suppose we have n = q * d, all integers. We know n and d, and want q = n / d.
*
* For any k, we have (here, all division is exact; not C-style rounding):
* floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where
* r = (-2^k) mod d.
*
* Expanding this out:
* ... = floor(2^k / d * n / 2^k + r / d * n / 2^k)
* = floor(n / d + (r / d) * (n / 2^k)).
*
* The fractional part of n / d is 0 (because of the assumption that d divides n
* exactly), so we have:
* ... = n / d + floor((r / d) * (n / 2^k))
*
* So that our initial expression is equal to the quantity we seek, so long as
* (r / d) * (n / 2^k) < 1.
*
* r is a remainder mod d, so r < d and r / d < 1 always. We can make
* n / 2 ^ k < 1 by setting k = 32. This gets us a value of magic that works.
*/
void
div_init(div_info_t *div_info, size_t d) {
        /* Nonsensical. */
        assert(d != 0);
        /*
         * This would make the value of magic too high to fit into a uint32_t
         * (we would want magic = 2^32 exactly). This would mess with code gen
         * on 32-bit machines.
         */
        assert(d != 1);

        uint64_t two_to_k = ((uint64_t)1 << 32);
        uint32_t magic = (uint32_t)(two_to_k / d);

        /*
         * We want magic = ceil(2^k / d), but C gives us floor. We have to
         * increment it unless the result was exact (i.e. unless d is a power of
         * two).
         */
        if (two_to_k % d != 0) {
                magic++;
        }
        div_info->magic = magic;
#ifdef JEMALLOC_DEBUG
        div_info->d = d;
#endif
}
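A worked instance of the derivation above, using the same ceil(2^32 / d) magic that div_init stores and the multiply-and-shift that its companion lookup applies (recomputed inline here so the sketch stands alone); d = 7 and n = 21 give q = 3:

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

/*
 * Worked example of the magic-number division prepared by div_init above:
 * for d = 7, magic = ceil(2^32 / 7) = 613566757, and for any exact multiple
 * n = q * d the quotient is recovered as (magic * n) >> 32.
 */
int main(void) {
        size_t d = 7;
        uint64_t two_to_k = (uint64_t)1 << 32;
        uint32_t magic = (uint32_t)(two_to_k / d);
        if (two_to_k % d != 0) {
                magic++;        /* ceil, exactly as div_init does */
        }
        assert(magic == 613566757U);

        size_t n = 21;          /* an exact multiple of d */
        size_t q = (size_t)(((uint64_t)magic * n) >> 32);
        assert(q == 3);         /* 21 / 7 */
        return 0;
}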