Unverified commit c4b4b6c0, authored by Oran Agra and committed by GitHub

Merge pull request #9623 from yoav-steinberg/upgrade_jemalloc_5.2.1

Upgraded to jemalloc 5.2.1 from 5.1.0.
Cherry-picked all relevant fixes (found by diffing our 5.1.0 against upstream 5.1.0 and identifying the relevant commits).
Details of what was done:

[cherry-picked] fd7d51c3 2021-05-03 Resolve nonsense static analysis warnings (Oran Agra)
[cherry-picked] 448c435b 2020-09-29 Fix compilation warnings in Lua and jemalloc dependencies (#7785) (YoongHM)
[skipped - already in upstream] 9216b96b 2020-09-21 Fix compilation warning in jemalloc's malloc_vsnprintf (#7789) (YoongHM)
[cherry-picked] 88d71f47 2020-05-20 fix a rare active defrag edge case bug leading to stagnation (Oran Agra)
[skipped - already in upstream] 2fec7d9c 2019-05-30 Jemalloc: Avoid blocking on background thread lock for stats.
[cherry-picked] 920158ec 2018-07-11 Active defrag fixes for 32bit builds (again) (Oran Agra)
[cherry-picked] e8099cab 2018-06-26 add defrag hint support into jemalloc 5 (Oran Agra)
[re-done] 4e729fcd 2018-05-24 Generate configure for Jemalloc. (antirez)

Additionally, I had to do this:
7727cc2 2021-10-10 Fix defrag to support sharded bins in arena (added in v5.2.1) (Yoav Steinberg)

When reviewing, please look at everything except the first commit, which just replaces the 5.1.0 sources with 5.2.1.
Also, I think we should merge this without squashing, to preserve the changes we made to jemalloc.
parents 276b460e 85737e67
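For context on the sharded-bins item above: jemalloc 5.2.1 can split each small size-class bin into several shards per arena, which is what the defrag fix in 7727cc2 has to account for and what the bin_shards tests further down exercise via the "arenas.bin.<i>.nshards" mallctl. A minimal sketch, assuming an unprefixed jemalloc build and the standard <jemalloc/jemalloc.h> header, of dumping the per-bin shard count:

#include <stdio.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	unsigned nbins;
	size_t len = sizeof(nbins);
	/* Number of small size-class bins. */
	if (mallctl("arenas.nbins", &nbins, &len, NULL, 0) != 0) {
		return 1;
	}
	/* Translate the name once, then index bin i via mib[2]. */
	size_t mib[4];
	size_t miblen = 4;
	if (mallctlnametomib("arenas.bin.0.nshards", mib, &miblen) != 0) {
		return 1;
	}
	for (unsigned i = 0; i < nbins; i++) {
		uint32_t nshards;
		size_t sz = sizeof(nshards);
		mib[2] = i;
		if (mallctlbymib(mib, miblen, &nshards, &sz, NULL, 0) == 0) {
			printf("bin %u: %u shard(s)\n", i, (unsigned)nshards);
		}
	}
	return 0;
}

Under a bin_shards setting such as the one used by the tests below, bins in the listed size ranges report more than one shard.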
#include "test/jemalloc_test.h"
/* Note that this test relies on the unusual slab sizes set in slab_sizes.sh. */
TEST_BEGIN(test_slab_sizes) {
unsigned nbins;
size_t page;
size_t sizemib[4];
size_t slabmib[4];
size_t len;
len = sizeof(nbins);
assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0,
"nbins mallctl failure");
len = sizeof(page);
assert_d_eq(mallctl("arenas.page", &page, &len, NULL, 0), 0,
"page mallctl failure");
len = 4;
assert_d_eq(mallctlnametomib("arenas.bin.0.size", sizemib, &len), 0,
"bin size mallctlnametomib failure");
len = 4;
assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", slabmib, &len),
0, "slab size mallctlnametomib failure");
size_t biggest_slab_seen = 0;
for (unsigned i = 0; i < nbins; i++) {
size_t bin_size;
size_t slab_size;
len = sizeof(size_t);
sizemib[2] = i;
slabmib[2] = i;
assert_d_eq(mallctlbymib(sizemib, 4, (void *)&bin_size, &len,
NULL, 0), 0, "bin size mallctlbymib failure");
len = sizeof(size_t);
assert_d_eq(mallctlbymib(slabmib, 4, (void *)&slab_size, &len,
NULL, 0), 0, "slab size mallctlbymib failure");
if (bin_size < 100) {
/*
* Then we should be as close to 17 as possible. Since
* not all page sizes are valid (because of bitmap
* limitations on the number of items in a slab), we
* should at least make sure that the number of pages
* goes up.
*/
assert_zu_ge(slab_size, biggest_slab_seen,
"Slab sizes should go up");
biggest_slab_seen = slab_size;
} else if (
(100 <= bin_size && bin_size < 128)
|| (128 < bin_size && bin_size <= 200)) {
assert_zu_eq(slab_size, page,
"Forced-small slabs should be small");
} else if (bin_size == 128) {
assert_zu_eq(slab_size, 2 * page,
"Forced-2-page slab should be 2 pages");
} else if (200 < bin_size && bin_size <= 4096) {
assert_zu_ge(slab_size, biggest_slab_seen,
"Slab sizes should go up");
biggest_slab_seen = slab_size;
}
}
/*
* For any reasonable configuration, 17 pages should be a valid slab
* size for 4096-byte items.
*/
assert_zu_eq(biggest_slab_seen, 17 * page, "Didn't hit page target");
}
TEST_END
int
main(void) {
return test(
test_slab_sizes);
}
#!/bin/sh
# Some screwy-looking slab sizes.
export MALLOC_CONF="slab_sizes:1-4096:17|100-200:1|128-128:2"
#include "test/jemalloc_test.h"
#include "jemalloc/jemalloc_macros.h"
#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)
#ifndef JEMALLOC_VERSION_GID_IDENT
#error "JEMALLOC_VERSION_GID_IDENT not defined"
#endif
#define JOIN(x, y) x ## y
#define JOIN2(x, y) JOIN(x, y)
#define smallocx JOIN2(smallocx_, JEMALLOC_VERSION_GID_IDENT)
typedef struct {
void *ptr;
size_t size;
} smallocx_return_t;
extern smallocx_return_t
smallocx(size_t size, int flags);
static unsigned
get_nsizes_impl(const char *cmd) {
unsigned ret;
size_t z;
z = sizeof(unsigned);
assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return ret;
}
static unsigned
get_nlarge(void) {
return get_nsizes_impl("arenas.nlextents");
}
static size_t
get_size_impl(const char *cmd, size_t ind) {
size_t ret;
size_t z;
size_t mib[4];
size_t miblen = 4;
z = sizeof(size_t);
assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = ind;
z = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return ret;
}
static size_t
get_large_size(size_t ind) {
return get_size_impl("arenas.lextent.0.size", ind);
}
/*
* On systems which can't merge extents, tests that call this function generate
* a lot of dirty memory very quickly. Purging between cycles mitigates
* potential OOM on e.g. 32-bit Windows.
*/
static void
purge(void) {
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl error");
}
/*
* GCC "-Walloc-size-larger-than" warning detects when one of the memory
* allocation functions is called with a size larger than the maximum size that
* they support. Here we want to explicitly test that the allocation functions
* do indeed fail properly when this is the case, which triggers the warning.
* Therefore we disable the warning for these tests.
*/
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
TEST_BEGIN(test_overflow) {
size_t largemax;
largemax = get_large_size(get_nlarge()-1);
assert_ptr_null(smallocx(largemax+1, 0).ptr,
"Expected OOM for smallocx(size=%#zx, 0)", largemax+1);
assert_ptr_null(smallocx(ZU(PTRDIFF_MAX)+1, 0).ptr,
"Expected OOM for smallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
assert_ptr_null(smallocx(SIZE_T_MAX, 0).ptr,
"Expected OOM for smallocx(size=%#zx, 0)", SIZE_T_MAX);
assert_ptr_null(smallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)).ptr,
"Expected OOM for smallocx(size=1, MALLOCX_ALIGN(%#zx))",
ZU(PTRDIFF_MAX)+1);
}
TEST_END
static void *
remote_alloc(void *arg) {
unsigned arena;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
size_t large_sz;
sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
NULL, 0), 0, "Unexpected mallctl failure");
smallocx_return_t r
= smallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
void *ptr = r.ptr;
assert_zu_eq(r.size,
nallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE),
"Expected smalloc(size,flags).size == nallocx(size,flags)");
void **ret = (void **)arg;
*ret = ptr;
return NULL;
}
TEST_BEGIN(test_remote_free) {
thd_t thd;
void *ret;
thd_create(&thd, remote_alloc, (void *)&ret);
thd_join(thd, NULL);
assert_ptr_not_null(ret, "Unexpected smallocx failure");
/* Avoid TCACHE_NONE to explicitly test tcache_flush(). */
dallocx(ret, 0);
mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
}
TEST_END
TEST_BEGIN(test_oom) {
size_t largemax;
bool oom;
void *ptrs[3];
unsigned i;
/*
* It should be impossible to allocate three objects that each consume
* nearly half the virtual address space.
*/
largemax = get_large_size(get_nlarge()-1);
oom = false;
for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
ptrs[i] = smallocx(largemax, 0).ptr;
if (ptrs[i] == NULL) {
oom = true;
}
}
assert_true(oom,
"Expected OOM during series of calls to smallocx(size=%zu, 0)",
largemax);
for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
if (ptrs[i] != NULL) {
dallocx(ptrs[i], 0);
}
}
purge();
#if LG_SIZEOF_PTR == 3
assert_ptr_null(smallocx(0x8000000000000000ULL,
MALLOCX_ALIGN(0x8000000000000000ULL)).ptr,
"Expected OOM for smallocx()");
assert_ptr_null(smallocx(0x8000000000000000ULL,
MALLOCX_ALIGN(0x80000000)).ptr,
"Expected OOM for smallocx()");
#else
assert_ptr_null(smallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)).ptr,
"Expected OOM for smallocx()");
#endif
}
TEST_END
/* Re-enable the "-Walloc-size-larger-than=" warning */
JEMALLOC_DIAGNOSTIC_POP
TEST_BEGIN(test_basic) {
#define MAXSZ (((size_t)1) << 23)
size_t sz;
for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
smallocx_return_t ret;
size_t nsz, rsz, smz;
void *p;
nsz = nallocx(sz, 0);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
ret = smallocx(sz, 0);
p = ret.ptr;
smz = ret.size;
assert_ptr_not_null(p,
"Unexpected smallocx(size=%zx, flags=0) error", sz);
rsz = sallocx(p, 0);
assert_zu_ge(rsz, sz, "Real size smaller than expected");
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
assert_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
dallocx(p, 0);
ret = smallocx(sz, 0);
p = ret.ptr;
smz = ret.size;
assert_ptr_not_null(p,
"Unexpected smallocx(size=%zx, flags=0) error", sz);
dallocx(p, 0);
nsz = nallocx(sz, MALLOCX_ZERO);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
assert_zu_ne(smz, 0, "Unexpected smallocx() error");
ret = smallocx(sz, MALLOCX_ZERO);
p = ret.ptr;
assert_ptr_not_null(p,
"Unexpected smallocx(size=%zx, flags=MALLOCX_ZERO) error",
nsz);
rsz = sallocx(p, 0);
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
assert_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
dallocx(p, 0);
purge();
}
#undef MAXSZ
}
TEST_END
TEST_BEGIN(test_alignment_and_size) {
const char *percpu_arena;
size_t sz = sizeof(percpu_arena);
if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) ||
strcmp(percpu_arena, "disabled") != 0) {
test_skip("test_alignment_and_size skipped: "
"not working with percpu arena.");
};
#define MAXALIGN (((size_t)1) << 23)
#define NITER 4
size_t nsz, rsz, smz, alignment, total;
unsigned i;
void *ps[NITER];
for (i = 0; i < NITER; i++) {
ps[i] = NULL;
}
for (alignment = 8;
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
for (sz = 1;
sz < 3 * alignment && sz < (1U << 31);
sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
for (i = 0; i < NITER; i++) {
nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
MALLOCX_ZERO);
assert_zu_ne(nsz, 0,
"nallocx() error for alignment=%zu, "
"size=%zu (%#zx)", alignment, sz, sz);
smallocx_return_t ret
= smallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO);
ps[i] = ret.ptr;
assert_ptr_not_null(ps[i],
"smallocx() error for alignment=%zu, "
"size=%zu (%#zx)", alignment, sz, sz);
rsz = sallocx(ps[i], 0);
smz = ret.size;
assert_zu_ge(rsz, sz,
"Real size smaller than expected for "
"alignment=%zu, size=%zu", alignment, sz);
assert_zu_eq(nsz, rsz,
"nallocx()/sallocx() size mismatch for "
"alignment=%zu, size=%zu", alignment, sz);
assert_zu_eq(nsz, smz,
"nallocx()/smallocx() size mismatch for "
"alignment=%zu, size=%zu", alignment, sz);
assert_ptr_null(
(void *)((uintptr_t)ps[i] & (alignment-1)),
"%p inadequately aligned for"
" alignment=%zu, size=%zu", ps[i],
alignment, sz);
total += rsz;
if (total >= (MAXALIGN << 1)) {
break;
}
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
dallocx(ps[i], 0);
ps[i] = NULL;
}
}
}
purge();
}
#undef MAXALIGN
#undef NITER
}
TEST_END
int
main(void) {
return test(
test_overflow,
test_oom,
test_remote_free,
test_basic,
test_alignment_and_size);
}
#!/bin/sh
if [ "x${enable_fill}" = "x1" ] ; then
export MALLOC_CONF="junk:false"
fi
@@ -13,8 +13,6 @@ mtx_init(mtx_t *mtx) {
}
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
mtx->lock = OS_UNFAIR_LOCK_INIT;
- #elif (defined(JEMALLOC_OSSPIN))
- mtx->lock = 0;
#else
pthread_mutexattr_t attr;
@@ -35,7 +33,6 @@ void
mtx_fini(mtx_t *mtx) {
#ifdef _WIN32
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- #elif (defined(JEMALLOC_OSSPIN))
#else
pthread_mutex_destroy(&mtx->lock);
#endif
@@ -47,8 +44,6 @@ mtx_lock(mtx_t *mtx) {
EnterCriticalSection(&mtx->lock);
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock_lock(&mtx->lock);
- #elif (defined(JEMALLOC_OSSPIN))
- OSSpinLockLock(&mtx->lock);
#else
pthread_mutex_lock(&mtx->lock);
#endif
@@ -60,8 +55,6 @@ mtx_unlock(mtx_t *mtx) {
LeaveCriticalSection(&mtx->lock);
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock_unlock(&mtx->lock);
- #elif (defined(JEMALLOC_OSSPIN))
- OSSpinLockUnlock(&mtx->lock);
#else
pthread_mutex_unlock(&mtx->lock);
#endif
@@ -48,12 +48,12 @@ do_hook(bool *hook_ran, void (**hook)()) {
static void
libc_reentrancy_hook() {
- do_hook(&libc_hook_ran, &hooks_libc_hook);
+ do_hook(&libc_hook_ran, &test_hooks_libc_hook);
}
static void
arena_new_reentrancy_hook() {
- do_hook(&arena_new_hook_ran, &hooks_arena_new_hook);
+ do_hook(&arena_new_hook_ran, &test_hooks_arena_new_hook);
}
/* Actual test infrastructure. */
@@ -110,6 +110,20 @@ p_test_fini(void) {
test_status_string(test_status));
}
static void
check_global_slow(test_status_t *status) {
#ifdef JEMALLOC_UNIT_TEST
/*
* This check needs to peek into tsd internals, which is why it's only
* exposed in unit tests.
*/
if (tsd_global_slow()) {
malloc_printf("Testing increased global slow count\n");
*status = test_status_fail;
}
#endif
}
static test_status_t
p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) {
test_status_t ret;
@@ -131,28 +145,31 @@ p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) {
for (; t != NULL; t = va_arg(ap, test_t *)) {
/* Non-reentrant run. */
reentrancy = non_reentrant;
- hooks_arena_new_hook = hooks_libc_hook = NULL;
+ test_hooks_arena_new_hook = test_hooks_libc_hook = NULL;
t();
if (test_status > ret) {
ret = test_status;
}
check_global_slow(&ret);
/* Reentrant run. */
if (do_reentrant) {
reentrancy = libc_reentrant;
- hooks_arena_new_hook = NULL;
- hooks_libc_hook = &libc_reentrancy_hook;
+ test_hooks_arena_new_hook = NULL;
+ test_hooks_libc_hook = &libc_reentrancy_hook;
t();
if (test_status > ret) {
ret = test_status;
}
check_global_slow(&ret);
reentrancy = arena_new_reentrant;
- hooks_libc_hook = NULL;
- hooks_arena_new_hook = &arena_new_reentrancy_hook;
+ test_hooks_libc_hook = NULL;
+ test_hooks_arena_new_hook = &arena_new_reentrancy_hook;
t();
if (test_status > ret) {
ret = test_status;
}
check_global_slow(&ret);
}
}
#include "test/jemalloc_test.h"
static void
noop_alloc_hook(void *extra, hook_alloc_t type, void *result,
uintptr_t result_raw, uintptr_t args_raw[3]) {
}
static void
noop_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
uintptr_t args_raw[3]) {
}
static void
noop_expand_hook(void *extra, hook_expand_t type, void *address,
size_t old_usize, size_t new_usize, uintptr_t result_raw,
uintptr_t args_raw[4]) {
}
static void
malloc_free_loop(int iters) {
for (int i = 0; i < iters; i++) {
void *p = mallocx(1, 0);
free(p);
}
}
static void
test_hooked(int iters) {
hooks_t hooks = {&noop_alloc_hook, &noop_dalloc_hook, &noop_expand_hook,
NULL};
int err;
void *handles[HOOK_MAX];
size_t sz = sizeof(handles[0]);
for (int i = 0; i < HOOK_MAX; i++) {
err = mallctl("experimental.hooks.install", &handles[i],
&sz, &hooks, sizeof(hooks));
assert(err == 0);
timedelta_t timer;
timer_start(&timer);
malloc_free_loop(iters);
timer_stop(&timer);
malloc_printf("With %d hook%s: %"FMTu64"us\n", i + 1,
i + 1 == 1 ? "" : "s", timer_usec(&timer));
}
for (int i = 0; i < HOOK_MAX; i++) {
err = mallctl("experimental.hooks.remove", NULL, NULL,
&handles[i], sizeof(handles[i]));
assert(err == 0);
}
}
static void
test_unhooked(int iters) {
timedelta_t timer;
timer_start(&timer);
malloc_free_loop(iters);
timer_stop(&timer);
malloc_printf("Without hooks: %"FMTu64"us\n", timer_usec(&timer));
}
int
main(void) {
/* Initialize */
free(mallocx(1, 0));
int iters = 10 * 1000 * 1000;
malloc_printf("Benchmarking hooks with %d iterations:\n", iters);
test_hooked(iters);
test_unhooked(iters);
}
@@ -77,7 +77,7 @@ vsalloc(tsdn_t *tsdn, const void *ptr) {
return 0;
}
- if (szind == NSIZES) {
+ if (szind == SC_NSIZES) {
return 0;
}
@@ -142,7 +142,7 @@ do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) {
if (have_background_thread) {
malloc_mutex_lock(tsdn,
- &background_thread_info[arena_ind % ncpus].mtx);
+ &background_thread_info_get(arena_ind)->mtx);
}
/* Verify allocations no longer exist. */
for (i = 0; i < nptrs; i++) {
@@ -151,7 +151,7 @@
}
if (have_background_thread) {
malloc_mutex_unlock(tsdn,
- &background_thread_info[arena_ind % ncpus].mtx);
+ &background_thread_info_get(arena_ind)->mtx);
}
free(ptrs);
@@ -279,8 +279,11 @@ extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
if (!try_dalloc) {
return true;
}
- pages_unmap(addr, size);
did_dalloc = true;
+ if (!maps_coalesce && opt_retain) {
+ return true;
+ }
+ pages_unmap(addr, size);
return false;
}
@@ -304,7 +307,9 @@ TEST_BEGIN(test_arena_destroy_hooks_unmap) {
unsigned nptrs;
extent_hooks_prep();
if (maps_coalesce) {
try_decommit = false;
}
memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t));
@@ -33,20 +33,19 @@ TEST_END
TEST_BEGIN(test_max_background_threads) {
test_skip_if(!have_background_thread);
- size_t maxt;
- size_t opt_maxt;
- size_t sz_m = sizeof(maxt);
+ size_t max_n_thds;
+ size_t opt_max_n_thds;
+ size_t sz_m = sizeof(max_n_thds);
assert_d_eq(mallctl("opt.max_background_threads",
- &opt_maxt, &sz_m, NULL, 0), 0,
+ &opt_max_n_thds, &sz_m, NULL, 0), 0,
"Failed to get opt.max_background_threads");
- assert_d_eq(mallctl("max_background_threads", &maxt, &sz_m, NULL, 0), 0,
- "Failed to get max background threads");
- assert_zu_eq(20, maxt, "should be ncpus");
- assert_zu_eq(opt_maxt, maxt,
+ assert_d_eq(mallctl("max_background_threads", &max_n_thds, &sz_m, NULL,
+ 0), 0, "Failed to get max background threads");
+ assert_zu_eq(opt_max_n_thds, max_n_thds,
"max_background_threads and "
"opt.max_background_threads should match");
- assert_d_eq(mallctl("max_background_threads", NULL, NULL, &maxt, sz_m),
- 0, "Failed to set max background threads");
+ assert_d_eq(mallctl("max_background_threads", NULL, NULL, &max_n_thds,
+ sz_m), 0, "Failed to set max background threads");
unsigned id;
size_t sz_u = sizeof(unsigned);
@@ -60,18 +59,21 @@ TEST_BEGIN(test_max_background_threads) {
size_t sz_b = sizeof(bool);
assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
"Failed to enable background threads");
- assert_zu_eq(n_background_threads, maxt,
- "Number of background threads should be 3.\n");
- maxt = 10;
- assert_d_eq(mallctl("max_background_threads", NULL, NULL, &maxt, sz_m),
- 0, "Failed to set max background threads");
- assert_zu_eq(n_background_threads, maxt,
- "Number of background threads should be 10.\n");
- maxt = 3;
- assert_d_eq(mallctl("max_background_threads", NULL, NULL, &maxt, sz_m),
- 0, "Failed to set max background threads");
- assert_zu_eq(n_background_threads, maxt,
- "Number of background threads should be 3.\n");
+ assert_zu_eq(n_background_threads, max_n_thds,
+ "Number of background threads should not change.\n");
+ size_t new_max_thds = max_n_thds - 1;
+ if (new_max_thds > 0) {
+ assert_d_eq(mallctl("max_background_threads", NULL, NULL,
+ &new_max_thds, sz_m), 0,
+ "Failed to set max background threads");
+ assert_zu_eq(n_background_threads, new_max_thds,
+ "Number of background threads should decrease by 1.\n");
+ }
+ new_max_thds = 1;
+ assert_d_eq(mallctl("max_background_threads", NULL, NULL, &new_max_thds,
+ sz_m), 0, "Failed to set max background threads");
+ assert_zu_eq(n_background_threads, new_max_thds,
+ "Number of background threads should be 1.\n");
}
TEST_END
#include "test/jemalloc_test.h"
/* Config -- "narenas:1,bin_shards:1-160:16|129-512:4|256-256:8" */
#define NTHREADS 16
#define REMOTE_NALLOC 256
static void *
thd_producer(void *varg) {
void **mem = varg;
unsigned arena, i;
size_t sz;
sz = sizeof(arena);
/* Remote arena. */
assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
for (i = 0; i < REMOTE_NALLOC / 2; i++) {
mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena));
}
/* Remote bin. */
for (; i < REMOTE_NALLOC; i++) {
mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(0));
}
return NULL;
}
TEST_BEGIN(test_producer_consumer) {
thd_t thds[NTHREADS];
void *mem[NTHREADS][REMOTE_NALLOC];
unsigned i;
/* Create producer threads to allocate. */
for (i = 0; i < NTHREADS; i++) {
thd_create(&thds[i], thd_producer, mem[i]);
}
for (i = 0; i < NTHREADS; i++) {
thd_join(thds[i], NULL);
}
/* Remote deallocation by the current thread. */
for (i = 0; i < NTHREADS; i++) {
for (unsigned j = 0; j < REMOTE_NALLOC; j++) {
assert_ptr_not_null(mem[i][j],
"Unexpected remote allocation failure");
dallocx(mem[i][j], 0);
}
}
}
TEST_END
static void *
thd_start(void *varg) {
void *ptr, *ptr2;
extent_t *extent;
unsigned shard1, shard2;
tsdn_t *tsdn = tsdn_fetch();
/* Try triggering allocations from sharded bins. */
for (unsigned i = 0; i < 1024; i++) {
ptr = mallocx(1, MALLOCX_TCACHE_NONE);
ptr2 = mallocx(129, MALLOCX_TCACHE_NONE);
extent = iealloc(tsdn, ptr);
shard1 = extent_binshard_get(extent);
dallocx(ptr, 0);
assert_u_lt(shard1, 16, "Unexpected bin shard used");
extent = iealloc(tsdn, ptr2);
shard2 = extent_binshard_get(extent);
dallocx(ptr2, 0);
assert_u_lt(shard2, 4, "Unexpected bin shard used");
if (shard1 > 0 || shard2 > 0) {
/* Triggered sharded bin usage. */
return (void *)(uintptr_t)shard1;
}
}
return NULL;
}
TEST_BEGIN(test_bin_shard_mt) {
test_skip_if(have_percpu_arena &&
PERCPU_ARENA_ENABLED(opt_percpu_arena));
thd_t thds[NTHREADS];
unsigned i;
for (i = 0; i < NTHREADS; i++) {
thd_create(&thds[i], thd_start, NULL);
}
bool sharded = false;
for (i = 0; i < NTHREADS; i++) {
void *ret;
thd_join(thds[i], &ret);
if (ret != NULL) {
sharded = true;
}
}
assert_b_eq(sharded, true, "Did not find sharded bins");
}
TEST_END
TEST_BEGIN(test_bin_shard) {
unsigned nbins, i;
size_t mib[4], mib2[4];
size_t miblen, miblen2, len;
len = sizeof(nbins);
assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
"Unexpected mallctl() failure");
miblen = 4;
assert_d_eq(mallctlnametomib("arenas.bin.0.nshards", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
miblen2 = 4;
assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib2, &miblen2), 0,
"Unexpected mallctlnametomib() failure");
for (i = 0; i < nbins; i++) {
uint32_t nshards;
size_t size, sz1, sz2;
mib[2] = i;
sz1 = sizeof(nshards);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&nshards, &sz1,
NULL, 0), 0, "Unexpected mallctlbymib() failure");
mib2[2] = i;
sz2 = sizeof(size);
assert_d_eq(mallctlbymib(mib2, miblen2, (void *)&size, &sz2,
NULL, 0), 0, "Unexpected mallctlbymib() failure");
if (size >= 1 && size <= 128) {
assert_u_eq(nshards, 16, "Unexpected nshards");
} else if (size == 256) {
assert_u_eq(nshards, 8, "Unexpected nshards");
} else if (size > 128 && size <= 512) {
assert_u_eq(nshards, 4, "Unexpected nshards");
} else {
assert_u_eq(nshards, 1, "Unexpected nshards");
}
}
}
TEST_END
int
main(void) {
return test_no_reentrancy(
test_bin_shard,
test_bin_shard_mt,
test_producer_consumer);
}
#!/bin/sh
export MALLOC_CONF="narenas:1,bin_shards:1-160:16|129-512:4|256-256:8"
@@ -48,10 +48,64 @@ TEST_BEGIN(test_pow2_ceil_zu) {
}
TEST_END
void
assert_lg_ceil_range(size_t input, unsigned answer) {
if (input == 1) {
assert_u_eq(0, answer, "Got %u as lg_ceil of 1", answer);
return;
}
assert_zu_le(input, (ZU(1) << answer),
"Got %u as lg_ceil of %zu", answer, input);
assert_zu_gt(input, (ZU(1) << (answer - 1)),
"Got %u as lg_ceil of %zu", answer, input);
}
void
assert_lg_floor_range(size_t input, unsigned answer) {
if (input == 1) {
assert_u_eq(0, answer, "Got %u as lg_floor of 1", answer);
return;
}
assert_zu_ge(input, (ZU(1) << answer),
"Got %u as lg_floor of %zu", answer, input);
assert_zu_lt(input, (ZU(1) << (answer + 1)),
"Got %u as lg_floor of %zu", answer, input);
}
TEST_BEGIN(test_lg_ceil_floor) {
for (size_t i = 1; i < 10 * 1000 * 1000; i++) {
assert_lg_ceil_range(i, lg_ceil(i));
assert_lg_ceil_range(i, LG_CEIL(i));
assert_lg_floor_range(i, lg_floor(i));
assert_lg_floor_range(i, LG_FLOOR(i));
}
for (int i = 10; i < 8 * (1 << LG_SIZEOF_PTR) - 5; i++) {
for (size_t j = 0; j < (1 << 4); j++) {
size_t num1 = ((size_t)1 << i)
- j * ((size_t)1 << (i - 4));
size_t num2 = ((size_t)1 << i)
+ j * ((size_t)1 << (i - 4));
assert_zu_ne(num1, 0, "Invalid lg argument");
assert_zu_ne(num2, 0, "Invalid lg argument");
assert_lg_ceil_range(num1, lg_ceil(num1));
assert_lg_ceil_range(num1, LG_CEIL(num1));
assert_lg_ceil_range(num2, lg_ceil(num2));
assert_lg_ceil_range(num2, LG_CEIL(num2));
assert_lg_floor_range(num1, lg_floor(num1));
assert_lg_floor_range(num1, LG_FLOOR(num1));
assert_lg_floor_range(num2, lg_floor(num2));
assert_lg_floor_range(num2, LG_FLOOR(num2));
}
}
}
TEST_END
int
main(void) {
return test(
test_pow2_ceil_u64,
test_pow2_ceil_u32,
test_pow2_ceil_zu);
test_pow2_ceil_zu,
test_lg_ceil_floor);
}
@@ -121,6 +121,12 @@ get_arena_dirty_npurge(unsigned arena_ind) {
return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind);
}
static uint64_t
get_arena_dirty_purged(unsigned arena_ind) {
do_epoch();
return get_arena_npurge_impl("stats.arenas.0.dirty_purged", arena_ind);
}
static uint64_t
get_arena_muzzy_npurge(unsigned arena_ind) {
do_epoch();
@@ -559,7 +565,7 @@ TEST_BEGIN(test_decay_now) {
TEST_END
TEST_BEGIN(test_decay_never) {
- test_skip_if(check_background_thread_enabled());
+ test_skip_if(check_background_thread_enabled() || !config_stats);
unsigned arena_ind = do_arena_create(-1, -1);
int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
@@ -579,8 +585,8 @@ TEST_BEGIN(test_decay_never) {
dallocx(ptrs[i], flags);
size_t pdirty = get_arena_pdirty(arena_ind);
size_t pmuzzy = get_arena_pmuzzy(arena_ind);
- assert_zu_gt(pdirty, pdirty_prev,
- "Expected dirty pages to increase.");
+ assert_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind),
+ pdirty_prev, "Expected dirty pages to increase.");
assert_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
pdirty_prev = pdirty;
}
@@ -169,7 +169,7 @@ static void emit_nested_dict(emitter_t *emitter) {
emitter_end(emitter);
}
- static const char *nested_dict_json =
+ static const char *nested_object_json =
"{\n"
"\t\"json1\": {\n"
"\t\t\"json2\": {\n"
@@ -183,7 +183,7 @@ static const char *nested_dict_json =
"\t}\n"
"}\n";
- static const char *nested_dict_table =
+ static const char *nested_object_table =
"Dict 1\n"
" Dict 2\n"
" A primitive: 123\n"
@@ -192,8 +192,8 @@ static const char *nested_dict_table =
" Another primitive: 123\n";
TEST_BEGIN(test_nested_dict) {
- assert_emit_output(&emit_nested_dict, nested_dict_json,
- nested_dict_table);
+ assert_emit_output(&emit_nested_dict, nested_object_json,
+ nested_object_table);
}
TEST_END
@@ -256,13 +256,14 @@ emit_modal(emitter_t *emitter) {
int val = 123;
emitter_begin(emitter);
emitter_dict_begin(emitter, "j0", "T0");
emitter_json_dict_begin(emitter, "j1");
emitter_json_key(emitter, "j1");
emitter_json_object_begin(emitter);
emitter_kv(emitter, "i1", "I1", emitter_type_int, &val);
emitter_json_kv(emitter, "i2", emitter_type_int, &val);
emitter_table_kv(emitter, "I3", emitter_type_int, &val);
emitter_table_dict_begin(emitter, "T1");
emitter_kv(emitter, "i4", "I4", emitter_type_int, &val);
- emitter_json_dict_end(emitter); /* Close j1 */
+ emitter_json_object_end(emitter); /* Close j1 */
emitter_kv(emitter, "i5", "I5", emitter_type_int, &val);
emitter_table_dict_end(emitter); /* Close T1 */
emitter_kv(emitter, "i6", "I6", emitter_type_int, &val);
@@ -302,24 +303,26 @@ emit_json_arr(emitter_t *emitter) {
int ival = 123;
emitter_begin(emitter);
emitter_json_dict_begin(emitter, "dict");
emitter_json_arr_begin(emitter, "arr");
emitter_json_arr_obj_begin(emitter);
emitter_json_key(emitter, "dict");
emitter_json_object_begin(emitter);
emitter_json_key(emitter, "arr");
emitter_json_array_begin(emitter);
emitter_json_object_begin(emitter);
emitter_json_kv(emitter, "foo", emitter_type_int, &ival);
- emitter_json_arr_obj_end(emitter); /* Close arr[0] */
+ emitter_json_object_end(emitter); /* Close arr[0] */
/* arr[1] and arr[2] are primitives. */
- emitter_json_arr_value(emitter, emitter_type_int, &ival);
- emitter_json_arr_value(emitter, emitter_type_int, &ival);
- emitter_json_arr_obj_begin(emitter);
+ emitter_json_value(emitter, emitter_type_int, &ival);
+ emitter_json_value(emitter, emitter_type_int, &ival);
+ emitter_json_object_begin(emitter);
emitter_json_kv(emitter, "bar", emitter_type_int, &ival);
emitter_json_kv(emitter, "baz", emitter_type_int, &ival);
- emitter_json_arr_obj_end(emitter); /* Close arr[3]. */
- emitter_json_arr_end(emitter); /* Close arr. */
- emitter_json_dict_end(emitter); /* Close dict. */
+ emitter_json_object_end(emitter); /* Close arr[3]. */
+ emitter_json_array_end(emitter); /* Close arr. */
+ emitter_json_object_end(emitter); /* Close dict. */
emitter_end(emitter);
}
- static const char *json_arr_json =
+ static const char *json_array_json =
"{\n"
"\t\"dict\": {\n"
"\t\t\"arr\": [\n"
@@ -336,10 +339,62 @@ static const char *json_arr_json =
"\t}\n"
"}\n";
- static const char *json_arr_table = "";
+ static const char *json_array_table = "";
TEST_BEGIN(test_json_arr) {
- assert_emit_output(&emit_json_arr, json_arr_json, json_arr_table);
+ assert_emit_output(&emit_json_arr, json_array_json, json_array_table);
}
TEST_END
static void
emit_json_nested_array(emitter_t *emitter) {
int ival = 123;
char *sval = "foo";
emitter_begin(emitter);
emitter_json_array_begin(emitter);
emitter_json_array_begin(emitter);
emitter_json_value(emitter, emitter_type_int, &ival);
emitter_json_value(emitter, emitter_type_string, &sval);
emitter_json_value(emitter, emitter_type_int, &ival);
emitter_json_value(emitter, emitter_type_string, &sval);
emitter_json_array_end(emitter);
emitter_json_array_begin(emitter);
emitter_json_value(emitter, emitter_type_int, &ival);
emitter_json_array_end(emitter);
emitter_json_array_begin(emitter);
emitter_json_value(emitter, emitter_type_string, &sval);
emitter_json_value(emitter, emitter_type_int, &ival);
emitter_json_array_end(emitter);
emitter_json_array_begin(emitter);
emitter_json_array_end(emitter);
emitter_json_array_end(emitter);
emitter_end(emitter);
}
static const char *json_nested_array_json =
"{\n"
"\t[\n"
"\t\t[\n"
"\t\t\t123,\n"
"\t\t\t\"foo\",\n"
"\t\t\t123,\n"
"\t\t\t\"foo\"\n"
"\t\t],\n"
"\t\t[\n"
"\t\t\t123\n"
"\t\t],\n"
"\t\t[\n"
"\t\t\t\"foo\",\n"
"\t\t\t123\n"
"\t\t],\n"
"\t\t[\n"
"\t\t]\n"
"\t]\n"
"}\n";
TEST_BEGIN(test_json_nested_arr) {
assert_emit_output(&emit_json_nested_array, json_nested_array_json,
json_array_table);
}
TEST_END
@@ -347,11 +402,11 @@ static void
emit_table_row(emitter_t *emitter) {
emitter_begin(emitter);
emitter_row_t row;
- emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title};
+ emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title, {0}, {0, 0}};
abc.str_val = "ABC title";
- emitter_col_t def = {emitter_justify_right, 15, emitter_type_title};
+ emitter_col_t def = {emitter_justify_right, 15, emitter_type_title, {0}, {0, 0}};
def.str_val = "DEF title";
- emitter_col_t ghi = {emitter_justify_right, 5, emitter_type_title};
+ emitter_col_t ghi = {emitter_justify_right, 5, emitter_type_title, {0}, {0, 0}};
ghi.str_val = "GHI";
emitter_row_init(&row);
@@ -409,5 +464,6 @@ main(void) {
test_types,
test_modal,
test_json_arr,
test_json_nested_arr,
test_table_row);
}
#include "test/jemalloc_test.h"
#define TEST_UTIL_EINVAL(node, a, b, c, d, why_inval) do { \
assert_d_eq(mallctl("experimental.utilization." node, \
a, b, c, d), EINVAL, "Should fail when " why_inval); \
assert_zu_eq(out_sz, out_sz_ref, \
"Output size touched when given invalid arguments"); \
assert_d_eq(memcmp(out, out_ref, out_sz_ref), 0, \
"Output content touched when given invalid arguments"); \
} while (0)
#define TEST_UTIL_QUERY_EINVAL(a, b, c, d, why_inval) \
TEST_UTIL_EINVAL("query", a, b, c, d, why_inval)
#define TEST_UTIL_BATCH_EINVAL(a, b, c, d, why_inval) \
TEST_UTIL_EINVAL("batch_query", a, b, c, d, why_inval)
#define TEST_UTIL_VALID(node) do { \
assert_d_eq(mallctl("experimental.utilization." node, \
out, &out_sz, in, in_sz), 0, \
"Should return 0 on correct arguments"); \
assert_zu_eq(out_sz, out_sz_ref, "incorrect output size"); \
assert_d_ne(memcmp(out, out_ref, out_sz_ref), 0, \
"Output content should be changed"); \
} while (0)
#define TEST_UTIL_BATCH_VALID TEST_UTIL_VALID("batch_query")
#define TEST_MAX_SIZE (1 << 20)
TEST_BEGIN(test_query) {
size_t sz;
/*
* Select some sizes that can span both small and large sizes, and are
* numerically unrelated to any size boundaries.
*/
for (sz = 7; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS;
sz += (sz <= SC_SMALL_MAXCLASS ? 1009 : 99989)) {
void *p = mallocx(sz, 0);
void **in = &p;
size_t in_sz = sizeof(const void *);
size_t out_sz = sizeof(void *) + sizeof(size_t) * 5;
void *out = mallocx(out_sz, 0);
void *out_ref = mallocx(out_sz, 0);
size_t out_sz_ref = out_sz;
assert_ptr_not_null(p,
"test pointer allocation failed");
assert_ptr_not_null(out,
"test output allocation failed");
assert_ptr_not_null(out_ref,
"test reference output allocation failed");
#define SLABCUR_READ(out) (*(void **)out)
#define COUNTS(out) ((size_t *)((void **)out + 1))
#define NFREE_READ(out) COUNTS(out)[0]
#define NREGS_READ(out) COUNTS(out)[1]
#define SIZE_READ(out) COUNTS(out)[2]
#define BIN_NFREE_READ(out) COUNTS(out)[3]
#define BIN_NREGS_READ(out) COUNTS(out)[4]
SLABCUR_READ(out) = NULL;
NFREE_READ(out) = NREGS_READ(out) = SIZE_READ(out) = -1;
BIN_NFREE_READ(out) = BIN_NREGS_READ(out) = -1;
memcpy(out_ref, out, out_sz);
/* Test invalid argument(s) errors */
TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz,
"old is NULL");
TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz,
"oldlenp is NULL");
TEST_UTIL_QUERY_EINVAL(out, &out_sz, NULL, in_sz,
"newp is NULL");
TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0,
"newlen is zero");
in_sz -= 1;
TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz,
"invalid newlen");
in_sz += 1;
out_sz_ref = out_sz -= 2 * sizeof(size_t);
TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz,
"invalid *oldlenp");
out_sz_ref = out_sz += 2 * sizeof(size_t);
/* Examine output for valid call */
TEST_UTIL_VALID("query");
assert_zu_le(sz, SIZE_READ(out),
"Extent size should be at least allocation size");
assert_zu_eq(SIZE_READ(out) & (PAGE - 1), 0,
"Extent size should be a multiple of page size");
if (sz <= SC_SMALL_MAXCLASS) {
assert_zu_le(NFREE_READ(out), NREGS_READ(out),
"Extent free count exceeded region count");
assert_zu_le(NREGS_READ(out), SIZE_READ(out),
"Extent region count exceeded size");
assert_zu_ne(NREGS_READ(out), 0,
"Extent region count must be positive");
assert_ptr_not_null(SLABCUR_READ(out),
"Current slab is null");
assert_true(NFREE_READ(out) == 0
|| SLABCUR_READ(out) <= p,
"Allocation should follow first fit principle");
if (config_stats) {
assert_zu_le(BIN_NFREE_READ(out),
BIN_NREGS_READ(out),
"Bin free count exceeded region count");
assert_zu_ne(BIN_NREGS_READ(out), 0,
"Bin region count must be positive");
assert_zu_le(NFREE_READ(out),
BIN_NFREE_READ(out),
"Extent free count exceeded bin free count");
assert_zu_le(NREGS_READ(out),
BIN_NREGS_READ(out),
"Extent region count exceeded "
"bin region count");
assert_zu_eq(BIN_NREGS_READ(out)
% NREGS_READ(out), 0,
"Bin region count isn't a multiple of "
"extent region count");
assert_zu_le(
BIN_NFREE_READ(out) - NFREE_READ(out),
BIN_NREGS_READ(out) - NREGS_READ(out),
"Free count in other extents in the bin "
"exceeded region count in other extents "
"in the bin");
assert_zu_le(NREGS_READ(out) - NFREE_READ(out),
BIN_NREGS_READ(out) - BIN_NFREE_READ(out),
"Extent utilized count exceeded "
"bin utilized count");
}
} else {
assert_zu_eq(NFREE_READ(out), 0,
"Extent free count should be zero");
assert_zu_eq(NREGS_READ(out), 1,
"Extent region count should be one");
assert_ptr_null(SLABCUR_READ(out),
"Current slab must be null for large size classes");
if (config_stats) {
assert_zu_eq(BIN_NFREE_READ(out), 0,
"Bin free count must be zero for "
"large sizes");
assert_zu_eq(BIN_NREGS_READ(out), 0,
"Bin region count must be zero for "
"large sizes");
}
}
#undef BIN_NREGS_READ
#undef BIN_NFREE_READ
#undef SIZE_READ
#undef NREGS_READ
#undef NFREE_READ
#undef COUNTS
#undef SLABCUR_READ
free(out_ref);
free(out);
free(p);
}
}
TEST_END
TEST_BEGIN(test_batch) {
size_t sz;
/*
* Select some sizes that can span both small and large sizes, and are
* numerically unrelated to any size boundaries.
*/
for (sz = 17; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS;
sz += (sz <= SC_SMALL_MAXCLASS ? 1019 : 99991)) {
void *p = mallocx(sz, 0);
void *q = mallocx(sz, 0);
void *in[] = {p, q};
size_t in_sz = sizeof(const void *) * 2;
size_t out[] = {-1, -1, -1, -1, -1, -1};
size_t out_sz = sizeof(size_t) * 6;
size_t out_ref[] = {-1, -1, -1, -1, -1, -1};
size_t out_sz_ref = out_sz;
assert_ptr_not_null(p, "test pointer allocation failed");
assert_ptr_not_null(q, "test pointer allocation failed");
/* Test invalid argument(s) errors */
TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz,
"old is NULL");
TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz,
"oldlenp is NULL");
TEST_UTIL_BATCH_EINVAL(out, &out_sz, NULL, in_sz,
"newp is NULL");
TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0,
"newlen is zero");
in_sz -= 1;
TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
"newlen is not an exact multiple");
in_sz += 1;
out_sz_ref = out_sz -= 2 * sizeof(size_t);
TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
"*oldlenp is not an exact multiple");
out_sz_ref = out_sz += 2 * sizeof(size_t);
in_sz -= sizeof(const void *);
TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
"*oldlenp and newlen do not match");
in_sz += sizeof(const void *);
/* Examine output for valid calls */
#define TEST_EQUAL_REF(i, message) \
assert_d_eq(memcmp(out + (i) * 3, out_ref + (i) * 3, 3), 0, message)
#define NFREE_READ(out, i) out[(i) * 3]
#define NREGS_READ(out, i) out[(i) * 3 + 1]
#define SIZE_READ(out, i) out[(i) * 3 + 2]
out_sz_ref = out_sz /= 2;
in_sz /= 2;
TEST_UTIL_BATCH_VALID;
assert_zu_le(sz, SIZE_READ(out, 0),
"Extent size should be at least allocation size");
assert_zu_eq(SIZE_READ(out, 0) & (PAGE - 1), 0,
"Extent size should be a multiple of page size");
if (sz <= SC_SMALL_MAXCLASS) {
assert_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0),
"Extent free count exceeded region count");
assert_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0),
"Extent region count exceeded size");
assert_zu_ne(NREGS_READ(out, 0), 0,
"Extent region count must be positive");
} else {
assert_zu_eq(NFREE_READ(out, 0), 0,
"Extent free count should be zero");
assert_zu_eq(NREGS_READ(out, 0), 1,
"Extent region count should be one");
}
TEST_EQUAL_REF(1,
"Should not overwrite content beyond what's needed");
in_sz *= 2;
out_sz_ref = out_sz *= 2;
memcpy(out_ref, out, 3 * sizeof(size_t));
TEST_UTIL_BATCH_VALID;
TEST_EQUAL_REF(0, "Statistics should be stable across calls");
if (sz <= SC_SMALL_MAXCLASS) {
assert_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1),
"Extent free count exceeded region count");
} else {
assert_zu_eq(NFREE_READ(out, 0), 0,
"Extent free count should be zero");
}
assert_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1),
"Extent region count should be same for same region size");
assert_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1),
"Extent size should be same for same region size");
#undef SIZE_READ
#undef NREGS_READ
#undef NFREE_READ
#undef TEST_EQUAL_REF
free(q);
free(p);
}
}
TEST_END
int
main(void) {
assert_zu_lt(SC_SMALL_MAXCLASS, TEST_MAX_SIZE,
"Test case cannot cover large classes");
return test(test_query, test_batch);
}
#include "test/jemalloc_test.h"
#include "jemalloc/internal/hook.h"
static void *arg_extra;
static int arg_type;
static void *arg_result;
static void *arg_address;
static size_t arg_old_usize;
static size_t arg_new_usize;
static uintptr_t arg_result_raw;
static uintptr_t arg_args_raw[4];
static int call_count = 0;
static void
reset_args() {
arg_extra = NULL;
arg_type = 12345;
arg_result = NULL;
arg_address = NULL;
arg_old_usize = 0;
arg_new_usize = 0;
arg_result_raw = 0;
memset(arg_args_raw, 77, sizeof(arg_args_raw));
}
static void
alloc_free_size(size_t sz) {
void *ptr = mallocx(1, 0);
free(ptr);
ptr = mallocx(1, 0);
free(ptr);
ptr = mallocx(1, MALLOCX_TCACHE_NONE);
dallocx(ptr, MALLOCX_TCACHE_NONE);
}
/*
* We want to support a degree of user reentrancy. This tests a variety of
* allocation scenarios.
*/
static void
be_reentrant() {
/* Let's make sure the tcache is non-empty if enabled. */
alloc_free_size(1);
alloc_free_size(1024);
alloc_free_size(64 * 1024);
alloc_free_size(256 * 1024);
alloc_free_size(1024 * 1024);
/* Some reallocation. */
void *ptr = mallocx(129, 0);
ptr = rallocx(ptr, 130, 0);
free(ptr);
ptr = mallocx(2 * 1024 * 1024, 0);
free(ptr);
ptr = mallocx(1 * 1024 * 1024, 0);
ptr = rallocx(ptr, 2 * 1024 * 1024, 0);
free(ptr);
ptr = mallocx(1, 0);
ptr = rallocx(ptr, 1000, 0);
free(ptr);
}
static void
set_args_raw(uintptr_t *args_raw, int nargs) {
memcpy(arg_args_raw, args_raw, sizeof(uintptr_t) * nargs);
}
static void
assert_args_raw(uintptr_t *args_raw_expected, int nargs) {
int cmp = memcmp(args_raw_expected, arg_args_raw,
sizeof(uintptr_t) * nargs);
assert_d_eq(cmp, 0, "Raw args mismatch");
}
static void
reset() {
call_count = 0;
reset_args();
}
static void
test_alloc_hook(void *extra, hook_alloc_t type, void *result,
uintptr_t result_raw, uintptr_t args_raw[3]) {
call_count++;
arg_extra = extra;
arg_type = (int)type;
arg_result = result;
arg_result_raw = result_raw;
set_args_raw(args_raw, 3);
be_reentrant();
}
static void
test_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
uintptr_t args_raw[3]) {
call_count++;
arg_extra = extra;
arg_type = (int)type;
arg_address = address;
set_args_raw(args_raw, 3);
be_reentrant();
}
static void
test_expand_hook(void *extra, hook_expand_t type, void *address,
size_t old_usize, size_t new_usize, uintptr_t result_raw,
uintptr_t args_raw[4]) {
call_count++;
arg_extra = extra;
arg_type = (int)type;
arg_address = address;
arg_old_usize = old_usize;
arg_new_usize = new_usize;
arg_result_raw = result_raw;
set_args_raw(args_raw, 4);
be_reentrant();
}
TEST_BEGIN(test_hooks_basic) {
/* Just verify that the hooks record their arguments correctly. */
hooks_t hooks = {
&test_alloc_hook, &test_dalloc_hook, &test_expand_hook,
(void *)111};
void *handle = hook_install(TSDN_NULL, &hooks);
uintptr_t args_raw[4] = {10, 20, 30, 40};
/* Alloc */
reset_args();
hook_invoke_alloc(hook_alloc_posix_memalign, (void *)222, 333,
args_raw);
assert_ptr_eq(arg_extra, (void *)111, "Passed wrong user pointer");
assert_d_eq((int)hook_alloc_posix_memalign, arg_type,
"Passed wrong alloc type");
assert_ptr_eq((void *)222, arg_result, "Passed wrong result address");
assert_u64_eq(333, arg_result_raw, "Passed wrong result");
assert_args_raw(args_raw, 3);
/* Dalloc */
reset_args();
hook_invoke_dalloc(hook_dalloc_sdallocx, (void *)222, args_raw);
assert_d_eq((int)hook_dalloc_sdallocx, arg_type,
"Passed wrong dalloc type");
assert_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
assert_ptr_eq((void *)222, arg_address, "Passed wrong address");
assert_args_raw(args_raw, 3);
/* Expand */
reset_args();
hook_invoke_expand(hook_expand_xallocx, (void *)222, 333, 444, 555,
args_raw);
assert_d_eq((int)hook_expand_xallocx, arg_type,
"Passed wrong expand type");
assert_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
assert_ptr_eq((void *)222, arg_address, "Passed wrong address");
assert_zu_eq(333, arg_old_usize, "Passed wrong old usize");
assert_zu_eq(444, arg_new_usize, "Passed wrong new usize");
assert_zu_eq(555, arg_result_raw, "Passed wrong result");
assert_args_raw(args_raw, 4);
hook_remove(TSDN_NULL, handle);
}
TEST_END
TEST_BEGIN(test_hooks_null) {
/* Null hooks should be ignored, not crash. */
hooks_t hooks1 = {NULL, NULL, NULL, NULL};
hooks_t hooks2 = {&test_alloc_hook, NULL, NULL, NULL};
hooks_t hooks3 = {NULL, &test_dalloc_hook, NULL, NULL};
hooks_t hooks4 = {NULL, NULL, &test_expand_hook, NULL};
void *handle1 = hook_install(TSDN_NULL, &hooks1);
void *handle2 = hook_install(TSDN_NULL, &hooks2);
void *handle3 = hook_install(TSDN_NULL, &hooks3);
void *handle4 = hook_install(TSDN_NULL, &hooks4);
assert_ptr_ne(handle1, NULL, "Hook installation failed");
assert_ptr_ne(handle2, NULL, "Hook installation failed");
assert_ptr_ne(handle3, NULL, "Hook installation failed");
assert_ptr_ne(handle4, NULL, "Hook installation failed");
uintptr_t args_raw[4] = {10, 20, 30, 40};
call_count = 0;
hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
assert_d_eq(call_count, 1, "Called wrong number of times");
call_count = 0;
hook_invoke_dalloc(hook_dalloc_free, NULL, args_raw);
assert_d_eq(call_count, 1, "Called wrong number of times");
call_count = 0;
hook_invoke_expand(hook_expand_realloc, NULL, 0, 0, 0, args_raw);
assert_d_eq(call_count, 1, "Called wrong number of times");
hook_remove(TSDN_NULL, handle1);
hook_remove(TSDN_NULL, handle2);
hook_remove(TSDN_NULL, handle3);
hook_remove(TSDN_NULL, handle4);
}
TEST_END
TEST_BEGIN(test_hooks_remove) {
hooks_t hooks = {&test_alloc_hook, NULL, NULL, NULL};
void *handle = hook_install(TSDN_NULL, &hooks);
assert_ptr_ne(handle, NULL, "Hook installation failed");
call_count = 0;
uintptr_t args_raw[4] = {10, 20, 30, 40};
hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
assert_d_eq(call_count, 1, "Hook not invoked");
call_count = 0;
hook_remove(TSDN_NULL, handle);
hook_invoke_alloc(hook_alloc_malloc, NULL, 0, NULL);
assert_d_eq(call_count, 0, "Hook invoked after removal");
}
TEST_END
TEST_BEGIN(test_hooks_alloc_simple) {
/* "Simple" in the sense that we're not in a realloc variant. */
hooks_t hooks = {&test_alloc_hook, NULL, NULL, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
assert_ptr_ne(handle, NULL, "Hook installation failed");
/* Stop malloc from being optimized away. */
volatile int err;
void *volatile ptr;
/* malloc */
reset();
ptr = malloc(1);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_malloc, "Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
free(ptr);
/* posix_memalign */
reset();
err = posix_memalign((void **)&ptr, 1024, 1);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_posix_memalign,
"Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)err, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)&ptr, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)1024, arg_args_raw[1], "Wrong argument");
assert_u64_eq((uintptr_t)1, arg_args_raw[2], "Wrong argument");
free(ptr);
/* aligned_alloc */
reset();
ptr = aligned_alloc(1024, 1);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_aligned_alloc,
"Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
free(ptr);
/* calloc */
reset();
ptr = calloc(11, 13);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_calloc, "Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)11, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)13, arg_args_raw[1], "Wrong argument");
free(ptr);
/* memalign */
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
reset();
ptr = memalign(1024, 1);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_memalign, "Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
free(ptr);
#endif /* JEMALLOC_OVERRIDE_MEMALIGN */
/* valloc */
#ifdef JEMALLOC_OVERRIDE_VALLOC
reset();
ptr = valloc(1);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_valloc, "Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
free(ptr);
#endif /* JEMALLOC_OVERRIDE_VALLOC */
/* mallocx */
reset();
ptr = mallocx(1, MALLOCX_LG_ALIGN(10));
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_mallocx, "Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)MALLOCX_LG_ALIGN(10), arg_args_raw[1],
"Wrong flags");
free(ptr);
hook_remove(TSDN_NULL, handle);
}
TEST_END
TEST_BEGIN(test_hooks_dalloc_simple) {
/* "Simple" in the sense that we're not in a realloc variant. */
hooks_t hooks = {NULL, &test_dalloc_hook, NULL, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
assert_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
/* free() */
reset();
ptr = malloc(1);
free(ptr);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_dalloc_free, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
/* dallocx() */
reset();
ptr = malloc(1);
dallocx(ptr, MALLOCX_TCACHE_NONE);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1],
"Wrong raw arg");
/* sdallocx() */
reset();
ptr = malloc(1);
sdallocx(ptr, 1, MALLOCX_TCACHE_NONE);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_dalloc_sdallocx, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg");
assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2],
"Wrong raw arg");
hook_remove(TSDN_NULL, handle);
}
TEST_END
TEST_BEGIN(test_hooks_expand_simple) {
/* "Simple" in the sense that we're not in a realloc variant. */
hooks_t hooks = {NULL, NULL, &test_expand_hook, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
assert_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
/* xallocx() */
reset();
ptr = malloc(1);
size_t new_usize = xallocx(ptr, 100, 200, MALLOCX_TCACHE_NONE);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_expand_xallocx, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong pointer expanded");
assert_u64_eq(arg_old_usize, nallocx(1, 0), "Wrong old usize");
assert_u64_eq(arg_new_usize, sallocx(ptr, 0), "Wrong new usize");
assert_u64_eq(new_usize, arg_result_raw, "Wrong result");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong arg");
assert_u64_eq(100, arg_args_raw[1], "Wrong arg");
assert_u64_eq(200, arg_args_raw[2], "Wrong arg");
assert_u64_eq(MALLOCX_TCACHE_NONE, arg_args_raw[3], "Wrong arg");
hook_remove(TSDN_NULL, handle);
}
TEST_END
TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) {
hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
&test_expand_hook, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
assert_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
/* realloc(NULL, size) as malloc */
reset();
ptr = realloc(NULL, 1);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
free(ptr);
/* realloc(ptr, 0) as free */
ptr = malloc(1);
reset();
realloc(ptr, 0);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_dalloc_realloc, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
assert_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong raw arg");
/* realloc(NULL, 0) as malloc(0) */
reset();
ptr = realloc(NULL, 0);
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
assert_ptr_eq(ptr, arg_result, "Wrong result");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong argument");
free(ptr);
hook_remove(TSDN_NULL, handle);
}
TEST_END
static void
do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags,
int expand_type, int dalloc_type) {
hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
&test_expand_hook, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
assert_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
void *volatile ptr2;
/* Realloc in-place, small. */
ptr = malloc(129);
reset();
ptr2 = ralloc(ptr, 130, flags);
assert_ptr_eq(ptr, ptr2, "Small realloc moved");
assert_d_eq(call_count, 1, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, expand_type, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong address");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)130, arg_args_raw[1], "Wrong argument");
free(ptr);
/*
* Realloc in-place, large. Since we can't guarantee the large case
* across all platforms, we stay resilient to moving results.
*/
ptr = malloc(2 * 1024 * 1024);
free(ptr);
ptr2 = malloc(1 * 1024 * 1024);
reset();
ptr = ralloc(ptr2, 2 * 1024 * 1024, flags);
/* ptr is the new address, ptr2 is the old address. */
if (ptr == ptr2) {
assert_d_eq(call_count, 1, "Hook not called");
assert_d_eq(arg_type, expand_type, "Wrong hook type");
} else {
assert_d_eq(call_count, 2, "Wrong hooks called");
assert_ptr_eq(ptr, arg_result, "Wrong address");
assert_d_eq(arg_type, dalloc_type, "Wrong hook type");
}
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_ptr_eq(ptr2, arg_address, "Wrong address");
assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)ptr2, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
"Wrong argument");
free(ptr);
/* Realloc with move, small. */
ptr = malloc(8);
reset();
ptr2 = ralloc(ptr, 128, flags);
assert_ptr_ne(ptr, ptr2, "Small realloc didn't move");
assert_d_eq(call_count, 2, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, dalloc_type, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong address");
assert_ptr_eq(ptr2, arg_result, "Wrong address");
assert_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)128, arg_args_raw[1], "Wrong argument");
free(ptr2);
/* Realloc with move, large. */
ptr = malloc(1);
reset();
ptr2 = ralloc(ptr, 2 * 1024 * 1024, flags);
assert_ptr_ne(ptr, ptr2, "Large realloc didn't move");
assert_d_eq(call_count, 2, "Hook not called");
assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
assert_d_eq(arg_type, dalloc_type, "Wrong hook type");
assert_ptr_eq(ptr, arg_address, "Wrong address");
assert_ptr_eq(ptr2, arg_result, "Wrong address");
assert_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
"Wrong raw result");
assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
assert_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
"Wrong argument");
free(ptr2);
hook_remove(TSDN_NULL, handle);
}
static void *
realloc_wrapper(void *ptr, size_t size, UNUSED int flags) {
return realloc(ptr, size);
}
TEST_BEGIN(test_hooks_realloc) {
do_realloc_test(&realloc_wrapper, 0, hook_expand_realloc,
hook_dalloc_realloc);
}
TEST_END
TEST_BEGIN(test_hooks_rallocx) {
do_realloc_test(&rallocx, MALLOCX_TCACHE_NONE, hook_expand_rallocx,
hook_dalloc_rallocx);
}
TEST_END
int
main(void) {
/* We assert on call counts. */
return test_no_reentrancy(
test_hooks_basic,
test_hooks_null,
test_hooks_remove,
test_hooks_alloc_simple,
test_hooks_dalloc_simple,
test_hooks_expand_simple,
test_hooks_realloc_as_malloc_or_free,
test_hooks_realloc,
test_hooks_rallocx);
}
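/*
 * Editor's sketch, not from the upstream sources: the same hooks can also be
 * installed through the public mallctl() interface instead of the internal
 * hook_install()/hook_remove() pair used by this test (test/unit/mallctl.c
 * exercises that path as test_hooks). The hooks_t type still comes from the
 * internal jemalloc/internal/hook.h header, and the experimental.hooks.*
 * names are, as the prefix suggests, not a stable API.
 */
static void
sketch_count_alloc(void *extra, hook_alloc_t type, void *result,
    uintptr_t result_raw, uintptr_t args_raw[3]) {
	(void)type; (void)result; (void)result_raw; (void)args_raw;
	(*(int *)extra)++;
}
static void
sketch_count_dalloc(void *extra, hook_dalloc_t type, void *address,
    uintptr_t args_raw[3]) {
	(void)type; (void)address; (void)args_raw;
	(*(int *)extra)++;
}
static void
sketch_install_via_mallctl(void) {
	int calls = 0;
	hooks_t hooks = {&sketch_count_alloc, &sketch_count_dalloc, NULL,
	    &calls};
	void *handle = NULL;
	size_t sz = sizeof(handle);
	if (mallctl("experimental.hooks.install", &handle, &sz, &hooks,
	    sizeof(hooks)) != 0) {
		return;
	}
	void *p = malloc(1);	/* Should bump calls via sketch_count_alloc. */
	free(p);		/* Should bump calls via sketch_count_dalloc. */
	mallctl("experimental.hooks.remove", NULL, NULL, &handle,
	    sizeof(handle));
}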
#include "test/jemalloc_test.h"
/* Threshold: 2 << 20 = 2097152. */
const char *malloc_conf = "oversize_threshold:2097152";
#define HUGE_SZ (2 << 20)
#define SMALL_SZ (8)
TEST_BEGIN(huge_bind_thread) {
unsigned arena1, arena2;
size_t sz = sizeof(unsigned);
/* Bind to a manual arena. */
assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
"Failed to create arena");
assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena1,
sizeof(arena1)), 0, "Fail to bind thread");
void *ptr = mallocx(HUGE_SZ, 0);
assert_ptr_not_null(ptr, "Fail to allocate huge size");
assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
assert_u_eq(arena1, arena2, "Wrong arena used after binding");
dallocx(ptr, 0);
/* Switch back to arena 0. */
test_skip_if(have_percpu_arena &&
PERCPU_ARENA_ENABLED(opt_percpu_arena));
arena2 = 0;
assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena2,
sizeof(arena2)), 0, "Fail to bind thread");
ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
assert_u_eq(arena2, 0, "Wrong arena used after binding");
dallocx(ptr, MALLOCX_TCACHE_NONE);
/* Then huge allocation should use the huge arena. */
ptr = mallocx(HUGE_SZ, 0);
assert_ptr_not_null(ptr, "Fail to allocate huge size");
assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
assert_u_ne(arena2, 0, "Wrong arena used after binding");
assert_u_ne(arena1, arena2, "Wrong arena used after binding");
dallocx(ptr, 0);
}
TEST_END
TEST_BEGIN(huge_mallocx) {
unsigned arena1, arena2;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
"Failed to create arena");
void *huge = mallocx(HUGE_SZ, MALLOCX_ARENA(arena1));
assert_ptr_not_null(huge, "Fail to allocate huge size");
assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge,
sizeof(huge)), 0, "Unexpected mallctl() failure");
assert_u_eq(arena1, arena2, "Wrong arena used for mallocx");
dallocx(huge, MALLOCX_ARENA(arena1));
void *huge2 = mallocx(HUGE_SZ, 0);
assert_ptr_not_null(huge2, "Fail to allocate huge size");
assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2,
sizeof(huge2)), 0, "Unexpected mallctl() failure");
assert_u_ne(arena1, arena2,
"Huge allocation should not come from the manual arena.");
assert_u_ne(arena2, 0,
"Huge allocation should not come from the arena 0.");
dallocx(huge2, 0);
}
TEST_END
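/*
 * Editor's note, not from the upstream sources: huge_mallocx above shows the
 * precedence of an explicit MALLOCX_ARENA() request over oversize_threshold
 * routing. The arena-pinned allocation stays in the manual arena, while a
 * plain mallocx() of the same size is redirected to the dedicated huge arena,
 * which is neither the manual arena nor arena 0.
 */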
TEST_BEGIN(huge_allocation) {
unsigned arena1, arena2;
void *ptr = mallocx(HUGE_SZ, 0);
assert_ptr_not_null(ptr, "Fail to allocate huge size");
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
0, "Unexpected mallctl() failure");
assert_u_gt(arena1, 0, "Huge allocation should not come from arena 0");
dallocx(ptr, 0);
ptr = mallocx(HUGE_SZ >> 1, 0);
assert_ptr_not_null(ptr, "Fail to allocate half huge size");
assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
assert_u_ne(arena1, arena2, "Wrong arena used for half huge");
dallocx(ptr, 0);
ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
assert_ptr_not_null(ptr, "Fail to allocate small size");
assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
assert_u_ne(arena1, arena2,
"Huge and small should be from different arenas");
dallocx(ptr, 0);
}
TEST_END
int
main(void) {
return test(
huge_allocation,
huge_mallocx,
huge_bind_thread);
}
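/*
 * Editor's sketch, not part of the upstream tests: the checks above lean on
 * two mallctl queries worth calling out. opt.oversize_threshold reports the
 * threshold set via malloc_conf, and arenas.lookup maps an allocation back to
 * the index of the arena that owns it. A standalone check that a
 * threshold-sized allocation is not served from arena 0 could look like this.
 */
static void
sketch_check_oversize_routing(void) {
	size_t threshold;
	size_t sz = sizeof(threshold);
	if (mallctl("opt.oversize_threshold", &threshold, &sz, NULL, 0) != 0
	    || threshold == 0) {
		return;	/* Option unavailable or routing disabled. */
	}
	void *p = mallocx(threshold, 0);	/* At the threshold => huge arena. */
	if (p == NULL) {
		return;
	}
	unsigned arena;
	sz = sizeof(arena);
	assert_d_eq(mallctl("arenas.lookup", &arena, &sz, &p, sizeof(p)), 0,
	    "Unexpected mallctl() failure");
	assert_u_ne(arena, 0, "Oversize allocation should bypass arena 0");
	dallocx(p, 0);
}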
@@ -123,13 +123,13 @@ test_junk(size_t sz_min, size_t sz_max) {
TEST_BEGIN(test_junk_small) {
test_skip_if(!config_fill);
test_junk(1, SMALL_MAXCLASS-1);
test_junk(1, SC_SMALL_MAXCLASS - 1);
}
TEST_END
TEST_BEGIN(test_junk_large) {
test_skip_if(!config_fill);
test_junk(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
test_junk(SC_SMALL_MAXCLASS + 1, (1U << (SC_LG_LARGE_MINCLASS + 1)));
}
TEST_END
#include "test/jemalloc_test.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/util.h"
TEST_BEGIN(test_mallctl_errors) {
@@ -158,11 +159,13 @@ TEST_BEGIN(test_mallctl_opt) {
TEST_MALLCTL_OPT(bool, abort, always);
TEST_MALLCTL_OPT(bool, abort_conf, always);
TEST_MALLCTL_OPT(bool, confirm_conf, always);
TEST_MALLCTL_OPT(const char *, metadata_thp, always);
TEST_MALLCTL_OPT(bool, retain, always);
TEST_MALLCTL_OPT(const char *, dss, always);
TEST_MALLCTL_OPT(unsigned, narenas, always);
TEST_MALLCTL_OPT(const char *, percpu_arena, always);
TEST_MALLCTL_OPT(size_t, oversize_threshold, always);
TEST_MALLCTL_OPT(bool, background_thread, always);
TEST_MALLCTL_OPT(ssize_t, dirty_decay_ms, always);
TEST_MALLCTL_OPT(ssize_t, muzzy_decay_ms, always);
@@ -340,6 +343,9 @@ TEST_BEGIN(test_thread_arena) {
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
if (opt_oversize_threshold != 0) {
narenas--;
}
assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
if (strcmp(opa, "disabled") == 0) {
@@ -576,7 +582,7 @@ TEST_BEGIN(test_arena_i_retain_grow_limit) {
assert_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_zu_eq(default_limit, sz_pind2sz(EXTENT_GROW_MAX_PIND),
assert_zu_eq(default_limit, SC_LARGE_MAXCLASS,
"Unexpected default for retain_grow_limit");
new_limit = PAGE - 1;
@@ -681,8 +687,8 @@ TEST_BEGIN(test_arenas_constants) {
TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
TEST_ARENAS_CONSTANT(size_t, page, PAGE);
TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS);
TEST_ARENAS_CONSTANT(unsigned, nlextents, NSIZES - NBINS);
TEST_ARENAS_CONSTANT(unsigned, nbins, SC_NBINS);
TEST_ARENAS_CONSTANT(unsigned, nlextents, SC_NSIZES - SC_NBINS);
#undef TEST_ARENAS_CONSTANT
}
@@ -701,6 +707,7 @@ TEST_BEGIN(test_arenas_bin_constants) {
TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, bin_infos[0].nregs);
TEST_ARENAS_BIN_CONSTANT(size_t, slab_size,
bin_infos[0].slab_size);
TEST_ARENAS_BIN_CONSTANT(uint32_t, nshards, bin_infos[0].n_shards);
#undef TEST_ARENAS_BIN_CONSTANT
}
@@ -715,7 +722,8 @@ TEST_BEGIN(test_arenas_lextent_constants) {
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_LEXTENT_CONSTANT(size_t, size, LARGE_MINCLASS);
TEST_ARENAS_LEXTENT_CONSTANT(size_t, size,
SC_LARGE_MINCLASS);
#undef TEST_ARENAS_LEXTENT_CONSTANT
}
@@ -773,6 +781,79 @@ TEST_BEGIN(test_stats_arenas) {
}
TEST_END
static void
alloc_hook(void *extra, UNUSED hook_alloc_t type, UNUSED void *result,
UNUSED uintptr_t result_raw, UNUSED uintptr_t args_raw[3]) {
*(bool *)extra = true;
}
static void
dalloc_hook(void *extra, UNUSED hook_dalloc_t type,
UNUSED void *address, UNUSED uintptr_t args_raw[3]) {
*(bool *)extra = true;
}
TEST_BEGIN(test_hooks) {
bool hook_called = false;
hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
void *handle = NULL;
size_t sz = sizeof(handle);
int err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
sizeof(hooks));
assert_d_eq(err, 0, "Hook installation failed");
assert_ptr_ne(handle, NULL, "Hook installation gave null handle");
void *ptr = mallocx(1, 0);
assert_true(hook_called, "Alloc hook not called");
hook_called = false;
free(ptr);
assert_true(hook_called, "Free hook not called");
err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
sizeof(handle));
assert_d_eq(err, 0, "Hook removal failed");
hook_called = false;
ptr = mallocx(1, 0);
free(ptr);
assert_false(hook_called, "Hook called after removal");
}
TEST_END
TEST_BEGIN(test_hooks_exhaustion) {
bool hook_called = false;
hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
void *handle;
void *handles[HOOK_MAX];
size_t sz = sizeof(handle);
int err;
for (int i = 0; i < HOOK_MAX; i++) {
handle = NULL;
err = mallctl("experimental.hooks.install", &handle, &sz,
&hooks, sizeof(hooks));
assert_d_eq(err, 0, "Error installation hooks");
assert_ptr_ne(handle, NULL, "Got NULL handle");
handles[i] = handle;
}
err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
sizeof(hooks));
assert_d_eq(err, EAGAIN, "Should have failed hook installation");
for (int i = 0; i < HOOK_MAX; i++) {
err = mallctl("experimental.hooks.remove", NULL, NULL,
&handles[i], sizeof(handles[i]));
assert_d_eq(err, 0, "Hook removal failed");
}
/* Insertion failed, but then we removed some; it should work now. */
handle = NULL;
err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
sizeof(hooks));
assert_d_eq(err, 0, "Hook insertion failed");
assert_ptr_ne(handle, NULL, "Got NULL handle");
err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
sizeof(handle));
assert_d_eq(err, 0, "Hook removal failed");
}
TEST_END
int
main(void) {
return test(
@@ -801,5 +882,7 @@ main(void) {
test_arenas_lextent_constants,
test_arenas_create,
test_arenas_lookup,
test_stats_arenas);
test_stats_arenas,
test_hooks,
test_hooks_exhaustion);
}
@@ -29,12 +29,12 @@ TEST_BEGIN(test_gdump) {
prof_dump_open = prof_dump_open_intercept;
did_prof_dump_open = false;
p = mallocx((1U << LG_LARGE_MINCLASS), 0);
p = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_true(did_prof_dump_open, "Expected a profile dump");
did_prof_dump_open = false;
q = mallocx((1U << LG_LARGE_MINCLASS), 0);
q = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
assert_ptr_not_null(q, "Unexpected mallocx() failure");
assert_true(did_prof_dump_open, "Expected a profile dump");
@@ -45,7 +45,7 @@ TEST_BEGIN(test_gdump) {
"Unexpected mallctl failure while disabling prof.gdump");
assert(gdump_old);
did_prof_dump_open = false;
r = mallocx((1U << LG_LARGE_MINCLASS), 0);
r = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
assert_ptr_not_null(r, "Unexpected mallocx() failure");
assert_false(did_prof_dump_open, "Unexpected profile dump");
@@ -56,7 +56,7 @@ TEST_BEGIN(test_gdump) {
"Unexpected mallctl failure while enabling prof.gdump");
assert(!gdump_old);
did_prof_dump_open = false;
s = mallocx((1U << LG_LARGE_MINCLASS), 0);
s = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
assert_ptr_not_null(s, "Unexpected mallocx() failure");
assert_true(did_prof_dump_open, "Expected a profile dump");