Commit 7178cac0 authored by antirez

Revert "Jemalloc updated to 4.4.0."

This reverts commit 153f2f00.

Jemalloc 4.4.0 is apparently causing deadlocks in certain
systems. See for example https://github.com/antirez/redis/issues/3799.
As a cautionary step we are reverting the commit and
releasing a new stable Redis version.
parent 33fad43c
 /* Simple timer, for use in benchmark reporting. */
+#include <unistd.h>
+#include <sys/time.h>
+#define JEMALLOC_CLOCK_GETTIME defined(_POSIX_MONOTONIC_CLOCK) \
+    && _POSIX_MONOTONIC_CLOCK >= 0
 typedef struct {
-	nstime_t t0;
-	nstime_t t1;
+#ifdef _WIN32
+	FILETIME ft0;
+	FILETIME ft1;
+#elif JEMALLOC_CLOCK_GETTIME
+	struct timespec ts0;
+	struct timespec ts1;
+	int clock_id;
+#else
+	struct timeval tv0;
+	struct timeval tv1;
+#endif
 } timedelta_t;
 void timer_start(timedelta_t *timer);
......
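The timedelta_t API itself is unchanged by the revert: benchmarks still call timer_start()/timer_stop() around the measured work and read the elapsed time with timer_usec(). A minimal sketch of that pattern, assuming the jemalloc test harness headers; the function name, loop body, and iteration count are illustrative, not taken from the repository:

#include "test/jemalloc_test.h"

static void
bench_malloc_free(void)
{
	timedelta_t timer;
	uint64_t i;

	timer_start(&timer);
	for (i = 0; i < 1000000; i++) {
		/* The work being measured; free(malloc(1)) is just a stand-in. */
		free(malloc(1));
	}
	timer_stop(&timer);

	/* timer_usec() reports the elapsed wall-clock time in microseconds. */
	malloc_printf("1M malloc/free pairs: %"FMTu64" us\n", timer_usec(&timer));
}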
...@@ -19,8 +19,8 @@ thd_start(void *arg) ...@@ -19,8 +19,8 @@ thd_start(void *arg)
size_t sz; size_t sz;
sz = sizeof(arena_ind); sz = sizeof(arena_ind);
assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0), assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
0, "Error in arenas.extend"); "Error in arenas.extend");
if (thread_ind % 4 != 3) { if (thread_ind % 4 != 3) {
size_t mib[3]; size_t mib[3];
......
#include "test/jemalloc_test.h" #include "test/jemalloc_test.h"
#define CHUNK 0x400000 #define CHUNK 0x400000
#define MAXALIGN (((size_t)1) << 23) /* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
#define MAXALIGN ((size_t)0x2000000LU)
/* #define NITER 4
* On systems which can't merge extents, tests that call this function generate
* a lot of dirty memory very quickly. Purging between cycles mitigates
* potential OOM on e.g. 32-bit Windows.
*/
static void
purge(void)
{
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl error");
}
TEST_BEGIN(test_alignment_errors) TEST_BEGIN(test_alignment_errors)
{ {
...@@ -85,7 +74,6 @@ TEST_END ...@@ -85,7 +74,6 @@ TEST_END
TEST_BEGIN(test_alignment_and_size) TEST_BEGIN(test_alignment_and_size)
{ {
#define NITER 4
size_t alignment, size, total; size_t alignment, size, total;
unsigned i; unsigned i;
void *ps[NITER]; void *ps[NITER];
...@@ -122,9 +110,7 @@ TEST_BEGIN(test_alignment_and_size) ...@@ -122,9 +110,7 @@ TEST_BEGIN(test_alignment_and_size)
} }
} }
} }
purge();
} }
#undef NITER
} }
TEST_END TEST_END
......
...@@ -18,14 +18,14 @@ thd_start(void *arg) ...@@ -18,14 +18,14 @@ thd_start(void *arg)
size_t sz, usize; size_t sz, usize;
sz = sizeof(a0); sz = sizeof(a0);
if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) { if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) {
if (err == ENOENT) if (err == ENOENT)
goto label_ENOENT; goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__, test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err)); strerror(err));
} }
sz = sizeof(ap0); sz = sizeof(ap0);
if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) { if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) {
if (err == ENOENT) if (err == ENOENT)
goto label_ENOENT; goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__, test_fail("%s(): Error in mallctl(): %s", __func__,
...@@ -36,15 +36,14 @@ thd_start(void *arg) ...@@ -36,15 +36,14 @@ thd_start(void *arg)
"storage"); "storage");
sz = sizeof(d0); sz = sizeof(d0);
if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) { if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) {
if (err == ENOENT) if (err == ENOENT)
goto label_ENOENT; goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__, test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err)); strerror(err));
} }
sz = sizeof(dp0); sz = sizeof(dp0);
if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL, if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) {
0))) {
if (err == ENOENT) if (err == ENOENT)
goto label_ENOENT; goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__, test_fail("%s(): Error in mallctl(): %s", __func__,
...@@ -58,9 +57,9 @@ thd_start(void *arg) ...@@ -58,9 +57,9 @@ thd_start(void *arg)
assert_ptr_not_null(p, "Unexpected malloc() error"); assert_ptr_not_null(p, "Unexpected malloc() error");
sz = sizeof(a1); sz = sizeof(a1);
mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0); mallctl("thread.allocated", &a1, &sz, NULL, 0);
sz = sizeof(ap1); sz = sizeof(ap1);
mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0); mallctl("thread.allocatedp", &ap1, &sz, NULL, 0);
assert_u64_eq(*ap1, a1, assert_u64_eq(*ap1, a1,
"Dereferenced \"thread.allocatedp\" value should equal " "Dereferenced \"thread.allocatedp\" value should equal "
"\"thread.allocated\" value"); "\"thread.allocated\" value");
...@@ -75,9 +74,9 @@ thd_start(void *arg) ...@@ -75,9 +74,9 @@ thd_start(void *arg)
free(p); free(p);
sz = sizeof(d1); sz = sizeof(d1);
mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0); mallctl("thread.deallocated", &d1, &sz, NULL, 0);
sz = sizeof(dp1); sz = sizeof(dp1);
mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0); mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0);
assert_u64_eq(*dp1, d1, assert_u64_eq(*dp1, d1,
"Dereferenced \"thread.deallocatedp\" value should equal " "Dereferenced \"thread.deallocatedp\" value should equal "
"\"thread.deallocated\" value"); "\"thread.deallocated\" value");
......
...@@ -121,10 +121,6 @@ TEST_BEGIN(test_chunk) ...@@ -121,10 +121,6 @@ TEST_BEGIN(test_chunk)
{ {
void *p; void *p;
size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz; size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz;
unsigned arena_ind;
int flags;
size_t hooks_mib[3], purge_mib[3];
size_t hooks_miblen, purge_miblen;
chunk_hooks_t new_hooks = { chunk_hooks_t new_hooks = {
chunk_alloc, chunk_alloc,
chunk_dalloc, chunk_dalloc,
...@@ -136,21 +132,11 @@ TEST_BEGIN(test_chunk) ...@@ -136,21 +132,11 @@ TEST_BEGIN(test_chunk)
}; };
bool xallocx_success_a, xallocx_success_b, xallocx_success_c; bool xallocx_success_a, xallocx_success_b, xallocx_success_c;
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
/* Install custom chunk hooks. */ /* Install custom chunk hooks. */
hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.chunk_hooks", hooks_mib,
&hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
hooks_mib[1] = (size_t)arena_ind;
old_size = sizeof(chunk_hooks_t); old_size = sizeof(chunk_hooks_t);
new_size = sizeof(chunk_hooks_t); new_size = sizeof(chunk_hooks_t);
assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size,
&old_size, (void *)&new_hooks, new_size), 0, &new_hooks, new_size), 0, "Unexpected chunk_hooks error");
"Unexpected chunk_hooks error");
orig_hooks = old_hooks; orig_hooks = old_hooks;
assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error"); assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error");
assert_ptr_ne(old_hooks.dalloc, chunk_dalloc, assert_ptr_ne(old_hooks.dalloc, chunk_dalloc,
...@@ -165,63 +151,59 @@ TEST_BEGIN(test_chunk) ...@@ -165,63 +151,59 @@ TEST_BEGIN(test_chunk)
/* Get large size classes. */ /* Get large size classes. */
sz = sizeof(size_t); sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL, assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
0), 0, "Unexpected arenas.lrun.0.size failure"); "Unexpected arenas.lrun.0.size failure");
assert_d_eq(mallctl("arenas.lrun.1.size", (void *)&large1, &sz, NULL, assert_d_eq(mallctl("arenas.lrun.1.size", &large1, &sz, NULL, 0), 0,
0), 0, "Unexpected arenas.lrun.1.size failure"); "Unexpected arenas.lrun.1.size failure");
/* Get huge size classes. */ /* Get huge size classes. */
assert_d_eq(mallctl("arenas.hchunk.0.size", (void *)&huge0, &sz, NULL, assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
0), 0, "Unexpected arenas.hchunk.0.size failure"); "Unexpected arenas.hchunk.0.size failure");
assert_d_eq(mallctl("arenas.hchunk.1.size", (void *)&huge1, &sz, NULL, assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0,
0), 0, "Unexpected arenas.hchunk.1.size failure"); "Unexpected arenas.hchunk.1.size failure");
assert_d_eq(mallctl("arenas.hchunk.2.size", (void *)&huge2, &sz, NULL, assert_d_eq(mallctl("arenas.hchunk.2.size", &huge2, &sz, NULL, 0), 0,
0), 0, "Unexpected arenas.hchunk.2.size failure"); "Unexpected arenas.hchunk.2.size failure");
/* Test dalloc/decommit/purge cascade. */ /* Test dalloc/decommit/purge cascade. */
purge_miblen = sizeof(purge_mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen),
0, "Unexpected mallctlnametomib() failure");
purge_mib[1] = (size_t)arena_ind;
do_dalloc = false; do_dalloc = false;
do_decommit = false; do_decommit = false;
p = mallocx(huge0 * 2, flags); p = mallocx(huge0 * 2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_ptr_not_null(p, "Unexpected mallocx() error");
did_dalloc = false; did_dalloc = false;
did_decommit = false; did_decommit = false;
did_purge = false; did_purge = false;
did_split = false; did_split = false;
xallocx_success_a = (xallocx(p, huge0, 0, flags) == huge0); xallocx_success_a = (xallocx(p, huge0, 0, 0) == huge0);
assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
0, "Unexpected arena.%u.purge error", arena_ind); "Unexpected arena.0.purge error");
if (xallocx_success_a) { if (xallocx_success_a) {
assert_true(did_dalloc, "Expected dalloc"); assert_true(did_dalloc, "Expected dalloc");
assert_false(did_decommit, "Unexpected decommit"); assert_false(did_decommit, "Unexpected decommit");
assert_true(did_purge, "Expected purge"); assert_true(did_purge, "Expected purge");
} }
assert_true(did_split, "Expected split"); assert_true(did_split, "Expected split");
dallocx(p, flags); dallocx(p, 0);
do_dalloc = true; do_dalloc = true;
/* Test decommit/commit and observe split/merge. */ /* Test decommit/commit and observe split/merge. */
do_dalloc = false; do_dalloc = false;
do_decommit = true; do_decommit = true;
p = mallocx(huge0 * 2, flags); p = mallocx(huge0 * 2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_ptr_not_null(p, "Unexpected mallocx() error");
did_decommit = false; did_decommit = false;
did_commit = false; did_commit = false;
did_split = false; did_split = false;
did_merge = false; did_merge = false;
xallocx_success_b = (xallocx(p, huge0, 0, flags) == huge0); xallocx_success_b = (xallocx(p, huge0, 0, 0) == huge0);
assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
0, "Unexpected arena.%u.purge error", arena_ind); "Unexpected arena.0.purge error");
if (xallocx_success_b) if (xallocx_success_b)
assert_true(did_split, "Expected split"); assert_true(did_split, "Expected split");
xallocx_success_c = (xallocx(p, huge0 * 2, 0, flags) == huge0 * 2); xallocx_success_c = (xallocx(p, huge0 * 2, 0, 0) == huge0 * 2);
assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
if (xallocx_success_b && xallocx_success_c) if (xallocx_success_b && xallocx_success_c)
assert_true(did_merge, "Expected merge"); assert_true(did_merge, "Expected merge");
dallocx(p, flags); dallocx(p, 0);
do_dalloc = true; do_dalloc = true;
do_decommit = false; do_decommit = false;
...@@ -232,43 +214,43 @@ TEST_BEGIN(test_chunk) ...@@ -232,43 +214,43 @@ TEST_BEGIN(test_chunk)
* successful xallocx() from size=huge2 to size=huge1 is * successful xallocx() from size=huge2 to size=huge1 is
* guaranteed to leave trailing purgeable memory. * guaranteed to leave trailing purgeable memory.
*/ */
p = mallocx(huge2, flags); p = mallocx(huge2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_ptr_not_null(p, "Unexpected mallocx() error");
did_purge = false; did_purge = false;
assert_zu_eq(xallocx(p, huge1, 0, flags), huge1, assert_zu_eq(xallocx(p, huge1, 0, 0), huge1,
"Unexpected xallocx() failure"); "Unexpected xallocx() failure");
assert_true(did_purge, "Expected purge"); assert_true(did_purge, "Expected purge");
dallocx(p, flags); dallocx(p, 0);
} }
/* Test decommit for large allocations. */ /* Test decommit for large allocations. */
do_decommit = true; do_decommit = true;
p = mallocx(large1, flags); p = mallocx(large1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_ptr_not_null(p, "Unexpected mallocx() error");
assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
0, "Unexpected arena.%u.purge error", arena_ind); "Unexpected arena.0.purge error");
did_decommit = false; did_decommit = false;
assert_zu_eq(xallocx(p, large0, 0, flags), large0, assert_zu_eq(xallocx(p, large0, 0, 0), large0,
"Unexpected xallocx() failure"); "Unexpected xallocx() failure");
assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
0, "Unexpected arena.%u.purge error", arena_ind); "Unexpected arena.0.purge error");
did_commit = false; did_commit = false;
assert_zu_eq(xallocx(p, large1, 0, flags), large1, assert_zu_eq(xallocx(p, large1, 0, 0), large1,
"Unexpected xallocx() failure"); "Unexpected xallocx() failure");
assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
dallocx(p, flags); dallocx(p, 0);
do_decommit = false; do_decommit = false;
/* Make sure non-huge allocation succeeds. */ /* Make sure non-huge allocation succeeds. */
p = mallocx(42, flags); p = mallocx(42, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_ptr_not_null(p, "Unexpected mallocx() error");
dallocx(p, flags); dallocx(p, 0);
/* Restore chunk hooks. */ /* Restore chunk hooks. */
assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL, assert_d_eq(mallctl("arena.0.chunk_hooks", NULL, NULL, &old_hooks,
(void *)&old_hooks, new_size), 0, "Unexpected chunk_hooks error"); new_size), 0, "Unexpected chunk_hooks error");
assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size,
&old_size, NULL, 0), 0, "Unexpected chunk_hooks error"); NULL, 0), 0, "Unexpected chunk_hooks error");
assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc, assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc,
"Unexpected alloc error"); "Unexpected alloc error");
assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc, assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc,
......
#include "test/jemalloc_test.h" #include "test/jemalloc_test.h"
#ifdef JEMALLOC_FILL
const char *malloc_conf = "junk:false";
#endif
static unsigned static unsigned
get_nsizes_impl(const char *cmd) get_nsizes_impl(const char *cmd)
{ {
...@@ -11,7 +7,7 @@ get_nsizes_impl(const char *cmd) ...@@ -11,7 +7,7 @@ get_nsizes_impl(const char *cmd)
size_t z; size_t z;
z = sizeof(unsigned); z = sizeof(unsigned);
assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd); "Unexpected mallctl(\"%s\", ...) failure", cmd);
return (ret); return (ret);
...@@ -37,7 +33,7 @@ get_size_impl(const char *cmd, size_t ind) ...@@ -37,7 +33,7 @@ get_size_impl(const char *cmd, size_t ind)
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = ind; mib[2] = ind;
z = sizeof(size_t); z = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return (ret); return (ret);
...@@ -50,84 +46,43 @@ get_huge_size(size_t ind) ...@@ -50,84 +46,43 @@ get_huge_size(size_t ind)
return (get_size_impl("arenas.hchunk.0.size", ind)); return (get_size_impl("arenas.hchunk.0.size", ind));
} }
/* TEST_BEGIN(test_oom)
* On systems which can't merge extents, tests that call this function generate
* a lot of dirty memory very quickly. Purging between cycles mitigates
* potential OOM on e.g. 32-bit Windows.
*/
static void
purge(void)
{
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl error");
}
TEST_BEGIN(test_overflow)
{ {
size_t hugemax; size_t hugemax, size, alignment;
hugemax = get_huge_size(get_nhuge()-1); hugemax = get_huge_size(get_nhuge()-1);
assert_ptr_null(mallocx(hugemax+1, 0),
"Expected OOM for mallocx(size=%#zx, 0)", hugemax+1);
assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
"Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
assert_ptr_null(mallocx(SIZE_T_MAX, 0),
"Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);
assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
"Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
ZU(PTRDIFF_MAX)+1);
}
TEST_END
TEST_BEGIN(test_oom)
{
size_t hugemax;
bool oom;
void *ptrs[3];
unsigned i;
/* /*
* It should be impossible to allocate three objects that each consume * It should be impossible to allocate two objects that each consume
* nearly half the virtual address space. * more than half the virtual address space.
*/ */
hugemax = get_huge_size(get_nhuge()-1); {
oom = false; void *p;
for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
ptrs[i] = mallocx(hugemax, 0); p = mallocx(hugemax, 0);
if (ptrs[i] == NULL) if (p != NULL) {
oom = true; assert_ptr_null(mallocx(hugemax, 0),
} "Expected OOM for mallocx(size=%#zx, 0)", hugemax);
assert_true(oom, dallocx(p, 0);
"Expected OOM during series of calls to mallocx(size=%zu, 0)", }
hugemax);
for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
if (ptrs[i] != NULL)
dallocx(ptrs[i], 0);
} }
purge();
#if LG_SIZEOF_PTR == 3 #if LG_SIZEOF_PTR == 3
assert_ptr_null(mallocx(0x8000000000000000ULL, size = ZU(0x8000000000000000);
MALLOCX_ALIGN(0x8000000000000000ULL)), alignment = ZU(0x8000000000000000);
"Expected OOM for mallocx()");
assert_ptr_null(mallocx(0x8000000000000000ULL,
MALLOCX_ALIGN(0x80000000)),
"Expected OOM for mallocx()");
#else #else
assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)), size = ZU(0x80000000);
"Expected OOM for mallocx()"); alignment = ZU(0x80000000);
#endif #endif
assert_ptr_null(mallocx(size, MALLOCX_ALIGN(alignment)),
"Expected OOM for mallocx(size=%#zx, MALLOCX_ALIGN(%#zx)", size,
alignment);
} }
TEST_END TEST_END
TEST_BEGIN(test_basic) TEST_BEGIN(test_basic)
{ {
#define MAXSZ (((size_t)1) << 23) #define MAXSZ (((size_t)1) << 26)
size_t sz; size_t sz;
for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
...@@ -136,28 +91,23 @@ TEST_BEGIN(test_basic) ...@@ -136,28 +91,23 @@ TEST_BEGIN(test_basic)
nsz = nallocx(sz, 0); nsz = nallocx(sz, 0);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
p = mallocx(sz, 0); p = mallocx(sz, 0);
assert_ptr_not_null(p, assert_ptr_not_null(p, "Unexpected mallocx() error");
"Unexpected mallocx(size=%zx, flags=0) error", sz);
rsz = sallocx(p, 0); rsz = sallocx(p, 0);
assert_zu_ge(rsz, sz, "Real size smaller than expected"); assert_zu_ge(rsz, sz, "Real size smaller than expected");
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
dallocx(p, 0); dallocx(p, 0);
p = mallocx(sz, 0); p = mallocx(sz, 0);
assert_ptr_not_null(p, assert_ptr_not_null(p, "Unexpected mallocx() error");
"Unexpected mallocx(size=%zx, flags=0) error", sz);
dallocx(p, 0); dallocx(p, 0);
nsz = nallocx(sz, MALLOCX_ZERO); nsz = nallocx(sz, MALLOCX_ZERO);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
p = mallocx(sz, MALLOCX_ZERO); p = mallocx(sz, MALLOCX_ZERO);
assert_ptr_not_null(p, assert_ptr_not_null(p, "Unexpected mallocx() error");
"Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error",
nsz);
rsz = sallocx(p, 0); rsz = sallocx(p, 0);
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
dallocx(p, 0); dallocx(p, 0);
purge();
} }
#undef MAXSZ #undef MAXSZ
} }
...@@ -165,7 +115,7 @@ TEST_END ...@@ -165,7 +115,7 @@ TEST_END
TEST_BEGIN(test_alignment_and_size) TEST_BEGIN(test_alignment_and_size)
{ {
#define MAXALIGN (((size_t)1) << 23) #define MAXALIGN (((size_t)1) << 25)
#define NITER 4 #define NITER 4
size_t nsz, rsz, sz, alignment, total; size_t nsz, rsz, sz, alignment, total;
unsigned i; unsigned i;
...@@ -215,7 +165,6 @@ TEST_BEGIN(test_alignment_and_size) ...@@ -215,7 +165,6 @@ TEST_BEGIN(test_alignment_and_size)
} }
} }
} }
purge();
} }
#undef MAXALIGN #undef MAXALIGN
#undef NITER #undef NITER
...@@ -227,7 +176,6 @@ main(void) ...@@ -227,7 +176,6 @@ main(void)
{ {
return (test( return (test(
test_overflow,
test_oom, test_oom,
test_basic, test_basic,
test_alignment_and_size)); test_alignment_and_size));
......
...@@ -8,8 +8,8 @@ TEST_BEGIN(test_overflow) ...@@ -8,8 +8,8 @@ TEST_BEGIN(test_overflow)
void *p; void *p;
sz = sizeof(unsigned); sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, 0), assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
0, "Unexpected mallctl() error"); "Unexpected mallctl() error");
miblen = sizeof(mib) / sizeof(size_t); miblen = sizeof(mib) / sizeof(size_t);
assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
...@@ -17,8 +17,8 @@ TEST_BEGIN(test_overflow) ...@@ -17,8 +17,8 @@ TEST_BEGIN(test_overflow)
mib[2] = nhchunks - 1; mib[2] = nhchunks - 1;
sz = sizeof(size_t); sz = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
NULL, 0), 0, "Unexpected mallctlbymib() error"); "Unexpected mallctlbymib() error");
assert_ptr_null(malloc(max_size_class + 1), assert_ptr_null(malloc(max_size_class + 1),
"Expected OOM due to over-sized allocation request"); "Expected OOM due to over-sized allocation request");
......
#include "test/jemalloc_test.h" #include "test/jemalloc_test.h"
#define CHUNK 0x400000 #define CHUNK 0x400000
#define MAXALIGN (((size_t)1) << 23) /* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
#define MAXALIGN ((size_t)0x2000000LU)
/* #define NITER 4
* On systems which can't merge extents, tests that call this function generate
* a lot of dirty memory very quickly. Purging between cycles mitigates
* potential OOM on e.g. 32-bit Windows.
*/
static void
purge(void)
{
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl error");
}
TEST_BEGIN(test_alignment_errors) TEST_BEGIN(test_alignment_errors)
{ {
...@@ -77,7 +66,6 @@ TEST_END ...@@ -77,7 +66,6 @@ TEST_END
TEST_BEGIN(test_alignment_and_size) TEST_BEGIN(test_alignment_and_size)
{ {
#define NITER 4
size_t alignment, size, total; size_t alignment, size, total;
unsigned i; unsigned i;
int err; int err;
...@@ -116,9 +104,7 @@ TEST_BEGIN(test_alignment_and_size) ...@@ -116,9 +104,7 @@ TEST_BEGIN(test_alignment_and_size)
} }
} }
} }
purge();
} }
#undef NITER
} }
TEST_END TEST_END
......
#include "test/jemalloc_test.h" #include "test/jemalloc_test.h"
static unsigned
get_nsizes_impl(const char *cmd)
{
unsigned ret;
size_t z;
z = sizeof(unsigned);
assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return (ret);
}
static unsigned
get_nhuge(void)
{
return (get_nsizes_impl("arenas.nhchunks"));
}
static size_t
get_size_impl(const char *cmd, size_t ind)
{
size_t ret;
size_t z;
size_t mib[4];
size_t miblen = 4;
z = sizeof(size_t);
assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = ind;
z = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return (ret);
}
static size_t
get_huge_size(size_t ind)
{
return (get_size_impl("arenas.hchunk.0.size", ind));
}
TEST_BEGIN(test_grow_and_shrink) TEST_BEGIN(test_grow_and_shrink)
{ {
void *p, *q; void *p, *q;
...@@ -184,22 +138,22 @@ TEST_END ...@@ -184,22 +138,22 @@ TEST_END
TEST_BEGIN(test_lg_align_and_zero) TEST_BEGIN(test_lg_align_and_zero)
{ {
void *p, *q; void *p, *q;
unsigned lg_align; size_t lg_align, sz;
size_t sz;
#define MAX_LG_ALIGN 25 #define MAX_LG_ALIGN 25
#define MAX_VALIDATE (ZU(1) << 22) #define MAX_VALIDATE (ZU(1) << 22)
lg_align = 0; lg_align = ZU(0);
p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_ptr_not_null(p, "Unexpected mallocx() error");
for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) { for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
assert_ptr_not_null(q, assert_ptr_not_null(q,
"Unexpected rallocx() error for lg_align=%u", lg_align); "Unexpected rallocx() error for lg_align=%zu", lg_align);
assert_ptr_null( assert_ptr_null(
(void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)), (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
"%p inadequately aligned for lg_align=%u", q, lg_align); "%p inadequately aligned for lg_align=%zu",
q, lg_align);
sz = sallocx(q, 0); sz = sallocx(q, 0);
if ((sz << 1) <= MAX_VALIDATE) { if ((sz << 1) <= MAX_VALIDATE) {
assert_false(validate_fill(q, 0, 0, sz), assert_false(validate_fill(q, 0, 0, sz),
...@@ -219,33 +173,6 @@ TEST_BEGIN(test_lg_align_and_zero) ...@@ -219,33 +173,6 @@ TEST_BEGIN(test_lg_align_and_zero)
} }
TEST_END TEST_END
TEST_BEGIN(test_overflow)
{
size_t hugemax;
void *p;
hugemax = get_huge_size(get_nhuge()-1);
p = mallocx(1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_ptr_null(rallocx(p, hugemax+1, 0),
"Expected OOM for rallocx(p, size=%#zx, 0)", hugemax+1);
assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0),
"Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
assert_ptr_null(rallocx(p, SIZE_T_MAX, 0),
"Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX);
assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
"Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))",
ZU(PTRDIFF_MAX)+1);
dallocx(p, 0);
}
TEST_END
int int
main(void) main(void)
{ {
...@@ -254,6 +181,5 @@ main(void) ...@@ -254,6 +181,5 @@ main(void)
test_grow_and_shrink, test_grow_and_shrink,
test_zero, test_zero,
test_align, test_align,
test_lg_align_and_zero, test_lg_align_and_zero));
test_overflow));
} }
#include "test/jemalloc_test.h" #include "test/jemalloc_test.h"
#define MAXALIGN (((size_t)1) << 22) #define MAXALIGN (((size_t)1) << 25)
#define NITER 3 #define NITER 4
TEST_BEGIN(test_basic) TEST_BEGIN(test_basic)
{ {
......
...@@ -16,8 +16,8 @@ thd_start(void *arg) ...@@ -16,8 +16,8 @@ thd_start(void *arg)
free(p); free(p);
size = sizeof(arena_ind); size = sizeof(arena_ind);
if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind,
(void *)&main_arena_ind, sizeof(main_arena_ind)))) { sizeof(main_arena_ind)))) {
char buf[BUFERROR_BUF]; char buf[BUFERROR_BUF];
buferror(err, buf, sizeof(buf)); buferror(err, buf, sizeof(buf));
...@@ -25,8 +25,7 @@ thd_start(void *arg) ...@@ -25,8 +25,7 @@ thd_start(void *arg)
} }
size = sizeof(arena_ind); size = sizeof(arena_ind);
if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL, if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
0))) {
char buf[BUFERROR_BUF]; char buf[BUFERROR_BUF];
buferror(err, buf, sizeof(buf)); buferror(err, buf, sizeof(buf));
...@@ -51,8 +50,7 @@ TEST_BEGIN(test_thread_arena) ...@@ -51,8 +50,7 @@ TEST_BEGIN(test_thread_arena)
assert_ptr_not_null(p, "Error in malloc()"); assert_ptr_not_null(p, "Error in malloc()");
size = sizeof(arena_ind); size = sizeof(arena_ind);
if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL, if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
0))) {
char buf[BUFERROR_BUF]; char buf[BUFERROR_BUF];
buferror(err, buf, sizeof(buf)); buferror(err, buf, sizeof(buf));
......
...@@ -16,8 +16,7 @@ thd_start(void *arg) ...@@ -16,8 +16,7 @@ thd_start(void *arg)
bool e0, e1; bool e0, e1;
sz = sizeof(bool); sz = sizeof(bool);
if ((err = mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL, if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) {
0))) {
if (err == ENOENT) { if (err == ENOENT) {
assert_false(config_tcache, assert_false(config_tcache,
"ENOENT should only be returned if tcache is " "ENOENT should only be returned if tcache is "
...@@ -28,53 +27,53 @@ thd_start(void *arg) ...@@ -28,53 +27,53 @@ thd_start(void *arg)
if (e0) { if (e0) {
e1 = false; e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz),
(void *)&e1, sz), 0, "Unexpected mallctl() error"); 0, "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled"); assert_true(e0, "tcache should be enabled");
} }
e1 = true; e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
(void *)&e1, sz), 0, "Unexpected mallctl() error"); "Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled"); assert_false(e0, "tcache should be disabled");
e1 = true; e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
(void *)&e1, sz), 0, "Unexpected mallctl() error"); "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled"); assert_true(e0, "tcache should be enabled");
e1 = false; e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
(void *)&e1, sz), 0, "Unexpected mallctl() error"); "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled"); assert_true(e0, "tcache should be enabled");
e1 = false; e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
(void *)&e1, sz), 0, "Unexpected mallctl() error"); "Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled"); assert_false(e0, "tcache should be disabled");
free(malloc(1)); free(malloc(1));
e1 = true; e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
(void *)&e1, sz), 0, "Unexpected mallctl() error"); "Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled"); assert_false(e0, "tcache should be disabled");
free(malloc(1)); free(malloc(1));
e1 = true; e1 = true;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
(void *)&e1, sz), 0, "Unexpected mallctl() error"); "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled"); assert_true(e0, "tcache should be enabled");
free(malloc(1)); free(malloc(1));
e1 = false; e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
(void *)&e1, sz), 0, "Unexpected mallctl() error"); "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled"); assert_true(e0, "tcache should be enabled");
free(malloc(1)); free(malloc(1));
e1 = false; e1 = false;
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
(void *)&e1, sz), 0, "Unexpected mallctl() error"); "Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled"); assert_false(e0, "tcache should be disabled");
free(malloc(1)); free(malloc(1));
......
#include "test/jemalloc_test.h" #include "test/jemalloc_test.h"
#ifdef JEMALLOC_FILL
const char *malloc_conf = "junk:false";
#endif
/*
* Use a separate arena for xallocx() extension/contraction tests so that
* internal allocation e.g. by heap profiling can't interpose allocations where
* xallocx() would ordinarily be able to extend.
*/
static unsigned
arena_ind(void)
{
static unsigned ind = 0;
if (ind == 0) {
size_t sz = sizeof(ind);
assert_d_eq(mallctl("arenas.extend", (void *)&ind, &sz, NULL,
0), 0, "Unexpected mallctl failure creating arena");
}
return (ind);
}
TEST_BEGIN(test_same_size) TEST_BEGIN(test_same_size)
{ {
void *p; void *p;
...@@ -78,7 +55,7 @@ get_nsizes_impl(const char *cmd) ...@@ -78,7 +55,7 @@ get_nsizes_impl(const char *cmd)
size_t z; size_t z;
z = sizeof(unsigned); z = sizeof(unsigned);
assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd); "Unexpected mallctl(\"%s\", ...) failure", cmd);
return (ret); return (ret);
...@@ -118,7 +95,7 @@ get_size_impl(const char *cmd, size_t ind) ...@@ -118,7 +95,7 @@ get_size_impl(const char *cmd, size_t ind)
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = ind; mib[2] = ind;
z = sizeof(size_t); z = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return (ret); return (ret);
...@@ -241,7 +218,6 @@ TEST_END ...@@ -241,7 +218,6 @@ TEST_END
TEST_BEGIN(test_extra_large) TEST_BEGIN(test_extra_large)
{ {
int flags = MALLOCX_ARENA(arena_ind());
size_t smallmax, large0, large1, large2, huge0, hugemax; size_t smallmax, large0, large1, large2, huge0, hugemax;
void *p; void *p;
...@@ -253,122 +229,121 @@ TEST_BEGIN(test_extra_large) ...@@ -253,122 +229,121 @@ TEST_BEGIN(test_extra_large)
huge0 = get_huge_size(0); huge0 = get_huge_size(0);
hugemax = get_huge_size(get_nhuge()-1); hugemax = get_huge_size(get_nhuge()-1);
p = mallocx(large2, flags); p = mallocx(large2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_ptr_not_null(p, "Unexpected mallocx() error");
assert_zu_eq(xallocx(p, large2, 0, flags), large2, assert_zu_eq(xallocx(p, large2, 0, 0), large2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
/* Test size decrease with zero extra. */ /* Test size decrease with zero extra. */
assert_zu_eq(xallocx(p, large0, 0, flags), large0, assert_zu_eq(xallocx(p, large0, 0, 0), large0,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, smallmax, 0, flags), large0, assert_zu_eq(xallocx(p, smallmax, 0, 0), large0,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, large2, 0, flags), large2, assert_zu_eq(xallocx(p, large2, 0, 0), large2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
/* Test size decrease with non-zero extra. */ /* Test size decrease with non-zero extra. */
assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2, assert_zu_eq(xallocx(p, large0, large2 - large0, 0), large2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, large1, large2 - large1, flags), large2, assert_zu_eq(xallocx(p, large1, large2 - large1, 0), large2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, large0, large1 - large0, flags), large1, assert_zu_eq(xallocx(p, large0, large1 - large0, 0), large1,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, flags), large0, assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, 0), large0,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, large0, 0, flags), large0, assert_zu_eq(xallocx(p, large0, 0, 0), large0,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
/* Test size increase with zero extra. */ /* Test size increase with zero extra. */
assert_zu_eq(xallocx(p, large2, 0, flags), large2, assert_zu_eq(xallocx(p, large2, 0, 0), large2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, huge0, 0, flags), large2, assert_zu_eq(xallocx(p, huge0, 0, 0), large2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, large0, 0, flags), large0, assert_zu_eq(xallocx(p, large0, 0, 0), large0,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
/* Test size increase with non-zero extra. */ /* Test size increase with non-zero extra. */
assert_zu_lt(xallocx(p, large0, huge0 - large0, flags), huge0, assert_zu_lt(xallocx(p, large0, huge0 - large0, 0), huge0,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, large0, 0, flags), large0, assert_zu_eq(xallocx(p, large0, 0, 0), large0,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
/* Test size increase with non-zero extra. */ /* Test size increase with non-zero extra. */
assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2, assert_zu_eq(xallocx(p, large0, large2 - large0, 0), large2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, large2, 0, flags), large2, assert_zu_eq(xallocx(p, large2, 0, 0), large2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
/* Test size+extra overflow. */ /* Test size+extra overflow. */
assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, flags), huge0, assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, 0), huge0,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
dallocx(p, flags); dallocx(p, 0);
} }
TEST_END TEST_END
TEST_BEGIN(test_extra_huge) TEST_BEGIN(test_extra_huge)
{ {
int flags = MALLOCX_ARENA(arena_ind()); size_t largemax, huge0, huge1, huge2, hugemax;
size_t largemax, huge1, huge2, huge3, hugemax;
void *p; void *p;
/* Get size classes. */ /* Get size classes. */
largemax = get_large_size(get_nlarge()-1); largemax = get_large_size(get_nlarge()-1);
huge0 = get_huge_size(0);
huge1 = get_huge_size(1); huge1 = get_huge_size(1);
huge2 = get_huge_size(2); huge2 = get_huge_size(2);
huge3 = get_huge_size(3);
hugemax = get_huge_size(get_nhuge()-1); hugemax = get_huge_size(get_nhuge()-1);
p = mallocx(huge3, flags); p = mallocx(huge2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_ptr_not_null(p, "Unexpected mallocx() error");
assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
/* Test size decrease with zero extra. */ /* Test size decrease with zero extra. */
assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_ge(xallocx(p, largemax, 0, flags), huge1, assert_zu_ge(xallocx(p, largemax, 0, 0), huge0,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
/* Test size decrease with non-zero extra. */ /* Test size decrease with non-zero extra. */
assert_zu_eq(xallocx(p, huge1, huge3 - huge1, flags), huge3, assert_zu_eq(xallocx(p, huge0, huge2 - huge0, 0), huge2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, huge2, huge3 - huge2, flags), huge3, assert_zu_eq(xallocx(p, huge1, huge2 - huge1, 0), huge2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, huge1, huge2 - huge1, flags), huge2, assert_zu_eq(xallocx(p, huge0, huge1 - huge0, 0), huge1,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_ge(xallocx(p, largemax, huge1 - largemax, flags), huge1, assert_zu_ge(xallocx(p, largemax, huge0 - largemax, 0), huge0,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
/* Test size increase with zero extra. */ /* Test size increase with zero extra. */
assert_zu_le(xallocx(p, huge3, 0, flags), huge3, assert_zu_le(xallocx(p, huge2, 0, 0), huge2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_le(xallocx(p, hugemax+1, 0, flags), huge3, assert_zu_le(xallocx(p, hugemax+1, 0, 0), huge2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
/* Test size increase with non-zero extra. */ /* Test size increase with non-zero extra. */
assert_zu_le(xallocx(p, huge1, SIZE_T_MAX - huge1, flags), hugemax, assert_zu_le(xallocx(p, huge0, SIZE_T_MAX - huge0, 0), hugemax,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
/* Test size increase with non-zero extra. */ /* Test size increase with non-zero extra. */
assert_zu_le(xallocx(p, huge1, huge3 - huge1, flags), huge3, assert_zu_le(xallocx(p, huge0, huge2 - huge0, 0), huge2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
/* Test size+extra overflow. */ /* Test size+extra overflow. */
assert_zu_le(xallocx(p, huge3, hugemax - huge3 + 1, flags), hugemax, assert_zu_le(xallocx(p, huge2, hugemax - huge2 + 1, 0), hugemax,
"Unexpected xallocx() behavior"); "Unexpected xallocx() behavior");
dallocx(p, flags); dallocx(p, 0);
} }
TEST_END TEST_END
...@@ -413,13 +388,12 @@ validate_fill(const void *p, uint8_t c, size_t offset, size_t len) ...@@ -413,13 +388,12 @@ validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
static void static void
test_zero(size_t szmin, size_t szmax) test_zero(size_t szmin, size_t szmax)
{ {
int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO;
size_t sz, nsz; size_t sz, nsz;
void *p; void *p;
#define FILL_BYTE 0x7aU #define FILL_BYTE 0x7aU
sz = szmax; sz = szmax;
p = mallocx(sz, flags); p = mallocx(sz, MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_ptr_not_null(p, "Unexpected mallocx() error");
assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu", assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu",
sz); sz);
...@@ -434,14 +408,14 @@ test_zero(size_t szmin, size_t szmax) ...@@ -434,14 +408,14 @@ test_zero(size_t szmin, size_t szmax)
/* Shrink in place so that we can expect growing in place to succeed. */ /* Shrink in place so that we can expect growing in place to succeed. */
sz = szmin; sz = szmin;
assert_zu_eq(xallocx(p, sz, 0, flags), sz, assert_zu_eq(xallocx(p, sz, 0, MALLOCX_ZERO), sz,
"Unexpected xallocx() error"); "Unexpected xallocx() error");
assert_false(validate_fill(p, FILL_BYTE, 0, sz), assert_false(validate_fill(p, FILL_BYTE, 0, sz),
"Memory not filled: sz=%zu", sz); "Memory not filled: sz=%zu", sz);
for (sz = szmin; sz < szmax; sz = nsz) { for (sz = szmin; sz < szmax; sz = nsz) {
nsz = nallocx(sz+1, flags); nsz = nallocx(sz+1, MALLOCX_ZERO);
assert_zu_eq(xallocx(p, sz+1, 0, flags), nsz, assert_zu_eq(xallocx(p, sz+1, 0, MALLOCX_ZERO), nsz,
"Unexpected xallocx() failure"); "Unexpected xallocx() failure");
assert_false(validate_fill(p, FILL_BYTE, 0, sz), assert_false(validate_fill(p, FILL_BYTE, 0, sz),
"Memory not filled: sz=%zu", sz); "Memory not filled: sz=%zu", sz);
...@@ -452,7 +426,7 @@ test_zero(size_t szmin, size_t szmax) ...@@ -452,7 +426,7 @@ test_zero(size_t szmin, size_t szmax)
"Memory not filled: nsz=%zu", nsz); "Memory not filled: nsz=%zu", nsz);
} }
dallocx(p, flags); dallocx(p, 0);
} }
TEST_BEGIN(test_zero_large) TEST_BEGIN(test_zero_large)
......
...@@ -11,8 +11,6 @@ mtx_init(mtx_t *mtx) ...@@ -11,8 +11,6 @@ mtx_init(mtx_t *mtx)
#ifdef _WIN32 #ifdef _WIN32
if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT)) if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT))
return (true); return (true);
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
mtx->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_OSSPIN)) #elif (defined(JEMALLOC_OSSPIN))
mtx->lock = 0; mtx->lock = 0;
#else #else
...@@ -35,7 +33,6 @@ mtx_fini(mtx_t *mtx) ...@@ -35,7 +33,6 @@ mtx_fini(mtx_t *mtx)
{ {
#ifdef _WIN32 #ifdef _WIN32
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
#elif (defined(JEMALLOC_OSSPIN)) #elif (defined(JEMALLOC_OSSPIN))
#else #else
pthread_mutex_destroy(&mtx->lock); pthread_mutex_destroy(&mtx->lock);
...@@ -48,8 +45,6 @@ mtx_lock(mtx_t *mtx) ...@@ -48,8 +45,6 @@ mtx_lock(mtx_t *mtx)
#ifdef _WIN32 #ifdef _WIN32
EnterCriticalSection(&mtx->lock); EnterCriticalSection(&mtx->lock);
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock_lock(&mtx->lock);
#elif (defined(JEMALLOC_OSSPIN)) #elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mtx->lock); OSSpinLockLock(&mtx->lock);
#else #else
...@@ -63,8 +58,6 @@ mtx_unlock(mtx_t *mtx) ...@@ -63,8 +58,6 @@ mtx_unlock(mtx_t *mtx)
#ifdef _WIN32 #ifdef _WIN32
LeaveCriticalSection(&mtx->lock); LeaveCriticalSection(&mtx->lock);
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
os_unfair_lock_unlock(&mtx->lock);
#elif (defined(JEMALLOC_OSSPIN)) #elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mtx->lock); OSSpinLockUnlock(&mtx->lock);
#else #else
......
...@@ -60,30 +60,32 @@ p_test_fini(void) ...@@ -60,30 +60,32 @@ p_test_fini(void)
malloc_printf("%s: %s\n", test_name, test_status_string(test_status)); malloc_printf("%s: %s\n", test_name, test_status_string(test_status));
} }
static test_status_t test_status_t
p_test_impl(bool do_malloc_init, test_t *t, va_list ap) p_test(test_t *t, ...)
{ {
test_status_t ret; test_status_t ret;
va_list ap;
if (do_malloc_init) { /*
/* * Make sure initialization occurs prior to running tests. Tests are
* Make sure initialization occurs prior to running tests. * special because they may use internal facilities prior to triggering
* Tests are special because they may use internal facilities * initialization as a side effect of calling into the public API. This
* prior to triggering initialization as a side effect of * is a final safety that works even if jemalloc_constructor() doesn't
* calling into the public API. * run, as for MSVC builds.
*/ */
if (nallocx(1, 0) == 0) { if (nallocx(1, 0) == 0) {
malloc_printf("Initialization error"); malloc_printf("Initialization error");
return (test_status_fail); return (test_status_fail);
}
} }
ret = test_status_pass; ret = test_status_pass;
va_start(ap, t);
for (; t != NULL; t = va_arg(ap, test_t *)) { for (; t != NULL; t = va_arg(ap, test_t *)) {
t(); t();
if (test_status > ret) if (test_status > ret)
ret = test_status; ret = test_status;
} }
va_end(ap);
malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n", malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
test_status_string(test_status_pass), test_status_string(test_status_pass),
...@@ -96,34 +98,6 @@ p_test_impl(bool do_malloc_init, test_t *t, va_list ap) ...@@ -96,34 +98,6 @@ p_test_impl(bool do_malloc_init, test_t *t, va_list ap)
return (ret); return (ret);
} }
test_status_t
p_test(test_t *t, ...)
{
test_status_t ret;
va_list ap;
ret = test_status_pass;
va_start(ap, t);
ret = p_test_impl(true, t, ap);
va_end(ap);
return (ret);
}
test_status_t
p_test_no_malloc_init(test_t *t, ...)
{
test_status_t ret;
va_list ap;
ret = test_status_pass;
va_start(ap, t);
ret = p_test_impl(false, t, ap);
va_end(ap);
return (ret);
}
void void
p_test_fail(const char *prefix, const char *message) p_test_fail(const char *prefix, const char *message)
{ {
......
@@ -4,26 +4,50 @@ void
 timer_start(timedelta_t *timer)
 {
-	nstime_init(&timer->t0, 0);
-	nstime_update(&timer->t0);
+#ifdef _WIN32
+	GetSystemTimeAsFileTime(&timer->ft0);
+#elif JEMALLOC_CLOCK_GETTIME
+	if (sysconf(_SC_MONOTONIC_CLOCK) <= 0)
+		timer->clock_id = CLOCK_REALTIME;
+	else
+		timer->clock_id = CLOCK_MONOTONIC;
+	clock_gettime(timer->clock_id, &timer->ts0);
+#else
+	gettimeofday(&timer->tv0, NULL);
+#endif
 }
 void
 timer_stop(timedelta_t *timer)
 {
-	nstime_copy(&timer->t1, &timer->t0);
-	nstime_update(&timer->t1);
+#ifdef _WIN32
+	GetSystemTimeAsFileTime(&timer->ft0);
+#elif JEMALLOC_CLOCK_GETTIME
+	clock_gettime(timer->clock_id, &timer->ts1);
+#else
+	gettimeofday(&timer->tv1, NULL);
+#endif
 }
 uint64_t
 timer_usec(const timedelta_t *timer)
 {
-	nstime_t delta;
-	nstime_copy(&delta, &timer->t1);
-	nstime_subtract(&delta, &timer->t0);
-	return (nstime_ns(&delta) / 1000);
+#ifdef _WIN32
+	uint64_t t0, t1;
+	t0 = (((uint64_t)timer->ft0.dwHighDateTime) << 32) |
+	    timer->ft0.dwLowDateTime;
+	t1 = (((uint64_t)timer->ft1.dwHighDateTime) << 32) |
+	    timer->ft1.dwLowDateTime;
+	return ((t1 - t0) / 10);
+#elif JEMALLOC_CLOCK_GETTIME
+	return (((timer->ts1.tv_sec - timer->ts0.tv_sec) * 1000000) +
+	    (timer->ts1.tv_nsec - timer->ts0.tv_nsec) / 1000);
+#else
+	return (((timer->tv1.tv_sec - timer->tv0.tv_sec) * 1000000) +
+	    timer->tv1.tv_usec - timer->tv0.tv_usec);
+#endif
 }
 void
@@ -32,8 +56,9 @@ timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen)
 	uint64_t t0 = timer_usec(a);
 	uint64_t t1 = timer_usec(b);
 	uint64_t mult;
-	size_t i = 0;
-	size_t j, n;
+	unsigned i = 0;
+	unsigned j;
+	int n;
 	/* Whole. */
 	n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1);
......
#include "test/jemalloc_test.h" #include "test/jemalloc_test.h"
JEMALLOC_INLINE_C void JEMALLOC_INLINE_C void
time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, void (*func)(void))
void (*func)(void))
{ {
uint64_t i; uint64_t i;
......
#include "test/jemalloc_test.h"
TEST_BEGIN(test_a0)
{
void *p;
p = a0malloc(1);
assert_ptr_not_null(p, "Unexpected a0malloc() error");
a0dalloc(p);
}
TEST_END
int
main(void)
{
return (test_no_malloc_init(
test_a0));
}
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_PROF
const char *malloc_conf = "prof:true,lg_prof_sample:0";
#endif
static unsigned
get_nsizes_impl(const char *cmd)
{
unsigned ret;
size_t z;
z = sizeof(unsigned);
assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return (ret);
}
static unsigned
get_nsmall(void)
{
return (get_nsizes_impl("arenas.nbins"));
}
static unsigned
get_nlarge(void)
{
return (get_nsizes_impl("arenas.nlruns"));
}
static unsigned
get_nhuge(void)
{
return (get_nsizes_impl("arenas.nhchunks"));
}
static size_t
get_size_impl(const char *cmd, size_t ind)
{
size_t ret;
size_t z;
size_t mib[4];
size_t miblen = 4;
z = sizeof(size_t);
assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = ind;
z = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return (ret);
}
static size_t
get_small_size(size_t ind)
{
return (get_size_impl("arenas.bin.0.size", ind));
}
static size_t
get_large_size(size_t ind)
{
return (get_size_impl("arenas.lrun.0.size", ind));
}
static size_t
get_huge_size(size_t ind)
{
return (get_size_impl("arenas.hchunk.0.size", ind));
}
TEST_BEGIN(test_arena_reset)
{
#define NHUGE 4
unsigned arena_ind, nsmall, nlarge, nhuge, nptrs, i;
size_t sz, miblen;
void **ptrs;
int flags;
size_t mib[3];
tsdn_t *tsdn;
test_skip_if((config_valgrind && unlikely(in_valgrind)) || (config_fill
&& unlikely(opt_quarantine)));
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
nsmall = get_nsmall();
nlarge = get_nlarge();
nhuge = get_nhuge() > NHUGE ? NHUGE : get_nhuge();
nptrs = nsmall + nlarge + nhuge;
ptrs = (void **)malloc(nptrs * sizeof(void *));
assert_ptr_not_null(ptrs, "Unexpected malloc() failure");
/* Allocate objects with a wide range of sizes. */
for (i = 0; i < nsmall; i++) {
sz = get_small_size(i);
ptrs[i] = mallocx(sz, flags);
assert_ptr_not_null(ptrs[i],
"Unexpected mallocx(%zu, %#x) failure", sz, flags);
}
for (i = 0; i < nlarge; i++) {
sz = get_large_size(i);
ptrs[nsmall + i] = mallocx(sz, flags);
assert_ptr_not_null(ptrs[i],
"Unexpected mallocx(%zu, %#x) failure", sz, flags);
}
for (i = 0; i < nhuge; i++) {
sz = get_huge_size(i);
ptrs[nsmall + nlarge + i] = mallocx(sz, flags);
assert_ptr_not_null(ptrs[i],
"Unexpected mallocx(%zu, %#x) failure", sz, flags);
}
tsdn = tsdn_fetch();
/* Verify allocations. */
for (i = 0; i < nptrs; i++) {
assert_zu_gt(ivsalloc(tsdn, ptrs[i], false), 0,
"Allocation should have queryable size");
}
/* Reset. */
miblen = sizeof(mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
/* Verify allocations no longer exist. */
for (i = 0; i < nptrs; i++) {
assert_zu_eq(ivsalloc(tsdn, ptrs[i], false), 0,
"Allocation should no longer exist");
}
free(ptrs);
}
TEST_END
int
main(void)
{
return (test(
test_arena_reset));
}
...@@ -6,11 +6,7 @@ TEST_BEGIN(test_bitmap_size) ...@@ -6,11 +6,7 @@ TEST_BEGIN(test_bitmap_size)
prev_size = 0; prev_size = 0;
for (i = 1; i <= BITMAP_MAXBITS; i++) { for (i = 1; i <= BITMAP_MAXBITS; i++) {
bitmap_info_t binfo; size_t size = bitmap_size(i);
size_t size;
bitmap_info_init(&binfo, i);
size = bitmap_size(&binfo);
assert_true(size >= prev_size, assert_true(size >= prev_size,
"Bitmap size is smaller than expected"); "Bitmap size is smaller than expected");
prev_size = size; prev_size = size;
...@@ -27,8 +23,8 @@ TEST_BEGIN(test_bitmap_init) ...@@ -27,8 +23,8 @@ TEST_BEGIN(test_bitmap_init)
bitmap_info_init(&binfo, i); bitmap_info_init(&binfo, i);
{ {
size_t j; size_t j;
bitmap_t *bitmap = (bitmap_t *)malloc( bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
bitmap_size(&binfo)); bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo); bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++) { for (j = 0; j < i; j++) {
...@@ -50,8 +46,8 @@ TEST_BEGIN(test_bitmap_set) ...@@ -50,8 +46,8 @@ TEST_BEGIN(test_bitmap_set)
bitmap_info_init(&binfo, i); bitmap_info_init(&binfo, i);
{ {
size_t j; size_t j;
bitmap_t *bitmap = (bitmap_t *)malloc( bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
bitmap_size(&binfo)); bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo); bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++) for (j = 0; j < i; j++)
...@@ -73,8 +69,8 @@ TEST_BEGIN(test_bitmap_unset) ...@@ -73,8 +69,8 @@ TEST_BEGIN(test_bitmap_unset)
bitmap_info_init(&binfo, i); bitmap_info_init(&binfo, i);
{ {
size_t j; size_t j;
bitmap_t *bitmap = (bitmap_t *)malloc( bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
bitmap_size(&binfo)); bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo); bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++) for (j = 0; j < i; j++)
...@@ -101,9 +97,9 @@ TEST_BEGIN(test_bitmap_sfu) ...@@ -101,9 +97,9 @@ TEST_BEGIN(test_bitmap_sfu)
bitmap_info_t binfo; bitmap_info_t binfo;
bitmap_info_init(&binfo, i); bitmap_info_init(&binfo, i);
{ {
size_t j; ssize_t j;
bitmap_t *bitmap = (bitmap_t *)malloc( bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
bitmap_size(&binfo)); bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo); bitmap_init(bitmap, &binfo);
/* Iteratively set bits starting at the beginning. */ /* Iteratively set bits starting at the beginning. */
...@@ -119,7 +115,7 @@ TEST_BEGIN(test_bitmap_sfu) ...@@ -119,7 +115,7 @@ TEST_BEGIN(test_bitmap_sfu)
* Iteratively unset bits starting at the end, and * Iteratively unset bits starting at the end, and
* verify that bitmap_sfu() reaches the unset bits. * verify that bitmap_sfu() reaches the unset bits.
*/ */
for (j = i - 1; j < i; j--) { /* (i..0] */ for (j = i - 1; j >= 0; j--) {
bitmap_unset(bitmap, &binfo, j); bitmap_unset(bitmap, &binfo, j);
assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
"First unset bit should the bit previously " "First unset bit should the bit previously "
......