Commit 2753acf1 authored by Salvatore Sanfilippo

Merge pull request #208 from jbergstroem/jemalloc-2.2.5

Update to jemalloc 2.2.5
parents 3e0a975e 1d03c1c9
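
Note: this merge brings the bundled jemalloc up to the upstream 2.2.5 release. Judging by the hunk contexts, the excerpts below touch the bootstrap path and the malloc/posix_memalign/calloc/realloc/allocm/rallocm entry points in jemalloc.c, the heap-profiling machinery in prof.c, stats_print() in stats.c, and page-size assumptions in one of the *allocm tests.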
@@ -84,6 +84,7 @@ static void malloc_conf_error(const char *msg, const char *k, size_t klen,
     const char *v, size_t vlen);
 static void malloc_conf_init(void);
 static bool malloc_init_hard(void);
+static int  imemalign(void **memptr, size_t alignment, size_t size);
 
 /******************************************************************************/
 /* malloc_message() setup. */
@@ -688,7 +689,7 @@ malloc_init_hard(void)
     result = sysconf(_SC_PAGESIZE);
     assert(result != -1);
-    pagesize = (unsigned)result;
+    pagesize = (size_t)result;
 
     /*
      * We assume that pagesize is a power of 2 when calculating
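
Note: sysconf() returns a long, and the patch makes the cast match the size_t type of pagesize rather than narrowing through unsigned first. The truncated comment refers to the power-of-two assumption that the pagesize-derived masks rely on; a standalone sketch of that assumption (illustrative only, not part of the patch):

/* Check the power-of-two page size assumption; illustrative only. */
#include <assert.h>
#include <stddef.h>
#include <unistd.h>

int
main(void)
{
    long result = sysconf(_SC_PAGESIZE);
    assert(result != -1);

    /* size_t keeps the full value on every platform; unsigned may be
     * narrower than size_t on LP64 systems. */
    size_t pagesize = (size_t)result;

    /* A power of two has exactly one bit set, so masking with
     * (pagesize - 1) extracts the offset within a page. */
    assert((pagesize & (pagesize - 1)) == 0);
    return 0;
}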
@@ -768,6 +769,14 @@ malloc_init_hard(void)
     }
 #endif
 
+    if (malloc_mutex_init(&arenas_lock))
+        return (true);
+
+    if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
+        malloc_mutex_unlock(&init_lock);
+        return (true);
+    }
+
     /*
      * Create enough scaffolding to allow recursive allocation in
      * malloc_ncpus().
@@ -794,14 +803,6 @@ malloc_init_hard(void)
     ARENA_SET(arenas[0]);
     arenas[0]->nthreads++;
 
-    if (malloc_mutex_init(&arenas_lock))
-        return (true);
-
-    if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
-        malloc_mutex_unlock(&init_lock);
-        return (true);
-    }
-
 #ifdef JEMALLOC_PROF
     if (prof_boot2()) {
         malloc_mutex_unlock(&init_lock);
@@ -939,7 +940,8 @@ JEMALLOC_P(malloc)(size_t size)
 #ifdef JEMALLOC_PROF
     if (opt_prof) {
         usize = s2u(size);
-        if ((cnt = prof_alloc_prep(usize)) == NULL) {
+        PROF_ALLOC_PREP(1, usize, cnt);
+        if (cnt == NULL) {
             ret = NULL;
             goto OOM;
         }
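
Note: prof_alloc_prep() has become PROF_ALLOC_PREP(nignore, usize, cnt), whose first argument is the number of stack frames to ignore when capturing the allocation backtrace; a plain function call could sit at a varying depth depending on inlining. A standalone illustration of the frame-counting problem, using glibc's backtrace(3) (not jemalloc code; compile with -O0, since inlining is exactly what changes the depth):

/* Why the number of frames to skip must match the call depth exactly. */
#include <execinfo.h>
#include <stdio.h>

static void
capture(int nignore)
{
    void *frames[8];
    int n = backtrace(frames, 8);

    /* Frames 0..nignore-1 are machinery; the interesting caller sits at
     * index nignore, but only if the depth is what we assumed. */
    if (n > nignore)
        printf("caller frame: %p\n", frames[nignore]);
}

/* One extra frame on the stack: callers through here must skip 2. */
static void
wrapper(void)
{
    capture(2);
}

int
main(void)
{
    capture(1);  /* direct call: skip capture() itself */
    wrapper();   /* indirect call: skip capture() and wrapper() */
    return 0;
}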
@@ -988,9 +990,15 @@ RETURN:
 }
 
 JEMALLOC_ATTR(nonnull(1))
-JEMALLOC_ATTR(visibility("default"))
-int
-JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
+#ifdef JEMALLOC_PROF
+/*
+ * Avoid any uncertainty as to how many backtrace frames to ignore in
+ * PROF_ALLOC_PREP().
+ */
+JEMALLOC_ATTR(noinline)
+#endif
+static int
+imemalign(void **memptr, size_t alignment, size_t size)
 {
     int ret;
     size_t usize;
@@ -1057,7 +1065,8 @@ JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
 #ifdef JEMALLOC_PROF
     if (opt_prof) {
-        if ((cnt = prof_alloc_prep(usize)) == NULL) {
+        PROF_ALLOC_PREP(2, usize, cnt);
+        if (cnt == NULL) {
             result = NULL;
             ret = EINVAL;
         } else {
@@ -1110,6 +1119,15 @@ RETURN:
     return (ret);
 }
 
+JEMALLOC_ATTR(nonnull(1))
+JEMALLOC_ATTR(visibility("default"))
+int
+JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
+{
+
+    return imemalign(memptr, alignment, size);
+}
+
 JEMALLOC_ATTR(malloc)
 JEMALLOC_ATTR(visibility("default"))
 void *
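
Note: the exported posix_memalign() is now a thin wrapper over the static imemalign(), so internal callers such as memalign() and valloc() (further down) reach the implementation without an extra, interposable public-symbol hop, and the profiling frame count stays fixed. For reference, a minimal standalone use of the POSIX interface the wrapper exports (illustrative, independent of jemalloc):

/* Standalone posix_memalign() usage example; illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
    void *p = NULL;
    size_t pagesize = (size_t)sysconf(_SC_PAGESIZE);

    /* Alignment must be a power of two and a multiple of sizeof(void *);
     * on failure the return value is an errno value, not -1 with errno. */
    int err = posix_memalign(&p, pagesize, 1024);
    if (err != 0) {
        fprintf(stderr, "posix_memalign: error %d\n", err);
        return 1;
    }
    printf("%p is %zu-byte aligned\n", p, pagesize);
    free(p);
    return 0;
}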
@@ -1165,7 +1183,8 @@ JEMALLOC_P(calloc)(size_t num, size_t size)
 #ifdef JEMALLOC_PROF
     if (opt_prof) {
         usize = s2u(num_size);
-        if ((cnt = prof_alloc_prep(usize)) == NULL) {
+        PROF_ALLOC_PREP(1, usize, cnt);
+        if (cnt == NULL) {
             ret = NULL;
             goto RETURN;
         }
@@ -1278,7 +1297,9 @@ JEMALLOC_P(realloc)(void *ptr, size_t size)
         if (opt_prof) {
             usize = s2u(size);
             old_ctx = prof_ctx_get(ptr);
-            if ((cnt = prof_alloc_prep(usize)) == NULL) {
+            PROF_ALLOC_PREP(1, usize, cnt);
+            if (cnt == NULL) {
+                old_ctx = NULL;
                 ret = NULL;
                 goto OOM;
             }
@@ -1288,8 +1309,13 @@ JEMALLOC_P(realloc)(void *ptr, size_t size)
                     false, false);
                 if (ret != NULL)
                     arena_prof_promoted(ret, usize);
-            } else
+                else
+                    old_ctx = NULL;
+            } else {
                 ret = iralloc(ptr, size, 0, 0, false, false);
+                if (ret == NULL)
+                    old_ctx = NULL;
+            }
         } else
 #endif
         {
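
Note: the added old_ctx = NULL assignments cover the failure paths. If PROF_ALLOC_PREP() declines the allocation or iralloc() fails, the original object remains live, and forwarding its profiling context to prof_realloc() at the end of the function would account a free that never happened.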
@@ -1327,7 +1353,8 @@ OOM:
 #ifdef JEMALLOC_PROF
         if (opt_prof) {
             usize = s2u(size);
-            if ((cnt = prof_alloc_prep(usize)) == NULL)
+            PROF_ALLOC_PREP(1, usize, cnt);
+            if (cnt == NULL)
                 ret = NULL;
             else {
                 if (prof_promote && (uintptr_t)cnt !=
@@ -1432,7 +1459,7 @@ JEMALLOC_P(memalign)(size_t alignment, size_t size)
 #ifdef JEMALLOC_CC_SILENCE
     int result =
 #endif
-        JEMALLOC_P(posix_memalign)(&ret, alignment, size);
+        imemalign(&ret, alignment, size);
 #ifdef JEMALLOC_CC_SILENCE
     if (result != 0)
         return (NULL);
@@ -1451,7 +1478,7 @@ JEMALLOC_P(valloc)(size_t size)
 #ifdef JEMALLOC_CC_SILENCE
     int result =
 #endif
-        JEMALLOC_P(posix_memalign)(&ret, PAGE_SIZE, size);
+        imemalign(&ret, PAGE_SIZE, size);
 #ifdef JEMALLOC_CC_SILENCE
     if (result != 0)
         return (NULL);
@@ -1566,14 +1593,14 @@ JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
     if (malloc_init())
         goto OOM;
 
-    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment,
-        NULL);
+    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
     if (usize == 0)
         goto OOM;
 
 #ifdef JEMALLOC_PROF
     if (opt_prof) {
-        if ((cnt = prof_alloc_prep(usize)) == NULL)
+        PROF_ALLOC_PREP(1, usize, cnt);
+        if (cnt == NULL)
             goto OOM;
         if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
             small_maxclass) {
@@ -1590,7 +1617,7 @@ JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
             if (p == NULL)
                 goto OOM;
         }
-
+        prof_malloc(p, usize, cnt);
         if (rsize != NULL)
             *rsize = usize;
     } else
@@ -1645,7 +1672,6 @@ JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
     bool no_move = flags & ALLOCM_NO_MOVE;
 #ifdef JEMALLOC_PROF
     prof_thr_cnt_t *cnt;
-    prof_ctx_t *old_ctx;
 #endif
 
     assert(ptr != NULL);
@@ -1660,25 +1686,33 @@ JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
         /*
          * usize isn't knowable before iralloc() returns when extra is
         * non-zero.  Therefore, compute its maximum possible value and
-         * use that in prof_alloc_prep() to decide whether to capture a
+         * use that in PROF_ALLOC_PREP() to decide whether to capture a
          * backtrace.  prof_realloc() will use the actual usize to
          * decide whether to sample.
          */
         size_t max_usize = (alignment == 0) ? s2u(size+extra) :
             sa2u(size+extra, alignment, NULL);
+        prof_ctx_t *old_ctx = prof_ctx_get(p);
         old_size = isalloc(p);
-        old_ctx = prof_ctx_get(p);
-        if ((cnt = prof_alloc_prep(max_usize)) == NULL)
+        PROF_ALLOC_PREP(1, max_usize, cnt);
+        if (cnt == NULL)
             goto OOM;
-        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && max_usize
-            <= small_maxclass) {
+        /*
+         * Use minimum usize to determine whether promotion may happen.
+         */
+        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
+            && ((alignment == 0) ? s2u(size) : sa2u(size,
+            alignment, NULL)) <= small_maxclass) {
             q = iralloc(p, small_maxclass+1, (small_maxclass+1 >=
                 size+extra) ? 0 : size+extra - (small_maxclass+1),
                 alignment, zero, no_move);
             if (q == NULL)
                 goto ERR;
-            usize = isalloc(q);
-            arena_prof_promoted(q, usize);
+            if (max_usize < PAGE_SIZE) {
+                usize = max_usize;
+                arena_prof_promoted(q, usize);
+            } else
+                usize = isalloc(q);
         } else {
             q = iralloc(p, size, extra, alignment, zero, no_move);
             if (q == NULL)
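
Note: two different sizes now drive this block. Whether promotion may happen is decided from the minimum possible usize, recomputed from size alone, because with nonzero extra the allocator may legitimately return anything between size and size+extra. After a promoted reallocation, usize is taken from max_usize only when max_usize < PAGE_SIZE, where the resulting small size class is unambiguous; otherwise it is measured with isalloc(q).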
......
@@ -474,11 +474,23 @@ prof_lookup(prof_bt_t *bt)
             /*
              * Artificially raise curobjs, in order to avoid a race
              * condition with prof_ctx_merge()/prof_ctx_destroy().
+             *
+             * No locking is necessary for ctx here because no other
+             * threads have had the opportunity to fetch it from
+             * bt2ctx yet.
              */
             ctx.p->cnt_merged.curobjs++;
             new_ctx = true;
-        } else
+        } else {
+            /*
+             * Artificially raise curobjs, in order to avoid a race
+             * condition with prof_ctx_merge()/prof_ctx_destroy().
+             */
+            malloc_mutex_lock(&ctx.p->lock);
+            ctx.p->cnt_merged.curobjs++;
+            malloc_mutex_unlock(&ctx.p->lock);
             new_ctx = false;
+        }
         prof_leave();
 
         /* Link a prof_thd_cnt_t into ctx for this thread. */
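
Note: both branches now pin ctx by artificially raising cnt_merged.curobjs before prof_leave() releases the global lock; prof_ctx_destroy() (later in this file) treats a nonzero count as still referenced. The same pattern, reduced to a standalone pthreads sketch with hypothetical names (ctx_t and ctx_release are not jemalloc's API):

/* Pin-before-unlock: keep an object alive across a lock gap. */
#include <pthread.h>
#include <stdlib.h>

typedef struct {
    pthread_mutex_t lock;
    long curobjs;               /* nonzero: object must stay alive */
} ctx_t;

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void
ctx_release(ctx_t *ctx)
{
    pthread_mutex_lock(&ctx->lock);
    if (--ctx->curobjs == 0) {
        /* Last reference: safe to tear down. */
        pthread_mutex_unlock(&ctx->lock);
        pthread_mutex_destroy(&ctx->lock);
        free(ctx);
        return;
    }
    pthread_mutex_unlock(&ctx->lock);
}

int
main(void)
{
    ctx_t *ctx = malloc(sizeof(*ctx));

    pthread_mutex_init(&ctx->lock, NULL);
    ctx->curobjs = 0;

    /* While the table lock is held no destroyer can run, so raising the
     * count here pins ctx across the unlock below. */
    pthread_mutex_lock(&table_lock);
    pthread_mutex_lock(&ctx->lock);
    ctx->curobjs++;             /* artificial reference */
    pthread_mutex_unlock(&ctx->lock);
    pthread_mutex_unlock(&table_lock);

    /* ... operate on ctx without the table lock ... */

    ctx_release(ctx);           /* drop the artificial reference */
    return 0;
}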
@@ -491,8 +503,9 @@ prof_lookup(prof_bt_t *bt)
              */
             ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
             assert(ret.v != NULL);
-            ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt, NULL,
-                NULL);
+            if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
+                NULL, NULL))
+                assert(false);
             ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
             prof_ctx_merge(ret.p->ctx, ret.p);
             /* ret can now be re-used. */
@@ -503,11 +516,8 @@ prof_lookup(prof_bt_t *bt)
         /* Allocate and partially initialize a new cnt. */
         ret.v = imalloc(sizeof(prof_thr_cnt_t));
         if (ret.p == NULL) {
-            if (new_ctx) {
-                malloc_mutex_lock(&ctx.p->lock);
-                ctx.p->cnt_merged.curobjs--;
-                malloc_mutex_unlock(&ctx.p->lock);
-            }
+            if (new_ctx)
+                prof_ctx_destroy(ctx.p);
             return (NULL);
         }
         ql_elm_new(ret.p, cnts_link);
@@ -518,18 +528,14 @@ prof_lookup(prof_bt_t *bt)
         ret.p->epoch = 0;
         memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
         if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
-            if (new_ctx) {
-                malloc_mutex_lock(&ctx.p->lock);
-                ctx.p->cnt_merged.curobjs--;
-                malloc_mutex_unlock(&ctx.p->lock);
-            }
+            if (new_ctx)
+                prof_ctx_destroy(ctx.p);
             idalloc(ret.v);
             return (NULL);
         }
         ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
         malloc_mutex_lock(&ctx.p->lock);
         ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
-        if (new_ctx)
-            ctx.p->cnt_merged.curobjs--;
+        ctx.p->cnt_merged.curobjs--;
         malloc_mutex_unlock(&ctx.p->lock);
     } else {
@@ -644,11 +650,10 @@ prof_ctx_destroy(prof_ctx_t *ctx)
     /*
      * Check that ctx is still unused by any thread cache before destroying
-     * it.  prof_lookup() interlocks bt2ctx_mtx and ctx->lock in order to
-     * avoid a race condition with this function, and prof_ctx_merge()
-     * artificially raises ctx->cnt_merged.curobjs in order to avoid a race
-     * between the main body of prof_ctx_merge() and entry into this
-     * function.
+     * it.  prof_lookup() artificially raises ctx->cnt_merge.curobjs in
+     * order to avoid a race condition with this function, as does
+     * prof_ctx_merge() in order to avoid a race between the main body of
+     * prof_ctx_merge() and entry into this function.
      */
     prof_enter();
     malloc_mutex_lock(&ctx->lock);
@@ -657,7 +662,8 @@ prof_ctx_destroy(prof_ctx_t *ctx)
         assert(ctx->cnt_merged.accumobjs == 0);
         assert(ctx->cnt_merged.accumbytes == 0);
         /* Remove ctx from bt2ctx. */
-        ckh_remove(&bt2ctx, ctx->bt, NULL, NULL);
+        if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
+            assert(false);
         prof_leave();
         /* Destroy ctx. */
         malloc_mutex_unlock(&ctx->lock);
@@ -665,7 +671,10 @@ prof_ctx_destroy(prof_ctx_t *ctx)
     malloc_mutex_destroy(&ctx->lock);
     idalloc(ctx);
 } else {
-        /* Compensate for increment in prof_ctx_merge(). */
+        /*
+         * Compensate for increment in prof_ctx_merge() or
+         * prof_lookup().
+         */
         ctx->cnt_merged.curobjs--;
         malloc_mutex_unlock(&ctx->lock);
         prof_leave();
@@ -1072,7 +1081,7 @@ prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
     } else {
         ret1 = h;
         ret2 = hash(bt->vec, bt->len * sizeof(void *),
-            0x8432a476666bbc13U);
+            0x8432a476666bbc13LLU);
     }
 
     *hash1 = ret1;
@@ -1109,7 +1118,6 @@ prof_tdata_init(void)
     prof_tdata->vec = imalloc(sizeof(void *) * prof_bt_max);
     if (prof_tdata->vec == NULL) {
-
         ckh_delete(&prof_tdata->bt2cnt);
         idalloc(prof_tdata);
         return (NULL);
@@ -1127,25 +1135,19 @@
 static void
 prof_tdata_cleanup(void *arg)
 {
-    prof_tdata_t *prof_tdata;
+    prof_thr_cnt_t *cnt;
+    prof_tdata_t *prof_tdata = (prof_tdata_t *)arg;
 
-    prof_tdata = PROF_TCACHE_GET();
-    if (prof_tdata != NULL) {
-        prof_thr_cnt_t *cnt;
-
-        /*
-         * Delete the hash table.  All of its contents can still be
-         * iterated over via the LRU.
-         */
-        ckh_delete(&prof_tdata->bt2cnt);
-
-        /*
-         * Iteratively merge cnt's into the global stats and delete
-         * them.
-         */
-        while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
-            prof_ctx_merge(cnt->ctx, cnt);
-            ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
-            idalloc(cnt);
-        }
+    /*
+     * Delete the hash table.  All of its contents can still be iterated
+     * over via the LRU.
+     */
+    ckh_delete(&prof_tdata->bt2cnt);
+
+    /* Iteratively merge cnt's into the global stats and delete them. */
+    while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
+        ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
+        prof_ctx_merge(cnt->ctx, cnt);
+        idalloc(cnt);
+    }
@@ -1153,7 +1155,6 @@ prof_tdata_cleanup(void *arg)
 
     idalloc(prof_tdata);
     PROF_TCACHE_SET(NULL);
-    }
 }
 
 void
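
Note: prof_tdata_cleanup() is registered with pthread_key_create() (see the malloc_init_hard() hunk above for the arenas equivalent), and POSIX hands the dying thread's value for the key to the destructor, so the function can cast its argument instead of re-reading PROF_TCACHE_GET(). A standalone sketch of that contract (illustrative, not jemalloc code):

/* A TSD destructor receives the stored value as its argument. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_key_t key;

static void
cleanup(void *arg)
{
    /* arg is exactly what this thread stored via pthread_setspecific(),
     * so there is no need to re-read the key here. */
    printf("destructor received: %s\n", (char *)arg);
    free(arg);
}

static void *
thread_main(void *unused)
{
    (void)unused;
    pthread_setspecific(key, strdup("thread-local value"));
    return NULL;    /* cleanup() runs with the stored pointer */
}

int
main(void)
{
    pthread_t thd;

    pthread_key_create(&key, cleanup);
    pthread_create(&thd, NULL, thread_main, NULL);
    pthread_join(thd, NULL);
    return 0;
}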
......
@@ -748,7 +748,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
                 ninitialized++;
     }
 
-    if (ninitialized > 1) {
+    if (ninitialized > 1 || unmerged == false) {
         /* Print merged arena stats. */
         malloc_cprintf(write_cb, cbopaque,
             "\nMerged arenas stats:\n");
......
 #include <stdio.h>
 #include <stdlib.h>
+#include <unistd.h>
 #include <string.h>
+#include <assert.h>
 
 #define JEMALLOC_MANGLE
 #include "jemalloc_test.h"
@@ -8,12 +10,20 @@
 int
 main(void)
 {
+    size_t pagesize;
     void *p, *q;
     size_t sz, tsz;
     int r;
 
     fprintf(stderr, "Test begin\n");
 
+    /* Get page size. */
+    {
+        long result = sysconf(_SC_PAGESIZE);
+
+        assert(result != -1);
+        pagesize = (size_t)result;
+    }
+
     r = JEMALLOC_P(allocm)(&p, &sz, 42, 0);
     if (r != ALLOCM_SUCCESS) {
         fprintf(stderr, "Unexpected allocm() error\n");
@@ -66,7 +76,7 @@ main(void)
     p = q;
     sz = tsz;
 
-    r = JEMALLOC_P(rallocm)(&q, &tsz, 8192, 0, 0);
+    r = JEMALLOC_P(rallocm)(&q, &tsz, pagesize*2, 0, 0);
     if (r != ALLOCM_SUCCESS)
         fprintf(stderr, "Unexpected rallocm() error\n");
     if (q == p)
@@ -78,7 +88,7 @@ main(void)
     p = q;
     sz = tsz;
 
-    r = JEMALLOC_P(rallocm)(&q, &tsz, 16384, 0, 0);
+    r = JEMALLOC_P(rallocm)(&q, &tsz, pagesize*4, 0, 0);
     if (r != ALLOCM_SUCCESS)
         fprintf(stderr, "Unexpected rallocm() error\n");
     if (tsz == sz) {
@@ -88,7 +98,7 @@ main(void)
     p = q;
     sz = tsz;
 
-    r = JEMALLOC_P(rallocm)(&q, &tsz, 8192, 0, ALLOCM_NO_MOVE);
+    r = JEMALLOC_P(rallocm)(&q, &tsz, pagesize*2, 0, ALLOCM_NO_MOVE);
     if (r != ALLOCM_SUCCESS)
         fprintf(stderr, "Unexpected rallocm() error\n");
     if (q != p)
@@ -99,7 +109,7 @@ main(void)
     }
     sz = tsz;
 
-    r = JEMALLOC_P(rallocm)(&q, &tsz, 16384, 0, ALLOCM_NO_MOVE);
+    r = JEMALLOC_P(rallocm)(&q, &tsz, pagesize*4, 0, ALLOCM_NO_MOVE);
     if (r != ALLOCM_SUCCESS)
         fprintf(stderr, "Unexpected rallocm() error\n");
     if (q != p)
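
Note: the test formerly hard-coded 8192 and 16384, which land in the intended size classes only on systems with 4 KiB pages; deriving the request sizes from sysconf(_SC_PAGESIZE) keeps the size-class boundaries the test probes meaningful on platforms with larger pages.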
......