Unverified commit c4b4b6c0, authored by Oran Agra, committed by GitHub

Merge pull request #9623 from yoav-steinberg/upgrade_jemalloc_5.2.1

Upgraded to jemalloc 5.2.1 from 5.1.0.
Cherry-picked all relevant fixes (found by diffing our 5.1.0 against upstream 5.1.0 and identifying the relevant commits).
Details of what was done:

[cherry-picked] fd7d51c3 2021-05-03 Resolve nonsense static analysis warnings (Oran Agra)
[cherry-picked] 448c435b 2020-09-29 Fix compilation warnings in Lua and jemalloc dependencies (#7785) (YoongHM)
[skipped - already in upstream] 9216b96b 2020-09-21 Fix compilation warning in jemalloc's malloc_vsnprintf (#7789) (YoongHM)
[cherry-picked] 88d71f47 2020-05-20 fix a rare active defrag edge case bug leading to stagnation (Oran Agra)
[skipped - already in upstream] 2fec7d9c 2019-05-30 Jemalloc: Avoid blocking on background thread lock for stats.
[cherry-picked] 920158ec 2018-07-11 Active defrag fixes for 32bit builds (again) (Oran Agra)
[cherry-picked] e8099cab 2018-06-26 add defrag hint support into jemalloc 5 (Oran Agra)
[re-done] 4e729fcd 2018-05-24 Generate configure for Jemalloc. (antirez)

Additionally had to do this:
7727cc2 2021-10-10 Fix defrag to support sharded bins in arena (added in v5.2.1) (Yoav Steinberg)

When reviewing, please look at everything except the first commit, which just replaces the 5.1.0 sources with 5.2.1.
Also, I think we should merge this without squashing, to preserve the changes we made to jemalloc.
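For reviewers, a quick runtime sanity check can confirm which jemalloc actually got linked. This is only an illustrative sketch: it assumes the je_ symbol prefix Redis normally builds jemalloc with, and uses jemalloc's standard "version" mallctl.

#include <stdio.h>
#include <stddef.h>

/* Declared by jemalloc.h in a real build; repeated here so the sketch is self-contained. */
int je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen);

int main(void) {
	const char *v;
	size_t sz = sizeof(v);
	if (je_mallctl("version", (void *)&v, &sz, NULL, 0) == 0) {
		printf("linked jemalloc: %s\n", v);	/* expected to start with "5.2.1" */
	}
	return 0;
}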
parents 276b460e 85737e67
#include "test/jemalloc_test.h"
#define N_PARAM 100
#define N_THREADS 10
static void assert_rep() {
assert_b_eq(prof_log_rep_check(), false, "Rep check failed");
}
static void assert_log_empty() {
assert_zu_eq(prof_log_bt_count(), 0,
"The log has backtraces; it isn't empty");
assert_zu_eq(prof_log_thr_count(), 0,
"The log has threads; it isn't empty");
assert_zu_eq(prof_log_alloc_count(), 0,
"The log has allocations; it isn't empty");
}
void *buf[N_PARAM];
static void f() {
int i;
for (i = 0; i < N_PARAM; i++) {
buf[i] = malloc(100);
}
for (i = 0; i < N_PARAM; i++) {
free(buf[i]);
}
}
TEST_BEGIN(test_prof_log_many_logs) {
int i;
test_skip_if(!config_prof);
for (i = 0; i < N_PARAM; i++) {
assert_b_eq(prof_log_is_logging(), false,
"Logging shouldn't have started yet");
assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when starting logging");
assert_b_eq(prof_log_is_logging(), true,
"Logging should be started by now");
assert_log_empty();
assert_rep();
f();
assert_zu_eq(prof_log_thr_count(), 1, "Wrong thread count");
assert_rep();
assert_b_eq(prof_log_is_logging(), true,
"Logging should still be on");
assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when stopping logging");
assert_b_eq(prof_log_is_logging(), false,
"Logging should have turned off");
}
}
TEST_END
thd_t thr_buf[N_THREADS];
static void *f_thread(void *unused) {
int i;
for (i = 0; i < N_PARAM; i++) {
void *p = malloc(100);
memset(p, 100, sizeof(char));
free(p);
}
return NULL;
}
TEST_BEGIN(test_prof_log_many_threads) {
test_skip_if(!config_prof);
int i;
assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when starting logging");
for (i = 0; i < N_THREADS; i++) {
thd_create(&thr_buf[i], &f_thread, NULL);
}
for (i = 0; i < N_THREADS; i++) {
thd_join(thr_buf[i], NULL);
}
assert_zu_eq(prof_log_thr_count(), N_THREADS,
"Wrong number of thread entries");
assert_rep();
assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when stopping logging");
}
TEST_END
static void f3() {
void *p = malloc(100);
free(p);
}
static void f1() {
void *p = malloc(100);
f3();
free(p);
}
static void f2() {
void *p = malloc(100);
free(p);
}
TEST_BEGIN(test_prof_log_many_traces) {
test_skip_if(!config_prof);
assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when starting logging");
int i;
assert_rep();
assert_log_empty();
for (i = 0; i < N_PARAM; i++) {
assert_rep();
f1();
assert_rep();
f2();
assert_rep();
f3();
assert_rep();
}
/*
* There should be 8 total backtraces: two for malloc/free in f1(), two
* for malloc/free in f2(), two for malloc/free in f3(), and then two
* for malloc/free in f1()'s call to f3(). However compiler
* optimizations such as loop unrolling might generate more call sites.
* So >= 8 traces are expected.
*/
assert_zu_ge(prof_log_bt_count(), 8,
"Expect at least 8 backtraces given sample workload");
assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when stopping logging");
}
TEST_END
int
main(void) {
prof_log_dummy_set(true);
return test_no_reentrancy(
test_prof_log_many_logs,
test_prof_log_many_traces,
test_prof_log_many_threads);
}
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
export MALLOC_CONF="prof:true,lg_prof_sample:0"
fi
@@ -107,6 +107,9 @@ TEST_BEGIN(test_retained) {
	atomic_store_u(&epoch, 0, ATOMIC_RELAXED);
	unsigned nthreads = ncpus * 2;
+	if (LG_SIZEOF_PTR < 3 && nthreads > 16) {
+		nthreads = 16; /* 32-bit platform could run out of vaddr. */
+	}
	VARIABLE_ARRAY(thd_t, threads, nthreads);
	for (unsigned i = 0; i < nthreads; i++) {
		thd_create(&threads[i], thd_start, NULL);
@@ -85,11 +85,11 @@ TEST_END
TEST_BEGIN(test_rtree_extrema) {
	extent_t extent_a, extent_b;
-	extent_init(&extent_a, NULL, NULL, LARGE_MINCLASS, false,
-	    sz_size2index(LARGE_MINCLASS), 0, extent_state_active, false,
-	    false, true);
-	extent_init(&extent_b, NULL, NULL, 0, false, NSIZES, 0,
-	    extent_state_active, false, false, true);
+	extent_init(&extent_a, NULL, NULL, SC_LARGE_MINCLASS, false,
+	    sz_size2index(SC_LARGE_MINCLASS), 0,
+	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
+	extent_init(&extent_b, NULL, NULL, 0, false, SC_NSIZES, 0,
+	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
	tsdn_t *tsdn = tsdn_fetch();
@@ -125,8 +125,8 @@ TEST_BEGIN(test_rtree_bits) {
	    PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
	extent_t extent;
-	extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0,
-	    extent_state_active, false, false, true);
+	extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0,
+	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
	rtree_t *rtree = &test_rtree;
	rtree_ctx_t rtree_ctx;
@@ -135,7 +135,7 @@ TEST_BEGIN(test_rtree_bits) {
	for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
		assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
-		    &extent, NSIZES, false),
+		    &extent, SC_NSIZES, false),
		    "Unexpected rtree_write() failure");
		for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
			assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
@@ -166,8 +166,8 @@ TEST_BEGIN(test_rtree_random) {
	rtree_ctx_data_init(&rtree_ctx);
	extent_t extent;
-	extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0,
-	    extent_state_active, false, false, true);
+	extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0,
+	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
	assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
@@ -177,7 +177,8 @@ TEST_BEGIN(test_rtree_random) {
		    &rtree_ctx, keys[i], false, true);
		assert_ptr_not_null(elm,
		    "Unexpected rtree_leaf_elm_lookup() failure");
-		rtree_leaf_elm_write(tsdn, rtree, elm, &extent, NSIZES, false);
+		rtree_leaf_elm_write(tsdn, rtree, elm, &extent, SC_NSIZES,
+		    false);
		assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
		    keys[i], true), &extent,
		    "rtree_extent_read() should return previously set value");
#include "test/jemalloc_test.h"
#include "jemalloc/internal/safety_check.h"
/*
* Note that we get called through safety_check.sh, which turns on sampling for
* everything.
*/
bool fake_abort_called;
void fake_abort(const char *message) {
(void)message;
fake_abort_called = true;
}
TEST_BEGIN(test_malloc_free_overflow) {
test_skip_if(!config_prof);
test_skip_if(!config_opt_safety_checks);
safety_check_set_abort(&fake_abort);
/* Buffer overflow! */
char* ptr = malloc(128);
ptr[128] = 0;
free(ptr);
safety_check_set_abort(NULL);
assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
TEST_BEGIN(test_mallocx_dallocx_overflow) {
test_skip_if(!config_prof);
test_skip_if(!config_opt_safety_checks);
safety_check_set_abort(&fake_abort);
/* Buffer overflow! */
char* ptr = mallocx(128, 0);
ptr[128] = 0;
dallocx(ptr, 0);
safety_check_set_abort(NULL);
assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
TEST_BEGIN(test_malloc_sdallocx_overflow) {
test_skip_if(!config_prof);
test_skip_if(!config_opt_safety_checks);
safety_check_set_abort(&fake_abort);
/* Buffer overflow! */
char* ptr = malloc(128);
ptr[128] = 0;
sdallocx(ptr, 128, 0);
safety_check_set_abort(NULL);
assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
TEST_BEGIN(test_realloc_overflow) {
test_skip_if(!config_prof);
test_skip_if(!config_opt_safety_checks);
safety_check_set_abort(&fake_abort);
/* Buffer overflow! */
char* ptr = malloc(128);
ptr[128] = 0;
ptr = realloc(ptr, 129);
safety_check_set_abort(NULL);
free(ptr);
assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
TEST_BEGIN(test_rallocx_overflow) {
test_skip_if(!config_prof);
test_skip_if(!config_opt_safety_checks);
safety_check_set_abort(&fake_abort);
/* Buffer overflow! */
char* ptr = malloc(128);
ptr[128] = 0;
ptr = rallocx(ptr, 129, 0);
safety_check_set_abort(NULL);
free(ptr);
assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
TEST_BEGIN(test_xallocx_overflow) {
test_skip_if(!config_prof);
test_skip_if(!config_opt_safety_checks);
safety_check_set_abort(&fake_abort);
/* Buffer overflow! */
char* ptr = malloc(128);
ptr[128] = 0;
size_t result = xallocx(ptr, 129, 0, 0);
assert_zu_eq(result, 128, "");
free(ptr);
assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
safety_check_set_abort(NULL);
}
TEST_END
TEST_BEGIN(test_realloc_no_overflow) {
char* ptr = malloc(128);
ptr = realloc(ptr, 256);
ptr[128] = 0;
ptr[255] = 0;
free(ptr);
ptr = malloc(128);
ptr = realloc(ptr, 64);
ptr[63] = 0;
ptr[0] = 0;
free(ptr);
}
TEST_END
TEST_BEGIN(test_rallocx_no_overflow) {
char* ptr = malloc(128);
ptr = rallocx(ptr, 256, 0);
ptr[128] = 0;
ptr[255] = 0;
free(ptr);
ptr = malloc(128);
ptr = rallocx(ptr, 64, 0);
ptr[63] = 0;
ptr[0] = 0;
free(ptr);
}
TEST_END
int
main(void) {
return test(
test_malloc_free_overflow,
test_mallocx_dallocx_overflow,
test_malloc_sdallocx_overflow,
test_realloc_overflow,
test_rallocx_overflow,
test_xallocx_overflow,
test_realloc_no_overflow,
test_rallocx_no_overflow);
}
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
export MALLOC_CONF="prof:true,lg_prof_sample:0"
fi
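The overflow tests above rely on jemalloc being built with profiling and opt_safety_checks, with lg_prof_sample:0 so every allocation is sampled and therefore checked. As a rough, hand-rolled illustration of the underlying idea only (not jemalloc's actual implementation), a redzone check plants a known byte just past the requested size and verifies it when the buffer is released:

#include <stdio.h>
#include <stdlib.h>

#define REDZONE_BYTE 0x5a

/* Hypothetical wrappers sketching the redzone technique. */
static void *
guarded_malloc(size_t n) {
	unsigned char *p = malloc(n + 1);
	if (p != NULL) {
		p[n] = REDZONE_BYTE;	/* canary just past the usable size */
	}
	return p;
}

static void
guarded_free(void *ptr, size_t n) {
	unsigned char *p = ptr;
	if (p[n] != REDZONE_BYTE) {
		/* Analogous to the fake_abort() hook firing in the tests above. */
		fprintf(stderr, "buffer overflow detected\n");
	}
	free(p);
}

int main(void) {
	char *buf = guarded_malloc(128);
	buf[128] = 0;			/* one-byte overflow, clobbers the canary */
	guarded_free(buf, 128);		/* prints the detection message */
	return 0;
}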
#include "test/jemalloc_test.h"
TEST_BEGIN(test_update_slab_size) {
sc_data_t data;
memset(&data, 0, sizeof(data));
sc_data_init(&data);
sc_t *tiny = &data.sc[0];
size_t tiny_size = (ZU(1) << tiny->lg_base)
+ (ZU(tiny->ndelta) << tiny->lg_delta);
size_t pgs_too_big = (tiny_size * BITMAP_MAXBITS + PAGE - 1) / PAGE + 1;
sc_data_update_slab_size(&data, tiny_size, tiny_size, (int)pgs_too_big);
assert_zu_lt((size_t)tiny->pgs, pgs_too_big, "Allowed excessive pages");
sc_data_update_slab_size(&data, 1, 10 * PAGE, 1);
for (int i = 0; i < data.nbins; i++) {
sc_t *sc = &data.sc[i];
size_t reg_size = (ZU(1) << sc->lg_base)
+ (ZU(sc->ndelta) << sc->lg_delta);
if (reg_size <= PAGE) {
assert_d_eq(sc->pgs, 1, "Ignored valid page size hint");
} else {
assert_d_gt(sc->pgs, 1,
"Allowed invalid page size hint");
}
}
}
TEST_END
int
main(void) {
return test(
test_update_slab_size);
}
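The reg_size expression in the test above is jemalloc's size-class formula, size = (1 << lg_base) + ndelta * (1 << lg_delta). A tiny stand-alone example with made-up parameters (not values taken from sc_data_init()):

#include <stdio.h>

int main(void) {
	/* Hypothetical size-class parameters for illustration only. */
	unsigned lg_base = 6, ndelta = 3, lg_delta = 4;
	size_t size = ((size_t)1 << lg_base) + ((size_t)ndelta << lg_delta);
	printf("%zu\n", size);	/* 64 + 3 * 16 = 112 bytes */
	return 0;
}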
#include "test/jemalloc_test.h"
#include "jemalloc/internal/seq.h"
typedef struct data_s data_t;
struct data_s {
int arr[10];
};
static void
set_data(data_t *data, int num) {
for (int i = 0; i < 10; i++) {
data->arr[i] = num;
}
}
static void
assert_data(data_t *data) {
int num = data->arr[0];
for (int i = 0; i < 10; i++) {
assert_d_eq(num, data->arr[i], "Data consistency error");
}
}
seq_define(data_t, data)
typedef struct thd_data_s thd_data_t;
struct thd_data_s {
seq_data_t data;
};
static void *
seq_reader_thd(void *arg) {
thd_data_t *thd_data = (thd_data_t *)arg;
int iter = 0;
data_t local_data;
while (iter < 1000 * 1000 - 1) {
bool success = seq_try_load_data(&local_data, &thd_data->data);
if (success) {
assert_data(&local_data);
assert_d_le(iter, local_data.arr[0],
"Seq read went back in time.");
iter = local_data.arr[0];
}
}
return NULL;
}
static void *
seq_writer_thd(void *arg) {
thd_data_t *thd_data = (thd_data_t *)arg;
data_t local_data;
memset(&local_data, 0, sizeof(local_data));
for (int i = 0; i < 1000 * 1000; i++) {
set_data(&local_data, i);
seq_store_data(&thd_data->data, &local_data);
}
return NULL;
}
TEST_BEGIN(test_seq_threaded) {
thd_data_t thd_data;
memset(&thd_data, 0, sizeof(thd_data));
thd_t reader;
thd_t writer;
thd_create(&reader, seq_reader_thd, &thd_data);
thd_create(&writer, seq_writer_thd, &thd_data);
thd_join(reader, NULL);
thd_join(writer, NULL);
}
TEST_END
TEST_BEGIN(test_seq_simple) {
data_t data;
seq_data_t seq;
memset(&seq, 0, sizeof(seq));
for (int i = 0; i < 1000 * 1000; i++) {
set_data(&data, i);
seq_store_data(&seq, &data);
set_data(&data, 0);
bool success = seq_try_load_data(&data, &seq);
assert_b_eq(success, true, "Failed non-racing read");
assert_data(&data);
}
}
TEST_END
int main(void) {
return test_no_reentrancy(
test_seq_simple,
test_seq_threaded);
}
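seq_store_data() and seq_try_load_data() (generated by seq_define()) implement a seqlock-style protocol: the writer bumps a generation counter to an odd value before copying data in and back to an even value afterwards, and a read only counts as successful if the counter was even and unchanged across the copy. Below is a stand-alone sketch of that idea using C11 atomics; the names are illustrative and sequentially consistent ordering is used for brevity, whereas jemalloc's seq.h tunes the memory orders. A single writer is assumed, matching the one-writer/one-reader pattern in test_seq_threaded above.

#include <stdatomic.h>
#include <stdbool.h>
#include <string.h>

typedef struct {
	atomic_uint seq;	/* odd while a write is in progress */
	int payload[10];
} seq_box_t;

static void
seq_box_store(seq_box_t *box, const int *src) {
	unsigned s = atomic_load(&box->seq);
	atomic_store(&box->seq, s + 1);		/* mark write in progress (odd) */
	memcpy(box->payload, src, sizeof(box->payload));
	atomic_store(&box->seq, s + 2);		/* publish; counter even again */
}

static bool
seq_box_try_load(int *dst, seq_box_t *box) {
	unsigned s1 = atomic_load(&box->seq);
	if (s1 & 1) {
		return false;			/* writer is mid-update */
	}
	memcpy(dst, box->payload, sizeof(box->payload));
	unsigned s2 = atomic_load(&box->seq);
	return s1 == s2;			/* fail if a write raced with the copy */
}

int main(void) {
	seq_box_t box = {0};
	int in[10] = {42, 42, 42, 42, 42, 42, 42, 42, 42, 42}, out[10];
	seq_box_store(&box, in);
	return seq_box_try_load(out, &box) ? 0 : 1;	/* non-racing read must succeed */
}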
@@ -108,8 +108,13 @@ TEST_BEGIN(test_psize_classes) {
		    size_class, sz_psz2ind(size_class),
		    sz_pind2sz(sz_psz2ind(size_class)));
-		assert_u_eq(pind+1, sz_psz2ind(size_class+1),
-		    "Next size_class does not round up properly");
+		if (size_class == SC_LARGE_MAXCLASS) {
+			assert_u_eq(SC_NPSIZES, sz_psz2ind(size_class + 1),
+			    "Next size_class does not round up properly");
+		} else {
+			assert_u_eq(pind + 1, sz_psz2ind(size_class + 1),
+			    "Next size_class does not round up properly");
+		}
		assert_zu_eq(size_class, (pind > 0) ?
		    sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1),
@@ -142,11 +147,11 @@ TEST_BEGIN(test_overflow) {
	max_size_class = get_max_size_class();
	max_psz = max_size_class + PAGE;
-	assert_u_eq(sz_size2index(max_size_class+1), NSIZES,
+	assert_u_eq(sz_size2index(max_size_class+1), SC_NSIZES,
	    "sz_size2index() should return NSIZES on overflow");
-	assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
+	assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), SC_NSIZES,
	    "sz_size2index() should return NSIZES on overflow");
-	assert_u_eq(sz_size2index(SIZE_T_MAX), NSIZES,
+	assert_u_eq(sz_size2index(SIZE_T_MAX), SC_NSIZES,
	    "sz_size2index() should return NSIZES on overflow");
	assert_zu_eq(sz_s2u(max_size_class+1), 0,
@@ -156,11 +161,11 @@ TEST_BEGIN(test_overflow) {
	assert_zu_eq(sz_s2u(SIZE_T_MAX), 0,
	    "sz_s2u() should return 0 on overflow");
-	assert_u_eq(sz_psz2ind(max_size_class+1), NPSIZES,
+	assert_u_eq(sz_psz2ind(max_size_class+1), SC_NPSIZES,
	    "sz_psz2ind() should return NPSIZES on overflow");
-	assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES,
+	assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), SC_NPSIZES,
	    "sz_psz2ind() should return NPSIZES on overflow");
-	assert_u_eq(sz_psz2ind(SIZE_T_MAX), NPSIZES,
+	assert_u_eq(sz_psz2ind(SIZE_T_MAX), SC_NPSIZES,
	    "sz_psz2ind() should return NPSIZES on overflow");
	assert_zu_eq(sz_psz2u(max_size_class+1), max_psz,
@@ -3,13 +3,14 @@
TEST_BEGIN(test_arena_slab_regind) {
	szind_t binind;
-	for (binind = 0; binind < NBINS; binind++) {
+	for (binind = 0; binind < SC_NBINS; binind++) {
		size_t regind;
		extent_t slab;
		const bin_info_t *bin_info = &bin_infos[binind];
		extent_init(&slab, NULL, mallocx(bin_info->slab_size,
		    MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true,
-		    binind, 0, extent_state_active, false, true, true);
+		    binind, 0, extent_state_active, false, true, true,
+		    EXTENT_NOT_HEAD);
		assert_ptr_not_null(extent_addr_get(&slab),
		    "Unexpected malloc() failure");
		for (regind = 0; regind < bin_info->nregs; regind++) {
@@ -33,7 +33,7 @@ TEST_BEGIN(test_stats_large) {
	size_t sz;
	int expected = config_stats ? 0 : ENOENT;
-	p = mallocx(SMALL_MAXCLASS+1, MALLOCX_ARENA(0));
+	p = mallocx(SC_SMALL_MAXCLASS + 1, MALLOCX_ARENA(0));
	assert_ptr_not_null(p, "Unexpected mallocx() failure");
	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
@@ -74,9 +74,10 @@ TEST_BEGIN(test_stats_arenas_summary) {
	uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
	uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
-	little = mallocx(SMALL_MAXCLASS, MALLOCX_ARENA(0));
+	little = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
	assert_ptr_not_null(little, "Unexpected mallocx() failure");
-	large = mallocx((1U << LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
+	large = mallocx((1U << SC_LG_LARGE_MINCLASS),
+	    MALLOCX_ARENA(0));
	assert_ptr_not_null(large, "Unexpected mallocx() failure");
	dallocx(little, 0);
@@ -148,7 +149,7 @@ TEST_BEGIN(test_stats_arenas_small) {
	no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
-	p = mallocx(SMALL_MAXCLASS, MALLOCX_ARENA(0));
+	p = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
	assert_ptr_not_null(p, "Unexpected mallocx() failure");
	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
@@ -191,7 +192,7 @@ TEST_BEGIN(test_stats_arenas_large) {
	uint64_t epoch, nmalloc, ndalloc;
	int expected = config_stats ? 0 : ENOENT;
-	p = mallocx((1U << LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
+	p = mallocx((1U << SC_LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
	assert_ptr_not_null(p, "Unexpected mallocx() failure");
	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
@@ -227,7 +228,7 @@ gen_mallctl_str(char *cmd, char *name, unsigned arena_ind) {
TEST_BEGIN(test_stats_arenas_bins) {
	void *p;
-	size_t sz, curslabs, curregs;
+	size_t sz, curslabs, curregs, nonfull_slabs;
	uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
	uint64_t nslabs, nreslabs;
	int expected = config_stats ? 0 : ENOENT;
@@ -288,6 +289,9 @@ TEST_BEGIN(test_stats_arenas_bins) {
	gen_mallctl_str(cmd, "curslabs", arena_ind);
	assert_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected,
	    "Unexpected mallctl() result");
+	gen_mallctl_str(cmd, "nonfull_slabs", arena_ind);
+	assert_d_eq(mallctl(cmd, (void *)&nonfull_slabs, &sz, NULL, 0),
+	    expected, "Unexpected mallctl() result");
	if (config_stats) {
		assert_u64_gt(nmalloc, 0,
@@ -308,6 +312,8 @@ TEST_BEGIN(test_stats_arenas_bins) {
		    "At least one slab should have been allocated");
		assert_zu_gt(curslabs, 0,
		    "At least one slab should be currently allocated");
+		assert_zu_eq(nonfull_slabs, 0,
+		    "slabs_nonfull should be empty");
	}
	dallocx(p, 0);
@@ -12,10 +12,10 @@ func_to_hook(int arg1, int arg2) {
	return arg1 + arg2;
}
-#define func_to_hook JEMALLOC_HOOK(func_to_hook, hooks_libc_hook)
+#define func_to_hook JEMALLOC_HOOK(func_to_hook, test_hooks_libc_hook)
TEST_BEGIN(unhooked_call) {
-	hooks_libc_hook = NULL;
+	test_hooks_libc_hook = NULL;
	hook_called = false;
	assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
	assert_false(hook_called, "Nulling out hook didn't take.");
@@ -23,7 +23,7 @@ TEST_BEGIN(unhooked_call) {
TEST_END
TEST_BEGIN(hooked_call) {
-	hooks_libc_hook = &hook;
+	test_hooks_libc_hook = &hook;
	hook_called = false;
	assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
	assert_true(hook_called, "Hook should have executed.");
#include "test/jemalloc_test.h" #include "test/jemalloc_test.h"
/*
* If we're e.g. in debug mode, we *never* enter the fast path, and so shouldn't
* be asserting that we're on one.
*/
static bool originally_fast;
static int data_cleanup_count; static int data_cleanup_count;
void void
...@@ -98,11 +103,11 @@ thd_start_reincarnated(void *arg) { ...@@ -98,11 +103,11 @@ thd_start_reincarnated(void *arg) {
tsd_cleanup((void *)tsd); tsd_cleanup((void *)tsd);
assert_ptr_null(*tsd_arenap_get_unsafe(tsd), assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
"TSD arena should have been cleared."); "TSD arena should have been cleared.");
assert_u_eq(tsd->state, tsd_state_purgatory, assert_u_eq(tsd_state_get(tsd), tsd_state_purgatory,
"TSD state should be purgatory\n"); "TSD state should be purgatory\n");
free(p); free(p);
assert_u_eq(tsd->state, tsd_state_reincarnated, assert_u_eq(tsd_state_get(tsd), tsd_state_reincarnated,
"TSD state should be reincarnated\n"); "TSD state should be reincarnated\n");
p = mallocx(1, MALLOCX_TCACHE_NONE); p = mallocx(1, MALLOCX_TCACHE_NONE);
assert_ptr_not_null(p, "Unexpected malloc() failure"); assert_ptr_not_null(p, "Unexpected malloc() failure");
...@@ -124,6 +129,128 @@ TEST_BEGIN(test_tsd_reincarnation) { ...@@ -124,6 +129,128 @@ TEST_BEGIN(test_tsd_reincarnation) {
} }
TEST_END TEST_END
typedef struct {
atomic_u32_t phase;
atomic_b_t error;
} global_slow_data_t;
static void *
thd_start_global_slow(void *arg) {
/* PHASE 0 */
global_slow_data_t *data = (global_slow_data_t *)arg;
free(mallocx(1, 0));
tsd_t *tsd = tsd_fetch();
/*
* No global slowness has happened yet; there was an error if we were
* originally fast but aren't now.
*/
atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
ATOMIC_SEQ_CST);
atomic_store_u32(&data->phase, 1, ATOMIC_SEQ_CST);
/* PHASE 2 */
while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 2) {
}
free(mallocx(1, 0));
atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
atomic_store_u32(&data->phase, 3, ATOMIC_SEQ_CST);
/* PHASE 4 */
while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 4) {
}
free(mallocx(1, 0));
atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
atomic_store_u32(&data->phase, 5, ATOMIC_SEQ_CST);
/* PHASE 6 */
while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 6) {
}
free(mallocx(1, 0));
/* Only one decrement so far. */
atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
atomic_store_u32(&data->phase, 7, ATOMIC_SEQ_CST);
/* PHASE 8 */
while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 8) {
}
free(mallocx(1, 0));
/*
* Both decrements happened; we should be fast again (if we ever
* were)
*/
atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
ATOMIC_SEQ_CST);
atomic_store_u32(&data->phase, 9, ATOMIC_SEQ_CST);
return NULL;
}
TEST_BEGIN(test_tsd_global_slow) {
global_slow_data_t data = {ATOMIC_INIT(0), ATOMIC_INIT(false)};
/*
* Note that the "mallocx" here (vs. malloc) is important, since the
* compiler is allowed to optimize away free(malloc(1)) but not
* free(mallocx(1)).
*/
free(mallocx(1, 0));
tsd_t *tsd = tsd_fetch();
originally_fast = tsd_fast(tsd);
thd_t thd;
thd_create(&thd, thd_start_global_slow, (void *)&data.phase);
/* PHASE 1 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 1) {
/*
* We don't have a portable condvar/semaphore mechanism.
* Spin-wait.
*/
}
assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
tsd_global_slow_inc(tsd_tsdn(tsd));
free(mallocx(1, 0));
assert_false(tsd_fast(tsd), "");
atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST);
/* PHASE 3 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) {
}
assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
/* Increase again, so that we can test multiple fast/slow changes. */
tsd_global_slow_inc(tsd_tsdn(tsd));
atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST);
free(mallocx(1, 0));
assert_false(tsd_fast(tsd), "");
/* PHASE 5 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) {
}
assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
tsd_global_slow_dec(tsd_tsdn(tsd));
atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST);
/* We only decreased once; things should still be slow. */
free(mallocx(1, 0));
assert_false(tsd_fast(tsd), "");
/* PHASE 7 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) {
}
assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
tsd_global_slow_dec(tsd_tsdn(tsd));
atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST);
/* We incremented and then decremented twice; we should be fast now. */
free(mallocx(1, 0));
assert_true(!originally_fast || tsd_fast(tsd), "");
/* PHASE 9 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) {
}
assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
thd_join(thd, NULL);
}
TEST_END
int
main(void) {
	/* Ensure tsd bootstrapped. */
@@ -135,5 +262,6 @@ main(void) {
	return test_no_reentrancy(
	    test_tsd_main_thread,
	    test_tsd_sub_thread,
-	    test_tsd_reincarnation);
+	    test_tsd_reincarnation,
+	    test_tsd_global_slow);
}
@@ -41,13 +41,13 @@ test_zero(size_t sz_min, size_t sz_max) {
TEST_BEGIN(test_zero_small) {
	test_skip_if(!config_fill);
-	test_zero(1, SMALL_MAXCLASS-1);
+	test_zero(1, SC_SMALL_MAXCLASS - 1);
}
TEST_END
TEST_BEGIN(test_zero_large) {
	test_skip_if(!config_fill);
-	test_zero(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
+	test_zero(SC_SMALL_MAXCLASS + 1, 1U << (SC_LG_LARGE_MINCLASS + 1));
}
TEST_END