Commit 89a9e5a9 authored by Guy Benoish

Merge branch 'unstable' of https://github.com/antirez/redis into unstable

parents 71a8df6a a4c7f34d
deps/jemalloc/test/unit/rb.c
@@ -3,7 +3,7 @@
 #define rbtn_black_height(a_type, a_field, a_rbt, r_height) do {	\
     a_type *rbp_bh_t;							\
     for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0;			\
-	rbp_bh_t != NULL;						\
+	rbp_bh_t != &(a_rbt)->rbt_nil;					\
 	rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) {		\
 	if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) {			\
 	    (r_height)++;						\
@@ -21,7 +21,7 @@ struct node_s {
 };

 static int
-node_cmp(const node_t *a, const node_t *b) {
+node_cmp(node_t *a, node_t *b) {
 	int ret;

 	assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
@@ -68,43 +68,38 @@ TEST_BEGIN(test_rb_empty)
 TEST_END

 static unsigned
-tree_recurse(node_t *node, unsigned black_height, unsigned black_depth)
+tree_recurse(node_t *node, unsigned black_height, unsigned black_depth,
+    node_t *nil)
 {
 	unsigned ret = 0;
-	node_t *left_node;
-	node_t *right_node;
-
-	if (node == NULL)
-		return (ret);
-
-	left_node = rbtn_left_get(node_t, link, node);
-	right_node = rbtn_right_get(node_t, link, node);
+	node_t *left_node = rbtn_left_get(node_t, link, node);
+	node_t *right_node = rbtn_right_get(node_t, link, node);

 	if (!rbtn_red_get(node_t, link, node))
 		black_depth++;

 	/* Red nodes must be interleaved with black nodes. */
 	if (rbtn_red_get(node_t, link, node)) {
-		if (left_node != NULL)
-			assert_false(rbtn_red_get(node_t, link, left_node),
-			    "Node should be black");
-		if (right_node != NULL)
-			assert_false(rbtn_red_get(node_t, link, right_node),
-			    "Node should be black");
+		assert_false(rbtn_red_get(node_t, link, left_node),
+		    "Node should be black");
+		assert_false(rbtn_red_get(node_t, link, right_node),
+		    "Node should be black");
 	}

+	if (node == nil)
+		return (ret);
+
 	/* Self. */
 	assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");

 	/* Left subtree. */
-	if (left_node != NULL)
-		ret += tree_recurse(left_node, black_height, black_depth);
+	if (left_node != nil)
+		ret += tree_recurse(left_node, black_height, black_depth, nil);
 	else
 		ret += (black_depth != black_height);

 	/* Right subtree. */
-	if (right_node != NULL)
-		ret += tree_recurse(right_node, black_height, black_depth);
+	if (right_node != nil)
+		ret += tree_recurse(right_node, black_height, black_depth, nil);
 	else
 		ret += (black_depth != black_height);
@@ -186,7 +181,8 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes)
 	node->magic = 0;

 	rbtn_black_height(node_t, link, tree, black_height);
-	imbalances = tree_recurse(tree->rbt_root, black_height, 0);
+	imbalances = tree_recurse(tree->rbt_root, black_height, 0,
+	    &(tree->rbt_nil));
 	assert_u_eq(imbalances, 0, "Tree is unbalanced");
 	assert_u_eq(tree_iterate(tree), nnodes-1,
 	    "Unexpected node iteration count");
@@ -216,15 +212,6 @@ remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data)
 	return (ret);
 }

-static void
-destroy_cb(node_t *node, void *data)
-{
-	unsigned *nnodes = (unsigned *)data;
-
-	assert_u_gt(*nnodes, 0, "Destruction removed too many nodes");
-	(*nnodes)--;
-}
-
 TEST_BEGIN(test_rb_random)
 {
 #define NNODES 25
@@ -257,6 +244,7 @@ TEST_BEGIN(test_rb_random)
 		for (j = 1; j <= NNODES; j++) {
 			/* Initialize tree and nodes. */
 			tree_new(&tree);
+			tree.rbt_nil.magic = 0;
 			for (k = 0; k < j; k++) {
 				nodes[k].magic = NODE_MAGIC;
 				nodes[k].key = bag[k];
@@ -269,7 +257,7 @@ TEST_BEGIN(test_rb_random)
 				rbtn_black_height(node_t, link, &tree,
 				    black_height);
 				imbalances = tree_recurse(tree.rbt_root,
-				    black_height, 0);
+				    black_height, 0, &(tree.rbt_nil));
 				assert_u_eq(imbalances, 0,
 				    "Tree is unbalanced");
@@ -290,7 +278,7 @@ TEST_BEGIN(test_rb_random)
 			}

 			/* Remove nodes. */
-			switch (i % 5) {
+			switch (i % 4) {
 			case 0:
 				for (k = 0; k < j; k++)
 					node_remove(&tree, &nodes[k], j - k);
@@ -326,12 +314,6 @@ TEST_BEGIN(test_rb_random)
 				assert_u_eq(nnodes, 0,
 				    "Removal terminated early");
 				break;
-			} case 4: {
-				unsigned nnodes = j;
-				tree_destroy(&tree, destroy_cb, &nnodes);
-				assert_u_eq(nnodes, 0,
-				    "Destruction terminated early");
-				break;
 			} default:
 				not_reached();
 			}
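For orientation: this hunk moves the red-black tree test from NULL-terminated child links back to jemalloc 4.0's shared rbt_nil sentinel, which is why tree_recurse() gains a nil parameter and the test clears tree.rbt_nil.magic. A minimal sketch of the sentinel convention, using toy types rather than jemalloc's rb macros:

#include <stddef.h>

typedef struct toy_node {
	struct toy_node *left, *right;
	int red;
} toy_node_t;

typedef struct {
	toy_node_t nil;	/* shared sentinel; "empty" children point here */
	toy_node_t *root;
} toy_tree_t;

static void
toy_tree_new(toy_tree_t *t)
{
	t->nil.left = t->nil.right = &t->nil;
	t->nil.red = 0;	/* the sentinel is always black */
	t->root = &t->nil;
}

/* Count black nodes along the leftmost path, mirroring what the test's
 * rbtn_black_height macro does with &(a_rbt)->rbt_nil as the terminator. */
static unsigned
toy_black_height(toy_tree_t *t)
{
	unsigned h = 0;
	toy_node_t *n;

	for (n = t->root; n != &t->nil; n = n->left) {
		if (!n->red)
			h++;
	}
	return h;
}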
deps/jemalloc/test/unit/run_quantize.c (deleted file)
#include "test/jemalloc_test.h"
TEST_BEGIN(test_small_run_size)
{
unsigned nbins, i;
size_t sz, run_size;
size_t mib[4];
size_t miblen = sizeof(mib) / sizeof(size_t);
/*
* Iterate over all small size classes, get their run sizes, and verify
* that the quantized size is the same as the run size.
*/
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
"Unexpected mallctlnametomib failure");
for (i = 0; i < nbins; i++) {
mib[2] = i;
sz = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz,
NULL, 0), 0, "Unexpected mallctlbymib failure");
assert_zu_eq(run_size, run_quantize_floor(run_size),
"Small run quantization should be a no-op (run_size=%zu)",
run_size);
assert_zu_eq(run_size, run_quantize_ceil(run_size),
"Small run quantization should be a no-op (run_size=%zu)",
run_size);
}
}
TEST_END
TEST_BEGIN(test_large_run_size)
{
bool cache_oblivious;
unsigned nlruns, i;
size_t sz, run_size_prev, ceil_prev;
size_t mib[4];
size_t miblen = sizeof(mib) / sizeof(size_t);
/*
* Iterate over all large size classes, get their run sizes, and verify
* that the quantized size is the same as the run size.
*/
sz = sizeof(bool);
assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious,
&sz, NULL, 0), 0, "Unexpected mallctl failure");
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0,
"Unexpected mallctlnametomib failure");
for (i = 0; i < nlruns; i++) {
size_t lrun_size, run_size, floor, ceil;
mib[2] = i;
sz = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&lrun_size, &sz,
NULL, 0), 0, "Unexpected mallctlbymib failure");
run_size = cache_oblivious ? lrun_size + PAGE : lrun_size;
floor = run_quantize_floor(run_size);
ceil = run_quantize_ceil(run_size);
assert_zu_eq(run_size, floor,
"Large run quantization should be a no-op for precise "
"size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);
assert_zu_eq(run_size, ceil,
"Large run quantization should be a no-op for precise "
"size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);
if (i > 0) {
assert_zu_eq(run_size_prev, run_quantize_floor(run_size
- PAGE), "Floor should be a precise size");
if (run_size_prev < ceil_prev) {
assert_zu_eq(ceil_prev, run_size,
"Ceiling should be a precise size "
"(run_size_prev=%zu, ceil_prev=%zu, "
"run_size=%zu)", run_size_prev, ceil_prev,
run_size);
}
}
run_size_prev = floor;
ceil_prev = run_quantize_ceil(run_size + PAGE);
}
}
TEST_END
TEST_BEGIN(test_monotonic)
{
unsigned nbins, nlruns, i;
size_t sz, floor_prev, ceil_prev;
/*
* Iterate over all run sizes and verify that
* run_quantize_{floor,ceil}() are monotonic.
*/
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
floor_prev = 0;
ceil_prev = 0;
for (i = 1; i <= chunksize >> LG_PAGE; i++) {
size_t run_size, floor, ceil;
run_size = i << LG_PAGE;
floor = run_quantize_floor(run_size);
ceil = run_quantize_ceil(run_size);
assert_zu_le(floor, run_size,
"Floor should be <= (floor=%zu, run_size=%zu, ceil=%zu)",
floor, run_size, ceil);
assert_zu_ge(ceil, run_size,
"Ceiling should be >= (floor=%zu, run_size=%zu, ceil=%zu)",
floor, run_size, ceil);
assert_zu_le(floor_prev, floor, "Floor should be monotonic "
"(floor_prev=%zu, floor=%zu, run_size=%zu, ceil=%zu)",
floor_prev, floor, run_size, ceil);
assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
"(floor=%zu, run_size=%zu, ceil_prev=%zu, ceil=%zu)",
floor, run_size, ceil_prev, ceil);
floor_prev = floor;
ceil_prev = ceil;
}
}
TEST_END
int
main(void)
{
return (test(
test_small_run_size,
test_large_run_size,
test_monotonic));
}
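The deleted test above checks three properties of run quantization: it is a no-op on exact run sizes, floor(x) <= x <= ceil(x), and both functions are monotonic. A toy stand-in that satisfies the same contract at page granularity only (jemalloc's real run_quantize_floor()/run_quantize_ceil() also honor size-class spacing; TOY_PAGE stands in for PAGE):

#include <stddef.h>

#define TOY_PAGE ((size_t)4096)

/* Round a run size down to a page multiple. */
static size_t
toy_quantize_floor(size_t run_size)
{
	return run_size & ~(TOY_PAGE - 1);
}

/* Round a run size up to a page multiple. */
static size_t
toy_quantize_ceil(size_t run_size)
{
	return (run_size + TOY_PAGE - 1) & ~(TOY_PAGE - 1);
}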
deps/jemalloc/test/unit/size_classes.c
@@ -8,8 +8,8 @@ get_max_size_class(void)
 	size_t sz, miblen, max_size_class;

 	sz = sizeof(unsigned);
-	assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, 0),
-	    0, "Unexpected mallctl() error");
+	assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
+	    "Unexpected mallctl() error");

 	miblen = sizeof(mib) / sizeof(size_t);
 	assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
@@ -17,8 +17,8 @@ get_max_size_class(void)
 	mib[2] = nhchunks - 1;

 	sz = sizeof(size_t);
-	assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
-	    NULL, 0), 0, "Unexpected mallctlbymib() error");
+	assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
+	    "Unexpected mallctlbymib() error");

 	return (max_size_class);
 }
@@ -80,105 +80,10 @@ TEST_BEGIN(test_size_classes)
 }
 TEST_END

-TEST_BEGIN(test_psize_classes)
-{
-	size_t size_class, max_size_class;
-	pszind_t pind, max_pind;
-
-	max_size_class = get_max_size_class();
-	max_pind = psz2ind(max_size_class);
-
-	for (pind = 0, size_class = pind2sz(pind); pind < max_pind ||
-	    size_class < max_size_class; pind++, size_class =
-	    pind2sz(pind)) {
-		assert_true(pind < max_pind,
-		    "Loop conditionals should be equivalent; pind=%u, "
-		    "size_class=%zu (%#zx)", pind, size_class, size_class);
-		assert_true(size_class < max_size_class,
-		    "Loop conditionals should be equivalent; pind=%u, "
-		    "size_class=%zu (%#zx)", pind, size_class, size_class);
-
-		assert_u_eq(pind, psz2ind(size_class),
-		    "psz2ind() does not reverse pind2sz(): pind=%u -->"
-		    " size_class=%zu --> pind=%u --> size_class=%zu", pind,
-		    size_class, psz2ind(size_class),
-		    pind2sz(psz2ind(size_class)));
-		assert_zu_eq(size_class, pind2sz(psz2ind(size_class)),
-		    "pind2sz() does not reverse psz2ind(): pind=%u -->"
-		    " size_class=%zu --> pind=%u --> size_class=%zu", pind,
-		    size_class, psz2ind(size_class),
-		    pind2sz(psz2ind(size_class)));
-
-		assert_u_eq(pind+1, psz2ind(size_class+1),
-		    "Next size_class does not round up properly");
-		assert_zu_eq(size_class, (pind > 0) ?
-		    psz2u(pind2sz(pind-1)+1) : psz2u(1),
-		    "psz2u() does not round up to size class");
-		assert_zu_eq(size_class, psz2u(size_class-1),
-		    "psz2u() does not round up to size class");
-		assert_zu_eq(size_class, psz2u(size_class),
-		    "psz2u() does not compute same size class");
-		assert_zu_eq(psz2u(size_class+1), pind2sz(pind+1),
-		    "psz2u() does not round up to next size class");
-	}
-
-	assert_u_eq(pind, psz2ind(pind2sz(pind)),
-	    "psz2ind() does not reverse pind2sz()");
-	assert_zu_eq(max_size_class, pind2sz(psz2ind(max_size_class)),
-	    "pind2sz() does not reverse psz2ind()");
-
-	assert_zu_eq(size_class, psz2u(pind2sz(pind-1)+1),
-	    "psz2u() does not round up to size class");
-	assert_zu_eq(size_class, psz2u(size_class-1),
-	    "psz2u() does not round up to size class");
-	assert_zu_eq(size_class, psz2u(size_class),
-	    "psz2u() does not compute same size class");
-}
-TEST_END
-
-TEST_BEGIN(test_overflow)
-{
-	size_t max_size_class;
-
-	max_size_class = get_max_size_class();
-
-	assert_u_eq(size2index(max_size_class+1), NSIZES,
-	    "size2index() should return NSIZES on overflow");
-	assert_u_eq(size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
-	    "size2index() should return NSIZES on overflow");
-	assert_u_eq(size2index(SIZE_T_MAX), NSIZES,
-	    "size2index() should return NSIZES on overflow");
-
-	assert_zu_eq(s2u(max_size_class+1), 0,
-	    "s2u() should return 0 for unsupported size");
-	assert_zu_eq(s2u(ZU(PTRDIFF_MAX)+1), 0,
-	    "s2u() should return 0 for unsupported size");
-	assert_zu_eq(s2u(SIZE_T_MAX), 0,
-	    "s2u() should return 0 on overflow");
-
-	assert_u_eq(psz2ind(max_size_class+1), NPSIZES,
-	    "psz2ind() should return NPSIZES on overflow");
-	assert_u_eq(psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES,
-	    "psz2ind() should return NPSIZES on overflow");
-	assert_u_eq(psz2ind(SIZE_T_MAX), NPSIZES,
-	    "psz2ind() should return NPSIZES on overflow");
-
-	assert_zu_eq(psz2u(max_size_class+1), 0,
-	    "psz2u() should return 0 for unsupported size");
-	assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), 0,
-	    "psz2u() should return 0 for unsupported size");
-	assert_zu_eq(psz2u(SIZE_T_MAX), 0,
-	    "psz2u() should return 0 on overflow");
-}
-TEST_END
-
 int
 main(void)
 {
 	return (test(
-	    test_size_classes,
-	    test_psize_classes,
-	    test_overflow));
+	    test_size_classes));
 }

deps/jemalloc/test/unit/smoothstep.c (deleted file)
#include "test/jemalloc_test.h"
static const uint64_t smoothstep_tab[] = {
#define STEP(step, h, x, y) \
h,
SMOOTHSTEP
#undef STEP
};
TEST_BEGIN(test_smoothstep_integral)
{
uint64_t sum, min, max;
unsigned i;
/*
* The integral of smoothstep in the [0..1] range equals 1/2. Verify
* that the fixed point representation's integral is no more than
* rounding error distant from 1/2. Regarding rounding, each table
* element is rounded down to the nearest fixed point value, so the
* integral may be off by as much as SMOOTHSTEP_NSTEPS ulps.
*/
sum = 0;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
sum += smoothstep_tab[i];
max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
min = max - SMOOTHSTEP_NSTEPS;
assert_u64_ge(sum, min,
"Integral too small, even accounting for truncation");
assert_u64_le(sum, max, "Integral exceeds 1/2");
if (false) {
malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n",
max - sum, SMOOTHSTEP_NSTEPS);
}
}
TEST_END
TEST_BEGIN(test_smoothstep_monotonic)
{
uint64_t prev_h;
unsigned i;
/*
* The smoothstep function is monotonic in [0..1], i.e. its slope is
* non-negative. In practice we want to parametrize table generation
* such that piecewise slope is greater than zero, but do not require
* that here.
*/
prev_h = 0;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
uint64_t h = smoothstep_tab[i];
assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
prev_h = h;
}
assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
(KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1");
}
TEST_END
TEST_BEGIN(test_smoothstep_slope)
{
uint64_t prev_h, prev_delta;
unsigned i;
/*
* The smoothstep slope strictly increases until x=0.5, and then
* strictly decreases until x=1.0. Verify the slightly weaker
* requirement of monotonicity, so that inadequate table precision does
* not cause false test failures.
*/
prev_h = 0;
prev_delta = 0;
for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) {
uint64_t h = smoothstep_tab[i];
uint64_t delta = h - prev_h;
assert_u64_ge(delta, prev_delta,
"Slope must monotonically increase in 0.0 <= x <= 0.5, "
"i=%u", i);
prev_h = h;
prev_delta = delta;
}
prev_h = KQU(1) << SMOOTHSTEP_BFP;
prev_delta = 0;
for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) {
uint64_t h = smoothstep_tab[i];
uint64_t delta = prev_h - h;
assert_u64_ge(delta, prev_delta,
"Slope must monotonically decrease in 0.5 <= x <= 1.0, "
"i=%u", i);
prev_h = h;
prev_delta = delta;
}
}
TEST_END
int
main(void)
{
return (test(
test_smoothstep_integral,
test_smoothstep_monotonic,
test_smoothstep_slope));
}
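The table under test is a fixed-point sampling of a smoothstep polynomial. A sketch of how such a table could be generated, assuming the classic 3x^2 - 2x^3 variant and illustrative nsteps/bfp values (jemalloc ships the table precomputed via the SMOOTHSTEP macro and may use a higher-order polynomial):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const unsigned nsteps = 200, bfp = 24;	/* assumed parameters */
	unsigned i;

	for (i = 1; i <= nsteps; i++) {
		double x = (double)i / nsteps;
		double y = x * x * (3.0 - 2.0 * x);	/* 3x^2 - 2x^3 */

		/* Scale to bfp fractional bits, truncating toward zero, so
		 * the final entry is exactly 1 << bfp (matching the "Last
		 * step must equal 1" assertion above). */
		printf("%#jx\n", (uintmax_t)(y * ((uint64_t)1 << bfp)));
	}
	return 0;
}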
deps/jemalloc/test/unit/stats.c
@@ -7,18 +7,18 @@ TEST_BEGIN(test_stats_summary)
 	int expected = config_stats ? 0 : ENOENT;

 	sz = sizeof(cactive);
-	assert_d_eq(mallctl("stats.cactive", (void *)&cactive, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected,
+	    "Unexpected mallctl() result");

 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
-	    0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0),
+	    expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected,
+	    "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.resident", &resident, &sz, NULL, 0),
+	    expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected,
+	    "Unexpected mallctl() result");

 	if (config_stats) {
 		assert_zu_le(active, *cactive,
@@ -45,19 +45,19 @@ TEST_BEGIN(test_stats_huge)
 	p = mallocx(large_maxclass+1, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");

-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+	    "Unexpected mallctl() failure");

 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.huge.allocated", (void *)&allocated,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", (void *)&nrequests,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL,
+	    0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL,
+	    0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");

 	if (config_stats) {
 		assert_zu_gt(allocated, 0,
@@ -83,8 +83,8 @@ TEST_BEGIN(test_stats_arenas_summary)
 	uint64_t npurge, nmadvise, purged;

 	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-	    sizeof(arena)), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+	    0, "Unexpected mallctl() failure");

 	little = mallocx(SMALL_MAXCLASS, 0);
 	assert_ptr_not_null(little, "Unexpected mallocx() failure");
@@ -93,26 +93,22 @@ TEST_BEGIN(test_stats_arenas_summary)
 	huge = mallocx(chunksize, 0);
 	assert_ptr_not_null(huge, "Unexpected mallocx() failure");

-	dallocx(little, 0);
-	dallocx(large, 0);
-	dallocx(huge, 0);
-
 	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
 	    "Unexpected mallctl() failure");

-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+	    "Unexpected mallctl() failure");

 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
-	    0), expected, "Unexepected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.mapped", &mapped, &sz, NULL, 0),
+	    expected, "Unexepected mallctl() result");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge, &sz, NULL,
-	    0), expected, "Unexepected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.nmadvise", (void *)&nmadvise, &sz,
-	    NULL, 0), expected, "Unexepected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.purged", (void *)&purged, &sz, NULL,
-	    0), expected, "Unexepected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0),
+	    expected, "Unexepected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0),
+	    expected, "Unexepected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.purged", &purged, &sz, NULL, 0),
+	    expected, "Unexepected mallctl() result");

 	if (config_stats) {
 		assert_u64_gt(npurge, 0,
@@ -120,6 +116,10 @@ TEST_BEGIN(test_stats_arenas_summary)
 		assert_u64_le(nmadvise, purged,
 		    "nmadvise should be no greater than purged");
 	}
+
+	dallocx(little, 0);
+	dallocx(large, 0);
+	dallocx(huge, 0);
 }
 TEST_END
@@ -150,8 +150,8 @@ TEST_BEGIN(test_stats_arenas_small)
 	no_lazy_lock();	/* Lazy locking would dodge tcache testing. */

 	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-	    sizeof(arena)), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+	    0, "Unexpected mallctl() failure");

 	p = mallocx(SMALL_MAXCLASS, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
@@ -159,21 +159,19 @@ TEST_BEGIN(test_stats_arenas_small)
 	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
 	    config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");

-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+	    "Unexpected mallctl() failure");

 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.small.allocated",
-	    (void *)&allocated, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.small.allocated", &allocated, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.small.nrequests",
-	    (void *)&nrequests, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", &nmalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", &ndalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.small.nrequests", &nrequests, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");

 	if (config_stats) {
 		assert_zu_gt(allocated, 0,
@@ -199,36 +197,34 @@ TEST_BEGIN(test_stats_arenas_large)
 	int expected = config_stats ? 0 : ENOENT;

 	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-	    sizeof(arena)), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+	    0, "Unexpected mallctl() failure");

 	p = mallocx(large_maxclass, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");

-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+	    "Unexpected mallctl() failure");

 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.large.allocated",
-	    (void *)&allocated, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.large.nrequests",
-	    (void *)&nrequests, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");

 	if (config_stats) {
 		assert_zu_gt(allocated, 0,
 		    "allocated should be greater than zero");
-		assert_u64_gt(nmalloc, 0,
+		assert_zu_gt(nmalloc, 0,
 		    "nmalloc should be greater than zero");
-		assert_u64_ge(nmalloc, ndalloc,
+		assert_zu_ge(nmalloc, ndalloc,
 		    "nmalloc should be at least as large as ndalloc");
-		assert_u64_gt(nrequests, 0,
+		assert_zu_gt(nrequests, 0,
 		    "nrequests should be greater than zero");
 	}
@@ -245,30 +241,30 @@ TEST_BEGIN(test_stats_arenas_huge)
 	int expected = config_stats ? 0 : ENOENT;

 	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-	    sizeof(arena)), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+	    0, "Unexpected mallctl() failure");

 	p = mallocx(chunksize, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");

-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+	    "Unexpected mallctl() failure");

 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.huge.allocated", (void *)&allocated,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");

 	if (config_stats) {
 		assert_zu_gt(allocated, 0,
 		    "allocated should be greater than zero");
-		assert_u64_gt(nmalloc, 0,
+		assert_zu_gt(nmalloc, 0,
 		    "nmalloc should be greater than zero");
-		assert_u64_ge(nmalloc, ndalloc,
+		assert_zu_ge(nmalloc, ndalloc,
 		    "nmalloc should be at least as large as ndalloc");
 	}
@@ -286,8 +282,8 @@ TEST_BEGIN(test_stats_arenas_bins)
 	int expected = config_stats ? 0 : ENOENT;

 	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-	    sizeof(arena)), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+	    0, "Unexpected mallctl() failure");

 	p = mallocx(arena_bin_info[0].reg_size, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
@@ -295,36 +291,35 @@ TEST_BEGIN(test_stats_arenas_bins)
 	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
 	    config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");

-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+	    "Unexpected mallctl() failure");

 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", (void *)&nmalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", (void *)&ndalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests",
-	    (void *)&nrequests, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", (void *)&curregs,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", (void *)&nfills,
-	    &sz, NULL, 0), config_tcache ? expected : ENOENT,
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz,
+	    NULL, 0), config_tcache ? expected : ENOENT,
 	    "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", (void *)&nflushes,
-	    &sz, NULL, 0), config_tcache ? expected : ENOENT,
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", &nflushes, &sz,
+	    NULL, 0), config_tcache ? expected : ENOENT,
 	    "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", (void *)&nruns, &sz,
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", &nruns, &sz,
 	    NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", (void *)&nreruns,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", &nreruns, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", (void *)&curruns,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", &curruns, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");

 	if (config_stats) {
 		assert_u64_gt(nmalloc, 0,
@@ -360,26 +355,25 @@ TEST_BEGIN(test_stats_arenas_lruns)
 	int expected = config_stats ? 0 : ENOENT;

 	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-	    sizeof(arena)), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+	    0, "Unexpected mallctl() failure");

 	p = mallocx(LARGE_MINCLASS, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");

-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+	    "Unexpected mallctl() failure");

 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", (void *)&nmalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", (void *)&ndalloc,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests",
-	    (void *)&nrequests, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", (void *)&curruns,
-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");

 	if (config_stats) {
 		assert_u64_gt(nmalloc, 0,
@@ -405,26 +399,23 @@ TEST_BEGIN(test_stats_arenas_hchunks)
 	int expected = config_stats ? 0 : ENOENT;

 	arena = 0;
-	assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
-	    sizeof(arena)), 0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+	    0, "Unexpected mallctl() failure");

 	p = mallocx(chunksize, 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");

-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
+	assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+	    "Unexpected mallctl() failure");

 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc",
-	    (void *)&nmalloc, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc",
-	    (void *)&ndalloc, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", &nmalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", &ndalloc, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks",
-	    (void *)&curhchunks, &sz, NULL, 0), expected,
-	    "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", &curhchunks,
+	    &sz, NULL, 0), expected, "Unexpected mallctl() result");

 	if (config_stats) {
 		assert_u64_gt(nmalloc, 0,

deps/jemalloc/test/unit/ticker.c (deleted file)
#include "test/jemalloc_test.h"
TEST_BEGIN(test_ticker_tick)
{
#define NREPS 2
#define NTICKS 3
ticker_t ticker;
int32_t i, j;
ticker_init(&ticker, NTICKS);
for (i = 0; i < NREPS; i++) {
for (j = 0; j < NTICKS; j++) {
assert_u_eq(ticker_read(&ticker), NTICKS - j,
"Unexpected ticker value (i=%d, j=%d)", i, j);
assert_false(ticker_tick(&ticker),
"Unexpected ticker fire (i=%d, j=%d)", i, j);
}
assert_u32_eq(ticker_read(&ticker), 0,
"Expected ticker depletion");
assert_true(ticker_tick(&ticker),
"Expected ticker fire (i=%d)", i);
assert_u32_eq(ticker_read(&ticker), NTICKS,
"Expected ticker reset");
}
#undef NTICKS
#undef NREPS
}
TEST_END
TEST_BEGIN(test_ticker_ticks)
{
#define NTICKS 3
ticker_t ticker;
ticker_init(&ticker, NTICKS);
assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
#undef NTICKS
}
TEST_END
TEST_BEGIN(test_ticker_copy)
{
#define NTICKS 3
ticker_t ta, tb;
ticker_init(&ta, NTICKS);
ticker_copy(&tb, &ta);
assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
ticker_tick(&ta);
ticker_copy(&tb, &ta);
assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
#undef NTICKS
}
TEST_END
int
main(void)
{
return (test(
test_ticker_tick,
test_ticker_ticks,
test_ticker_copy));
}
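The deleted ticker test pins down the counter's contract: ticker_read() reports the remaining ticks, ticking returns false while draining, and the first tick past zero fires and reloads the counter. A toy implementation consistent with those assertions (jemalloc's actual ticker.h may differ in details):

#include <stdbool.h>
#include <stdint.h>

typedef struct {
	int32_t tick;	/* ticks remaining before the next fire */
	int32_t nticks;	/* reload value */
} toy_ticker_t;

static void
toy_ticker_init(toy_ticker_t *t, int32_t nticks)
{
	t->tick = nticks;
	t->nticks = nticks;
}

static int32_t
toy_ticker_read(const toy_ticker_t *t)
{
	return t->tick;
}

/* Consume nticks ticks at once; fires (returns true) and reloads when the
 * counter is exhausted. */
static bool
toy_ticker_ticks(toy_ticker_t *t, int32_t nticks)
{
	if (t->tick < nticks) {
		t->tick = t->nticks;
		return true;
	}
	t->tick -= nticks;
	return false;
}

static bool
toy_ticker_tick(toy_ticker_t *t)
{
	return toy_ticker_ticks(t, 1);
}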
deps/jemalloc/test/unit/tsd.c
@@ -58,18 +58,18 @@ thd_start(void *arg)
 	data_t d = (data_t)(uintptr_t)arg;
 	void *p;

-	assert_x_eq(*data_tsd_get(true), DATA_INIT,
+	assert_x_eq(*data_tsd_get(), DATA_INIT,
 	    "Initial tsd get should return initialization value");

 	p = malloc(1);
 	assert_ptr_not_null(p, "Unexpected malloc() failure");

 	data_tsd_set(&d);
-	assert_x_eq(*data_tsd_get(true), d,
+	assert_x_eq(*data_tsd_get(), d,
 	    "After tsd set, tsd get should return value that was set");

 	d = 0;
-	assert_x_eq(*data_tsd_get(true), (data_t)(uintptr_t)arg,
+	assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg,
 	    "Resetting local data should have no effect on tsd");

 	free(p);
@@ -79,7 +79,7 @@ thd_start(void *arg)
 TEST_BEGIN(test_tsd_main_thread)
 {

-	thd_start((void *)(uintptr_t)0xa5f3e329);
+	thd_start((void *) 0xa5f3e329);
 }
 TEST_END
@@ -99,11 +99,6 @@ int
 main(void)
 {

-	/* Core tsd bootstrapping must happen prior to data_tsd_boot(). */
-	if (nallocx(1, 0) == 0) {
-		malloc_printf("Initialization error");
-		return (test_status_fail);
-	}
-
 	data_tsd_boot();

 	return (test(
#include "test/jemalloc_test.h" #include "test/jemalloc_test.h"
#define TEST_POW2_CEIL(t, suf, pri) do { \ TEST_BEGIN(test_pow2_ceil)
unsigned i, pow2; \
t x; \
\
assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \
\
for (i = 0; i < sizeof(t) * 8; i++) { \
assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \
<< i, "Unexpected result"); \
} \
\
for (i = 2; i < sizeof(t) * 8; i++) { \
assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \
((t)1) << i, "Unexpected result"); \
} \
\
for (i = 0; i < sizeof(t) * 8 - 1; i++) { \
assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \
((t)1) << (i+1), "Unexpected result"); \
} \
\
for (pow2 = 1; pow2 < 25; pow2++) { \
for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \
x++) { \
assert_##suf##_eq(pow2_ceil_##suf(x), \
((t)1) << pow2, \
"Unexpected result, x=%"pri, x); \
} \
} \
} while (0)
TEST_BEGIN(test_pow2_ceil_u64)
{ {
unsigned i, pow2;
size_t x;
TEST_POW2_CEIL(uint64_t, u64, FMTu64); assert_zu_eq(pow2_ceil(0), 0, "Unexpected result");
}
TEST_END
TEST_BEGIN(test_pow2_ceil_u32) for (i = 0; i < sizeof(size_t) * 8; i++) {
{ assert_zu_eq(pow2_ceil(ZU(1) << i), ZU(1) << i,
"Unexpected result");
}
TEST_POW2_CEIL(uint32_t, u32, FMTu32); for (i = 2; i < sizeof(size_t) * 8; i++) {
} assert_zu_eq(pow2_ceil((ZU(1) << i) - 1), ZU(1) << i,
TEST_END "Unexpected result");
}
TEST_BEGIN(test_pow2_ceil_zu) for (i = 0; i < sizeof(size_t) * 8 - 1; i++) {
{ assert_zu_eq(pow2_ceil((ZU(1) << i) + 1), ZU(1) << (i+1),
"Unexpected result");
}
TEST_POW2_CEIL(size_t, zu, "zu"); for (pow2 = 1; pow2 < 25; pow2++) {
for (x = (ZU(1) << (pow2-1)) + 1; x <= ZU(1) << pow2; x++) {
assert_zu_eq(pow2_ceil(x), ZU(1) << pow2,
"Unexpected result, x=%zu", x);
}
}
} }
TEST_END TEST_END
...@@ -75,7 +54,6 @@ TEST_BEGIN(test_malloc_strtoumax) ...@@ -75,7 +54,6 @@ TEST_BEGIN(test_malloc_strtoumax)
}; };
#define ERR(e) e, #e #define ERR(e) e, #e
#define KUMAX(x) ((uintmax_t)x##ULL) #define KUMAX(x) ((uintmax_t)x##ULL)
#define KSMAX(x) ((uintmax_t)(intmax_t)x##LL)
struct test_s tests[] = { struct test_s tests[] = {
{"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX},
{"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX},
...@@ -88,13 +66,13 @@ TEST_BEGIN(test_malloc_strtoumax) ...@@ -88,13 +66,13 @@ TEST_BEGIN(test_malloc_strtoumax)
{"42", "", 0, ERR(0), KUMAX(42)}, {"42", "", 0, ERR(0), KUMAX(42)},
{"+42", "", 0, ERR(0), KUMAX(42)}, {"+42", "", 0, ERR(0), KUMAX(42)},
{"-42", "", 0, ERR(0), KSMAX(-42)}, {"-42", "", 0, ERR(0), KUMAX(-42)},
{"042", "", 0, ERR(0), KUMAX(042)}, {"042", "", 0, ERR(0), KUMAX(042)},
{"+042", "", 0, ERR(0), KUMAX(042)}, {"+042", "", 0, ERR(0), KUMAX(042)},
{"-042", "", 0, ERR(0), KSMAX(-042)}, {"-042", "", 0, ERR(0), KUMAX(-042)},
{"0x42", "", 0, ERR(0), KUMAX(0x42)}, {"0x42", "", 0, ERR(0), KUMAX(0x42)},
{"+0x42", "", 0, ERR(0), KUMAX(0x42)}, {"+0x42", "", 0, ERR(0), KUMAX(0x42)},
{"-0x42", "", 0, ERR(0), KSMAX(-0x42)}, {"-0x42", "", 0, ERR(0), KUMAX(-0x42)},
{"0", "", 0, ERR(0), KUMAX(0)}, {"0", "", 0, ERR(0), KUMAX(0)},
{"1", "", 0, ERR(0), KUMAX(1)}, {"1", "", 0, ERR(0), KUMAX(1)},
...@@ -131,7 +109,6 @@ TEST_BEGIN(test_malloc_strtoumax) ...@@ -131,7 +109,6 @@ TEST_BEGIN(test_malloc_strtoumax)
}; };
#undef ERR #undef ERR
#undef KUMAX #undef KUMAX
#undef KSMAX
unsigned i; unsigned i;
for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) { for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
...@@ -162,14 +139,14 @@ TEST_BEGIN(test_malloc_snprintf_truncated) ...@@ -162,14 +139,14 @@ TEST_BEGIN(test_malloc_snprintf_truncated)
{ {
#define BUFLEN 15 #define BUFLEN 15
char buf[BUFLEN]; char buf[BUFLEN];
size_t result; int result;
size_t len; size_t len;
#define TEST(expected_str_untruncated, ...) do { \ #define TEST(expected_str_untruncated, ...) do { \
result = malloc_snprintf(buf, len, __VA_ARGS__); \ result = malloc_snprintf(buf, len, __VA_ARGS__); \
assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \ assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
"Unexpected string inequality (\"%s\" vs \"%s\")", \ "Unexpected string inequality (\"%s\" vs \"%s\")", \
buf, expected_str_untruncated); \ buf, expected_str_untruncated); \
assert_zu_eq(result, strlen(expected_str_untruncated), \ assert_d_eq(result, strlen(expected_str_untruncated), \
"Unexpected result"); \ "Unexpected result"); \
} while (0) } while (0)
...@@ -195,11 +172,11 @@ TEST_BEGIN(test_malloc_snprintf) ...@@ -195,11 +172,11 @@ TEST_BEGIN(test_malloc_snprintf)
{ {
#define BUFLEN 128 #define BUFLEN 128
char buf[BUFLEN]; char buf[BUFLEN];
size_t result; int result;
#define TEST(expected_str, ...) do { \ #define TEST(expected_str, ...) do { \
result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \ result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \
assert_str_eq(buf, expected_str, "Unexpected output"); \ assert_str_eq(buf, expected_str, "Unexpected output"); \
assert_zu_eq(result, strlen(expected_str), "Unexpected result");\ assert_d_eq(result, strlen(expected_str), "Unexpected result"); \
} while (0) } while (0)
TEST("hello", "hello"); TEST("hello", "hello");
...@@ -309,9 +286,7 @@ main(void) ...@@ -309,9 +286,7 @@ main(void)
{ {
return (test( return (test(
test_pow2_ceil_u64, test_pow2_ceil,
test_pow2_ceil_u32,
test_pow2_ceil_zu,
test_malloc_strtoumax_no_endptr, test_malloc_strtoumax_no_endptr,
test_malloc_strtoumax, test_malloc_strtoumax,
test_malloc_snprintf_truncated, test_malloc_snprintf_truncated,
......
#include "test/jemalloc_test.h"
static witness_lock_error_t *witness_lock_error_orig;
static witness_owner_error_t *witness_owner_error_orig;
static witness_not_owner_error_t *witness_not_owner_error_orig;
static witness_lockless_error_t *witness_lockless_error_orig;
static bool saw_lock_error;
static bool saw_owner_error;
static bool saw_not_owner_error;
static bool saw_lockless_error;
static void
witness_lock_error_intercept(const witness_list_t *witnesses,
const witness_t *witness)
{
saw_lock_error = true;
}
static void
witness_owner_error_intercept(const witness_t *witness)
{
saw_owner_error = true;
}
static void
witness_not_owner_error_intercept(const witness_t *witness)
{
saw_not_owner_error = true;
}
static void
witness_lockless_error_intercept(const witness_list_t *witnesses)
{
saw_lockless_error = true;
}
static int
witness_comp(const witness_t *a, const witness_t *b)
{
assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
return (strcmp(a->name, b->name));
}
static int
witness_comp_reverse(const witness_t *a, const witness_t *b)
{
assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
return (-strcmp(a->name, b->name));
}
TEST_BEGIN(test_witness)
{
witness_t a, b;
tsdn_t *tsdn;
test_skip_if(!config_debug);
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
witness_init(&a, "a", 1, NULL);
witness_assert_not_owner(tsdn, &a);
witness_lock(tsdn, &a);
witness_assert_owner(tsdn, &a);
witness_init(&b, "b", 2, NULL);
witness_assert_not_owner(tsdn, &b);
witness_lock(tsdn, &b);
witness_assert_owner(tsdn, &b);
witness_unlock(tsdn, &a);
witness_unlock(tsdn, &b);
witness_assert_lockless(tsdn);
}
TEST_END
TEST_BEGIN(test_witness_comp)
{
witness_t a, b, c, d;
tsdn_t *tsdn;
test_skip_if(!config_debug);
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
witness_init(&a, "a", 1, witness_comp);
witness_assert_not_owner(tsdn, &a);
witness_lock(tsdn, &a);
witness_assert_owner(tsdn, &a);
witness_init(&b, "b", 1, witness_comp);
witness_assert_not_owner(tsdn, &b);
witness_lock(tsdn, &b);
witness_assert_owner(tsdn, &b);
witness_unlock(tsdn, &b);
witness_lock_error_orig = witness_lock_error;
witness_lock_error = witness_lock_error_intercept;
saw_lock_error = false;
witness_init(&c, "c", 1, witness_comp_reverse);
witness_assert_not_owner(tsdn, &c);
assert_false(saw_lock_error, "Unexpected witness lock error");
witness_lock(tsdn, &c);
assert_true(saw_lock_error, "Expected witness lock error");
witness_unlock(tsdn, &c);
saw_lock_error = false;
witness_init(&d, "d", 1, NULL);
witness_assert_not_owner(tsdn, &d);
assert_false(saw_lock_error, "Unexpected witness lock error");
witness_lock(tsdn, &d);
assert_true(saw_lock_error, "Expected witness lock error");
witness_unlock(tsdn, &d);
witness_unlock(tsdn, &a);
witness_assert_lockless(tsdn);
witness_lock_error = witness_lock_error_orig;
}
TEST_END
TEST_BEGIN(test_witness_reversal)
{
witness_t a, b;
tsdn_t *tsdn;
test_skip_if(!config_debug);
witness_lock_error_orig = witness_lock_error;
witness_lock_error = witness_lock_error_intercept;
saw_lock_error = false;
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
witness_init(&a, "a", 1, NULL);
witness_init(&b, "b", 2, NULL);
witness_lock(tsdn, &b);
assert_false(saw_lock_error, "Unexpected witness lock error");
witness_lock(tsdn, &a);
assert_true(saw_lock_error, "Expected witness lock error");
witness_unlock(tsdn, &a);
witness_unlock(tsdn, &b);
witness_assert_lockless(tsdn);
witness_lock_error = witness_lock_error_orig;
}
TEST_END
TEST_BEGIN(test_witness_recursive)
{
witness_t a;
tsdn_t *tsdn;
test_skip_if(!config_debug);
witness_not_owner_error_orig = witness_not_owner_error;
witness_not_owner_error = witness_not_owner_error_intercept;
saw_not_owner_error = false;
witness_lock_error_orig = witness_lock_error;
witness_lock_error = witness_lock_error_intercept;
saw_lock_error = false;
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
witness_init(&a, "a", 1, NULL);
witness_lock(tsdn, &a);
assert_false(saw_lock_error, "Unexpected witness lock error");
assert_false(saw_not_owner_error, "Unexpected witness not owner error");
witness_lock(tsdn, &a);
assert_true(saw_lock_error, "Expected witness lock error");
assert_true(saw_not_owner_error, "Expected witness not owner error");
witness_unlock(tsdn, &a);
witness_assert_lockless(tsdn);
witness_not_owner_error = witness_not_owner_error_orig;
witness_lock_error = witness_lock_error_orig;
}
TEST_END
TEST_BEGIN(test_witness_unlock_not_owned)
{
witness_t a;
tsdn_t *tsdn;
test_skip_if(!config_debug);
witness_owner_error_orig = witness_owner_error;
witness_owner_error = witness_owner_error_intercept;
saw_owner_error = false;
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
witness_init(&a, "a", 1, NULL);
assert_false(saw_owner_error, "Unexpected owner error");
witness_unlock(tsdn, &a);
assert_true(saw_owner_error, "Expected owner error");
witness_assert_lockless(tsdn);
witness_owner_error = witness_owner_error_orig;
}
TEST_END
TEST_BEGIN(test_witness_lockful)
{
witness_t a;
tsdn_t *tsdn;
test_skip_if(!config_debug);
witness_lockless_error_orig = witness_lockless_error;
witness_lockless_error = witness_lockless_error_intercept;
saw_lockless_error = false;
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
witness_init(&a, "a", 1, NULL);
assert_false(saw_lockless_error, "Unexpected lockless error");
witness_assert_lockless(tsdn);
witness_lock(tsdn, &a);
witness_assert_lockless(tsdn);
assert_true(saw_lockless_error, "Expected lockless error");
witness_unlock(tsdn, &a);
witness_assert_lockless(tsdn);
witness_lockless_error = witness_lockless_error_orig;
}
TEST_END
int
main(void)
{
return (test(
test_witness,
test_witness_comp,
test_witness_reversal,
test_witness_recursive,
test_witness_unlock_not_owned,
test_witness_lockful));
}
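The deleted witness test exercises jemalloc's lock-order checker: every lock carries a rank, and acquiring a lock whose rank is not strictly greater than the most recently held one triggers witness_lock_error (equal ranks are permitted only through the comparator path exercised by test_witness_comp). A toy sketch of just the rank rule, ignoring comparators and the pluggable error hooks:

#include <assert.h>
#include <stdbool.h>

#define MAX_HELD 8

typedef struct {
	unsigned ranks[MAX_HELD];	/* ranks of currently held locks */
	unsigned nheld;
} toy_witness_list_t;

/* Returns false (the analogue of a witness lock error) if taking a lock of
 * rank `rank` would violate the ordering; otherwise records it as held. */
static bool
toy_witness_lock(toy_witness_list_t *w, unsigned rank)
{
	if (w->nheld > 0 && rank <= w->ranks[w->nheld - 1])
		return false;	/* reversal or recursion: rank not increasing */
	assert(w->nheld < MAX_HELD);
	w->ranks[w->nheld++] = rank;
	return true;
}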
deps/jemalloc/test/unit/zero.c
@@ -8,41 +8,39 @@ const char *malloc_conf =
 static void
 test_zero(size_t sz_min, size_t sz_max)
 {
-	uint8_t *s;
+	char *s;
 	size_t sz_prev, sz, i;
-#define MAGIC	((uint8_t)0x61)

 	sz_prev = 0;
-	s = (uint8_t *)mallocx(sz_min, 0);
+	s = (char *)mallocx(sz_min, 0);
 	assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");

 	for (sz = sallocx(s, 0); sz <= sz_max;
 	    sz_prev = sz, sz = sallocx(s, 0)) {
 		if (sz_prev > 0) {
-			assert_u_eq(s[0], MAGIC,
+			assert_c_eq(s[0], 'a',
 			    "Previously allocated byte %zu/%zu is corrupted",
 			    ZU(0), sz_prev);
-			assert_u_eq(s[sz_prev-1], MAGIC,
+			assert_c_eq(s[sz_prev-1], 'a',
 			    "Previously allocated byte %zu/%zu is corrupted",
 			    sz_prev-1, sz_prev);
 		}

 		for (i = sz_prev; i < sz; i++) {
-			assert_u_eq(s[i], 0x0,
+			assert_c_eq(s[i], 0x0,
 			    "Newly allocated byte %zu/%zu isn't zero-filled",
 			    i, sz);
-			s[i] = MAGIC;
+			s[i] = 'a';
 		}

 		if (xallocx(s, sz+1, 0, 0) == sz) {
-			s = (uint8_t *)rallocx(s, sz+1, 0);
+			s = (char *)rallocx(s, sz+1, 0);
 			assert_ptr_not_null((void *)s,
 			    "Unexpected rallocx() failure");
 		}
 	}

 	dallocx(s, 0);
-#undef MAGIC
 }

 TEST_BEGIN(test_zero_small)
src/Makefile
@@ -139,7 +139,7 @@ endif
 REDIS_SERVER_NAME=redis-server
 REDIS_SENTINEL_NAME=redis-sentinel
-REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o
+REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o
 REDIS_CLI_NAME=redis-cli
 REDIS_CLI_OBJ=anet.o adlist.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o
 REDIS_BENCHMARK_NAME=redis-benchmark
@@ -214,8 +214,8 @@ $(REDIS_BENCHMARK_NAME): $(REDIS_BENCHMARK_OBJ)
 $(REDIS_CHECK_AOF_NAME): $(REDIS_CHECK_AOF_OBJ)
 	$(REDIS_LD) -o $@ $^ $(FINAL_LIBS)

-dict-benchmark: dict.c zmalloc.c sds.c
-	$(REDIS_CC) $(FINAL_CFLAGS) dict.c zmalloc.c sds.c siphash.c -D DICT_BENCHMARK_MAIN -o dict-benchmark
+dict-benchmark: dict.c zmalloc.c sds.c siphash.c
+	$(REDIS_CC) $(FINAL_CFLAGS) $^ -D DICT_BENCHMARK_MAIN -o $@ $(FINAL_LIBS)

 # Because the jemalloc.h header is generated as a part of the jemalloc build,
 # building it should complete before building any other object. Instead of
...@@ -52,10 +52,8 @@ list *listCreate(void) ...@@ -52,10 +52,8 @@ list *listCreate(void)
return list; return list;
} }
/* Free the whole list. /* Remove all the elements from the list without destroying the list itself. */
* void listEmpty(list *list)
* This function can't fail. */
void listRelease(list *list)
{ {
unsigned long len; unsigned long len;
listNode *current, *next; listNode *current, *next;
...@@ -68,6 +66,16 @@ void listRelease(list *list) ...@@ -68,6 +66,16 @@ void listRelease(list *list)
zfree(current); zfree(current);
current = next; current = next;
} }
list->head = list->tail = NULL;
list->len = 0;
}
/* Free the whole list.
*
* This function can't fail. */
void listRelease(list *list)
{
listEmpty(list);
zfree(list); zfree(list);
} }
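Factoring listEmpty() out of listRelease() lets callers drop every node while keeping the list header alive for reuse. A minimal usage sketch (the literal values are illustrative; real code holding heap-allocated values would also set a free method with listSetFreeMethod()):

    list *l = listCreate();
    listAddNodeTail(l, "first");
    listAddNodeTail(l, "second");

    listEmpty(l);                  /* nodes gone, header still valid */
    listAddNodeTail(l, "reused");

    listRelease(l);                /* now just listEmpty() + zfree() */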
......
...@@ -72,6 +72,7 @@ typedef struct list { ...@@ -72,6 +72,7 @@ typedef struct list {
/* Prototypes */ /* Prototypes */
list *listCreate(void); list *listCreate(void);
void listRelease(list *list); void listRelease(list *list);
void listEmpty(list *list);
list *listAddNodeHead(list *list, void *value); list *listAddNodeHead(list *list, void *value);
list *listAddNodeTail(list *list, void *value); list *listAddNodeTail(list *list, void *value);
list *listInsertNode(list *list, listNode *old_node, void *value, int after); list *listInsertNode(list *list, listNode *old_node, void *value, int after);
......
...@@ -380,9 +380,11 @@ int anetUnixGenericConnect(char *err, char *path, int flags) ...@@ -380,9 +380,11 @@ int anetUnixGenericConnect(char *err, char *path, int flags)
sa.sun_family = AF_LOCAL; sa.sun_family = AF_LOCAL;
strncpy(sa.sun_path,path,sizeof(sa.sun_path)-1); strncpy(sa.sun_path,path,sizeof(sa.sun_path)-1);
if (flags & ANET_CONNECT_NONBLOCK) { if (flags & ANET_CONNECT_NONBLOCK) {
if (anetNonBlock(err,s) != ANET_OK) if (anetNonBlock(err,s) != ANET_OK) {
close(s);
return ANET_ERR; return ANET_ERR;
} }
}
if (connect(s,(struct sockaddr*)&sa,sizeof(sa)) == -1) { if (connect(s,(struct sockaddr*)&sa,sizeof(sa)) == -1) {
if (errno == EINPROGRESS && if (errno == EINPROGRESS &&
flags & ANET_CONNECT_NONBLOCK) flags & ANET_CONNECT_NONBLOCK)
...@@ -462,7 +464,7 @@ static int anetV6Only(char *err, int s) { ...@@ -462,7 +464,7 @@ static int anetV6Only(char *err, int s) {
static int _anetTcpServer(char *err, int port, char *bindaddr, int af, int backlog) static int _anetTcpServer(char *err, int port, char *bindaddr, int af, int backlog)
{ {
int s, rv; int s = -1, rv;
char _port[6]; /* strlen("65535") */ char _port[6]; /* strlen("65535") */
struct addrinfo hints, *servinfo, *p; struct addrinfo hints, *servinfo, *p;
...@@ -491,6 +493,7 @@ static int _anetTcpServer(char *err, int port, char *bindaddr, int af, int backl ...@@ -491,6 +493,7 @@ static int _anetTcpServer(char *err, int port, char *bindaddr, int af, int backl
} }
error: error:
if (s != -1) close(s);
s = ANET_ERR; s = ANET_ERR;
end: end:
freeaddrinfo(servinfo); freeaddrinfo(servinfo);
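Both anet fixes close the same kind of descriptor leak, and both rely on the usual C cleanup idiom: initialize the descriptor to a sentinel and funnel every failure through a single path that releases it at most once. A minimal sketch of the pattern with hypothetical helpers (make_socket() and configure_socket() stand in for the socket()/bind()/listen() sequences in the real code):

    #include <unistd.h>

    int make_socket(void);           /* hypothetical: returns an fd or -1 */
    int configure_socket(int fd);    /* hypothetical: returns 0 or -1 */

    int open_and_configure(void) {
        int fd = -1;                          /* sentinel: nothing to clean up */

        if ((fd = make_socket()) == -1) goto error;
        if (configure_socket(fd) == -1) goto error;
        return fd;

    error:
        if (fd != -1) close(fd);              /* only close a real descriptor */
        return -1;
    }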
......
...@@ -115,6 +115,7 @@ void aofChildWriteDiffData(aeEventLoop *el, int fd, void *privdata, int mask) { ...@@ -115,6 +115,7 @@ void aofChildWriteDiffData(aeEventLoop *el, int fd, void *privdata, int mask) {
if (nwritten <= 0) return; if (nwritten <= 0) return;
memmove(block->buf,block->buf+nwritten,block->used-nwritten); memmove(block->buf,block->buf+nwritten,block->used-nwritten);
block->used -= nwritten; block->used -= nwritten;
block->free += nwritten;
} }
if (block->used == 0) listDelNode(server.aof_rewrite_buf_blocks,ln); if (block->used == 0) listDelNode(server.aof_rewrite_buf_blocks,ln);
} }
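The rewrite buffer blocks maintain the invariant used + free == capacity; before this fix the bytes reclaimed by the memmove() were subtracted from used but never returned to free. A simplified sketch of the accounting (the struct is reduced from the real aofrwblock):

    #include <string.h>

    #define BLOCK_SIZE (10*1024*1024)   /* same idea as AOF_RW_BUF_BLOCK_SIZE */

    typedef struct {
        unsigned long used, free;       /* invariant: used + free == BLOCK_SIZE */
        char buf[BLOCK_SIZE];
    } block_t;

    /* Discard 'nwritten' bytes just consumed from the front of the block. */
    static void block_consume(block_t *b, unsigned long nwritten) {
        memmove(b->buf, b->buf + nwritten, b->used - nwritten);
        b->used -= nwritten;
        b->free += nwritten;            /* the fix: restore the invariant */
    }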
......
...@@ -423,8 +423,11 @@ void clusterInit(void) { ...@@ -423,8 +423,11 @@ void clusterInit(void) {
server.cluster->failover_auth_epoch = 0; server.cluster->failover_auth_epoch = 0;
server.cluster->cant_failover_reason = CLUSTER_CANT_FAILOVER_NONE; server.cluster->cant_failover_reason = CLUSTER_CANT_FAILOVER_NONE;
server.cluster->lastVoteEpoch = 0; server.cluster->lastVoteEpoch = 0;
server.cluster->stats_bus_messages_sent = 0; for (int i = 0; i < CLUSTERMSG_TYPE_COUNT; i++) {
server.cluster->stats_bus_messages_received = 0; server.cluster->stats_bus_messages_sent[i] = 0;
server.cluster->stats_bus_messages_received[i] = 0;
}
server.cluster->stats_pfail_nodes = 0;
memset(server.cluster->slots,0, sizeof(server.cluster->slots)); memset(server.cluster->slots,0, sizeof(server.cluster->slots));
clusterCloseAllSlots(); clusterCloseAllSlots();
...@@ -476,8 +479,10 @@ void clusterInit(void) { ...@@ -476,8 +479,10 @@ void clusterInit(void) {
} }
} }
/* The slots -> keys map is a sorted set. Init it. */ /* The slots -> keys map is a radix tree. Initialize it here. */
server.cluster->slots_to_keys = zslCreate(); server.cluster->slots_to_keys = raxNew();
memset(server.cluster->slots_keys_count,0,
sizeof(server.cluster->slots_keys_count));
/* Set myself->port / cport to my listening ports; we'll just need to /* Set myself->port / cport to my listening ports; we'll just need to
* discover the IP address via MEET messages. */ * discover the IP address via MEET messages. */
...@@ -1350,6 +1355,28 @@ void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) { ...@@ -1350,6 +1355,28 @@ void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) {
} }
} }
/* If from our POV the node is up (no failure flags are set),
* we have no pending ping for the node, and we have no failure
* reports for it, update the last pong time with the
* one we see from the other nodes. */
if (!(flags & (CLUSTER_NODE_FAIL|CLUSTER_NODE_PFAIL)) &&
node->ping_sent == 0 &&
clusterNodeFailureReportsCount(node) == 0)
{
mstime_t pongtime = ntohl(g->pong_received);
pongtime *= 1000; /* Convert back to milliseconds. */
/* Replace the pong time with the received one only if it is
* more recent than our view, but not in the future (beyond a
* 500 millisecond tolerance) from the POV of our clock. */
if (pongtime <= (server.mstime+500) &&
pongtime > node->pong_received)
{
node->pong_received = pongtime;
}
}
/* If we already know this node, but it is not reachable, and /* If we already know this node, but it is not reachable, and
* we see a different address in the gossip section of a node that * we see a different address in the gossip section of a node that
* can talk with this other node, update the address, disconnect * can talk with this other node, update the address, disconnect
...@@ -1581,7 +1608,8 @@ int clusterProcessPacket(clusterLink *link) { ...@@ -1581,7 +1608,8 @@ int clusterProcessPacket(clusterLink *link) {
uint32_t totlen = ntohl(hdr->totlen); uint32_t totlen = ntohl(hdr->totlen);
uint16_t type = ntohs(hdr->type); uint16_t type = ntohs(hdr->type);
server.cluster->stats_bus_messages_received++; if (type < CLUSTERMSG_TYPE_COUNT)
server.cluster->stats_bus_messages_received[type]++;
serverLog(LL_DEBUG,"--- Processing packet of type %d, %lu bytes", serverLog(LL_DEBUG,"--- Processing packet of type %d, %lu bytes",
type, (unsigned long) totlen); type, (unsigned long) totlen);
...@@ -2128,7 +2156,12 @@ void clusterSendMessage(clusterLink *link, unsigned char *msg, size_t msglen) { ...@@ -2128,7 +2156,12 @@ void clusterSendMessage(clusterLink *link, unsigned char *msg, size_t msglen) {
clusterWriteHandler,link); clusterWriteHandler,link);
link->sndbuf = sdscatlen(link->sndbuf, msg, msglen); link->sndbuf = sdscatlen(link->sndbuf, msg, msglen);
server.cluster->stats_bus_messages_sent++;
/* Populate sent messages stats. */
clusterMsg *hdr = (clusterMsg*) msg;
uint16_t type = ntohs(hdr->type);
if (type < CLUSTERMSG_TYPE_COUNT)
server.cluster->stats_bus_messages_sent[type]++;
} }
/* Send a message to all the nodes that are part of the cluster having /* Send a message to all the nodes that are part of the cluster having
...@@ -2229,6 +2262,33 @@ void clusterBuildMessageHdr(clusterMsg *hdr, int type) { ...@@ -2229,6 +2262,33 @@ void clusterBuildMessageHdr(clusterMsg *hdr, int type) {
/* For PING, PONG, and MEET, fixing the totlen field is up to the caller. */ /* For PING, PONG, and MEET, fixing the totlen field is up to the caller. */
} }
/* Return non-zero if the node is already present in the gossip section of the
* message pointed to by 'hdr', which has 'count' gossip entries; otherwise
* zero is returned. Helper for clusterSendPing(). */
int clusterNodeIsInGossipSection(clusterMsg *hdr, int count, clusterNode *n) {
int j;
for (j = 0; j < count; j++) {
if (memcmp(hdr->data.ping.gossip[j].nodename,n->name,
CLUSTER_NAMELEN) == 0) break;
}
return j != count;
}
/* Set the i-th entry of the gossip section in the message pointed to by 'hdr'
* to the info of the specified node 'n'. */
void clusterSetGossipEntry(clusterMsg *hdr, int i, clusterNode *n) {
clusterMsgDataGossip *gossip;
gossip = &(hdr->data.ping.gossip[i]);
memcpy(gossip->nodename,n->name,CLUSTER_NAMELEN);
gossip->ping_sent = htonl(n->ping_sent/1000);
gossip->pong_received = htonl(n->pong_received/1000);
memcpy(gossip->ip,n->ip,sizeof(n->ip));
gossip->port = htons(n->port);
gossip->cport = htons(n->cport);
gossip->flags = htons(n->flags);
gossip->notused1 = 0;
}
/* Send a PING or PONG packet to the specified node, making sure to add enough /* Send a PING or PONG packet to the specified node, making sure to add enough
* gossip information. */ * gossip information. */
void clusterSendPing(clusterLink *link, int type) { void clusterSendPing(clusterLink *link, int type) {
...@@ -2273,11 +2333,15 @@ void clusterSendPing(clusterLink *link, int type) { ...@@ -2273,11 +2333,15 @@ void clusterSendPing(clusterLink *link, int type) {
if (wanted < 3) wanted = 3; if (wanted < 3) wanted = 3;
if (wanted > freshnodes) wanted = freshnodes; if (wanted > freshnodes) wanted = freshnodes;
/* Include all the nodes in PFAIL state, so that failure reports
* propagate faster and nodes move from PFAIL to FAIL state sooner. */
int pfail_wanted = server.cluster->stats_pfail_nodes;
/* Compute the maximum totlen to allocate our buffer. We'll fix the totlen /* Compute the maximum totlen to allocate our buffer. We'll fix the totlen
* later according to the number of gossip sections we really were able * later according to the number of gossip sections we really were able
* to put inside the packet. */ * to put inside the packet. */
totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData);
totlen += (sizeof(clusterMsgDataGossip)*wanted); totlen += (sizeof(clusterMsgDataGossip)*(wanted+pfail_wanted));
/* Note: clusterBuildMessageHdr() expects the buffer to always be at /* Note: clusterBuildMessageHdr() expects the buffer to always be at
* least sizeof(clusterMsg) bytes. */ * least sizeof(clusterMsg) bytes. */
if (totlen < (int)sizeof(clusterMsg)) totlen = sizeof(clusterMsg); if (totlen < (int)sizeof(clusterMsg)) totlen = sizeof(clusterMsg);
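To make the sizing concrete, a worked example with hypothetical struct sizes (real values depend on platform and packing): suppose sizeof(clusterMsg) is 2256, sizeof(union clusterMsgData) is 2152, and sizeof(clusterMsgDataGossip) is 104. With wanted = 3 and pfail_wanted = 2:

    totlen = (2256 - 2152) + 104 * (3 + 2) = 104 + 520 = 624

Since 624 < sizeof(clusterMsg), the check above raises totlen to 2256; the final totlen is fixed again before sending, based on how many gossip entries were actually written.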
...@@ -2294,17 +2358,13 @@ void clusterSendPing(clusterLink *link, int type) { ...@@ -2294,17 +2358,13 @@ void clusterSendPing(clusterLink *link, int type) {
while(freshnodes > 0 && gossipcount < wanted && maxiterations--) { while(freshnodes > 0 && gossipcount < wanted && maxiterations--) {
dictEntry *de = dictGetRandomKey(server.cluster->nodes); dictEntry *de = dictGetRandomKey(server.cluster->nodes);
clusterNode *this = dictGetVal(de); clusterNode *this = dictGetVal(de);
clusterMsgDataGossip *gossip;
int j;
/* Don't include this node: the whole packet header is about us /* Don't include this node: the whole packet header is about us
* already, so we just gossip about other nodes. */ * already, so we just gossip about other nodes. */
if (this == myself) continue; if (this == myself) continue;
/* Give a bias to FAIL/PFAIL nodes. */ /* PFAIL nodes will be added later. */
if (maxiterations > wanted*2 && if (this->flags & CLUSTER_NODE_PFAIL) continue;
!(this->flags & (CLUSTER_NODE_PFAIL|CLUSTER_NODE_FAIL)))
continue;
/* In the gossip section don't include: /* In the gossip section don't include:
* 1) Nodes in HANDSHAKE state. * 1) Nodes in HANDSHAKE state.
...@@ -2318,25 +2378,35 @@ void clusterSendPing(clusterLink *link, int type) { ...@@ -2318,25 +2378,35 @@ void clusterSendPing(clusterLink *link, int type) {
continue; continue;
} }
/* Check if we already added this node */ /* Do not add a node we already have. */
for (j = 0; j < gossipcount; j++) { if (clusterNodeIsInGossipSection(hdr,gossipcount,this)) continue;
if (memcmp(hdr->data.ping.gossip[j].nodename,this->name,
CLUSTER_NAMELEN) == 0) break;
}
if (j != gossipcount) continue;
/* Add it */ /* Add it */
clusterSetGossipEntry(hdr,gossipcount,this);
freshnodes--;
gossipcount++;
}
/* If there are PFAIL nodes, add them at the end. */
if (pfail_wanted) {
dictIterator *di;
dictEntry *de;
di = dictGetSafeIterator(server.cluster->nodes);
while((de = dictNext(di)) != NULL && pfail_wanted > 0) {
clusterNode *node = dictGetVal(de);
if (node->flags & CLUSTER_NODE_HANDSHAKE) continue;
if (node->flags & CLUSTER_NODE_NOADDR) continue;
if (!(node->flags & CLUSTER_NODE_PFAIL)) continue;
clusterSetGossipEntry(hdr,gossipcount,node);
freshnodes--; freshnodes--;
gossip = &(hdr->data.ping.gossip[gossipcount]);
memcpy(gossip->nodename,this->name,CLUSTER_NAMELEN);
gossip->ping_sent = htonl(this->ping_sent);
gossip->pong_received = htonl(this->pong_received);
memcpy(gossip->ip,this->ip,sizeof(this->ip));
gossip->port = htons(this->port);
gossip->cport = htons(this->cport);
gossip->flags = htons(this->flags);
gossip->notused1 = 0;
gossipcount++; gossipcount++;
/* We use the count of the gossip entries we allocated, since the
* PFAIL stats may not match the current number of PFAIL nodes
* exactly. */
pfail_wanted--;
}
dictReleaseIterator(di);
} }
/* Ready to send... fix the totlen field and queue the message in the
...@@ -3164,13 +3234,21 @@ void clusterCron(void) { ...@@ -3164,13 +3234,21 @@ void clusterCron(void) {
handshake_timeout = server.cluster_node_timeout; handshake_timeout = server.cluster_node_timeout;
if (handshake_timeout < 1000) handshake_timeout = 1000; if (handshake_timeout < 1000) handshake_timeout = 1000;
/* Check if we have disconnected nodes and re-establish the connection. */ /* Check if we have disconnected nodes and re-establish the connection.
* Also update a few stats while we are here, which can be used to make
* better decisions in other parts of the code. */
di = dictGetSafeIterator(server.cluster->nodes); di = dictGetSafeIterator(server.cluster->nodes);
server.cluster->stats_pfail_nodes = 0;
while((de = dictNext(di)) != NULL) { while((de = dictNext(di)) != NULL) {
clusterNode *node = dictGetVal(de); clusterNode *node = dictGetVal(de);
/* Not interested in reconnecting the link with myself or nodes
* for which we have no address. */
if (node->flags & (CLUSTER_NODE_MYSELF|CLUSTER_NODE_NOADDR)) continue; if (node->flags & (CLUSTER_NODE_MYSELF|CLUSTER_NODE_NOADDR)) continue;
if (node->flags & CLUSTER_NODE_PFAIL)
server.cluster->stats_pfail_nodes++;
/* A Node in HANDSHAKE state has a limited lifespan equal to the /* A Node in HANDSHAKE state has a limited lifespan equal to the
* configured node timeout. */ * configured node timeout. */
if (nodeInHandshake(node) && now - node->ctime > handshake_timeout) { if (nodeInHandshake(node) && now - node->ctime > handshake_timeout) {
...@@ -3875,6 +3953,21 @@ sds clusterGenNodesDescription(int filter) { ...@@ -3875,6 +3953,21 @@ sds clusterGenNodesDescription(int filter) {
* CLUSTER command * CLUSTER command
* -------------------------------------------------------------------------- */ * -------------------------------------------------------------------------- */
const char *clusterGetMessageTypeString(int type) {
switch(type) {
case CLUSTERMSG_TYPE_PING: return "ping";
case CLUSTERMSG_TYPE_PONG: return "pong";
case CLUSTERMSG_TYPE_MEET: return "meet";
case CLUSTERMSG_TYPE_FAIL: return "fail";
case CLUSTERMSG_TYPE_PUBLISH: return "publish";
case CLUSTERMSG_TYPE_FAILOVER_AUTH_REQUEST: return "auth-req";
case CLUSTERMSG_TYPE_FAILOVER_AUTH_ACK: return "auth-ack";
case CLUSTERMSG_TYPE_UPDATE: return "update";
case CLUSTERMSG_TYPE_MFSTART: return "mfstart";
}
return "unknown";
}
int getSlotOrReply(client *c, robj *o) { int getSlotOrReply(client *c, robj *o) {
long long slot; long long slot;
...@@ -4206,8 +4299,6 @@ void clusterCommand(client *c) { ...@@ -4206,8 +4299,6 @@ void clusterCommand(client *c) {
"cluster_size:%d\r\n" "cluster_size:%d\r\n"
"cluster_current_epoch:%llu\r\n" "cluster_current_epoch:%llu\r\n"
"cluster_my_epoch:%llu\r\n" "cluster_my_epoch:%llu\r\n"
"cluster_stats_messages_sent:%lld\r\n"
"cluster_stats_messages_received:%lld\r\n"
, statestr[server.cluster->state], , statestr[server.cluster->state],
slots_assigned, slots_assigned,
slots_ok, slots_ok,
...@@ -4216,10 +4307,36 @@ void clusterCommand(client *c) { ...@@ -4216,10 +4307,36 @@ void clusterCommand(client *c) {
dictSize(server.cluster->nodes), dictSize(server.cluster->nodes),
server.cluster->size, server.cluster->size,
(unsigned long long) server.cluster->currentEpoch, (unsigned long long) server.cluster->currentEpoch,
(unsigned long long) myepoch, (unsigned long long) myepoch
server.cluster->stats_bus_messages_sent,
server.cluster->stats_bus_messages_received
); );
/* Show stats about messages sent and received. */
long long tot_msg_sent = 0;
long long tot_msg_received = 0;
for (int i = 0; i < CLUSTERMSG_TYPE_COUNT; i++) {
if (server.cluster->stats_bus_messages_sent[i] == 0) continue;
tot_msg_sent += server.cluster->stats_bus_messages_sent[i];
info = sdscatprintf(info,
"cluster_stats_messages_%s_sent:%lld\r\n",
clusterGetMessageTypeString(i),
server.cluster->stats_bus_messages_sent[i]);
}
info = sdscatprintf(info,
"cluster_stats_messages_sent:%lld\r\n", tot_msg_sent);
for (int i = 0; i < CLUSTERMSG_TYPE_COUNT; i++) {
if (server.cluster->stats_bus_messages_received[i] == 0) continue;
tot_msg_received += server.cluster->stats_bus_messages_received[i];
info = sdscatprintf(info,
"cluster_stats_messages_%s_received:%lld\r\n",
clusterGetMessageTypeString(i),
server.cluster->stats_bus_messages_received[i]);
}
info = sdscatprintf(info,
"cluster_stats_messages_received:%lld\r\n", tot_msg_received);
/* Produce the reply protocol. */
addReplySds(c,sdscatprintf(sdsempty(),"$%lu\r\n", addReplySds(c,sdscatprintf(sdsempty(),"$%lu\r\n",
(unsigned long)sdslen(info))); (unsigned long)sdslen(info)));
addReplySds(c,info); addReplySds(c,info);
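With these loops, CLUSTER INFO reports one counter line per message type that was actually observed, plus the aggregated totals. An illustrative fragment of the resulting fields (the counts are made up):

    cluster_stats_messages_ping_sent:1024
    cluster_stats_messages_pong_sent:1019
    cluster_stats_messages_sent:2043
    cluster_stats_messages_ping_received:1019
    cluster_stats_messages_pong_received:1024
    cluster_stats_messages_received:2043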
......
...@@ -73,6 +73,29 @@ typedef struct clusterLink { ...@@ -73,6 +73,29 @@ typedef struct clusterLink {
#define CLUSTER_CANT_FAILOVER_WAITING_VOTES 4 #define CLUSTER_CANT_FAILOVER_WAITING_VOTES 4
#define CLUSTER_CANT_FAILOVER_RELOG_PERIOD (60*5) /* seconds. */ #define CLUSTER_CANT_FAILOVER_RELOG_PERIOD (60*5) /* seconds. */
/* clusterState todo_before_sleep flags. */
#define CLUSTER_TODO_HANDLE_FAILOVER (1<<0)
#define CLUSTER_TODO_UPDATE_STATE (1<<1)
#define CLUSTER_TODO_SAVE_CONFIG (1<<2)
#define CLUSTER_TODO_FSYNC_CONFIG (1<<3)
/* Message types.
*
* Note that the PING, PONG and MEET messages are actually the exact same
* kind of packet. PONG is the reply to PING, in the same format as a PING,
* while MEET is a special PING that forces the receiver to add the sender
* as a node (if it is not already in the list). */
#define CLUSTERMSG_TYPE_PING 0 /* Ping */
#define CLUSTERMSG_TYPE_PONG 1 /* Pong (reply to Ping) */
#define CLUSTERMSG_TYPE_MEET 2 /* Meet "let's join" message */
#define CLUSTERMSG_TYPE_FAIL 3 /* Mark node xxx as failing */
#define CLUSTERMSG_TYPE_PUBLISH 4 /* Pub/Sub Publish propagation */
#define CLUSTERMSG_TYPE_FAILOVER_AUTH_REQUEST 5 /* May I failover? */
#define CLUSTERMSG_TYPE_FAILOVER_AUTH_ACK 6 /* Yes, you have my vote */
#define CLUSTERMSG_TYPE_UPDATE 7 /* Another node slots configuration */
#define CLUSTERMSG_TYPE_MFSTART 8 /* Pause clients for manual failover */
#define CLUSTERMSG_TYPE_COUNT 9 /* Total number of message types. */
/* This structure represent elements of node->fail_reports. */ /* This structure represent elements of node->fail_reports. */
typedef struct clusterNodeFailReport { typedef struct clusterNodeFailReport {
struct clusterNode *node; /* Node reporting the failure condition. */ struct clusterNode *node; /* Node reporting the failure condition. */
...@@ -116,7 +139,8 @@ typedef struct clusterState { ...@@ -116,7 +139,8 @@ typedef struct clusterState {
clusterNode *migrating_slots_to[CLUSTER_SLOTS]; clusterNode *migrating_slots_to[CLUSTER_SLOTS];
clusterNode *importing_slots_from[CLUSTER_SLOTS]; clusterNode *importing_slots_from[CLUSTER_SLOTS];
clusterNode *slots[CLUSTER_SLOTS]; clusterNode *slots[CLUSTER_SLOTS];
zskiplist *slots_to_keys; uint64_t slots_keys_count[CLUSTER_SLOTS];
rax *slots_to_keys;
/* The following fields are used to track the slave state during elections. */ /* The following fields are used to track the slave state during elections. */
mstime_t failover_auth_time; /* Time of previous or next election. */ mstime_t failover_auth_time; /* Time of previous or next election. */
int failover_auth_count; /* Number of votes received so far. */ int failover_auth_count; /* Number of votes received so far. */
...@@ -138,32 +162,15 @@ typedef struct clusterState { ...@@ -138,32 +162,15 @@ typedef struct clusterState {
/* The following fields are used by masters to track state during elections. */ /* The following fields are used by masters to track state during elections. */
uint64_t lastVoteEpoch; /* Epoch of the last vote granted. */ uint64_t lastVoteEpoch; /* Epoch of the last vote granted. */
int todo_before_sleep; /* Things to do in clusterBeforeSleep(). */ int todo_before_sleep; /* Things to do in clusterBeforeSleep(). */
long long stats_bus_messages_sent; /* Num of msg sent via cluster bus. */ /* Messages received and sent by type. */
long long stats_bus_messages_received; /* Num of msg rcvd via cluster bus.*/ long long stats_bus_messages_sent[CLUSTERMSG_TYPE_COUNT];
long long stats_bus_messages_received[CLUSTERMSG_TYPE_COUNT];
long long stats_pfail_nodes; /* Number of nodes in PFAIL status,
excluding nodes without address. */
} clusterState; } clusterState;
/* clusterState todo_before_sleep flags. */
#define CLUSTER_TODO_HANDLE_FAILOVER (1<<0)
#define CLUSTER_TODO_UPDATE_STATE (1<<1)
#define CLUSTER_TODO_SAVE_CONFIG (1<<2)
#define CLUSTER_TODO_FSYNC_CONFIG (1<<3)
/* Redis cluster messages header */ /* Redis cluster messages header */
/* Note that the PING, PONG and MEET messages are actually the same exact
* kind of packet. PONG is the reply to ping, in the exact format as a PING,
* while MEET is a special PING that forces the receiver to add the sender
* as a node (if it is not already in the list). */
#define CLUSTERMSG_TYPE_PING 0 /* Ping */
#define CLUSTERMSG_TYPE_PONG 1 /* Pong (reply to Ping) */
#define CLUSTERMSG_TYPE_MEET 2 /* Meet "let's join" message */
#define CLUSTERMSG_TYPE_FAIL 3 /* Mark node xxx as failing */
#define CLUSTERMSG_TYPE_PUBLISH 4 /* Pub/Sub Publish propagation */
#define CLUSTERMSG_TYPE_FAILOVER_AUTH_REQUEST 5 /* May I failover? */
#define CLUSTERMSG_TYPE_FAILOVER_AUTH_ACK 6 /* Yes, you have my vote */
#define CLUSTERMSG_TYPE_UPDATE 7 /* Another node slots configuration */
#define CLUSTERMSG_TYPE_MFSTART 8 /* Pause clients for manual failover */
/* Initially we don't know our "name", but we'll find it once we connect /* Initially we don't know our "name", but we'll find it once we connect
* to the first node, using the getsockname() function. Then we'll use this * to the first node, using the getsockname() function. Then we'll use this
* address for all subsequent messages. */ * address for all subsequent messages. */
......
...@@ -1133,11 +1133,24 @@ int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, in ...@@ -1133,11 +1133,24 @@ int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, in
*numkeys = 0; *numkeys = 0;
return NULL; return NULL;
} }
last = cmd->lastkey; last = cmd->lastkey;
if (last < 0) last = argc+last; if (last < 0) last = argc+last;
keys = zmalloc(sizeof(int)*((last - cmd->firstkey)+1)); keys = zmalloc(sizeof(int)*((last - cmd->firstkey)+1));
for (j = cmd->firstkey; j <= last; j += cmd->keystep) { for (j = cmd->firstkey; j <= last; j += cmd->keystep) {
serverAssert(j < argc); if (j >= argc) {
/* Module commands do not have dispatch-time arity checks, so
* we need to handle here the case where the user passed an
* invalid number of arguments. In this case we return no keys
* and expect the module command to report an arity error. */
if (cmd->flags & CMD_MODULE) {
zfree(keys);
*numkeys = 0;
return NULL;
} else {
serverPanic("Redis built-in command declared key positions not matching the arity requirements.");
}
}
keys[i++] = j; keys[i++] = j;
} }
*numkeys = i; *numkeys = i;
...@@ -1301,90 +1314,85 @@ int *migrateGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numkey ...@@ -1301,90 +1314,85 @@ int *migrateGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numkey
/* Slot to Key API. This is used by Redis Cluster in order to obtain in /* Slot to Key API. This is used by Redis Cluster in order to obtain in
* a fast way a key that belongs to a specified hash slot. This is useful * a fast way a key that belongs to a specified hash slot. This is useful
* while rehashing the cluster. */ * while rehashing the cluster and in other conditions when we need to
void slotToKeyAdd(robj *key) { * understand if we have keys for a given hash slot. */
void slotToKeyUpdateKey(robj *key, int add) {
unsigned int hashslot = keyHashSlot(key->ptr,sdslen(key->ptr)); unsigned int hashslot = keyHashSlot(key->ptr,sdslen(key->ptr));
unsigned char buf[64];
unsigned char *indexed = buf;
size_t keylen = sdslen(key->ptr);
server.cluster->slots_keys_count[hashslot] += add ? 1 : -1;
if (keylen+2 > 64) indexed = zmalloc(keylen+2);
indexed[0] = (hashslot >> 8) & 0xff;
indexed[1] = hashslot & 0xff;
memcpy(indexed+2,key->ptr,keylen);
if (add) {
raxInsert(server.cluster->slots_to_keys,indexed,keylen+2,NULL,NULL);
} else {
raxRemove(server.cluster->slots_to_keys,indexed,keylen+2,NULL);
}
if (indexed != buf) zfree(indexed);
}
sds sdskey = sdsdup(key->ptr); void slotToKeyAdd(robj *key) {
zslInsert(server.cluster->slots_to_keys,hashslot,sdskey); slotToKeyUpdateKey(key,1);
} }
void slotToKeyDel(robj *key) { void slotToKeyDel(robj *key) {
unsigned int hashslot = keyHashSlot(key->ptr,sdslen(key->ptr)); slotToKeyUpdateKey(key,0);
zslDelete(server.cluster->slots_to_keys,hashslot,key->ptr,NULL);
} }
void slotToKeyFlush(void) { void slotToKeyFlush(void) {
zslFree(server.cluster->slots_to_keys); raxFree(server.cluster->slots_to_keys);
server.cluster->slots_to_keys = zslCreate(); server.cluster->slots_to_keys = raxNew();
memset(server.cluster->slots_keys_count,0,
sizeof(server.cluster->slots_keys_count));
} }
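The radix tree stores every key prefixed with its hash slot as two big-endian bytes, so the keys of one slot sort contiguously and a single raxSeek() with the two-byte prefix lands on the first of them. A minimal sketch of the encoding used above:

    #include <string.h>

    /* Build a slots_to_keys index entry: two big-endian slot bytes
     * followed by the raw key name. Returns the indexed length. */
    static size_t encode_slot_key(unsigned char *dst, unsigned int hashslot,
                                  const char *key, size_t keylen) {
        dst[0] = (hashslot >> 8) & 0xff;      /* high byte first */
        dst[1] = hashslot & 0xff;
        memcpy(dst + 2, key, keylen);
        return keylen + 2;
    }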
/* Populate the specified array of objects with keys in the specified slot. /* Populate the specified array of objects with keys in the specified slot.
* New objects are returned to represent the keys; it's up to the caller to * New objects are returned to represent the keys; it's up to the caller to
* decrement the reference count to release the key names. */ * decrement the reference count to release the key names. */
unsigned int getKeysInSlot(unsigned int hashslot, robj **keys, unsigned int count) { unsigned int getKeysInSlot(unsigned int hashslot, robj **keys, unsigned int count) {
zskiplistNode *n; raxIterator iter;
zrangespec range;
int j = 0; int j = 0;
unsigned char indexed[2];
range.min = range.max = hashslot; indexed[0] = (hashslot >> 8) & 0xff;
range.minex = range.maxex = 0; indexed[1] = hashslot & 0xff;
raxStart(&iter,server.cluster->slots_to_keys);
n = zslFirstInRange(server.cluster->slots_to_keys, &range); raxSeek(&iter,">=",indexed,2);
while(n && n->score == hashslot && count--) { while(count-- && raxNext(&iter)) {
keys[j++] = createStringObject(n->ele,sdslen(n->ele)); if (iter.key[0] != indexed[0] || iter.key[1] != indexed[1]) break;
n = n->level[0].forward; keys[j++] = createStringObject((char*)iter.key+2,iter.key_len-2);
} }
raxStop(&iter);
return j; return j;
} }
/* Remove all the keys in the specified hash slot. /* Remove all the keys in the specified hash slot.
* The number of removed items is returned. */ * The number of removed items is returned. */
unsigned int delKeysInSlot(unsigned int hashslot) { unsigned int delKeysInSlot(unsigned int hashslot) {
zskiplistNode *n; raxIterator iter;
zrangespec range;
int j = 0; int j = 0;
unsigned char indexed[2];
range.min = range.max = hashslot; indexed[0] = (hashslot >> 8) & 0xff;
range.minex = range.maxex = 0; indexed[1] = hashslot & 0xff;
raxStart(&iter,server.cluster->slots_to_keys);
while(server.cluster->slots_keys_count[hashslot]) {
raxSeek(&iter,">=",indexed,2);
raxNext(&iter);
n = zslFirstInRange(server.cluster->slots_to_keys, &range); robj *key = createStringObject((char*)iter.key+2,iter.key_len-2);
while(n && n->score == hashslot) {
sds sdskey = n->ele;
robj *key = createStringObject(sdskey,sdslen(sdskey));
n = n->level[0].forward; /* Go to the next item before freeing it. */
dbDelete(&server.db[0],key); dbDelete(&server.db[0],key);
decrRefCount(key); decrRefCount(key);
j++; j++;
} }
raxStop(&iter);
return j; return j;
} }
unsigned int countKeysInSlot(unsigned int hashslot) { unsigned int countKeysInSlot(unsigned int hashslot) {
zskiplist *zsl = server.cluster->slots_to_keys; return server.cluster->slots_keys_count[hashslot];
zskiplistNode *zn;
zrangespec range;
int rank, count = 0;
range.min = range.max = hashslot;
range.minex = range.maxex = 0;
/* Find first element in range */
zn = zslFirstInRange(zsl, &range);
/* Use rank of first element, if any, to determine preliminary count */
if (zn != NULL) {
rank = zslGetRank(zsl, zn->score, zn->ele);
count = (zsl->length - (rank - 1));
/* Find last element in range */
zn = zslLastInRange(zsl, &range);
/* Use rank of last element, if any, to determine the actual count */
if (zn != NULL) {
rank = zslGetRank(zsl, zn->score, zn->ele);
count -= (zsl->length - rank);
}
}
return count;
} }
...@@ -92,7 +92,7 @@ int activeExpireCycleTryExpire(redisDb *db, dictEntry *de, long long now) { ...@@ -92,7 +92,7 @@ int activeExpireCycleTryExpire(redisDb *db, dictEntry *de, long long now) {
* *
* If type is ACTIVE_EXPIRE_CYCLE_SLOW, the normal expire cycle is * If type is ACTIVE_EXPIRE_CYCLE_SLOW, the normal expire cycle is
* executed, where the time limit is a percentage of the REDIS_HZ period * executed, where the time limit is a percentage of the REDIS_HZ period
* as specified by the REDIS_EXPIRELOOKUPS_TIME_PERC define. */ * as specified by the ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC define. */
void activeExpireCycle(int type) { void activeExpireCycle(int type) {
/* This function has some global state in order to continue the work /* This function has some global state in order to continue the work
......
...@@ -97,11 +97,14 @@ void emptyDbAsync(redisDb *db) { ...@@ -97,11 +97,14 @@ void emptyDbAsync(redisDb *db) {
/* Empty the slots-keys map of Redis Cluster by creating a new empty one /* Empty the slots-keys map of Redis Cluster by creating a new empty one
* and scheduling the old one for lazy freeing. */ * and scheduling the old one for lazy freeing. */
void slotToKeyFlushAsync(void) { void slotToKeyFlushAsync(void) {
zskiplist *oldsl = server.cluster->slots_to_keys; rax *old = server.cluster->slots_to_keys;
server.cluster->slots_to_keys = zslCreate();
atomicIncr(lazyfree_objects,oldsl->length, server.cluster->slots_to_keys = raxNew();
memset(server.cluster->slots_keys_count,0,
sizeof(server.cluster->slots_keys_count));
atomicIncr(lazyfree_objects,old->numele,
lazyfree_objects_mutex); lazyfree_objects_mutex);
bioCreateBackgroundJob(BIO_LAZY_FREE,NULL,NULL,oldsl); bioCreateBackgroundJob(BIO_LAZY_FREE,NULL,NULL,old);
} }
/* Release objects from the lazyfree thread. It's just decrRefCount() /* Release objects from the lazyfree thread. It's just decrRefCount()
...@@ -125,8 +128,8 @@ void lazyfreeFreeDatabaseFromBioThread(dict *ht1, dict *ht2) { ...@@ -125,8 +128,8 @@ void lazyfreeFreeDatabaseFromBioThread(dict *ht1, dict *ht2) {
/* Release the skiplist mapping Redis Cluster keys to slots in the /* Release the radix tree mapping Redis Cluster keys to slots in the
* lazyfree thread. */ * lazyfree thread. */
void lazyfreeFreeSlotsMapFromBioThread(zskiplist *sl) { void lazyfreeFreeSlotsMapFromBioThread(rax *rt) {
size_t len = sl->length; size_t len = rt->numele;
zslFree(sl); raxFree(rt);
atomicDecr(lazyfree_objects,len,lazyfree_objects_mutex); atomicDecr(lazyfree_objects,len,lazyfree_objects_mutex);
} }