Project: ruanhaishen/redis

Commit 71a8df6a
Authored Mar 02, 2017 by Guy Benoish

Merge branch 'unstable' of https://github.com/antirez/redis into unstable

Parents: 56c01c95, 9cc83d2a
Changes: 182

deps/jemalloc/test/unit/rb.c

@@ -3,7 +3,7 @@
 #define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \
     a_type *rbp_bh_t; \
     for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \
-         rbp_bh_t != &(a_rbt)->rbt_nil; \
+         rbp_bh_t != NULL; \
          rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \
         if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \
             (r_height)++; \

@@ -21,7 +21,7 @@ struct node_s {
 };

 static int
-node_cmp(node_t *a, node_t *b)
+node_cmp(const node_t *a, const node_t *b)
 {
     int ret;

     assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");

@@ -68,38 +68,43 @@ TEST_BEGIN(test_rb_empty)
 TEST_END

 static unsigned
-tree_recurse(node_t *node, unsigned black_height, unsigned black_depth,
-    node_t *nil)
+tree_recurse(node_t *node, unsigned black_height, unsigned black_depth)
 {
     unsigned ret = 0;
-    node_t *left_node = rbtn_left_get(node_t, link, node);
-    node_t *right_node = rbtn_right_get(node_t, link, node);
+    node_t *left_node;
+    node_t *right_node;
+
+    if (node == NULL)
+        return (ret);
+
+    left_node = rbtn_left_get(node_t, link, node);
+    right_node = rbtn_right_get(node_t, link, node);

     if (!rbtn_red_get(node_t, link, node))
         black_depth++;

     /* Red nodes must be interleaved with black nodes. */
     if (rbtn_red_get(node_t, link, node)) {
-        assert_false(rbtn_red_get(node_t, link, left_node),
-            "Node should be black");
-        assert_false(rbtn_red_get(node_t, link, right_node),
-            "Node should be black");
+        if (left_node != NULL)
+            assert_false(rbtn_red_get(node_t, link, left_node),
+                "Node should be black");
+        if (right_node != NULL)
+            assert_false(rbtn_red_get(node_t, link, right_node),
+                "Node should be black");
     }

-    if (node == nil)
-        return (ret);
     /* Self. */
     assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");

     /* Left subtree. */
-    if (left_node != nil)
-        ret += tree_recurse(left_node, black_height, black_depth, nil);
+    if (left_node != NULL)
+        ret += tree_recurse(left_node, black_height, black_depth);
     else
         ret += (black_depth != black_height);

     /* Right subtree. */
-    if (right_node != nil)
-        ret += tree_recurse(right_node, black_height, black_depth, nil);
+    if (right_node != NULL)
+        ret += tree_recurse(right_node, black_height, black_depth);
     else
         ret += (black_depth != black_height);

@@ -181,8 +186,7 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes)
     node->magic = 0;

     rbtn_black_height(node_t, link, tree, black_height);
-    imbalances = tree_recurse(tree->rbt_root, black_height, 0,
-        &(tree->rbt_nil));
+    imbalances = tree_recurse(tree->rbt_root, black_height, 0);
     assert_u_eq(imbalances, 0, "Tree is unbalanced");
     assert_u_eq(tree_iterate(tree), nnodes-1,
         "Unexpected node iteration count");

@@ -212,6 +216,15 @@ remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data)
     return (ret);
 }

+static void
+destroy_cb(node_t *node, void *data)
+{
+    unsigned *nnodes = (unsigned *)data;
+
+    assert_u_gt(*nnodes, 0, "Destruction removed too many nodes");
+    (*nnodes)--;
+}
+
 TEST_BEGIN(test_rb_random)
 {
 #define NNODES 25

@@ -244,7 +257,6 @@ TEST_BEGIN(test_rb_random)
     for (j = 1; j <= NNODES; j++) {
         /* Initialize tree and nodes. */
         tree_new(&tree);
-        tree.rbt_nil.magic = 0;
         for (k = 0; k < j; k++) {
             nodes[k].magic = NODE_MAGIC;
             nodes[k].key = bag[k];

@@ -257,7 +269,7 @@ TEST_BEGIN(test_rb_random)
             rbtn_black_height(node_t, link, &tree,
                 black_height);
-            imbalances = tree_recurse(tree.rbt_root,
-                black_height, 0, &(tree.rbt_nil));
+            imbalances = tree_recurse(tree.rbt_root,
+                black_height, 0);
             assert_u_eq(imbalances, 0,
                 "Tree is unbalanced");

@@ -278,7 +290,7 @@ TEST_BEGIN(test_rb_random)
         }

         /* Remove nodes. */
-        switch (i % 4) {
+        switch (i % 5) {
         case 0:
             for (k = 0; k < j; k++)
                 node_remove(&tree, &nodes[k], j - k);

@@ -314,6 +326,12 @@ TEST_BEGIN(test_rb_random)
             assert_u_eq(nnodes, 0,
                 "Removal terminated early");
             break;
         }
+        case 4: {
+            unsigned nnodes = j;
+            tree_destroy(&tree, destroy_cb, &nnodes);
+            assert_u_eq(nnodes, 0,
+                "Destruction terminated early");
+            break;
+        }
         default:
             not_reached();
         }

deps/jemalloc/test/unit/run_quantize.c  (new file, 0 → 100644)

#include "test/jemalloc_test.h"

TEST_BEGIN(test_small_run_size)
{
    unsigned nbins, i;
    size_t sz, run_size;
    size_t mib[4];
    size_t miblen = sizeof(mib) / sizeof(size_t);

    /*
     * Iterate over all small size classes, get their run sizes, and verify
     * that the quantized size is the same as the run size.
     */

    sz = sizeof(unsigned);
    assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
        "Unexpected mallctl failure");

    assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
        "Unexpected mallctlnametomib failure");
    for (i = 0; i < nbins; i++) {
        mib[2] = i;
        sz = sizeof(size_t);
        assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz, NULL,
            0), 0, "Unexpected mallctlbymib failure");

        assert_zu_eq(run_size, run_quantize_floor(run_size),
            "Small run quantization should be a no-op (run_size=%zu)",
            run_size);
        assert_zu_eq(run_size, run_quantize_ceil(run_size),
            "Small run quantization should be a no-op (run_size=%zu)",
            run_size);
    }
}
TEST_END

TEST_BEGIN(test_large_run_size)
{
    bool cache_oblivious;
    unsigned nlruns, i;
    size_t sz, run_size_prev, ceil_prev;
    size_t mib[4];
    size_t miblen = sizeof(mib) / sizeof(size_t);

    /*
     * Iterate over all large size classes, get their run sizes, and verify
     * that the quantized size is the same as the run size.
     */

    sz = sizeof(bool);
    assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious,
        &sz, NULL, 0), 0, "Unexpected mallctl failure");

    sz = sizeof(unsigned);
    assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0,
        "Unexpected mallctl failure");

    assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0,
        "Unexpected mallctlnametomib failure");
    for (i = 0; i < nlruns; i++) {
        size_t lrun_size, run_size, floor, ceil;

        mib[2] = i;
        sz = sizeof(size_t);
        assert_d_eq(mallctlbymib(mib, miblen, (void *)&lrun_size, &sz, NULL,
            0), 0, "Unexpected mallctlbymib failure");
        run_size = cache_oblivious ? lrun_size + PAGE : lrun_size;
        floor = run_quantize_floor(run_size);
        ceil = run_quantize_ceil(run_size);

        assert_zu_eq(run_size, floor,
            "Large run quantization should be a no-op for precise "
            "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);
        assert_zu_eq(run_size, ceil,
            "Large run quantization should be a no-op for precise "
            "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);

        if (i > 0) {
            assert_zu_eq(run_size_prev, run_quantize_floor(run_size - PAGE),
                "Floor should be a precise size");
            if (run_size_prev < ceil_prev) {
                assert_zu_eq(ceil_prev, run_size,
                    "Ceiling should be a precise size "
                    "(run_size_prev=%zu, ceil_prev=%zu, "
                    "run_size=%zu)", run_size_prev, ceil_prev, run_size);
            }
        }
        run_size_prev = floor;
        ceil_prev = run_quantize_ceil(run_size + PAGE);
    }
}
TEST_END

TEST_BEGIN(test_monotonic)
{
    unsigned nbins, nlruns, i;
    size_t sz, floor_prev, ceil_prev;

    /*
     * Iterate over all run sizes and verify that
     * run_quantize_{floor,ceil}() are monotonic.
     */

    sz = sizeof(unsigned);
    assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
        "Unexpected mallctl failure");

    sz = sizeof(unsigned);
    assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0,
        "Unexpected mallctl failure");

    floor_prev = 0;
    ceil_prev = 0;
    for (i = 1; i <= chunksize >> LG_PAGE; i++) {
        size_t run_size, floor, ceil;

        run_size = i << LG_PAGE;
        floor = run_quantize_floor(run_size);
        ceil = run_quantize_ceil(run_size);

        assert_zu_le(floor, run_size,
            "Floor should be <= (floor=%zu, run_size=%zu, ceil=%zu)",
            floor, run_size, ceil);
        assert_zu_ge(ceil, run_size,
            "Ceiling should be >= (floor=%zu, run_size=%zu, ceil=%zu)",
            floor, run_size, ceil);

        assert_zu_le(floor_prev, floor, "Floor should be monotonic "
            "(floor_prev=%zu, floor=%zu, run_size=%zu, ceil=%zu)",
            floor_prev, floor, run_size, ceil);
        assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
            "(floor=%zu, run_size=%zu, ceil_prev=%zu, ceil=%zu)",
            floor, run_size, ceil_prev, ceil);

        floor_prev = floor;
        ceil_prev = ceil;
    }
}
TEST_END

int
main(void)
{

    return (test(
        test_small_run_size,
        test_large_run_size,
        test_monotonic));
}

deps/jemalloc/test/unit/size_classes.c  (mode 100644 → 100755)

@@ -8,8 +8,8 @@ get_max_size_class(void)
     size_t sz, miblen, max_size_class;

     sz = sizeof(unsigned);
-    assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
-        "Unexpected mallctl() error");
+    assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, 0),
+        0, "Unexpected mallctl() error");
     miblen = sizeof(mib) / sizeof(size_t);
     assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,

@@ -17,8 +17,8 @@ get_max_size_class(void)
     mib[2] = nhchunks - 1;

     sz = sizeof(size_t);
-    assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
-        "Unexpected mallctlbymib() error");
+    assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
+        NULL, 0), 0, "Unexpected mallctlbymib() error");

     return (max_size_class);
 }

@@ -80,10 +80,105 @@ TEST_BEGIN(test_size_classes)
 }
 TEST_END

+TEST_BEGIN(test_psize_classes)
+{
+    size_t size_class, max_size_class;
+    pszind_t pind, max_pind;
+
+    max_size_class = get_max_size_class();
+    max_pind = psz2ind(max_size_class);
+
+    for (pind = 0, size_class = pind2sz(pind); pind < max_pind ||
+        size_class < max_size_class; pind++, size_class = pind2sz(pind)) {
+        assert_true(pind < max_pind,
+            "Loop conditionals should be equivalent; pind=%u, "
+            "size_class=%zu (%#zx)", pind, size_class, size_class);
+        assert_true(size_class < max_size_class,
+            "Loop conditionals should be equivalent; pind=%u, "
+            "size_class=%zu (%#zx)", pind, size_class, size_class);
+
+        assert_u_eq(pind, psz2ind(size_class),
+            "psz2ind() does not reverse pind2sz(): pind=%u -->"
+            " size_class=%zu --> pind=%u --> size_class=%zu", pind,
+            size_class, psz2ind(size_class), pind2sz(psz2ind(size_class)));
+        assert_zu_eq(size_class, pind2sz(psz2ind(size_class)),
+            "pind2sz() does not reverse psz2ind(): pind=%u -->"
+            " size_class=%zu --> pind=%u --> size_class=%zu", pind,
+            size_class, psz2ind(size_class), pind2sz(psz2ind(size_class)));
+
+        assert_u_eq(pind+1, psz2ind(size_class+1),
+            "Next size_class does not round up properly");
+
+        assert_zu_eq(size_class, (pind > 0) ?
+            psz2u(pind2sz(pind-1)+1) : psz2u(1),
+            "psz2u() does not round up to size class");
+        assert_zu_eq(size_class, psz2u(size_class-1),
+            "psz2u() does not round up to size class");
+        assert_zu_eq(size_class, psz2u(size_class),
+            "psz2u() does not compute same size class");
+        assert_zu_eq(psz2u(size_class+1), pind2sz(pind+1),
+            "psz2u() does not round up to next size class");
+    }
+
+    assert_u_eq(pind, psz2ind(pind2sz(pind)),
+        "psz2ind() does not reverse pind2sz()");
+    assert_zu_eq(max_size_class, pind2sz(psz2ind(max_size_class)),
+        "pind2sz() does not reverse psz2ind()");
+
+    assert_zu_eq(size_class, psz2u(pind2sz(pind-1)+1),
+        "psz2u() does not round up to size class");
+    assert_zu_eq(size_class, psz2u(size_class-1),
+        "psz2u() does not round up to size class");
+    assert_zu_eq(size_class, psz2u(size_class),
+        "psz2u() does not compute same size class");
+}
+TEST_END
+
+TEST_BEGIN(test_overflow)
+{
+    size_t max_size_class;
+
+    max_size_class = get_max_size_class();
+
+    assert_u_eq(size2index(max_size_class+1), NSIZES,
+        "size2index() should return NSIZES on overflow");
+    assert_u_eq(size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
+        "size2index() should return NSIZES on overflow");
+    assert_u_eq(size2index(SIZE_T_MAX), NSIZES,
+        "size2index() should return NSIZES on overflow");
+
+    assert_zu_eq(s2u(max_size_class+1), 0,
+        "s2u() should return 0 for unsupported size");
+    assert_zu_eq(s2u(ZU(PTRDIFF_MAX)+1), 0,
+        "s2u() should return 0 for unsupported size");
+    assert_zu_eq(s2u(SIZE_T_MAX), 0,
+        "s2u() should return 0 on overflow");
+
+    assert_u_eq(psz2ind(max_size_class+1), NPSIZES,
+        "psz2ind() should return NPSIZES on overflow");
+    assert_u_eq(psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES,
+        "psz2ind() should return NPSIZES on overflow");
+    assert_u_eq(psz2ind(SIZE_T_MAX), NPSIZES,
+        "psz2ind() should return NPSIZES on overflow");
+
+    assert_zu_eq(psz2u(max_size_class+1), 0,
+        "psz2u() should return 0 for unsupported size");
+    assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), 0,
+        "psz2u() should return 0 for unsupported size");
+    assert_zu_eq(psz2u(SIZE_T_MAX), 0,
+        "psz2u() should return 0 on overflow");
+}
+TEST_END
+
 int
 main(void)
 {

     return (test(
-        test_size_classes));
+        test_size_classes,
+        test_psize_classes,
+        test_overflow));
 }

deps/jemalloc/test/unit/smoothstep.c  (new file, 0 → 100644)

#include "test/jemalloc_test.h"

static const uint64_t smoothstep_tab[] = {
#define STEP(step, h, x, y) \
    h,
    SMOOTHSTEP
#undef STEP
};

TEST_BEGIN(test_smoothstep_integral)
{
    uint64_t sum, min, max;
    unsigned i;

    /*
     * The integral of smoothstep in the [0..1] range equals 1/2.  Verify
     * that the fixed point representation's integral is no more than
     * rounding error distant from 1/2.  Regarding rounding, each table
     * element is rounded down to the nearest fixed point value, so the
     * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps.
     */
    sum = 0;
    for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
        sum += smoothstep_tab[i];

    max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
    min = max - SMOOTHSTEP_NSTEPS;

    assert_u64_ge(sum, min,
        "Integral too small, even accounting for truncation");
    assert_u64_le(sum, max, "Integral exceeds 1/2");
    if (false) {
        malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n",
            max - sum, SMOOTHSTEP_NSTEPS);
    }
}
TEST_END

TEST_BEGIN(test_smoothstep_monotonic)
{
    uint64_t prev_h;
    unsigned i;

    /*
     * The smoothstep function is monotonic in [0..1], i.e. its slope is
     * non-negative.  In practice we want to parametrize table generation
     * such that piecewise slope is greater than zero, but do not require
     * that here.
     */
    prev_h = 0;
    for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
        uint64_t h = smoothstep_tab[i];
        assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
        prev_h = h;
    }
    assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
        (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1");
}
TEST_END

TEST_BEGIN(test_smoothstep_slope)
{
    uint64_t prev_h, prev_delta;
    unsigned i;

    /*
     * The smoothstep slope strictly increases until x=0.5, and then
     * strictly decreases until x=1.0.  Verify the slightly weaker
     * requirement of monotonicity, so that inadequate table precision does
     * not cause false test failures.
     */
    prev_h = 0;
    prev_delta = 0;
    for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) {
        uint64_t h = smoothstep_tab[i];
        uint64_t delta = h - prev_h;
        assert_u64_ge(delta, prev_delta,
            "Slope must monotonically increase in 0.0 <= x <= 0.5, "
            "i=%u", i);
        prev_h = h;
        prev_delta = delta;
    }

    prev_h = KQU(1) << SMOOTHSTEP_BFP;
    prev_delta = 0;
    for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) {
        uint64_t h = smoothstep_tab[i];
        uint64_t delta = prev_h - h;
        assert_u64_ge(delta, prev_delta,
            "Slope must monotonically decrease in 0.5 <= x <= 1.0, "
            "i=%u", i);
        prev_h = h;
        prev_delta = delta;
    }
}
TEST_END

int
main(void)
{

    return (test(
        test_smoothstep_integral,
        test_smoothstep_monotonic,
        test_smoothstep_slope));
}
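
As a quick cross-check of the integral claim in test_smoothstep_integral (an illustrative aside, not part of the commit, assuming the classic cubic smoothstep 3x^2 - 2x^3; jemalloc's table may be generated from a higher-order variant, but any variant symmetric about the point (1/2, 1/2) integrates to the same value):

    \int_0^1 \bigl(3x^2 - 2x^3\bigr)\,dx
        = \Bigl[x^3 - \tfrac{1}{2}x^4\Bigr]_0^1
        = 1 - \tfrac{1}{2}
        = \tfrac{1}{2}

The fixed-point table stores scaled, truncated samples of this curve, which is why the test allows the sum to fall short of the exact value by up to SMOOTHSTEP_NSTEPS ulps.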

deps/jemalloc/test/unit/stats.c  (mode 100644 → 100755)

Throughout this file, every output pointer passed to mallctl()/mallctlbymib() gains an explicit (void *) cast, for example:

@@ -7,18 +7,18 @@ TEST_BEGIN(test_stats_summary)
     int expected = config_stats ? 0 : ENOENT;

     sz = sizeof(cactive);
-    assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected,
-        "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.cactive", (void *)&cactive, &sz, NULL, 0),
+        expected, "Unexpected mallctl() result");

     sz = sizeof(size_t);
-    assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0),
-        expected, "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected,
-        "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.resident", &resident, &sz, NULL, 0),
-        expected, "Unexpected mallctl() result");
-    assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected,
-        "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
+        0), expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
+        expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
+        expected, "Unexpected mallctl() result");
+    assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
+        expected, "Unexpected mallctl() result");

     if (config_stats) {
         assert_zu_le(active, *cactive,

The same (void *) cast is applied in the remaining hunks (test_stats_huge at line 45; test_stats_arenas_summary at 83 and 93; test_stats_arenas_small at 150 and 159; test_stats_arenas_large at 197; test_stats_arenas_huge at 241; test_stats_arenas_bins at 282 and 291; test_stats_arenas_lruns at 355; test_stats_arenas_hchunks at 399), covering the "epoch", "thread.arena", "thread.tcache.flush" and per-arena "stats.arenas.0.*" reads. Beyond the casts, the substantive changes are:

@@ -93,22 +93,26 @@ TEST_BEGIN(test_stats_arenas_summary)
     huge = mallocx(chunksize, 0);
     assert_ptr_not_null(huge, "Unexpected mallocx() failure");

+    dallocx(little, 0);
+    dallocx(large, 0);
+    dallocx(huge, 0);
+
     assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
         "Unexpected mallctl() failure");

@@ -116,10 +120,6 @@ TEST_BEGIN(test_stats_arenas_summary)
         assert_u64_le(nmadvise, purged,
             "nmadvise should be no greater than purged");
     }
-
-    dallocx(little, 0);
-    dallocx(large, 0);
-    dallocx(huge, 0);
 }
 TEST_END

@@ -197,34 +199,36 @@ TEST_BEGIN(test_stats_arenas_large)
     if (config_stats) {
         assert_zu_gt(allocated, 0,
             "allocated should be greater than zero");
-        assert_zu_gt(nmalloc, 0,
+        assert_u64_gt(nmalloc, 0,
             "nmalloc should be greater than zero");
-        assert_zu_ge(nmalloc, ndalloc,
+        assert_u64_ge(nmalloc, ndalloc,
             "nmalloc should be at least as large as ndalloc");
-        assert_zu_gt(nrequests, 0,
+        assert_u64_gt(nrequests, 0,
             "nrequests should be greater than zero");
     }

@@ -241,30 +245,30 @@ TEST_BEGIN(test_stats_arenas_huge) makes the same assert_zu_* to assert_u64_* change for its nmalloc and ndalloc checks.

deps/jemalloc/test/unit/ticker.c  (new file, 0 → 100644)

#include "test/jemalloc_test.h"

TEST_BEGIN(test_ticker_tick)
{
#define NREPS 2
#define NTICKS 3
    ticker_t ticker;
    int32_t i, j;

    ticker_init(&ticker, NTICKS);
    for (i = 0; i < NREPS; i++) {
        for (j = 0; j < NTICKS; j++) {
            assert_u_eq(ticker_read(&ticker), NTICKS - j,
                "Unexpected ticker value (i=%d, j=%d)", i, j);
            assert_false(ticker_tick(&ticker),
                "Unexpected ticker fire (i=%d, j=%d)", i, j);
        }
        assert_u32_eq(ticker_read(&ticker), 0,
            "Expected ticker depletion");
        assert_true(ticker_tick(&ticker),
            "Expected ticker fire (i=%d)", i);
        assert_u32_eq(ticker_read(&ticker), NTICKS,
            "Expected ticker reset");
    }
#undef NTICKS
}
TEST_END

TEST_BEGIN(test_ticker_ticks)
{
#define NTICKS 3
    ticker_t ticker;

    ticker_init(&ticker, NTICKS);

    assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
    assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
    assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
    assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
    assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");

    assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
    assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
#undef NTICKS
}
TEST_END

TEST_BEGIN(test_ticker_copy)
{
#define NTICKS 3
    ticker_t ta, tb;

    ticker_init(&ta, NTICKS);
    ticker_copy(&tb, &ta);
    assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
    assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
    assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");

    ticker_tick(&ta);
    ticker_copy(&tb, &ta);
    assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
    assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
    assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
#undef NTICKS
}
TEST_END

int
main(void)
{

    return (test(
        test_ticker_tick,
        test_ticker_ticks,
        test_ticker_copy));
}
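
For readers unfamiliar with ticker_t, the behaviour these assertions pin down can be sketched in a few lines of standalone C. The type and helpers below are hypothetical stand-ins, not jemalloc's API: the counter counts down from nticks, fires on the tick that would take it below zero, and rewinds itself.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for a ticker: countdown that fires and rewinds. */
    typedef struct {
        int32_t tick;
        int32_t nticks;
    } my_ticker_t;

    static void
    my_ticker_init(my_ticker_t *t, int32_t nticks)
    {
        t->tick = nticks;
        t->nticks = nticks;
    }

    static bool
    my_ticker_tick(my_ticker_t *t)
    {
        t->tick--;
        if (t->tick < 0) {
            t->tick = t->nticks;    /* Rewind on fire. */
            return true;
        }
        return false;
    }

    int
    main(void)
    {
        my_ticker_t t;
        int i;

        my_ticker_init(&t, 3);
        for (i = 0; i < 3; i++)
            assert(!my_ticker_tick(&t));    /* Counts 3 -> 0 without firing. */
        assert(t.tick == 0);
        assert(my_ticker_tick(&t));         /* Next tick fires and rewinds. */
        assert(t.tick == 3);
        printf("fired once, counter rewound to 3\n");
        return 0;
    }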

deps/jemalloc/test/unit/tsd.c

@@ -58,18 +58,18 @@ thd_start(void *arg)
     data_t d = (data_t)(uintptr_t)arg;
     void *p;

-    assert_x_eq(*data_tsd_get(), DATA_INIT,
+    assert_x_eq(*data_tsd_get(true), DATA_INIT,
         "Initial tsd get should return initialization value");

     p = malloc(1);
     assert_ptr_not_null(p, "Unexpected malloc() failure");

     data_tsd_set(&d);
-    assert_x_eq(*data_tsd_get(), d,
+    assert_x_eq(*data_tsd_get(true), d,
         "After tsd set, tsd get should return value that was set");

     d = 0;
-    assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg,
+    assert_x_eq(*data_tsd_get(true), (data_t)(uintptr_t)arg,
         "Resetting local data should have no effect on tsd");

     free(p);

@@ -79,7 +79,7 @@ thd_start(void *arg)
 TEST_BEGIN(test_tsd_main_thread)
 {

-    thd_start((void *) 0xa5f3e329);
+    thd_start((void *)(uintptr_t)0xa5f3e329);
 }
 TEST_END

@@ -99,6 +99,11 @@ int
 main(void)
 {

+    /* Core tsd bootstrapping must happen prior to data_tsd_boot(). */
+    if (nallocx(1, 0) == 0) {
+        malloc_printf("Initialization error");
+        return (test_status_fail);
+    }
     data_tsd_boot();

     return (test(

deps/jemalloc/test/unit/util.c

 #include "test/jemalloc_test.h"

-TEST_BEGIN(test_pow2_ceil)
-{
-    unsigned i, pow2;
-    size_t x;
-
-    assert_zu_eq(pow2_ceil(0), 0, "Unexpected result");
-
-    for (i = 0; i < sizeof(size_t) * 8; i++) {
-        assert_zu_eq(pow2_ceil(ZU(1) << i), ZU(1) << i,
-            "Unexpected result");
-    }
-
-    for (i = 2; i < sizeof(size_t) * 8; i++) {
-        assert_zu_eq(pow2_ceil((ZU(1) << i) - 1), ZU(1) << i,
-            "Unexpected result");
-    }
-
-    for (i = 0; i < sizeof(size_t) * 8 - 1; i++) {
-        assert_zu_eq(pow2_ceil((ZU(1) << i) + 1), ZU(1) << (i+1),
-            "Unexpected result");
-    }
-
-    for (pow2 = 1; pow2 < 25; pow2++) {
-        for (x = (ZU(1) << (pow2-1)) + 1; x <= ZU(1) << pow2; x++) {
-            assert_zu_eq(pow2_ceil(x), ZU(1) << pow2,
-                "Unexpected result, x=%zu", x);
-        }
-    }
-}
-TEST_END
+#define TEST_POW2_CEIL(t, suf, pri) do { \
+    unsigned i, pow2; \
+    t x; \
+ \
+    assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \
+ \
+    for (i = 0; i < sizeof(t) * 8; i++) { \
+        assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \
+            << i, "Unexpected result"); \
+    } \
+ \
+    for (i = 2; i < sizeof(t) * 8; i++) { \
+        assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \
+            ((t)1) << i, "Unexpected result"); \
+    } \
+ \
+    for (i = 0; i < sizeof(t) * 8 - 1; i++) { \
+        assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \
+            ((t)1) << (i+1), "Unexpected result"); \
+    } \
+ \
+    for (pow2 = 1; pow2 < 25; pow2++) { \
+        for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \
+            x++) { \
+            assert_##suf##_eq(pow2_ceil_##suf(x), \
+                ((t)1) << pow2, \
+                "Unexpected result, x=%"pri, x); \
+        } \
+    } \
+} while (0)
+
+TEST_BEGIN(test_pow2_ceil_u64)
+{
+    TEST_POW2_CEIL(uint64_t, u64, FMTu64);
+}
+TEST_END
+
+TEST_BEGIN(test_pow2_ceil_u32)
+{
+    TEST_POW2_CEIL(uint32_t, u32, FMTu32);
+}
+TEST_END
+
+TEST_BEGIN(test_pow2_ceil_zu)
+{
+    TEST_POW2_CEIL(size_t, zu, "zu");
+}
+TEST_END
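
These tests only exercise pow2_ceil_* through its observable results. As an illustration of one common way such a function is implemented, the sketch below uses the classic bit-smearing trick for 32-bit values; it is a hypothetical helper, not necessarily jemalloc's code, but it satisfies the same zero, exact-power, power-minus-one and power-plus-one cases checked above.

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical helper: round v up to the next power of two (0 maps to 0). */
    static uint32_t
    pow2_ceil_u32_sketch(uint32_t v)
    {
        v--;
        v |= v >> 1;    /* Smear the highest set bit downward... */
        v |= v >> 2;
        v |= v >> 4;
        v |= v >> 8;
        v |= v >> 16;
        v++;            /* ...then step up to the next power of two. */
        return v;
    }

    int
    main(void)
    {
        assert(pow2_ceil_u32_sketch(0) == 0);
        assert(pow2_ceil_u32_sketch(1) == 1);
        assert(pow2_ceil_u32_sketch(5) == 8);
        assert(pow2_ceil_u32_sketch(1024) == 1024);
        assert(pow2_ceil_u32_sketch(1025) == 2048);
        return 0;
    }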

@@ -54,6 +75,7 @@ TEST_BEGIN(test_malloc_strtoumax)
     };
 #define ERR(e) e, #e
 #define KUMAX(x) ((uintmax_t)x##ULL)
+#define KSMAX(x) ((uintmax_t)(intmax_t)x##LL)

     struct test_s tests[] = {
     {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX},
     {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX},

@@ -66,13 +88,13 @@ TEST_BEGIN(test_malloc_strtoumax)
     {"42", "", 0, ERR(0), KUMAX(42)},
     {"+42", "", 0, ERR(0), KUMAX(42)},
-    {"-42", "", 0, ERR(0), KUMAX(-42)},
+    {"-42", "", 0, ERR(0), KSMAX(-42)},
     {"042", "", 0, ERR(0), KUMAX(042)},
     {"+042", "", 0, ERR(0), KUMAX(042)},
-    {"-042", "", 0, ERR(0), KUMAX(-042)},
+    {"-042", "", 0, ERR(0), KSMAX(-042)},
     {"0x42", "", 0, ERR(0), KUMAX(0x42)},
     {"+0x42", "", 0, ERR(0), KUMAX(0x42)},
-    {"-0x42", "", 0, ERR(0), KUMAX(-0x42)},
+    {"-0x42", "", 0, ERR(0), KSMAX(-0x42)},
     {"0", "", 0, ERR(0), KUMAX(0)},
     {"1", "", 0, ERR(0), KUMAX(1)},

@@ -109,6 +131,7 @@ TEST_BEGIN(test_malloc_strtoumax)
     };
 #undef ERR
 #undef KUMAX
+#undef KSMAX
     unsigned i;

     for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {

@@ -139,14 +162,14 @@ TEST_BEGIN(test_malloc_snprintf_truncated)
 {
 #define BUFLEN 15
     char buf[BUFLEN];
-    int result;
+    size_t result;
     size_t len;
 #define TEST(expected_str_untruncated, ...) do { \
     result = malloc_snprintf(buf, len, __VA_ARGS__); \
     assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
         "Unexpected string inequality (\"%s\" vs \"%s\")", \
-        buf, expected_str_untruncated); \
-    assert_d_eq(result, strlen(expected_str_untruncated), \
+        buf, expected_str_untruncated); \
+    assert_zu_eq(result, strlen(expected_str_untruncated), \
         "Unexpected result"); \
 } while (0)

@@ -172,11 +195,11 @@ TEST_BEGIN(test_malloc_snprintf)
 {
 #define BUFLEN 128
     char buf[BUFLEN];
-    int result;
+    size_t result;
 #define TEST(expected_str, ...) do { \
     result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \
     assert_str_eq(buf, expected_str, "Unexpected output"); \
-    assert_d_eq(result, strlen(expected_str), "Unexpected result"); \
+    assert_zu_eq(result, strlen(expected_str), "Unexpected result");\
 } while (0)

     TEST("hello", "hello");

@@ -286,7 +309,9 @@ main(void)
 {

     return (test(
-        test_pow2_ceil,
+        test_pow2_ceil_u64,
+        test_pow2_ceil_u32,
+        test_pow2_ceil_zu,
         test_malloc_strtoumax_no_endptr,
         test_malloc_strtoumax,
         test_malloc_snprintf_truncated,

deps/jemalloc/test/unit/witness.c  (new file, 0 → 100644)

#include "test/jemalloc_test.h"
static
witness_lock_error_t
*
witness_lock_error_orig
;
static
witness_owner_error_t
*
witness_owner_error_orig
;
static
witness_not_owner_error_t
*
witness_not_owner_error_orig
;
static
witness_lockless_error_t
*
witness_lockless_error_orig
;
static
bool
saw_lock_error
;
static
bool
saw_owner_error
;
static
bool
saw_not_owner_error
;
static
bool
saw_lockless_error
;
static
void
witness_lock_error_intercept
(
const
witness_list_t
*
witnesses
,
const
witness_t
*
witness
)
{
saw_lock_error
=
true
;
}
static
void
witness_owner_error_intercept
(
const
witness_t
*
witness
)
{
saw_owner_error
=
true
;
}
static
void
witness_not_owner_error_intercept
(
const
witness_t
*
witness
)
{
saw_not_owner_error
=
true
;
}
static
void
witness_lockless_error_intercept
(
const
witness_list_t
*
witnesses
)
{
saw_lockless_error
=
true
;
}
static
int
witness_comp
(
const
witness_t
*
a
,
const
witness_t
*
b
)
{
assert_u_eq
(
a
->
rank
,
b
->
rank
,
"Witnesses should have equal rank"
);
return
(
strcmp
(
a
->
name
,
b
->
name
));
}
static
int
witness_comp_reverse
(
const
witness_t
*
a
,
const
witness_t
*
b
)
{
assert_u_eq
(
a
->
rank
,
b
->
rank
,
"Witnesses should have equal rank"
);
return
(
-
strcmp
(
a
->
name
,
b
->
name
));
}
TEST_BEGIN(test_witness)
{
    witness_t a, b;
    tsdn_t *tsdn;

    test_skip_if(!config_debug);

    tsdn = tsdn_fetch();

    witness_assert_lockless(tsdn);

    witness_init(&a, "a", 1, NULL);
    witness_assert_not_owner(tsdn, &a);
    witness_lock(tsdn, &a);
    witness_assert_owner(tsdn, &a);

    witness_init(&b, "b", 2, NULL);
    witness_assert_not_owner(tsdn, &b);
    witness_lock(tsdn, &b);
    witness_assert_owner(tsdn, &b);

    witness_unlock(tsdn, &a);
    witness_unlock(tsdn, &b);

    witness_assert_lockless(tsdn);
}
TEST_END

TEST_BEGIN(test_witness_comp)
{
    witness_t a, b, c, d;
    tsdn_t *tsdn;

    test_skip_if(!config_debug);

    tsdn = tsdn_fetch();

    witness_assert_lockless(tsdn);

    witness_init(&a, "a", 1, witness_comp);
    witness_assert_not_owner(tsdn, &a);
    witness_lock(tsdn, &a);
    witness_assert_owner(tsdn, &a);

    witness_init(&b, "b", 1, witness_comp);
    witness_assert_not_owner(tsdn, &b);
    witness_lock(tsdn, &b);
    witness_assert_owner(tsdn, &b);
    witness_unlock(tsdn, &b);

    witness_lock_error_orig = witness_lock_error;
    witness_lock_error = witness_lock_error_intercept;
    saw_lock_error = false;

    witness_init(&c, "c", 1, witness_comp_reverse);
    witness_assert_not_owner(tsdn, &c);
    assert_false(saw_lock_error, "Unexpected witness lock error");
    witness_lock(tsdn, &c);
    assert_true(saw_lock_error, "Expected witness lock error");
    witness_unlock(tsdn, &c);

    saw_lock_error = false;

    witness_init(&d, "d", 1, NULL);
    witness_assert_not_owner(tsdn, &d);
    assert_false(saw_lock_error, "Unexpected witness lock error");
    witness_lock(tsdn, &d);
    assert_true(saw_lock_error, "Expected witness lock error");
    witness_unlock(tsdn, &d);

    witness_unlock(tsdn, &a);

    witness_assert_lockless(tsdn);

    witness_lock_error = witness_lock_error_orig;
}
TEST_END

TEST_BEGIN(test_witness_reversal)
{
    witness_t a, b;
    tsdn_t *tsdn;

    test_skip_if(!config_debug);

    witness_lock_error_orig = witness_lock_error;
    witness_lock_error = witness_lock_error_intercept;
    saw_lock_error = false;

    tsdn = tsdn_fetch();

    witness_assert_lockless(tsdn);

    witness_init(&a, "a", 1, NULL);
    witness_init(&b, "b", 2, NULL);

    witness_lock(tsdn, &b);
    assert_false(saw_lock_error, "Unexpected witness lock error");
    witness_lock(tsdn, &a);
    assert_true(saw_lock_error, "Expected witness lock error");

    witness_unlock(tsdn, &a);
    witness_unlock(tsdn, &b);

    witness_assert_lockless(tsdn);

    witness_lock_error = witness_lock_error_orig;
}
TEST_END

TEST_BEGIN(test_witness_recursive)
{
    witness_t a;
    tsdn_t *tsdn;

    test_skip_if(!config_debug);

    witness_not_owner_error_orig = witness_not_owner_error;
    witness_not_owner_error = witness_not_owner_error_intercept;
    saw_not_owner_error = false;

    witness_lock_error_orig = witness_lock_error;
    witness_lock_error = witness_lock_error_intercept;
    saw_lock_error = false;

    tsdn = tsdn_fetch();

    witness_assert_lockless(tsdn);

    witness_init(&a, "a", 1, NULL);

    witness_lock(tsdn, &a);
    assert_false(saw_lock_error, "Unexpected witness lock error");
    assert_false(saw_not_owner_error, "Unexpected witness not owner error");
    witness_lock(tsdn, &a);
    assert_true(saw_lock_error, "Expected witness lock error");
    assert_true(saw_not_owner_error, "Expected witness not owner error");

    witness_unlock(tsdn, &a);

    witness_assert_lockless(tsdn);

    witness_owner_error = witness_owner_error_orig;
    witness_lock_error = witness_lock_error_orig;
}
TEST_END

TEST_BEGIN(test_witness_unlock_not_owned)
{
    witness_t a;
    tsdn_t *tsdn;

    test_skip_if(!config_debug);

    witness_owner_error_orig = witness_owner_error;
    witness_owner_error = witness_owner_error_intercept;
    saw_owner_error = false;

    tsdn = tsdn_fetch();

    witness_assert_lockless(tsdn);

    witness_init(&a, "a", 1, NULL);

    assert_false(saw_owner_error, "Unexpected owner error");
    witness_unlock(tsdn, &a);
    assert_true(saw_owner_error, "Expected owner error");

    witness_assert_lockless(tsdn);

    witness_owner_error = witness_owner_error_orig;
}
TEST_END

TEST_BEGIN(test_witness_lockful)
{
    witness_t a;
    tsdn_t *tsdn;

    test_skip_if(!config_debug);

    witness_lockless_error_orig = witness_lockless_error;
    witness_lockless_error = witness_lockless_error_intercept;
    saw_lockless_error = false;

    tsdn = tsdn_fetch();

    witness_assert_lockless(tsdn);

    witness_init(&a, "a", 1, NULL);

    assert_false(saw_lockless_error, "Unexpected lockless error");
    witness_assert_lockless(tsdn);

    witness_lock(tsdn, &a);
    witness_assert_lockless(tsdn);
    assert_true(saw_lockless_error, "Expected lockless error");

    witness_unlock(tsdn, &a);

    witness_assert_lockless(tsdn);

    witness_lockless_error = witness_lockless_error_orig;
}
TEST_END

int
main(void)
{
    return (test(
        test_witness,
        test_witness_comp,
        test_witness_reversal,
        test_witness_recursive,
        test_witness_unlock_not_owned,
        test_witness_lockful));
}
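The tests above exercise jemalloc's witness machinery, which assigns every lock a rank and asserts that locks are only acquired in non-decreasing rank order, reporting reversals, recursive acquisition, and unlock-without-ownership through replaceable error hooks (witness_lock_error and friends) that the tests intercept. As a rough, hedged illustration of the underlying idea only (a single-threaded sketch with hypothetical names, not jemalloc's implementation), a rank check can be reduced to a per-thread stack of held ranks:

#include <assert.h>
#include <stddef.h>

/* Hypothetical, simplified stand-in for a witness: a lock with a rank. */
typedef struct { const char *name; unsigned rank; } fake_witness_t;

/* Ranks currently held by this (single) thread, in acquisition order. */
static unsigned held_ranks[16];
static size_t n_held = 0;

/* Acquiring a lock whose rank is lower than the most recently acquired
 * rank is an ordering violation, the condition test_witness_reversal
 * above expects to be reported. */
static void fake_witness_lock(const fake_witness_t *w) {
    if (n_held > 0)
        assert(w->rank >= held_ranks[n_held - 1] && "lock order violation");
    held_ranks[n_held++] = w->rank;
}

static void fake_witness_unlock(void) {
    assert(n_held > 0 && "unlock while owning nothing");
    n_held--;
}

int main(void) {
    fake_witness_t a = {"a", 1}, b = {"b", 2};
    fake_witness_lock(&a);   /* rank 1 first */
    fake_witness_lock(&b);   /* rank 2 after rank 1: allowed */
    fake_witness_unlock();
    fake_witness_unlock();
    return 0;
}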
deps/jemalloc/test/unit/zero.c
View file @ 71a8df6a
...
@@ -8,39 +8,41 @@ const char *malloc_conf =
static void
test_zero(size_t sz_min, size_t sz_max)
{
    char *s;
    uint8_t *s;
    size_t sz_prev, sz, i;
#define MAGIC ((uint8_t)0x61)

    sz_prev = 0;
    s = (char *)mallocx(sz_min, 0);
    s = (uint8_t *)mallocx(sz_min, 0);
    assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");

    for (sz = sallocx(s, 0); sz <= sz_max;
        sz_prev = sz, sz = sallocx(s, 0)) {
        if (sz_prev > 0) {
            assert_c_eq(s[0], 'a',
            assert_u_eq(s[0], MAGIC,
                "Previously allocated byte %zu/%zu is corrupted",
                ZU(0), sz_prev);
            assert_c_eq(s[sz_prev-1], 'a',
            assert_u_eq(s[sz_prev-1], MAGIC,
                "Previously allocated byte %zu/%zu is corrupted",
                sz_prev-1, sz_prev);
        }

        for (i = sz_prev; i < sz; i++) {
            assert_c_eq(s[i], 0x0,
            assert_u_eq(s[i], 0x0,
                "Newly allocated byte %zu/%zu isn't zero-filled",
                i, sz);
            s[i] = 'a';
            s[i] = MAGIC;
        }

        if (xallocx(s, sz+1, 0, 0) == sz) {
            s = (char *)rallocx(s, sz+1, 0);
            s = (uint8_t *)rallocx(s, sz+1, 0);
            assert_ptr_not_null((void *)s,
                "Unexpected rallocx() failure");
        }
    }

    dallocx(s, 0);
#undef MAGIC
}

TEST_BEGIN(test_zero_small)
...
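The test above relies on jemalloc's non-standard *allocx API (mallocx, sallocx, xallocx, rallocx, dallocx) to grow an allocation in place and check that the newly exposed bytes are zero-filled. A minimal sketch of how that API fits together is below; it assumes a build linked against jemalloc, and the zero-fill behaviour checked by test_zero comes from running with the zero:true option in malloc_conf, not from this snippet itself.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    unsigned char *p = mallocx(100, 0);     /* allocate at least 100 bytes */
    size_t usable = sallocx(p, 0);          /* real usable size of p */
    printf("usable size: %zu\n", usable);

    /* Try to grow in place; xallocx() reports the resulting usable size
     * and never moves the allocation. */
    size_t grown = xallocx(p, usable + 1, 0, 0);
    if (grown == usable) {
        /* Could not grow in place: rallocx() may move the block. */
        p = rallocx(p, usable + 1, 0);
    }

    dallocx(p, 0);
    return 0;
}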
src/Makefile
View file @ 71a8df6a
...
@@ -14,6 +14,7 @@
release_hdr := $(shell sh -c './mkreleasehdr.sh')
uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not')
uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo not')
OPTIMIZATION?=-O2
DEPENDENCY_TARGETS=hiredis linenoise lua
NODEPS:=clean distclean
...
@@ -27,11 +28,14 @@ PREFIX?=/usr/local
INSTALL_BIN=$(PREFIX)/bin
INSTALL=install

# Default allocator
ifeq ($(uname_S),Linux)
	MALLOC=jemalloc
else
	MALLOC=libc
endif

# Default allocator defaults to Jemalloc if it's not an ARM
MALLOC=libc
ifneq ($(uname_M),armv6l)
ifneq ($(uname_M),armv7l)
ifeq ($(uname_S),Linux)
	MALLOC=jemalloc
endif
endif
endif

# Backwards compatibility for selecting an allocator
...
@@ -61,6 +65,13 @@ DEBUG=-g -ggdb
ifeq ($(uname_S),SunOS)
	# SunOS
	ifneq ($(@@),32bit)
		CFLAGS+= -m64
		LDFLAGS+= -m64
	endif
	DEBUG=-g
	DEBUG_FLAGS=-g
	export CFLAGS LDFLAGS DEBUG DEBUG_FLAGS
	INSTALL=cp -pf
	FINAL_CFLAGS+= -D__EXTENSIONS__ -D_XPG6
	FINAL_LIBS+= -ldl -lnsl -lsocket -lresolv -lpthread -lrt
...
@@ -128,7 +139,7 @@ endif
REDIS_SERVER_NAME=redis-server
REDIS_SENTINEL_NAME=redis-sentinel
REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o
REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o
REDIS_CLI_NAME=redis-cli
REDIS_CLI_OBJ=anet.o adlist.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o
REDIS_BENCHMARK_NAME=redis-benchmark
...
@@ -204,7 +215,7 @@ $(REDIS_CHECK_AOF_NAME): $(REDIS_CHECK_AOF_OBJ)
	$(REDIS_LD) -o $@ $^ $(FINAL_LIBS)

dict-benchmark: dict.c zmalloc.c sds.c
	$(REDIS_CC) $(FINAL_CFLAGS) dict.c zmalloc.c sds.c -D DICT_BENCHMARK_MAIN -o dict-benchmark
	$(REDIS_CC) $(FINAL_CFLAGS) dict.c zmalloc.c sds.c siphash.c -D DICT_BENCHMARK_MAIN -o dict-benchmark

# Because the jemalloc.h header is generated as a part of the jemalloc build,
# building it should complete before building any other object. Instead of
...
src/aof.c
View file @ 71a8df6a
...
@@ -1357,6 +1357,7 @@ int rewriteAppendOnlyFileBackground(void) {
        serverLog(LL_WARNING,
            "Can't rewrite append only file in background: fork: %s",
            strerror(errno));
        aofClosePipes();
        return C_ERR;
    }
    serverLog(LL_NOTICE,
...
src/atomicvar.h
View file @ 71a8df6a
...
@@ -51,7 +51,7 @@
#ifndef __ATOMIC_VAR_H
#define __ATOMIC_VAR_H

#if defined(__ATOMIC_RELAXED) && (!defined(__clang__) || !defined(__APPLE__) || __apple_build_version__ > 4210057)
#if defined(__ATOMIC_RELAXED) && !defined(__sun) && (!defined(__clang__) || !defined(__APPLE__) || __apple_build_version__ > 4210057)
/* Implementation using __atomic macros. */
#define atomicIncr(var,count,mutex) __atomic_add_fetch(&var,(count),__ATOMIC_RELAXED)
...
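The changed preprocessor condition only decides when atomicvar.h may use the GCC/Clang __atomic builtins (now also excluding Solaris); the macro shown above then expands to a relaxed fetch-and-add. As a small standalone illustration of that expansion (not Redis code; the mutex argument of atomicIncr is simply unused on this path):

#include <stdio.h>

int main(void) {
    long counter = 0;

    /* What atomicIncr(counter, 10, mutex) becomes on the __atomic path:
     * the builtin itself is atomic, so no lock is taken. */
    __atomic_add_fetch(&counter, 10, __ATOMIC_RELAXED);

    printf("%ld\n", counter);   /* prints 10 */
    return 0;
}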
src/bitops.c
View file @ 71a8df6a
...
@@ -104,6 +104,7 @@ long redisBitpos(void *s, unsigned long count, int bit) {
    unsigned long skipval, word = 0, one;
    long pos = 0; /* Position of bit, to return to the caller. */
    unsigned long j;
    int found;

    /* Process whole words first, seeking for first word that is not
     * all ones or all zeros respectively if we are lookig for zeros
...
@@ -117,21 +118,27 @@ long redisBitpos(void *s, unsigned long count, int bit) {
    /* Skip initial bits not aligned to sizeof(unsigned long) byte by byte. */
    skipval = bit ? 0 : UCHAR_MAX;
    c = (unsigned char*) s;
    found = 0;
    while((unsigned long)c & (sizeof(*l)-1) && count) {
        if (*c != skipval) break;
        if (*c != skipval) {
            found = 1;
            break;
        }
        c++;
        count--;
        pos += 8;
    }

    /* Skip bits with full word step. */
    skipval = bit ? 0 : ULONG_MAX;
    l = (unsigned long*) c;
    while(count >= sizeof(*l)) {
        if (*l != skipval) break;
        l++;
        count -= sizeof(*l);
        pos += sizeof(*l)*8;
    }
    l = (unsigned long*) c;
    if (!found) {
        skipval = bit ? 0 : ULONG_MAX;
        while(count >= sizeof(*l)) {
            if (*l != skipval) break;
            l++;
            count -= sizeof(*l);
            pos += sizeof(*l)*8;
        }
    }

    /* Load bytes into "word" considering the first byte as the most significant
...
@@ -654,8 +661,11 @@ void bitopCommand(client *c) {
        /* Fast path: as far as we have data for all the input bitmaps we
         * can take a fast path that performs much better than the
         * vanilla algorithm. */
         * vanilla algorithm. On ARM we skip the fast path since it will
         * result in GCC compiling the code using multiple-words load/store
         * operations that are not supported even in ARM >= v6. */
        j = 0;
        #ifndef USE_ALIGNED_ACCESS
        if (minlen >= sizeof(unsigned long)*4 && numkeys <= 16) {
            unsigned long *lp[16];
            unsigned long *lres = (unsigned long*) res;
...
@@ -716,6 +726,7 @@ void bitopCommand(client *c) {
                }
            }
        }
        #endif

        /* j is set to the next byte to process by the previous loop. */
        for (; j < maxlen; j++) {
...
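The redisBitpos() change adds a found flag so the word-at-a-time skip loop only runs when the byte-wise alignment prefix did not already locate an interesting byte, and the BITOP change hides the word-sized fast path behind USE_ALIGNED_ACCESS. A self-contained, simplified sketch of the byte-then-word skipping pattern is below; names and the 0xff convention are illustrative only, this is not the Redis function.

#include <stdio.h>
#include <string.h>
#include <limits.h>

/* Return the index of the first byte that is not 0xff, or -1 if none. */
static long first_non_full_byte(const unsigned char *s, size_t count) {
    long pos = 0;   /* position in bits, as in redisBitpos() */
    int found = 0;
    const unsigned char *c = s;

    /* Byte-wise scan until the pointer is aligned to an unsigned long. */
    while (((unsigned long)c & (sizeof(unsigned long) - 1)) && count) {
        if (*c != UCHAR_MAX) { found = 1; break; }
        c++; count--; pos += 8;
    }

    /* Word-wise skip, but only if the prefix scan found nothing: this
     * mirrors the "found" flag introduced in the hunk above. */
    if (!found) {
        const unsigned long *l = (const unsigned long *)c;
        while (count >= sizeof(*l) && *l == ULONG_MAX) {
            l++; count -= sizeof(*l); pos += sizeof(*l) * 8;
        }
        c = (const unsigned char *)l;
    }

    /* Finish byte by byte. */
    while (count) {
        if (*c != UCHAR_MAX) return pos / 8;
        c++; count--; pos += 8;
    }
    return -1;
}

int main(void) {
    unsigned char buf[32];
    memset(buf, 0xff, sizeof(buf));
    buf[21] = 0x7f;
    printf("%ld\n", first_non_full_byte(buf, sizeof(buf)));  /* prints 21 */
    return 0;
}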
src/cluster.c
View file @ 71a8df6a
...
@@ -4756,6 +4756,7 @@ void migrateCommand(client *c) {
    rio cmd, payload;
    int may_retry = 1;
    int write_error = 0;
    int argv_rewritten = 0;

    /* To support the KEYS option we need the following additional state. */
    int first_key = 3; /* Argument index of the first key. */
...
@@ -4939,12 +4940,20 @@ try_again:
            goto socket_err; /* A retry is guaranteed because of tested conditions.*/
    }

    /* On socket errors, close the migration socket now that we still have
     * the original host/port in the ARGV. Later the original command may be
     * rewritten to DEL and will be too later. */
    if (socket_error) migrateCloseSocket(c->argv[1],c->argv[2]);

    if (!copy) {
        /* Translate MIGRATE as DEL for replication/AOF. */
        /* Translate MIGRATE as DEL for replication/AOF. Note that we do
         * this only for the keys for which we received an acknowledgement
         * from the receiving Redis server, by using the del_idx index. */
        if (del_idx > 1) {
            newargv[0] = createStringObject("DEL",3);
            /* Note that the following call takes ownership of newargv. */
            replaceClientCommandVector(c,del_idx,newargv);
            argv_rewritten = 1;
        } else {
            /* No key transfer acknowledged, no need to rewrite as DEL. */
            zfree(newargv);
...
@@ -4953,8 +4962,8 @@ try_again:
    }

    /* If we are here and a socket error happened, we don't want to retry.
     * Just signal the problem to the client, but only do it if we don't
     * already queued a different error reported by the destination server. */
     * Just signal the problem to the client, but only do it if we did not
     * already queue a different error reported by the destination server. */
    if (!error_from_target && socket_error) {
        may_retry = 0;
        goto socket_err;
...
@@ -4962,7 +4971,11 @@ try_again:
    if (!error_from_target) {
        /* Success! Update the last_dbid in migrateCachedSocket, so that we can
         * avoid SELECT the next time if the target DB is the same. Reply +OK. */
         * avoid SELECT the next time if the target DB is the same. Reply +OK.
         *
         * Note: If we reached this point, even if socket_error is true
         * still the SELECT command succeeded (otherwise the code jumps to
         * socket_err label. */
        cs->last_dbid = dbid;
        addReply(c,shared.ok);
    } else {
...
@@ -4972,7 +4985,6 @@ try_again:
    sdsfree(cmd.io.buffer.ptr);
    zfree(ov); zfree(kv); zfree(newargv);
    if (socket_error) migrateCloseSocket(c->argv[1],c->argv[2]);
    return;

    /* On socket errors we try to close the cached socket and try again.
...
@@ -4982,7 +4994,12 @@ socket_err:
    /* Cleanup we want to perform in both the retry and no retry case.
     * Note: Closing the migrate socket will also force SELECT next time. */
    sdsfree(cmd.io.buffer.ptr);
    migrateCloseSocket(c->argv[1],c->argv[2]);
    /* If the command was rewritten as DEL and there was a socket error,
     * we already closed the socket earlier. While migrateCloseSocket()
     * is idempotent, the host/port arguments are now gone, so don't do it
     * again. */
    if (!argv_rewritten) migrateCloseSocket(c->argv[1],c->argv[2]);
    zfree(newargv);
    newargv = NULL; /* This will get reallocated on retry. */
...
src/config.c
View file @ 71a8df6a
...
@@ -1423,7 +1423,7 @@ void configGetCommand(client *c) {
/* We use the following dictionary type to store where a configuration
 * option is mentioned in the old configuration file, so it's
 * like "maxmemory" -> list of line numbers (first line is zero). */
unsigned int dictSdsCaseHash(const void *key);
uint64_t dictSdsCaseHash(const void *key);
int dictSdsKeyCaseCompare(void *privdata, const void *key1, const void *key2);
void dictSdsDestructor(void *privdata, void *val);
void dictListDestructor(void *privdata, void *val);
...
src/config.h
View file @ 71a8df6a
...
@@ -206,4 +206,22 @@ void setproctitle(const char *fmt, ...);
#endif
#endif

/* Make sure we can test for ARM just checking for __arm__, since sometimes
 * __arm is defined but __arm__ is not. */
#if defined(__arm) && !defined(__arm__)
#define __arm__
#endif
#if defined (__aarch64__) && !defined(__arm64__)
#define __arm64__
#endif

/* Make sure we can test for SPARC just checking for __sparc__. */
#if defined(__sparc) && !defined(__sparc__)
#define __sparc__
#endif

#if defined(__sparc__) || defined(__arm__)
#define USE_ALIGNED_ACCESS
#endif

#endif
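USE_ALIGNED_ACCESS, defined here for ARM and SPARC, is what the bitops.c hunk above uses to skip word-at-a-time fast paths: casting an arbitrary byte pointer to unsigned long* and dereferencing it can fault or misbehave on strict-alignment CPUs. A generic, hedged sketch of the alignment-safe alternative (not code from this commit) is a memcpy-based word load, which the compiler turns into byte loads where required and a single word load elsewhere:

#include <stdio.h>
#include <string.h>

/* Alignment-safe word load; Redis instead disables its unaligned fast
 * paths entirely when USE_ALIGNED_ACCESS is defined. */
static unsigned long load_word(const unsigned char *p) {
    unsigned long v;
    memcpy(&v, p, sizeof(v));
    return v;
}

int main(void) {
    unsigned char buf[sizeof(unsigned long) + 1] = {0};
    buf[1] = 0x2a;
    printf("%lx\n", load_word(buf + 1));  /* misaligned source, still safe */
    return 0;
}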
src/debug.c
View file @ 71a8df6a
...
@@ -126,7 +126,7 @@ void computeDatasetDigest(unsigned char *final) {
        redisDb *db = server.db+j;

        if (dictSize(db->dict) == 0) continue;
        di = dictGetIterator(db->dict);
        di = dictGetSafeIterator(db->dict);

        /* hash the DB id, so the same dataset moved in a different
         * DB will lead to a different digest */
...
@@ -266,6 +266,8 @@ void debugCommand(client *c) {
        blen++; addReplyStatus(c,
            "segfault -- Crash the server with sigsegv.");
        blen++; addReplyStatus(c,
            "panic -- Crash the server simulating a panic.");
        blen++; addReplyStatus(c,
            "restart -- Graceful restart: save config, db, restart.");
        blen++; addReplyStatus(c,
            "crash-and-recovery <milliseconds> -- Hard crash and restart after <milliseconds> delay.");
...
@@ -300,6 +302,8 @@ void debugCommand(client *c) {
        setDeferredMultiBulkLength(c,blenp,blen);
    } else if (!strcasecmp(c->argv[1]->ptr,"segfault")) {
        *((char*)-1) = 'x';
    } else if (!strcasecmp(c->argv[1]->ptr,"panic")) {
        serverPanic("DEBUG PANIC called at Unix time %ld", time(NULL));
    } else if (!strcasecmp(c->argv[1]->ptr,"restart") ||
               !strcasecmp(c->argv[1]->ptr,"crash-and-recover"))
    {
...
@@ -615,11 +619,17 @@ void _serverAssertWithInfo(const client *c, const robj *o, const char *estr, con
    _serverAssert(estr,file,line);
}

void _serverPanic(const char *msg, const char *file, int line) {
void _serverPanic(const char *file, int line, const char *msg, ...) {
    va_list ap;
    va_start(ap,msg);
    char fmtmsg[256];
    vsnprintf(fmtmsg,sizeof(fmtmsg),msg,ap);
    va_end(ap);

    bugReportStart();
    serverLog(LL_WARNING,"------------------------------------------------");
    serverLog(LL_WARNING,"!!! Software Failure. Press left mouse button to continue");
    serverLog(LL_WARNING,"Guru Meditation: %s #%s:%d", msg, file, line);
    serverLog(LL_WARNING,"Guru Meditation: %s #%s:%d", fmtmsg, file, line);
#ifdef HAVE_BACKTRACE
    serverLog(LL_WARNING,"(forcing SIGSEGV in order to print the stack trace)");
#endif
...
@@ -1019,8 +1029,6 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) {
    /* Log INFO and CLIENT LIST */
    serverLogRaw(LL_WARNING|LL_RAW, "\n------ INFO OUTPUT ------\n");
    infostring = genRedisInfoString("all");
    infostring = sdscatprintf(infostring, "hash_init_value: %u\n",
        dictGetHashFunctionSeed());
    serverLogRaw(LL_WARNING|LL_RAW, infostring);
    serverLogRaw(LL_WARNING|LL_RAW, "\n------ CLIENT LIST OUTPUT ------\n");
    clients = getAllClientsInfoString();
...
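The _serverPanic() change turns the panic entry point into a printf-style variadic function: the message is formatted into a fixed local buffer with vsnprintf() before it is logged, which is what allows the new DEBUG PANIC branch to pass the current Unix time as an argument. A minimal standalone sketch of that pattern, with a hypothetical helper name rather than the Redis function, is:

#include <stdarg.h>
#include <stdio.h>

static void example_panic(const char *file, int line, const char *msg, ...) {
    char fmtmsg[256];
    va_list ap;

    va_start(ap, msg);
    vsnprintf(fmtmsg, sizeof(fmtmsg), msg, ap);
    va_end(ap);

    fprintf(stderr, "Guru Meditation: %s #%s:%d\n", fmtmsg, file, line);
}

int main(void) {
    /* Callers can now pass printf-style arguments to the panic message. */
    example_panic(__FILE__, __LINE__, "something broke: code %d", 42);
    return 0;
}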
src/defrag.c
View file @ 71a8df6a
...
@@ -406,6 +406,9 @@ int defragKey(redisDb *db, dictEntry *de) {
        } else {
            serverPanic("Unknown hash encoding");
        }
    } else if (ob->type == OBJ_MODULE) {
        /* Currently defragmenting modules private data types
         * is not supported. */
    } else {
        serverPanic("Unknown object type");
    }
...
src/dict.c
View file @ 71a8df6a
...
@@ -37,11 +37,11 @@
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stdarg.h>
#include <limits.h>
#include <sys/time.h>
#include <ctype.h>

#include "dict.h"
#include "zmalloc.h"
...
@@ -71,77 +71,28 @@ static int _dictInit(dict *ht, dictType *type, void *privDataPtr);
/* -------------------------- hash functions -------------------------------- */

static uint32_t dict_hash_function_seed = 5381;
static uint8_t dict_hash_function_seed[16];

void dictSetHashFunctionSeed(uint32_t seed) {
void dictSetHashFunctionSeed(uint8_t *seed) {
    dict_hash_function_seed = seed;
    memcpy(dict_hash_function_seed,seed,sizeof(dict_hash_function_seed));
}

uint32_t dictGetHashFunctionSeed(void) {
uint8_t *dictGetHashFunctionSeed(void) {
    return dict_hash_function_seed;
}

/* MurmurHash2, by Austin Appleby
 * Note - This code makes a few assumptions about how your machine behaves -
 * 1. We can read a 4-byte value from any address without crashing
 * 2. sizeof(int) == 4
 *
 * And it has a few limitations -
 *
 * 1. It will not work incrementally.
 * 2. It will not produce the same results on little-endian and big-endian
 *    machines.
 */
unsigned int dictGenHashFunction(const void *key, int len) {
    /* 'm' and 'r' are mixing constants generated offline.
     They're not really 'magic', they just happen to work well. */
    uint32_t seed = dict_hash_function_seed;
    const uint32_t m = 0x5bd1e995;
    const int r = 24;

    /* Initialize the hash to a 'random' value */
    uint32_t h = seed ^ len;

    /* Mix 4 bytes at a time into the hash */
    const unsigned char *data = (const unsigned char *)key;

    while(len >= 4) {
        uint32_t k = *(uint32_t*)data;

        k *= m;
        k ^= k >> r;
        k *= m;

        h *= m;
        h ^= k;

        data += 4;
        len -= 4;
    }

    /* Handle the last few bytes of the input array */
    switch(len) {
    case 3: h ^= data[2] << 16;
    case 2: h ^= data[1] << 8;
    case 1: h ^= data[0]; h *= m;
    };

    /* Do a few final mixes of the hash to ensure the last few
     * bytes are well-incorporated. */
    h ^= h >> 13;
    h *= m;
    h ^= h >> 15;

    return (unsigned int)h;
}

/* And a case insensitive hash function (based on djb hash) */
unsigned int dictGenCaseHashFunction(const unsigned char *buf, int len) {
    unsigned int hash = (unsigned int)dict_hash_function_seed;

    while (len--)
        hash = ((hash << 5) + hash) + (tolower(*buf++)); /* hash * 33 + c */
    return hash;
}

/* The default hashing function uses SipHash implementation
 * in siphash.c. */

uint64_t siphash(const uint8_t *in, const size_t inlen, const uint8_t *k);
uint64_t siphash_nocase(const uint8_t *in, const size_t inlen, const uint8_t *k);

uint64_t dictGenHashFunction(const void *key, int len) {
    return siphash(key,len,dict_hash_function_seed);
}

uint64_t dictGenCaseHashFunction(const unsigned char *buf, int len) {
    return siphash_nocase(buf,len,dict_hash_function_seed);
}

/* ----------------------------- API implementation ------------------------- */
...
@@ -1158,7 +1109,7 @@ void dictGetStats(char *buf, size_t bufsize, dict *d) {
#include "sds.h"

unsigned int hashCallback(const void *key) {
uint64_t hashCallback(const void *key) {
    return dictGenHashFunction((unsigned char*)key, sdslen((char*)key));
}
...
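After this change the dict hash functions are thin wrappers around SipHash keyed with a 16-byte seed, so the per-process randomization moves from a single 32-bit value to a 128-bit key installed via dictSetHashFunctionSeed(). The sketch below shows how a caller is meant to seed it once at startup; it assumes linking against dict.c and siphash.c from this commit, and getRandomBytes() here is a hypothetical stand-in, not the randomness source Redis actually uses.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Declarations for the functions defined in dict.c above. */
void dictSetHashFunctionSeed(uint8_t *seed);
uint64_t dictGenHashFunction(const void *key, int len);

/* Hypothetical helper: a real implementation should use the OS RNG. */
static void getRandomBytes(uint8_t *p, size_t len) {
    srand((unsigned)time(NULL));
    for (size_t i = 0; i < len; i++) p[i] = (uint8_t)(rand() & 0xff);
}

int main(void) {
    uint8_t seed[16];

    getRandomBytes(seed, sizeof(seed));
    dictSetHashFunctionSeed(seed);   /* install the 128-bit SipHash key */

    uint64_t h = dictGenHashFunction("maxmemory", 9);
    printf("%llu\n", (unsigned long long)h);
    return 0;
}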