Unverified commit 5cbc73c6 authored by jared, committed by GitHub

Merge pull request #1 from antirez/unstable

merge redis
parents a16aa03a d5536e04
@@ -121,7 +121,7 @@ start_server {tags {"expire"}} {
list $a $b
} {somevalue {}}
-test {TTL returns tiem to live in seconds} {
+test {TTL returns time to live in seconds} {
r del x
r setex x 10 somevalue
set ttl [r ttl x]
...
@@ -61,6 +61,7 @@ set regression_vectors {
{939895 151 59.149620271823181 65.204186651485145}
{1412 156 149.29737817929004 15.95807862745508}
{564862 149 84.062063109158544 -65.685403922426232}
{1546032440391 16751 -1.8175081637769495 20.665668878082954}
}
set rv_idx 0
@@ -274,8 +275,19 @@ start_server {tags {"geo"}} {
foreach place $diff {
set mydist [geo_distance $lon $lat $search_lon $search_lat]
set mydist [expr $mydist/1000]
-if {($mydist / $radius_km) > 0.999} {incr rounding_errors}
+if {($mydist / $radius_km) > 0.999} {
incr rounding_errors
continue
}
if {$mydist < $radius_m} {
# This is a false positive for redis since given the
# same points the higher precision calculation provided
# by TCL shows the point within range
incr rounding_errors
continue
}
}
# Make sure this is a real error and not a rounding issue.
if {[llength $diff] == $rounding_errors} {
set res $res2; # Error silenced
...
@@ -115,6 +115,34 @@ start_server {tags {"hll"}} {
set e
} {*WRONGTYPE*}
test {Fuzzing dense/sparse encoding: Redis should always detect errors} {
for {set j 0} {$j < 1000} {incr j} {
r del hll
set items {}
set numitems [randomInt 3000]
for {set i 0} {$i < $numitems} {incr i} {
lappend items [expr {rand()}]
}
r pfadd hll {*}$items
# Corrupt it in some random way.
for {set i 0} {$i < 5} {incr i} {
set len [r strlen hll]
set pos [randomInt $len]
set byte [randstring 1 1 binary]
r setrange hll $pos $byte
# Don't modify more bytes 50% of the time
if {rand() < 0.5} break
}
# Use the hyperloglog to check if it crashes
# Redis in some way.
catch {
r pfcount hll
}
}
}
test {PFADD, PFCOUNT, PFMERGE type checking works} {
r set foo bar
catch {r pfadd foo 1} e
...
proc cmdstat {cmd} {
if {[regexp "\r\ncmdstat_$cmd:(.*?)\r\n" [r info commandstats] _ value]} {
set _ $value
}
}
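The proc above returns the raw stats payload for a command, or an empty string if the command never ran. As a rough sketch of what the regexp matches (field values invented for illustration), an INFO commandstats reply contains lines such as:

cmdstat_geoadd:calls=1,usec=90,usec_per_call=90.00

so after a single GEOADD, [cmdstat geoadd] would yield "calls=1,usec=90,usec_per_call=90.00", while [cmdstat zadd] yields "".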
start_server {tags {"introspection"}} { start_server {tags {"introspection"}} {
test {TTL and TYPYE do not alter the last access time of a key} { test {TTL and TYPYE do not alter the last access time of a key} {
r set foo bar r set foo bar
...@@ -20,4 +26,55 @@ start_server {tags {"introspection"}} { ...@@ -20,4 +26,55 @@ start_server {tags {"introspection"}} {
r set key2 2 r set key2 2
r touch key0 key1 key2 key3 r touch key0 key1 key2 key3
} 2 } 2
test {command stats for GEOADD} {
r config resetstat
r GEOADD foo 0 0 bar
assert_match {*calls=1,*} [cmdstat geoadd]
assert_match {} [cmdstat zadd]
}
test {command stats for EXPIRE} {
r config resetstat
r SET foo bar
r EXPIRE foo 0
assert_match {*calls=1,*} [cmdstat expire]
assert_match {} [cmdstat del]
}
test {command stats for BRPOP} {
r config resetstat
r LPUSH list foo
r BRPOP list 0
assert_match {*calls=1,*} [cmdstat brpop]
assert_match {} [cmdstat rpop]
}
test {command stats for MULTI} {
r config resetstat
r MULTI
r set foo bar
r GEOADD foo2 0 0 bar
r EXPIRE foo2 0
r EXEC
assert_match {*calls=1,*} [cmdstat multi]
assert_match {*calls=1,*} [cmdstat exec]
assert_match {*calls=1,*} [cmdstat set]
assert_match {*calls=1,*} [cmdstat expire]
assert_match {*calls=1,*} [cmdstat geoadd]
}
test {command stats for scripts} {
r config resetstat
r set mykey myval
r eval {
redis.call('set', KEYS[1], 0)
redis.call('expire', KEYS[1], 0)
redis.call('geoadd', KEYS[1], 0, 0, "bar")
} 1 mykey
assert_match {*calls=1,*} [cmdstat eval]
assert_match {*calls=2,*} [cmdstat set]
assert_match {*calls=1,*} [cmdstat expire]
assert_match {*calls=1,*} [cmdstat geoadd]
}
}
start_server {tags {"introspection"}} {
test {CLIENT LIST} {
r client list
-} {*addr=*:* fd=* age=* idle=* flags=N db=9 sub=0 psub=0 multi=-1 qbuf=0 qbuf-free=* obl=0 oll=0 omem=0 events=r cmd=client*}
+} {*addr=*:* fd=* age=* idle=* flags=N db=9 sub=0 psub=0 multi=-1 qbuf=26 qbuf-free=* obl=0 oll=0 omem=0 events=r cmd=client*}
test {MONITOR can log executed commands} {
set rd [redis_deferring_client]
...
@@ -142,3 +142,102 @@ start_server {tags {"maxmemory"}} {
}
}
}
proc test_slave_buffers {test_name cmd_count payload_len limit_memory pipeline} {
start_server {tags {"maxmemory"}} {
start_server {} {
set slave_pid [s process_id]
test "$test_name" {
set slave [srv 0 client]
set slave_host [srv 0 host]
set slave_port [srv 0 port]
set master [srv -1 client]
set master_host [srv -1 host]
set master_port [srv -1 port]
# add 100 keys of 100k (10MB total)
for {set j 0} {$j < 100} {incr j} {
$master setrange "key:$j" 100000 asdf
}
# make sure master doesn't disconnect slave because of timeout
$master config set repl-timeout 1200 ;# 20 minutes (for valgrind and slow machines)
$master config set maxmemory-policy allkeys-random
$master config set client-output-buffer-limit "replica 100000000 100000000 300"
$master config set repl-backlog-size [expr {10*1024}]
$slave slaveof $master_host $master_port
wait_for_condition 50 100 {
[s 0 master_link_status] eq {up}
} else {
fail "Replication not started."
}
# measure used memory after the slave connected and set maxmemory
set orig_used [s -1 used_memory]
set orig_client_buf [s -1 mem_clients_normal]
set orig_mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
set orig_used_no_repl [expr {$orig_used - $orig_mem_not_counted_for_evict}]
set limit [expr {$orig_used - $orig_mem_not_counted_for_evict + 20*1024}]
if {$limit_memory==1} {
$master config set maxmemory $limit
}
# put the slave to sleep
set rd_slave [redis_deferring_client]
exec kill -SIGSTOP $slave_pid
# send some 10mb worth of commands that don't increase the memory usage
if {$pipeline == 1} {
set rd_master [redis_deferring_client -1]
for {set k 0} {$k < $cmd_count} {incr k} {
$rd_master setrange key:0 0 [string repeat A $payload_len]
}
for {set k 0} {$k < $cmd_count} {incr k} {
#$rd_master read
}
} else {
for {set k 0} {$k < $cmd_count} {incr k} {
$master setrange key:0 0 [string repeat A $payload_len]
}
}
set new_used [s -1 used_memory]
set slave_buf [s -1 mem_clients_slaves]
set client_buf [s -1 mem_clients_normal]
set mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
set used_no_repl [expr {$new_used - $mem_not_counted_for_evict}]
set delta [expr {($used_no_repl - $client_buf) - ($orig_used_no_repl - $orig_client_buf)}]
assert {[$master dbsize] == 100}
assert {$slave_buf > 2*1024*1024} ;# some of the data may have been pushed to the OS buffers
set delta_max [expr {$cmd_count / 2}] ;# 1 byte unaccounted for, with 1M commands will consume some 1MB
assert {$delta < $delta_max && $delta > -$delta_max}
$master client kill type slave
set killed_used [s -1 used_memory]
set killed_slave_buf [s -1 mem_clients_slaves]
set killed_mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
set killed_used_no_repl [expr {$killed_used - $killed_mem_not_counted_for_evict}]
set delta_no_repl [expr {$killed_used_no_repl - $used_no_repl}]
assert {$killed_slave_buf == 0}
assert {$delta_no_repl > -$delta_max && $delta_no_repl < $delta_max}
}
# unfreeze slave process (after the 'test' succeeded or failed, but before we attempt to terminate the server)
exec kill -SIGCONT $slave_pid
}
}
}
# test that slave buffers are counted correctly
# we want to use many small commands, and we don't want to wait long,
# so we need to use a pipeline (redis_deferring_client);
# that may cause the query buffer to fill and induce eviction, so we disable it
test_slave_buffers {slave buffers are counted correctly} 1000000 10 0 1
# test that slave buffers don't induce eviction
# test again with fewer (and bigger) commands without pipeline, but with eviction
test_slave_buffers "replica buffers don't induce eviction" 100000 100 1 0
@@ -41,7 +41,7 @@ start_server {tags {"defrag"}} {
test "Active defrag" {
r config set activedefrag no
r config set active-defrag-threshold-lower 5
-r config set active-defrag-cycle-min 25
+r config set active-defrag-cycle-min 65
r config set active-defrag-cycle-max 75
r config set active-defrag-ignore-bytes 2mb
r config set maxmemory 100mb
@@ -66,9 +66,10 @@ start_server {tags {"defrag"}} {
}
# Wait for the active defrag to stop working.
-wait_for_condition 100 100 {
+wait_for_condition 150 100 {
[s active_defrag_running] eq 0
} else {
after 120 ;# serverCron only updates the info once in 100ms
puts [r info memory]
puts [r memory malloc-stats]
fail "defrag didn't stop."
@@ -89,6 +90,7 @@ start_server {tags {"defrag"}} {
test "Active defrag big keys" { test "Active defrag big keys" {
r flushdb r flushdb
r config resetstat r config resetstat
r config set save "" ;# prevent bgsave from interfereing with save below
r config set activedefrag no r config set activedefrag no
r config set active-defrag-max-scan-fields 1000 r config set active-defrag-max-scan-fields 1000
r config set active-defrag-threshold-lower 5 r config set active-defrag-threshold-lower 5
@@ -97,10 +99,15 @@ start_server {tags {"defrag"}} {
r config set active-defrag-ignore-bytes 2mb
r config set maxmemory 0
r config set list-max-ziplist-size 5 ;# list of 10k items will have 2000 quicklist nodes
r config set stream-node-max-entries 5
r hmset hash h1 v1 h2 v2 h3 v3
r lpush list a b c d
r zadd zset 0 a 1 b 2 c 3 d
r sadd set a b c d
r xadd stream * item 1 value a
r xadd stream * item 2 value b
r xgroup create stream mygroup 0
r xreadgroup GROUP mygroup Alice COUNT 1 STREAMS stream >
# create big keys with 10k items
set rd [redis_deferring_client]
@@ -109,8 +116,9 @@ start_server {tags {"defrag"}} {
$rd lpush biglist [concat "asdfasdfasdf" $j]
$rd zadd bigzset $j [concat "asdfasdfasdf" $j]
$rd sadd bigset [concat "asdfasdfasdf" $j]
$rd xadd bigstream * item 1 value a
}
-for {set j 0} {$j < 40000} {incr j} {
+for {set j 0} {$j < 50000} {incr j} {
$rd read ; # Discard replies
}
@@ -134,7 +142,7 @@ start_server {tags {"defrag"}} {
for {set j 0} {$j < 500000} {incr j} {
$rd read ; # Discard replies
}
-assert {[r dbsize] == 500008}
+assert {[r dbsize] == 500010}
# create some fragmentation
for {set j 0} {$j < 500000} {incr j 2} {
@@ -143,7 +151,7 @@ start_server {tags {"defrag"}} {
for {set j 0} {$j < 500000} {incr j 2} {
$rd read ; # Discard replies
}
-assert {[r dbsize] == 250008}
+assert {[r dbsize] == 250010}
# start defrag
after 120 ;# serverCron only updates the info once in 100ms
@@ -155,6 +163,7 @@ start_server {tags {"defrag"}} {
r config set latency-monitor-threshold 5
r latency reset
set digest [r debug digest]
catch {r config set activedefrag yes} e
if {![string match {DISABLED*} $e]} {
# wait for the active defrag to start working (decision once a second)
@@ -168,6 +177,7 @@ start_server {tags {"defrag"}} {
wait_for_condition 500 100 {
[s active_defrag_running] eq 0
} else {
after 120 ;# serverCron only updates the info once in 100ms
puts [r info memory]
puts [r memory malloc-stats]
fail "defrag didn't stop."
@@ -192,10 +202,12 @@ start_server {tags {"defrag"}} {
assert {$frag < 1.1}
# due to high fragmentation, 10hz, and active-defrag-cycle-max set to 75,
# we expect max latency to be not much higher than 75ms
-assert {$max_latency <= 80}
+assert {$max_latency <= 120}
} else {
set _ ""
} }
-} {}
+# verify the data isn't corrupted or changed
set newdigest [r debug digest]
assert {$digest eq $newdigest}
r save ;# saving an rdb iterates over all the data / pointers
} {OK}
}
}
set testmodule [file normalize tests/modules/commandfilter.so]
start_server {tags {"modules"}} {
r module load $testmodule log-key 0
test {Command Filter handles redirected commands} {
r set mykey @log
r lrange log-key 0 -1
} "{set mykey @log}"
test {Command Filter can call RedisModule_CommandFilterArgDelete} {
r rpush mylist elem1 @delme elem2
r lrange mylist 0 -1
} {elem1 elem2}
test {Command Filter can call RedisModule_CommandFilterArgInsert} {
r del mylist
r rpush mylist elem1 @insertbefore elem2 @insertafter elem3
r lrange mylist 0 -1
} {elem1 --inserted-before-- @insertbefore elem2 @insertafter --inserted-after-- elem3}
test {Command Filter can call RedisModule_CommandFilterArgReplace} {
r del mylist
r rpush mylist elem1 @replaceme elem2
r lrange mylist 0 -1
} {elem1 --replaced-- elem2}
test {Command Filter applies on RM_Call() commands} {
r del log-key
r commandfilter.ping
r lrange log-key 0 -1
} "{ping @log}"
test {Command Filter applies on Lua redis.call()} {
r del log-key
r eval "redis.call('ping', '@log')" 0
r lrange log-key 0 -1
} "{ping @log}"
test {Command Filter applies on Lua redis.call() that calls a module} {
r del log-key
r eval "redis.call('commandfilter.ping')" 0
r lrange log-key 0 -1
} "{ping @log}"
test {Command Filter is unregistered implicitly on module unload} {
r del log-key
r module unload commandfilter
r set mykey @log
r lrange log-key 0 -1
} {}
r module load $testmodule log-key 0
test {Command Filter unregister works as expected} {
# Validate reloading succeeded
r del log-key
r set mykey @log
assert_equal "{set mykey @log}" [r lrange log-key 0 -1]
# Unregister
r commandfilter.unregister
r del log-key
r set mykey @log
r lrange log-key 0 -1
} {}
r module unload commandfilter
r module load $testmodule log-key 1
test {Command Filter REDISMODULE_CMDFILTER_NOSELF works as expected} {
r set mykey @log
assert_equal "{set mykey @log}" [r lrange log-key 0 -1]
r del log-key
r commandfilter.ping
assert_equal {} [r lrange log-key 0 -1]
r eval "redis.call('commandfilter.ping')" 0
assert_equal {} [r lrange log-key 0 -1]
}
}
set testmodule [file normalize tests/modules/testrdb.so]
proc restart_and_wait {} {
catch {
r debug restart
}
# wait for the server to come back up
set retry 50
while {$retry} {
if {[catch { r ping }]} {
after 100
} else {
break
}
incr retry -1
}
}
tags "modules" {
start_server [list overrides [list loadmodule "$testmodule"]] {
test {modules are able to persist types} {
r testrdb.set.key key1 value1
assert_equal "value1" [r testrdb.get.key key1]
r debug reload
assert_equal "value1" [r testrdb.get.key key1]
}
test {module globals are lost without aux} {
r testrdb.set.before global1
assert_equal "global1" [r testrdb.get.before]
restart_and_wait
assert_equal "" [r testrdb.get.before]
}
}
start_server [list overrides [list loadmodule "$testmodule 2"]] {
test {modules are able to persist globals before and after} {
r testrdb.set.before global1
r testrdb.set.after global2
assert_equal "global1" [r testrdb.get.before]
assert_equal "global2" [r testrdb.get.after]
restart_and_wait
assert_equal "global1" [r testrdb.get.before]
assert_equal "global2" [r testrdb.get.after]
}
}
start_server [list overrides [list loadmodule "$testmodule 1"]] {
test {modules are able to persist globals just after} {
r testrdb.set.after global2
assert_equal "global2" [r testrdb.get.after]
restart_and_wait
assert_equal "global2" [r testrdb.get.after]
}
}
# TODO: test short read handling
}
@@ -15,7 +15,7 @@ start_server {tags {"obuf-limits"}} {
if {![regexp {omem=([0-9]+)} $c - omem]} break
if {$omem > 200000} break
}
-assert {$omem >= 90000 && $omem < 200000}
+assert {$omem >= 70000 && $omem < 200000}
$rd1 close
}
...
proc info_memory {r property} {
if {[regexp "\r\n$property:(.*?)\r\n" [{*}$r info memory] _ value]} {
set _ $value
}
}
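This is the same regexp trick as the cmdstat helper earlier, applied to INFO memory: a reply line such as used_memory:1032984 (value invented for illustration) makes [info_memory $master used_memory] return 1032984, and a missing property returns an empty string.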
proc prepare_value {size} {
set _v "c"
for {set i 1} {$i < $size} {incr i} {
append _v 0
}
return $_v
}
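Building the value one character at a time works, but assuming only the length of the payload matters to the test below, an equivalent one-allocation Tcl idiom would be:

proc prepare_value {size} {
    # "c" followed by size-1 zeros, built with a single string repeat
    return "c[string repeat 0 [expr {$size-1}]]"
}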
start_server {tags {"wait"}} {
start_server {} {
set slave [srv 0 client]
set slave_host [srv 0 host]
set slave_port [srv 0 port]
set master [srv -1 client]
set master_host [srv -1 host]
set master_port [srv -1 port]
test "pending querybuf: check size of pending_querybuf after set a big value" {
$slave slaveof $master_host $master_port
set _v [prepare_value [expr 32*1024*1024]]
$master set key $_v
after 2000
set m_usedmemory [info_memory $master used_memory]
set s_usedmemory [info_memory $slave used_memory]
if { $s_usedmemory > $m_usedmemory + 10*1024*1024 } {
fail "the used_memory of replica is much larger than master. Master:$m_usedmemory Replica:$s_usedmemory"
}
}
}}
@@ -53,6 +53,51 @@ start_server {tags {"scan"}} {
assert_equal 100 [llength $keys]
}
test "SCAN TYPE" {
r flushdb
# populate only creates strings
r debug populate 1000
# Check non-strings are excluded
set cur 0
set keys {}
while 1 {
set res [r scan $cur type "list"]
set cur [lindex $res 0]
set k [lindex $res 1]
lappend keys {*}$k
if {$cur == 0} break
}
assert_equal 0 [llength $keys]
# Check strings are included
set cur 0
set keys {}
while 1 {
set res [r scan $cur type "string"]
set cur [lindex $res 0]
set k [lindex $res 1]
lappend keys {*}$k
if {$cur == 0} break
}
assert_equal 1000 [llength $keys]
# Check all three args work together
set cur 0
set keys {}
while 1 {
set res [r scan $cur type "string" match "key:*" count 10]
set cur [lindex $res 0]
set k [lindex $res 1]
lappend keys {*}$k
if {$cur == 0} break
}
assert_equal 1000 [llength $keys]
}
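The three cursor loops above differ only in the arguments passed to SCAN; a hypothetical helper (scan_all_keys is not part of the suite, just a sketch) could collapse them:

proc scan_all_keys {args} {
    set cur 0
    set keys {}
    while 1 {
        set res [r scan $cur {*}$args]
        set cur [lindex $res 0]
        lappend keys {*}[lindex $res 1]
        if {$cur == 0} break
    }
    return $keys
}

with which the last check becomes: assert_equal 1000 [llength [scan_all_keys type "string" match "key:*" count 10]]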
foreach enc {intset hashtable} {
test "SSCAN with encoding $enc" {
# Create the Set
...
@@ -148,9 +148,11 @@ start_server {tags {"scripting"}} {
test {EVAL - Scripts can't run certain commands} {
set e {}
r debug lua-always-replicate-commands 0
catch {
r eval "redis.pcall('randomkey'); return redis.pcall('set','x','ciao')" 0
} e
r debug lua-always-replicate-commands 1
set e
} {*not allowed after*}
@@ -299,9 +301,12 @@ start_server {tags {"scripting"}} {
} {b534286061d4b9e4026607613b95c06c06015ae8 loaded}
test "In the context of Lua the output of random commands gets ordered" {
r debug lua-always-replicate-commands 0
r del myset
r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz
-r eval {return redis.call('smembers',KEYS[1])} 1 myset
+set res [r eval {return redis.call('smembers',KEYS[1])} 1 myset]
r debug lua-always-replicate-commands 1
set res
} {a aa aaa azz b c d e f g h i l m n o p q r s t u v z}
test "SORT is normally not alpha re-ordered for the scripting engine" { test "SORT is normally not alpha re-ordered for the scripting engine" {
...@@ -517,7 +522,7 @@ start_server {tags {"scripting"}} { ...@@ -517,7 +522,7 @@ start_server {tags {"scripting"}} {
# Note: keep this test at the end of this server stanza because it # Note: keep this test at the end of this server stanza because it
# kills the server. # kills the server.
test {SHUTDOWN NOSAVE can kill a timedout script anyway} { test {SHUTDOWN NOSAVE can kill a timedout script anyway} {
# The server sould be still unresponding to normal commands. # The server could be still unresponding to normal commands.
catch {r ping} e catch {r ping} e
assert_match {BUSY*} $e assert_match {BUSY*} $e
catch {r shutdown nosave} catch {r shutdown nosave}
...@@ -537,7 +542,7 @@ foreach cmdrepl {0 1} { ...@@ -537,7 +542,7 @@ foreach cmdrepl {0 1} {
r debug lua-always-replicate-commands 1 r debug lua-always-replicate-commands 1
} }
test "Before the slave connects we issue two EVAL commands $rt" { test "Before the replica connects we issue two EVAL commands $rt" {
# One with an error, but still executing a command. # One with an error, but still executing a command.
# SHA is: 67164fc43fa971f76fd1aaeeaf60c1c178d25876 # SHA is: 67164fc43fa971f76fd1aaeeaf60c1c178d25876
catch { catch {
...@@ -548,13 +553,13 @@ foreach cmdrepl {0 1} { ...@@ -548,13 +553,13 @@ foreach cmdrepl {0 1} {
r eval {return redis.call('incr',KEYS[1])} 1 x r eval {return redis.call('incr',KEYS[1])} 1 x
} {2} } {2}
test "Connect a slave to the master instance $rt" { test "Connect a replica to the master instance $rt" {
r -1 slaveof [srv 0 host] [srv 0 port] r -1 slaveof [srv 0 host] [srv 0 port]
wait_for_condition 50 100 { wait_for_condition 50 100 {
[s -1 role] eq {slave} && [s -1 role] eq {slave} &&
[string match {*master_link_status:up*} [r -1 info replication]] [string match {*master_link_status:up*} [r -1 info replication]]
} else { } else {
fail "Can't turn the instance into a slave" fail "Can't turn the instance into a replica"
} }
} }
@@ -587,7 +592,7 @@ foreach cmdrepl {0 1} {
wait_for_condition 50 100 {
[r -1 lrange a 0 -1] eq [r lrange a 0 -1]
} else {
-fail "Expected list 'a' in slave and master to be the same, but they are respectively '[r -1 lrange a 0 -1]' and '[r lrange a 0 -1]'"
+fail "Expected list 'a' in replica and master to be the same, but they are respectively '[r -1 lrange a 0 -1]' and '[r lrange a 0 -1]'"
}
set res
} {a 1}
@@ -622,7 +627,7 @@ foreach cmdrepl {0 1} {
wait_for_condition 50 100 {
[r -1 debug digest] eq [r debug digest]
} else {
-fail "Master-Slave desync after Lua script using SELECT."
+fail "Master-Replica desync after Lua script using SELECT."
}
}
}
@@ -631,13 +636,13 @@ foreach cmdrepl {0 1} {
start_server {tags {"scripting repl"}} {
start_server {overrides {appendonly yes aof-use-rdb-preamble no}} {
-test "Connect a slave to the master instance" {
+test "Connect a replica to the master instance" {
r -1 slaveof [srv 0 host] [srv 0 port]
wait_for_condition 50 100 {
[s -1 role] eq {slave} &&
[string match {*master_link_status:up*} [r -1 info replication]]
} else {
-fail "Can't turn the instance into a slave"
+fail "Can't turn the instance into a replica"
}
}
@@ -655,11 +660,13 @@ start_server {tags {"scripting repl"}} {
} {1}
test "Redis.set_repl() must be issued after replicate_commands()" {
r debug lua-always-replicate-commands 0
catch {
r eval {
redis.set_repl(redis.REPL_ALL);
} 0
} e
r debug lua-always-replicate-commands 1
set e
} {*only after turning on*}
@@ -689,7 +696,7 @@ start_server {tags {"scripting repl"}} {
wait_for_condition 50 100 {
[r -1 mget a b c d] eq {1 {} {} 4}
} else {
-fail "Only a and c should be replicated to slave"
+fail "Only a and c should be replicated to replica"
}
# Master should have everything right now
wait_for_condition 50 100 { wait_for_condition 50 100 {
[r get time] eq [r -1 get time] [r get time] eq [r -1 get time]
} else { } else {
fail "Time key does not match between master and slave" fail "Time key does not match between master and replica"
} }
} }
} }
......
@@ -78,4 +78,16 @@ start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} {
set e [lindex [r slowlog get] 0]
assert_equal {lastentry_client} [lindex $e 5]
}
test {SLOWLOG - can be disabled} {
r config set slowlog-max-len 1
r config set slowlog-log-slower-than 1
r slowlog reset
r debug sleep 0.2
assert_equal [r slowlog len] 1
r config set slowlog-log-slower-than -1
r slowlog reset
r debug sleep 0.2
assert_equal [r slowlog len] 0
}
}
@@ -436,8 +436,11 @@ start_server {
test "$pop: with non-integer timeout" {
set rd [redis_deferring_client]
-$rd $pop blist1 1.1
-assert_error "ERR*not an integer*" {$rd read}
+r del blist1
+$rd $pop blist1 0.1
+r rpush blist1 foo
+assert_equal {blist1 foo} [$rd read]
+assert_equal 0 [r exists blist1]
}
test "$pop: with zero timeout should block indefinitely" {
...
@@ -9,6 +9,17 @@ start_server {
set err
} {BUSYGROUP*}
test {XGROUP CREATE: automatic stream creation fails without MKSTREAM} {
r DEL mystream
catch {r XGROUP CREATE mystream mygroup $} err
set err
} {ERR*}
test {XGROUP CREATE: automatic stream creation works with MKSTREAM} {
r DEL mystream
r XGROUP CREATE mystream mygroup $ MKSTREAM
} {OK}
test {XREADGROUP will return only new elements} {
r XADD mystream * a 1
r XADD mystream * b 2
@@ -81,4 +92,200 @@ start_server {
# just ID2.
assert {[r XACK mystream mygroup $id1 $id2] eq 1}
}
test {PEL NACK reassignment after XGROUP SETID event} {
r del events
r xadd events * f1 v1
r xadd events * f1 v1
r xadd events * f1 v1
r xadd events * f1 v1
r xgroup create events g1 $
r xadd events * f1 v1
set c [llength [lindex [r xreadgroup group g1 c1 streams events >] 0 1]]
assert {$c == 1}
r xgroup setid events g1 -
set c [llength [lindex [r xreadgroup group g1 c2 streams events >] 0 1]]
assert {$c == 5}
}
test {XREADGROUP will not report data on empty history. Bug #5577} {
r del events
r xadd events * a 1
r xadd events * b 2
r xadd events * c 3
r xgroup create events mygroup 0
# Current local PEL should be empty
set res [r xpending events mygroup - + 10]
assert {[llength $res] == 0}
# So XREADGROUP should read an empty history as well
set res [r xreadgroup group mygroup myconsumer count 3 streams events 0]
assert {[llength [lindex $res 0 1]] == 0}
# We should fetch all the elements in the stream asking for >
set res [r xreadgroup group mygroup myconsumer count 3 streams events >]
assert {[llength [lindex $res 0 1]] == 3}
# Now the history is populated with three not acked entries
set res [r xreadgroup group mygroup myconsumer count 3 streams events 0]
assert {[llength [lindex $res 0 1]] == 3}
}
test {XREADGROUP history reporting of deleted entries. Bug #5570} {
r del mystream
r XGROUP CREATE mystream mygroup $ MKSTREAM
r XADD mystream 1 field1 A
r XREADGROUP GROUP mygroup myconsumer STREAMS mystream >
r XADD mystream MAXLEN 1 2 field1 B
r XREADGROUP GROUP mygroup myconsumer STREAMS mystream >
# Now we have two pending entries, however one should be deleted
# and one should be ok (we should only see "B")
set res [r XREADGROUP GROUP mygroup myconsumer STREAMS mystream 0-1]
assert {[lindex $res 0 1 0] == {1-0 {}}}
assert {[lindex $res 0 1 1] == {2-0 {field1 B}}}
}
test {XCLAIM can claim PEL items from another consumer} {
# Add 3 items into the stream, and create a consumer group
r del mystream
set id1 [r XADD mystream * a 1]
set id2 [r XADD mystream * b 2]
set id3 [r XADD mystream * c 3]
r XGROUP CREATE mystream mygroup 0
# Client 1 reads item 1 from the stream without acknowledgements.
# Client 2 then claims pending item 1 from the PEL of client 1
set reply [
r XREADGROUP GROUP mygroup client1 count 1 STREAMS mystream >
]
assert {[llength [lindex $reply 0 1 0 1]] == 2}
assert {[lindex $reply 0 1 0 1] eq {a 1}}
r debug sleep 0.2
set reply [
r XCLAIM mystream mygroup client2 10 $id1
]
assert {[llength [lindex $reply 0 1]] == 2}
assert {[lindex $reply 0 1] eq {a 1}}
# Client 1 reads another 2 items from stream
r XREADGROUP GROUP mygroup client1 count 2 STREAMS mystream >
r debug sleep 0.2
# Delete item 2 from the stream. Now client 1 has PEL that contains
# only item 3. Try to use client 2 to claim the deleted item 2
# from the PEL of client 1, this should return nil
r XDEL mystream $id2
set reply [
r XCLAIM mystream mygroup client2 10 $id2
]
assert {[llength $reply] == 1}
assert_equal "" [lindex $reply 0]
# Delete item 3 from the stream. Now client 1 has PEL that is empty.
# Try to use client 2 to claim the deleted item 3 from the PEL
# of client 1, this should return nil
r debug sleep 0.2
r XDEL mystream $id3
set reply [
r XCLAIM mystream mygroup client2 10 $id3
]
assert {[llength $reply] == 1}
assert_equal "" [lindex $reply 0]
}
test {XCLAIM without JUSTID increments delivery count} {
# Add 3 items into the stream, and create a consumer group
r del mystream
set id1 [r XADD mystream * a 1]
set id2 [r XADD mystream * b 2]
set id3 [r XADD mystream * c 3]
r XGROUP CREATE mystream mygroup 0
# Client 1 reads item 1 from the stream without acknowledgements.
# Client 2 then claims pending item 1 from the PEL of client 1
set reply [
r XREADGROUP GROUP mygroup client1 count 1 STREAMS mystream >
]
assert {[llength [lindex $reply 0 1 0 1]] == 2}
assert {[lindex $reply 0 1 0 1] eq {a 1}}
r debug sleep 0.2
set reply [
r XCLAIM mystream mygroup client2 10 $id1
]
assert {[llength [lindex $reply 0 1]] == 2}
assert {[lindex $reply 0 1] eq {a 1}}
set reply [
r XPENDING mystream mygroup - + 10
]
assert {[llength [lindex $reply 0]] == 4}
assert {[lindex $reply 0 3] == 2}
# Client 3 then claims pending item 1 from the PEL of client 2 using JUSTID
r debug sleep 0.2
set reply [
r XCLAIM mystream mygroup client3 10 $id1 JUSTID
]
assert {[llength $reply] == 1}
assert {[lindex $reply 0] eq $id1}
set reply [
r XPENDING mystream mygroup - + 10
]
assert {[llength [lindex $reply 0]] == 4}
assert {[lindex $reply 0 3] == 2}
}
start_server {} {
set master [srv -1 client]
set master_host [srv -1 host]
set master_port [srv -1 port]
set slave [srv 0 client]
foreach noack {0 1} {
test "Consumer group last ID propagation to slave (NOACK=$noack)" {
$slave slaveof $master_host $master_port
wait_for_condition 50 100 {
[s 0 master_link_status] eq {up}
} else {
fail "Replication not started."
}
$master del stream
$master xadd stream * a 1
$master xadd stream * a 2
$master xadd stream * a 3
$master xgroup create stream mygroup 0
# Consume the first two items on the master
for {set j 0} {$j < 2} {incr j} {
if {$noack} {
set item [$master xreadgroup group mygroup \
myconsumer COUNT 1 NOACK STREAMS stream >]
} else {
set item [$master xreadgroup group mygroup \
myconsumer COUNT 1 STREAMS stream >]
}
set id [lindex $item 0 1 0 0]
if {$noack == 0} {
assert {[$master xack stream mygroup $id] eq "1"}
}
}
wait_for_ofs_sync $master $slave
# Turn slave into master
$slave slaveof no one
set item [$slave xreadgroup group mygroup myconsumer \
COUNT 1 STREAMS stream >]
# The consumed entry should be the third
set myentry [lindex $item 0 1 0 1]
assert {$myentry eq {a 3}}
}
}
}
}
@@ -234,6 +234,53 @@ start_server {
assert {[lindex $res 0 1 1 1] eq {field two}}
}
test {XDEL basic test} {
r del somestream
r xadd somestream * foo value0
set id [r xadd somestream * foo value1]
r xadd somestream * foo value2
r xdel somestream $id
assert {[r xlen somestream] == 2}
set result [r xrange somestream - +]
assert {[lindex $result 0 1 1] eq {value0}}
assert {[lindex $result 1 1 1] eq {value2}}
}
# Here the idea is to check the consistency of the stream data structure
# as we remove all the elements down to zero elements.
test {XDEL fuzz test} {
r del somestream
set ids {}
set x 0; # Length of the stream
while 1 {
lappend ids [r xadd somestream * item $x]
incr x
# Add enough elements to have a few radix tree nodes inside the stream.
if {[dict get [r xinfo stream somestream] radix-tree-keys] > 20} break
}
# Now remove all the elements till we reach an empty stream
# and after every deletion, check that the stream is sane enough
# to report the right number of elements with XRANGE: this will also
# force accessing the whole data structure to check sanity.
assert {[r xlen somestream] == $x}
# We want to remove elements in random order to really test the
# implementation in a better way.
set ids [lshuffle $ids]
foreach id $ids {
assert {[r xdel somestream $id] == 1}
incr x -1
assert {[r xlen somestream] == $x}
# The test would be too slow calling XRANGE for every iteration.
# Do it every 100 removals.
if {$x % 100 == 0} {
set res [r xrange somestream - +]
assert {[llength $res] == $x}
}
}
}
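lshuffle is one of the suite's util helpers; assuming its contract is simply "return a random permutation of the input list", a minimal Fisher-Yates sketch with that behavior would be:

proc lshuffle {list} {
    # Walk from the end, swapping each slot with a random earlier-or-equal one
    for {set i [expr {[llength $list]-1}]} {$i > 0} {incr i -1} {
        set j [expr {int(rand()*($i+1))}]
        set tmp [lindex $list $i]
        lset list $i [lindex $list $j]
        lset list $j $tmp
    }
    return $list
}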
test {XRANGE fuzzing} {
set low_id [lindex $items 0 0]
set high_id [lindex $items end 0]
@@ -270,3 +317,97 @@ start_server {
assert_equal [r xrevrange teststream2 1234567891245 -] {{1234567891240-0 {key1 value2}} {1234567891230-0 {key1 value1}}}
}
}
start_server {tags {"stream"} overrides {appendonly yes}} {
test {XADD with MAXLEN > xlen can propagate correctly} {
for {set j 0} {$j < 100} {incr j} {
r XADD mystream * xitem v
}
r XADD mystream MAXLEN 200 * xitem v
incr j
assert {[r xlen mystream] == $j}
r debug loadaof
r XADD mystream * xitem v
incr j
assert {[r xlen mystream] == $j}
}
}
start_server {tags {"stream"} overrides {appendonly yes}} {
test {XADD with ~ MAXLEN can propagate correctly} {
for {set j 0} {$j < 100} {incr j} {
r XADD mystream * xitem v
}
r XADD mystream MAXLEN ~ $j * xitem v
incr j
assert {[r xlen mystream] == $j}
r config set stream-node-max-entries 1
r debug loadaof
r XADD mystream * xitem v
incr j
assert {[r xlen mystream] == $j}
}
}
start_server {tags {"stream"} overrides {appendonly yes stream-node-max-entries 10}} {
test {XTRIM with ~ MAXLEN can propagate correctly} {
for {set j 0} {$j < 100} {incr j} {
r XADD mystream * xitem v
}
r XTRIM mystream MAXLEN ~ 85
assert {[r xlen mystream] == 89}
r config set stream-node-max-entries 1
r debug loadaof
r XADD mystream * xitem v
incr j
assert {[r xlen mystream] == 90}
}
}
start_server {tags {"xsetid"}} {
test {XADD can CREATE an empty stream} {
r XADD mystream MAXLEN 0 * a b
assert {[dict get [r xinfo stream mystream] length] == 0}
}
test {XSETID can set a specific ID} {
r XSETID mystream "200-0"
assert {[dict get [r xinfo stream mystream] last-generated-id] == "200-0"}
}
test {XSETID cannot SETID with smaller ID} {
r XADD mystream * a b
catch {r XSETID mystream "1-1"} err
r XADD mystream MAXLEN 0 * a b
set err
} {ERR*smaller*}
test {XSETID cannot SETID on non-existent key} {
catch {r XSETID stream 1-1} err
set _ $err
} {ERR no such key}
}
start_server {tags {"stream"} overrides {appendonly yes aof-use-rdb-preamble no}} {
test {Empty stream can be rewritten into AOF correctly} {
r XADD mystream MAXLEN 0 * a b
assert {[dict get [r xinfo stream mystream] length] == 0}
r bgrewriteaof
waitForBgrewriteaof r
r debug loadaof
assert {[dict get [r xinfo stream mystream] length] == 0}
}
test {Stream can be rewritten into AOF correctly after XDEL lastid} {
r XSETID mystream 0-0
r XADD mystream 1-1 a b
r XADD mystream 2-2 a b
assert {[dict get [r xinfo stream mystream] length] == 2}
r XDEL mystream 2-2
r bgrewriteaof
waitForBgrewriteaof r
r debug loadaof
assert {[dict get [r xinfo stream mystream] length] == 1}
assert {[dict get [r xinfo stream mystream] last-generated-id] == "2-2"}
}
}
@@ -84,7 +84,7 @@ start_server {tags {"zset"}} {
set err
} {ERR*}
test "ZADD NX with non exisitng key" { test "ZADD NX with non existing key" {
r del ztmp r del ztmp
r zadd ztmp nx 10 x 20 y 30 z r zadd ztmp nx 10 x 20 y 30 z
assert {[r zcard ztmp] == 3} assert {[r zcard ztmp] == 3}
...@@ -388,7 +388,7 @@ start_server {tags {"zset"}} { ...@@ -388,7 +388,7 @@ start_server {tags {"zset"}} {
0 omega} 0 omega}
} }
test "ZRANGEBYLEX/ZREVRANGEBYLEX/ZCOUNT basics" { test "ZRANGEBYLEX/ZREVRANGEBYLEX/ZLEXCOUNT basics" {
create_default_lex_zset create_default_lex_zset
# inclusive range # inclusive range
...@@ -416,6 +416,22 @@ start_server {tags {"zset"}} { ...@@ -416,6 +416,22 @@ start_server {tags {"zset"}} {
assert_equal {} [r zrevrangebylex zset \[elez \[elex] assert_equal {} [r zrevrangebylex zset \[elez \[elex]
assert_equal {} [r zrevrangebylex zset (hill (omega] assert_equal {} [r zrevrangebylex zset (hill (omega]
} }
test "ZLEXCOUNT advanced" {
create_default_lex_zset
assert_equal 9 [r zlexcount zset - +]
assert_equal 0 [r zlexcount zset + -]
assert_equal 0 [r zlexcount zset + \[c]
assert_equal 0 [r zlexcount zset \[c -]
assert_equal 8 [r zlexcount zset \[bar +]
assert_equal 5 [r zlexcount zset \[bar \[foo]
assert_equal 4 [r zlexcount zset \[bar (foo]
assert_equal 4 [r zlexcount zset (bar \[foo]
assert_equal 3 [r zlexcount zset (bar (foo]
assert_equal 5 [r zlexcount zset - (foo]
assert_equal 1 [r zlexcount zset (maxstring +]
}
test "ZRANGEBYSLEX with LIMIT" { test "ZRANGEBYSLEX with LIMIT" {
create_default_lex_zset create_default_lex_zset
...@@ -1185,4 +1201,30 @@ start_server {tags {"zset"}} { ...@@ -1185,4 +1201,30 @@ start_server {tags {"zset"}} {
stressers ziplist stressers ziplist
stressers skiplist stressers skiplist
} }
test {ZSET skiplist order consistency when elements are moved} {
set original_max [lindex [r config get zset-max-ziplist-entries] 1]
r config set zset-max-ziplist-entries 0
for {set times 0} {$times < 10} {incr times} {
r del zset
for {set j 0} {$j < 1000} {incr j} {
r zadd zset [randomInt 50] ele-[randomInt 10]
}
# Make sure that element ordering is correct
set prev_element {}
set prev_score -1
foreach {element score} [r zrange zset 0 -1 WITHSCORES] {
# Assert that elements are in increasing order
assert {
$prev_score < $score ||
($prev_score == $score &&
[string compare $prev_element $element] == -1)
}
set prev_element $element
set prev_score $score
}
}
r config set zset-max-ziplist-entries $original_max
}
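The assertion above encodes the sorted-set ordering invariant: pairs sort by score first, with ties broken lexicographically by member. A quick worked illustration (key and members invented):

r del k
r zadd k 1 b 1 aa 2 a
# ZRANGE k 0 -1 WITHSCORES returns {aa 1 b 1 a 2}: aa and b share score 1,
# so they sort lexicographically; a sorts last on score alone.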
}
@@ -21,8 +21,9 @@ int main(int argc, char **argv) {
}
srand(time(NULL));
char *filename = argv[1];
cycles = atoi(argv[2]);
-fd = open("dump.rdb",O_RDWR);
+fd = open(filename,O_RDWR);
if (fd == -1) {
perror("open");
exit(1);
...
@@ -5,7 +5,7 @@ rehashing.c
Visually show buckets in the two hash tables between rehashings. Also stress
test getRandomKeys() implementation, that may actually disappear from
-Redis soon, however visualizaiton some code is reusable in new bugs
+Redis soon; however, some of the visualization code is reusable in new bug
investigation.
Compile with:
...