Unverified Commit 8341a7d4 authored by chendianqiang, committed by GitHub

Merge pull request #3 from antirez/unstable

update
parents 49816941 e78c4e81
set testmodule [file normalize tests/modules/hooks.so]
tags "modules" {
start_server [list overrides [list loadmodule "$testmodule" appendonly yes]] {
test {Test clients connection / disconnection hooks} {
for {set j 0} {$j < 2} {incr j} {
set rd1 [redis_deferring_client]
$rd1 close
}
assert {[r hooks.event_count client-connected] > 1}
assert {[r hooks.event_count client-disconnected] > 1}
}
test {Test module cron hook} {
after 100
assert {[r hooks.event_count cron-loop] > 0}
set hz [r hooks.event_last cron-loop]
assert_equal $hz 10
}
test {Test module loaded / unloaded hooks} {
set othermodule [file normalize tests/modules/infotest.so]
r module load $othermodule
r module unload infotest
assert_equal [r hooks.event_last module-loaded] "infotest"
assert_equal [r hooks.event_last module-unloaded] "infotest"
}
test {Test module aofrw hook} {
r debug populate 1000 foo 10000 ;# 10mb worth of data
r config set rdbcompression no ;# rdb progress is only checked once in 2mb
r BGREWRITEAOF
waitForBgrewriteaof r
assert_equal [string match {*module-event-persistence-aof-start*} [exec tail -20 < [srv 0 stdout]]] 1
assert_equal [string match {*module-event-persistence-end*} [exec tail -20 < [srv 0 stdout]]] 1
}
test {Test module aof load and rdb/aof progress hooks} {
# create some aof tail (progress is checked only once in 1000 commands)
for {set j 0} {$j < 4000} {incr j} {
r set "bar$j" x
}
# set some configs that will cause many loading progress events during aof loading
r config set key-load-delay 500
r config set dynamic-hz no
r config set hz 500
r DEBUG LOADAOF
assert_equal [r hooks.event_last loading-aof-start] 0
assert_equal [r hooks.event_last loading-end] 0
assert {[r hooks.event_count loading-rdb-start] == 0}
assert_lessthan 2 [r hooks.event_count loading-progress-rdb] ;# comes from the preamble section
assert_lessthan 2 [r hooks.event_count loading-progress-aof]
if {$::verbose} {
puts "rdb progress events [r hooks.event_count loading-progress-rdb]"
puts "aof progress events [r hooks.event_count loading-progress-aof]"
}
}
# undo configs before next test
r config set dynamic-hz yes
r config set key-load-delay 0
test {Test module rdb save hook} {
# debug reload does: save, flush, load:
assert {[r hooks.event_count persistence-syncrdb-start] == 0}
assert {[r hooks.event_count loading-rdb-start] == 0}
r debug reload
assert {[r hooks.event_count persistence-syncrdb-start] == 1}
assert {[r hooks.event_count loading-rdb-start] == 1}
}
test {Test flushdb hooks} {
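# the test suite runs against db 9, so the flush hooks should report dbid 9; FLUSHALL reports -1 (all databases)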
r flushdb
assert_equal [r hooks.event_last flush-start] 9
assert_equal [r hooks.event_last flush-end] 9
r flushall
assert_equal [r hooks.event_last flush-start] -1
assert_equal [r hooks.event_last flush-end] -1
}
# replication related tests
set master [srv 0 client]
set master_host [srv 0 host]
set master_port [srv 0 port]
start_server {} {
r module load $testmodule
set replica [srv 0 client]
set replica_host [srv 0 host]
set replica_port [srv 0 port]
$replica replicaof $master_host $master_port
wait_for_condition 50 100 {
[string match {*master_link_status:up*} [r info replication]]
} else {
fail "Can't turn the instance into a replica"
}
test {Test master link up hook} {
assert_equal [r hooks.event_count masterlink-up] 1
assert_equal [r hooks.event_count masterlink-down] 0
}
test {Test role-replica hook} {
assert_equal [r hooks.event_count role-replica] 1
assert_equal [r hooks.event_count role-master] 0
assert_equal [r hooks.event_last role-replica] [s 0 master_host]
}
test {Test replica-online hook} {
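# "r -1" addresses the master started above: it is the master that observes replicas coming online / going offline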
assert_equal [r -1 hooks.event_count replica-online] 1
assert_equal [r -1 hooks.event_count replica-offline] 0
}
test {Test master link down hook} {
r client kill type master
assert_equal [r hooks.event_count masterlink-down] 1
}
$replica replicaof no one
test {Test role-master hook} {
assert_equal [r hooks.event_count role-replica] 1
assert_equal [r hooks.event_count role-master] 1
assert_equal [r hooks.event_last role-master] {}
}
test {Test replica-offline hook} {
assert_equal [r -1 hooks.event_count replica-online] 1
assert_equal [r -1 hooks.event_count replica-offline] 1
}
# get the replica stdout, to be used by the next test
set replica_stdout [srv 0 stdout]
}
# look into the log file of the server that just exited
test {Test shutdown hook} {
assert_equal [string match {*module-event-shutdown*} [exec tail -5 < $replica_stdout]] 1
}
}
}
set testmodule [file normalize tests/modules/infotest.so]
# Return value for INFO property
proc field {info property} {
if {[regexp "\r\n$property:(.*?)\r\n" $info _ value]} {
set _ $value
}
}
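# example: [field $info infotest_dos] returns "2" when the INFO output contains an "infotest_dos:2" line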
start_server {tags {"modules"}} {
r module load $testmodule log-key 0
test {module reading info} {
# check string, integer and float fields
assert_equal [r info.gets replication role] "master"
assert_equal [r info.getc replication role] "master"
assert_equal [r info.geti stats expired_keys] 0
assert_equal [r info.getd stats expired_stale_perc] 0
# check signed and unsigned
assert_equal [r info.geti infotest infotest_global] -2
assert_equal [r info.getu infotest infotest_uglobal] -2
# the above are always 0, try module info that is non-zero
assert_equal [r info.geti infotest_italian infotest_due] 2
set tre [r info.getd infotest_italian infotest_tre]
assert {$tre > 3.2 && $tre < 3.4 }
# search using the wrong section
catch { [r info.gets badname redis_version] } e
assert_match {*not found*} $e
# check that section filter works
assert { [string match "*usec_per_call*" [r info.gets all cmdstat_info.gets] ] }
catch { [r info.gets default cmdstat_info.gets] } e
assert_match {*not found*} $e
}
test {module info all} {
set info [r info all]
# info all does not contain modules
assert { ![string match "*Spanish*" $info] }
assert { ![string match "*infotest_*" $info] }
assert { [string match "*used_memory*" $info] }
}
test {module info everything} {
set info [r info everything]
# info everything contains all default sections, but not ones for crash report
assert { [string match "*infotest_global*" $info] }
assert { [string match "*Spanish*" $info] }
assert { [string match "*Italian*" $info] }
assert { [string match "*used_memory*" $info] }
assert { ![string match "*Klingon*" $info] }
field $info infotest_dos
} {2}
test {module info modules} {
set info [r info modules]
# info modules contains only the module sections
assert { [string match "*Spanish*" $info] }
assert { [string match "*infotest_global*" $info] }
assert { ![string match "*used_memory*" $info] }
}
test {module info one module} {
set info [r info INFOTEST]
# info for a single module shows only that module's sections
assert { [string match "*Spanish*" $info] }
assert { ![string match "*used_memory*" $info] }
field $info infotest_global
} {-2}
test {module info one section} {
set info [r info INFOTEST_SPANISH]
assert { ![string match "*used_memory*" $info] }
assert { ![string match "*Italian*" $info] }
assert { ![string match "*infotest_global*" $info] }
field $info infotest_uno
} {one}
test {module info dict} {
set info [r info infotest_keyspace]
set keyspace [field $info infotest_db0]
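# extract the numeric count from the "keys=N,..." field reported for infotest_db0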
set keys [scan [regexp -inline {keys\=([\d]*)} $keyspace] keys=%d]
} {3}
# TODO: test crash report.
}
set testmodule [file normalize tests/modules/misc.so]
start_server {tags {"modules"}} {
r module load $testmodule
test {test RM_Call} {
set info [r test.call_info commandstats]
# cmdstat is not in a default section, so we also test an argument was passed
assert { [string match "*cmdstat_module*" $info] }
}
test {test RM_Call args array} {
set info [r test.call_generic info commandstats]
# cmdstat is not in a default section, so we also test an argument was passed
assert { [string match "*cmdstat_module*" $info] }
}
test {test long double conversions} {
set ld [r test.ld_conversion]
assert {[string match "0.00000000000000001" $ld]}
}
test {test module db commands} {
r set x foo
set key [r test.randomkey]
assert_equal $key "x"
assert_equal [r test.dbsize] 1
r test.flushall
assert_equal [r test.dbsize] 0
}
test {test module lru api} {
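# with an LRU maxmemory policy, test.getlru / test.setlru expose the key idle time in milliseconds; under LFU they return -1 / refuse to set (checked at the end of the test)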
r config set maxmemory-policy allkeys-lru
r set x foo
set lru [r test.getlru x]
assert { $lru <= 1000 }
set was_set [r test.setlru x 100000]
assert { $was_set == 1 }
set idle [r object idletime x]
assert { $idle >= 100 }
set lru [r test.getlru x]
assert { $lru >= 100000 }
r config set maxmemory-policy allkeys-lfu
set lru [r test.getlru x]
assert { $lru == -1 }
set was_set [r test.setlru x 100000]
assert { $was_set == 0 }
}
r config set maxmemory-policy allkeys-lru
test {test module lfu api} {
r config set maxmemory-policy allkeys-lfu
r set x foo
set lfu [r test.getlfu x]
assert { $lfu >= 1 }
set was_set [r test.setlfu x 100]
assert { $was_set == 1 }
set freq [r object freq x]
assert { $freq <= 100 }
set lfu [r test.getlfu x]
assert { $lfu <= 100 }
r config set maxmemory-policy allkeys-lru
set lfu [r test.getlfu x]
assert { $lfu == -1 }
set was_set [r test.setlfu x 100]
assert { $was_set == 0 }
}
}
set testmodule [file normalize tests/modules/propagate.so]
tags "modules" {
test {Modules can propagate in async and threaded contexts} {
start_server {} {
set replica [srv 0 client]
set replica_host [srv 0 host]
set replica_port [srv 0 port]
start_server [list overrides [list loadmodule "$testmodule"]] {
set master [srv 0 client]
set master_host [srv 0 host]
set master_port [srv 0 port]
# Start the replication process...
$replica replicaof $master_host $master_port
wait_for_sync $replica
after 1000
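# propagate-test makes the module replicate writes from a timer and from a background thread; both the "timer" and "thread" counters must reach 10 on the replica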
$master propagate-test
wait_for_condition 5000 10 {
([$replica get timer] eq "10") && \
([$replica get thread] eq "10")
} else {
fail "The two counters don't match the expected value."
}
}
}
}
}
set testmodule [file normalize tests/modules/scan.so]
start_server {tags {"modules"}} {
r module load $testmodule
test {Module scan keyspace} {
# the module creates a scan command with filtering which also returns values
r set x 1
r set y 2
r set z 3
r hset h f v
lsort [r scan.scan_strings]
} {{x 1} {y 2} {z 3}}
test {Module scan hash ziplist} {
r hmset hh f1 v1 f2 v2
lsort [r scan.scan_key hh]
} {{f1 v1} {f2 v2}}
test {Module scan hash dict} {
r config set hash-max-ziplist-entries 2
r hmset hh f3 v3
lsort [r scan.scan_key hh]
} {{f1 v1} {f2 v2} {f3 v3}}
test {Module scan zset ziplist} {
r zadd zz 1 f1 2 f2
lsort [r scan.scan_key zz]
} {{f1 1} {f2 2}}
test {Module scan zset dict} {
r config set zset-max-ziplist-entries 2
r zadd zz 3 f3
lsort [r scan.scan_key zz]
} {{f1 1} {f2 2} {f3 3}}
test {Module scan set intset} {
r sadd ss 1 2
lsort [r scan.scan_key ss]
} {{1 {}} {2 {}}}
test {Module scan set dict} {
r config set set-max-intset-entries 2
r sadd ss 3
lsort [r scan.scan_key ss]
} {{1 {}} {2 {}} {3 {}}}
}
set testmodule [file normalize tests/modules/testrdb.so]
tags "modules" {
test {modules are able to persist types} {
start_server [list overrides [list loadmodule "$testmodule"]] {
r testrdb.set.key key1 value1
assert_equal "value1" [r testrdb.get.key key1]
r debug reload
assert_equal "value1" [r testrdb.get.key key1]
}
}
test {module globals are lost without aux} {
set server_path [tmpdir "server.module-testrdb"]
start_server [list overrides [list loadmodule "$testmodule" "dir" $server_path]] {
r testrdb.set.before global1
assert_equal "global1" [r testrdb.get.before]
}
start_server [list overrides [list loadmodule "$testmodule" "dir" $server_path]] {
assert_equal "" [r testrdb.get.before]
}
}
test {modules are able to persist globals before and after} {
set server_path [tmpdir "server.module-testrdb"]
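# judging by the assertions below, loading the module with argument "2" makes testrdb save its globals in aux fields both before and after the keyspace (the next test uses "1" for after-only)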
start_server [list overrides [list loadmodule "$testmodule 2" "dir" $server_path]] {
r testrdb.set.before global1
r testrdb.set.after global2
assert_equal "global1" [r testrdb.get.before]
assert_equal "global2" [r testrdb.get.after]
}
start_server [list overrides [list loadmodule "$testmodule 2" "dir" $server_path]] {
assert_equal "global1" [r testrdb.get.before]
assert_equal "global2" [r testrdb.get.after]
}
}
test {modules are able to persist globals just after} {
set server_path [tmpdir "server.module-testrdb"]
start_server [list overrides [list loadmodule "$testmodule 1" "dir" $server_path]] {
r testrdb.set.after global2
assert_equal "global2" [r testrdb.get.after]
}
start_server [list overrides [list loadmodule "$testmodule 1" "dir" $server_path]] {
assert_equal "global2" [r testrdb.get.after]
}
}
tags {repl} {
test {diskless loading short read with module} {
start_server [list overrides [list loadmodule "$testmodule"]] {
set replica [srv 0 client]
set replica_host [srv 0 host]
set replica_port [srv 0 port]
start_server [list overrides [list loadmodule "$testmodule"]] {
set master [srv 0 client]
set master_host [srv 0 host]
set master_port [srv 0 port]
# Set master and replica to use diskless replication
$master config set repl-diskless-sync yes
$master config set rdbcompression no
$replica config set repl-diskless-load swapdb
for {set k 0} {$k < 30} {incr k} {
r testrdb.set.key key$k [string repeat A [expr {int(rand()*1000000)}]]
}
# Start the replication process...
$master config set repl-diskless-sync-delay 0
$replica replicaof $master_host $master_port
# kill the replication at various points
set attempts 3
if {$::accurate} { set attempts 10 }
for {set i 0} {$i < $attempts} {incr i} {
# wait for the replica to start reading the rdb
# using the log file since the replica only responds to INFO once in 2mb
wait_for_log_message -1 "*Loading DB in memory*" 5 2000 1
# add some additional random sleep so that we kill the link at a different point each time
after [expr {int(rand()*100)}]
# kill the replica connection on the master
set killed [$master client kill type replica]
if {[catch {
set res [wait_for_log_message -1 "*Internal error in RDB*" 5 100 10]
if {$::verbose} {
puts $res
}
}]} {
puts "failed triggering short read"
# force the replica to try another full sync
$master client kill type replica
$master set asdf asdf
# the side effect of resizing the backlog is that it is flushed (16k is the min size)
$master config set repl-backlog-size [expr {16384 + $i}]
}
# wait for loading to stop (fail)
wait_for_condition 100 10 {
[s -1 loading] eq 0
} else {
fail "Replica didn't disconnect"
}
}
# enable fast shutdown
$master config set rdb-key-save-delay 0
}
}
}
}
}
@@ -306,4 +306,18 @@ start_server {tags {"multi"}} {
}
close_replication_stream $repl
}
test {DISCARD should not fail during OOM} {
set rd [redis_deferring_client]
$rd config set maxmemory 1
assert {[$rd read] eq {OK}}
r multi
catch {r set x 1} e
assert_match {OOM*} $e
r discard
$rd config set maxmemory 0
assert {[$rd read] eq {OK}}
$rd close
r ping
} {PONG}
}
@@ -15,7 +15,7 @@ start_server {tags {"obuf-limits"}} {
if {![regexp {omem=([0-9]+)} $c - omem]} break
if {$omem > 200000} break
}
assert {$omem >= 90000 && $omem < 200000}
assert {$omem >= 70000 && $omem < 200000}
$rd1 close
}
@@ -166,7 +166,11 @@ start_server {tags {"other"}} {
tags {protocol} {
test {PIPELINING stresser (also a regression for the old epoll bug)} {
if {$::tls} {
set fd2 [::tls::socket $::host $::port]
} else {
set fd2 [socket $::host $::port]
}
fconfigure $fd2 -encoding binary -translation binary
puts -nonewline $fd2 "SELECT 9\r\n"
flush $fd2
@@ -72,7 +72,11 @@ start_server {tags {"protocol"}} {
foreach seq [list "\x00" "*\x00" "$\x00"] {
incr c
test "Protocol desync regression test #$c" {
if {$::tls} {
set s [::tls::socket [srv 0 host] [srv 0 port]]
} else {
set s [socket [srv 0 host] [srv 0 port]]
}
puts -nonewline $s $seq
set payload [string repeat A 1024]"\n"
set test_start [clock seconds]
@@ -53,6 +53,51 @@ start_server {tags {"scan"}} {
assert_equal 100 [llength $keys]
}
test "SCAN TYPE" {
r flushdb
# populate only creates strings
r debug populate 1000
# Check non-strings are excluded
set cur 0
set keys {}
while 1 {
set res [r scan $cur type "list"]
set cur [lindex $res 0]
set k [lindex $res 1]
lappend keys {*}$k
if {$cur == 0} break
}
assert_equal 0 [llength $keys]
# Check strings are included
set cur 0
set keys {}
while 1 {
set res [r scan $cur type "string"]
set cur [lindex $res 0]
set k [lindex $res 1]
lappend keys {*}$k
if {$cur == 0} break
}
assert_equal 1000 [llength $keys]
# Check all three args work together
set cur 0
set keys {}
while 1 {
set res [r scan $cur type "string" match "key:*" count 10]
set cur [lindex $res 0]
set k [lindex $res 1]
lappend keys {*}$k
if {$cur == 0} break
}
assert_equal 1000 [llength $keys]
}
foreach enc {intset hashtable} {
test "SSCAN with encoding $enc" {
# Create the Set
@@ -536,7 +536,7 @@ foreach cmdrepl {0 1} {
start_server {tags {"scripting repl"}} {
start_server {} {
if {$cmdrepl == 1} {
set rt "(commmands replication)"
set rt "(commands replication)"
} else {
set rt "(scripts replication)"
r debug lua-always-replicate-commands 1
@@ -741,3 +741,8 @@ start_server {tags {"scripting repl"}} {
}
}
start_server {tags {"scripting"}} {
r script debug sync
r eval {return 'hello'} 0
r eval {return 'hello'} 0
}
@@ -80,8 +80,10 @@ start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} {
}
test {SLOWLOG - can be disabled} {
r config set slowlog-max-len 1
r config set slowlog-log-slower-than 1
r slowlog reset
r debug sleep 0.2
assert_equal [r slowlog len] 1
r config set slowlog-log-slower-than -1
r slowlog reset
start_server {tags {"tls"}} {
if {$::tls} {
package require tls
test {TLS: Not accepting non-TLS connections on a TLS port} {
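# this connection speaks plain (non-TLS) RESP to the TLS port, so the PING must fail with an I/O error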
set s [redis [srv 0 host] [srv 0 port]]
catch {$s PING} e
set e
} {*I/O error*}
test {TLS: Verify tls-auth-clients behaves as expected} {
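# ::tls::import upgrades the plain socket to TLS without providing a client certificate, so the server rejects it while tls-auth-clients is enabled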
set s [redis [srv 0 host] [srv 0 port]]
::tls::import [$s channel]
catch {$s PING} e
assert_match {*error*} $e
r CONFIG SET tls-auth-clients no
set s [redis [srv 0 host] [srv 0 port]]
::tls::import [$s channel]
catch {$s PING} e
assert_match {PONG} $e
r CONFIG SET tls-auth-clients yes
}
test {TLS: Verify tls-protocols behaves as expected} {
r CONFIG SET tls-protocols TLSv1
set s [redis [srv 0 host] [srv 0 port] 0 1 {-tls1 0}]
catch {$s PING} e
assert_match {*I/O error*} $e
set s [redis [srv 0 host] [srv 0 port] 0 1 {-tls1 1}]
catch {$s PING} e
assert_match {PONG} $e
r CONFIG SET tls-protocols TLSv1.1
set s [redis [srv 0 host] [srv 0 port] 0 1 {-tls1.1 0}]
catch {$s PING} e
assert_match {*I/O error*} $e
set s [redis [srv 0 host] [srv 0 port] 0 1 {-tls1.1 1}]
catch {$s PING} e
assert_match {PONG} $e
r CONFIG SET tls-protocols TLSv1.2
set s [redis [srv 0 host] [srv 0 port] 0 1 {-tls1.2 0}]
catch {$s PING} e
assert_match {*I/O error*} $e
set s [redis [srv 0 host] [srv 0 port] 0 1 {-tls1.2 1}]
catch {$s PING} e
assert_match {PONG} $e
r CONFIG SET tls-protocols ""
}
test {TLS: Verify tls-ciphers behaves as expected} {
r CONFIG SET tls-protocols TLSv1.2
r CONFIG SET tls-ciphers "DEFAULT:-AES128-SHA256"
set s [redis [srv 0 host] [srv 0 port] 0 1 {-cipher "-ALL:AES128-SHA256"}]
catch {$s PING} e
assert_match {*I/O error*} $e
set s [redis [srv 0 host] [srv 0 port] 0 1 {-cipher "-ALL:AES256-SHA256"}]
catch {$s PING} e
assert_match {PONG} $e
r CONFIG SET tls-ciphers "DEFAULT"
set s [redis [srv 0 host] [srv 0 port] 0 1 {-cipher "-ALL:AES128-SHA256"}]
catch {$s PING} e
assert_match {PONG} $e
r CONFIG SET tls-protocols ""
r CONFIG SET tls-ciphers "DEFAULT"
}
test {TLS: Verify tls-prefer-server-ciphers behaves as expected} {
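# with tls-prefer-server-ciphers off the client's first preference (AES256-SHA256) wins; once it is enabled the server's first preference (AES128-SHA256) is selected instead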
r CONFIG SET tls-protocols TLSv1.2
r CONFIG SET tls-ciphers "AES128-SHA256:AES256-SHA256"
set s [redis [srv 0 host] [srv 0 port] 0 1 {-cipher "AES256-SHA256:AES128-SHA256"}]
catch {$s PING} e
assert_match {PONG} $e
assert_equal "AES256-SHA256" [dict get [::tls::status [$s channel]] cipher]
r CONFIG SET tls-prefer-server-ciphers yes
set s [redis [srv 0 host] [srv 0 port] 0 1 {-cipher "AES256-SHA256:AES128-SHA256"}]
catch {$s PING} e
assert_match {PONG} $e
assert_equal "AES128-SHA256" [dict get [::tls::status [$s channel]] cipher]
r CONFIG SET tls-protocols ""
r CONFIG SET tls-ciphers "DEFAULT"
}
}
}
start_server {tags {"tracking"}} {
# Create a deferred client we'll use to redirect invalidation
# messages to.
set rd1 [redis_deferring_client]
$rd1 client id
set redir [$rd1 read]
$rd1 subscribe __redis__:invalidate
$rd1 read ; # Consume the SUBSCRIBE reply.
test {Clients are able to enable tracking and redirect it} {
r CLIENT TRACKING on REDIRECT $redir
} {*OK}
test {The other connection is able to get invalidations} {
r SET a 1
r GET a
r INCR a
r INCR b ; # This key should not be notified, since it wasn't fetched.
set keys [lindex [$rd1 read] 2]
assert {[llength $keys] == 1}
assert {[lindex $keys 0] eq {a}}
}
test {The client is now able to disable tracking} {
# Make sure to add a few more keys in the tracking list
# so that we can check for leaks, as a side effect.
r MGET a b c d e f g
r CLIENT TRACKING off
}
test {Clients can enable the BCAST mode with the empty prefix} {
r CLIENT TRACKING on BCAST REDIRECT $redir
} {*OK*}
test {The connection gets invalidation messages about all the keys} {
r MSET a 1 b 2 c 3
set keys [lsort [lindex [$rd1 read] 2]]
assert {$keys eq {a b c}}
}
test {Clients can enable the BCAST mode with prefixes} {
r CLIENT TRACKING off
r CLIENT TRACKING on BCAST REDIRECT $redir PREFIX a: PREFIX b:
r MULTI
r INCR a:1
r INCR a:2
r INCR b:1
r INCR b:2
r EXEC
# Because of the internals, we know we are going to receive
# two separate notifications for the two different prefixes.
set keys1 [lsort [lindex [$rd1 read] 2]]
set keys2 [lsort [lindex [$rd1 read] 2]]
set keys [lsort [list {*}$keys1 {*}$keys2]]
assert {$keys eq {a:1 a:2 b:1 b:2}}
}
test {Adding prefixes to BCAST mode works} {
r CLIENT TRACKING on BCAST REDIRECT $redir PREFIX c:
r INCR c:1234
set keys [lsort [lindex [$rd1 read] 2]]
assert {$keys eq {c:1234}}
}
$rd1 close
}
@@ -390,6 +390,13 @@ start_server {tags {"hash"}} {
lappend rv [string match "ERR*not*float*" $bigerr]
} {1 1}
test {HINCRBYFLOAT fails against hash value that contains a null-terminator in the middle} {
r hset h f "1\x002"
catch {r hincrbyfloat h f 1} err
set rv {}
lappend rv [string match "ERR*not*float*" $err]
} {1}
test {HSTRLEN against the small hash} {
set err {}
foreach k [array names smallhash *] {
@@ -436,8 +436,11 @@ start_server {
test "$pop: with non-integer timeout" {
set rd [redis_deferring_client]
$rd $pop blist1 1.1
assert_error "ERR*not an integer*" {$rd read}
r del blist1
$rd $pop blist1 0.1
r rpush blist1 foo
assert_equal {blist1 foo} [$rd read]
assert_equal 0 [r exists blist1]
}
test "$pop: with zero timeout should block indefinitely" {
@@ -147,6 +147,29 @@ start_server {
assert {[lindex $res 0 1 1] == {2-0 {field1 B}}}
}
test {Blocking XREADGROUP will not reply with an empty array} {
r del mystream
r XGROUP CREATE mystream mygroup $ MKSTREAM
r XADD mystream 666 f v
set res [r XREADGROUP GROUP mygroup Alice BLOCK 10 STREAMS mystream ">"]
assert {[lindex $res 0 1 0] == {666-0 {f v}}}
r XADD mystream 667 f2 v2
r XDEL mystream 667
set rd [redis_deferring_client]
$rd XREADGROUP GROUP mygroup Alice BLOCK 10 STREAMS mystream ">"
after 20
assert {[$rd read] == {}} ;# before the fix, client didn't even block, but was served synchronously with {mystream {}}
}
test {XGROUP DESTROY should unblock XREADGROUP with -NOGROUP} {
r del mystream
r XGROUP CREATE mystream mygroup $ MKSTREAM
set rd [redis_deferring_client]
$rd XREADGROUP GROUP mygroup Alice BLOCK 100 STREAMS mystream ">"
r XGROUP DESTROY mystream mygroup
assert_error "*NOGROUP*" {$rd read}
}
test {XCLAIM can claim PEL items from another consumer} {
# Add 3 items into the stream, and create a consumer group
r del mystream
@@ -195,6 +218,49 @@ start_server {
assert_equal "" [lindex $reply 0]
}
test {XCLAIM without JUSTID increments delivery count} {
# Add 3 items into the stream, and create a consumer group
r del mystream
set id1 [r XADD mystream * a 1]
set id2 [r XADD mystream * b 2]
set id3 [r XADD mystream * c 3]
r XGROUP CREATE mystream mygroup 0
# Client 1 reads item 1 from the stream without acknowledgements.
# Client 2 then claims pending item 1 from the PEL of client 1
set reply [
r XREADGROUP GROUP mygroup client1 count 1 STREAMS mystream >
]
assert {[llength [lindex $reply 0 1 0 1]] == 2}
assert {[lindex $reply 0 1 0 1] eq {a 1}}
r debug sleep 0.2
set reply [
r XCLAIM mystream mygroup client2 10 $id1
]
assert {[llength [lindex $reply 0 1]] == 2}
assert {[lindex $reply 0 1] eq {a 1}}
set reply [
r XPENDING mystream mygroup - + 10
]
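# each XPENDING entry is {id consumer idle-time delivery-count}: one delivery from XREADGROUP plus one from XCLAIM gives 2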
assert {[llength [lindex $reply 0]] == 4}
assert {[lindex $reply 0 3] == 2}
# Client 3 then claims pending item 1 from the PEL of client 2 using JUSTID
r debug sleep 0.2
set reply [
r XCLAIM mystream mygroup client3 10 $id1 JUSTID
]
assert {[llength $reply] == 1}
assert {[lindex $reply 0] eq $id1}
set reply [
r XPENDING mystream mygroup - + 10
]
assert {[llength [lindex $reply 0]] == 4}
assert {[lindex $reply 0 3] == 2}
}
start_server {} {
set master [srv -1 client]
set master_host [srv -1 host]
@@ -79,6 +79,12 @@ start_server {
assert {[streamCompareID $id2 $id3] == -1}
}
test {XADD IDs correctly report an error when overflowing} {
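# the stream already holds the largest possible ID, so generating the next ID with * must overflow and fail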
r DEL mystream
r xadd mystream 18446744073709551615-18446744073709551615 a b
assert_error ERR* {r xadd mystream * c d}
}
test {XADD with MAXLEN option} {
r DEL mystream
for {set j 0} {$j < 1000} {incr j} {
@@ -117,6 +123,12 @@ start_server {
assert {[r xlen mystream] == $j}
}
test {XADD with ID 0-0} {
r DEL otherstream
catch {r XADD otherstream 0-0 k v} err
assert {[r EXISTS otherstream] == 0}
}
test {XRANGE COUNT works as expected} {
assert {[llength [r xrange mystream - + COUNT 10]] == 10}
}
@@ -179,6 +191,17 @@ start_server {
assert {[lindex $res 0 1 0 1] eq {old abcd1234}}
}
test {Blocking XREAD will not reply with an empty array} {
r del s1
r XADD s1 666 f v
r XADD s1 667 f2 v2
r XDEL s1 667
set rd [redis_deferring_client]
$rd XREAD BLOCK 10 STREAMS s1 666
after 20
assert {[$rd read] == {}} ;# before the fix, client didn't even block, but was served synchronously with {s1 {}}
}
test "XREAD: XADD + DEL should not awake client" {
set rd [redis_deferring_client]
r del s1
@@ -316,6 +339,33 @@ start_server {
assert_equal [r xrevrange teststream2 1234567891245 -] {{1234567891240-0 {key1 value2}} {1234567891230-0 {key1 value1}}}
}
test {XREAD streamID edge (no-blocking)} {
r del x
r XADD x 1-1 f v
r XADD x 1-18446744073709551615 f v
r XADD x 2-1 f v
set res [r XREAD BLOCK 0 STREAMS x 1-18446744073709551615]
assert {[lindex $res 0 1 0] == {2-1 {f v}}}
}
test {XREAD streamID edge (blocking)} {
r del x
set rd [redis_deferring_client]
$rd XREAD BLOCK 0 STREAMS x 1-18446744073709551615
r XADD x 1-1 f v
r XADD x 1-18446744073709551615 f v
r XADD x 2-1 f v
set res [$rd read]
assert {[lindex $res 0 1 0] == {2-1 {f v}}}
}
test {XADD streamID edge} {
r del x
r XADD x 2577343934890-18446744073709551615 f v ;# we need the timestamp to be in the future
r XADD x * f2 v2
assert_equal [r XRANGE x - +] {{2577343934890-18446744073709551615 {f v}} {2577343934891-0 {f2 v2}}}
}
}
start_server {tags {"stream"} overrides {appendonly yes}} {
@@ -355,12 +405,12 @@ start_server {tags {"stream"} overrides {appendonly yes stream-node-max-entries
r XADD mystream * xitem v
}
r XTRIM mystream MAXLEN ~ 85
assert {[r xlen mystream] == 89}
assert {[r xlen mystream] == 90}
r config set stream-node-max-entries 1
r debug loadaof
r XADD mystream * xitem v
incr j
assert {[r xlen mystream] == 90}
assert {[r xlen mystream] == 91}
}
}
source tests/support/cli.tcl
start_server {tags {"wait"}} {
start_server {} {
set slave [srv 0 client]
@@ -31,7 +33,8 @@ start_server {} {
}
test {WAIT should not acknowledge 1 additional copy if slave is blocked} {
exec src/redis-cli -h $slave_host -p $slave_port debug sleep 5 > /dev/null 2> /dev/null &
set cmd [rediscli $slave_port "-h $slave_host debug sleep 5"]
exec {*}$cmd > /dev/null 2> /dev/null &
after 1000 ;# Give redis-cli the time to execute the command.
$master set foo 0
$master incr foo