Commit 72935b9d authored by Vitaly Arbuzov

Merge branch 'unstable' into dict-split-by-slot

parents 6baf20af 6948daca
@@ -83,7 +83,7 @@ start_server {tags {"pause network"}} {
$rd2 close
}
test "Test read/admin multi-execs are not blocked by pause RO" {
r SET FOO BAR
r client PAUSE 100000 WRITE
set rr [redis_client]
@@ -96,7 +96,7 @@ start_server {tags {"pause network"}} {
$rr close
}
test "Test write multi-execs are blocked by pause RO" {
set rd [redis_deferring_client]
$rd MULTI
assert_equal [$rd read] "OK"
@@ -174,7 +174,7 @@ start_server {tags {"pause network"}} {
$rr close
}
test "Test read-only scripts in multi-exec are not blocked by pause RO" {
r SET FOO BAR
r client PAUSE 100000 WRITE
set rr [redis_client]
@@ -193,7 +193,7 @@ start_server {tags {"pause network"}} {
$rr close
}
test "Test write scripts in multi-exec are blocked by pause RO" {
set rd [redis_deferring_client]
set rd2 [redis_deferring_client]
...
@@ -110,16 +110,21 @@ start_server {tags {"protocol network"}} {
# raw RESP response tests
r readraw 1
set nullres {*-1}
if {$::force_resp3} {
set nullres {_}
}
test "raw protocol response" { test "raw protocol response" {
r srandmember nonexisting_key r srandmember nonexisting_key
} {*-1} } "$nullres"
r deferred 1 r deferred 1
test "raw protocol response - deferred" { test "raw protocol response - deferred" {
r srandmember nonexisting_key r srandmember nonexisting_key
r read r read
} {*-1} } "$nullres"
test "raw protocol response - multiline" { test "raw protocol response - multiline" {
r sadd ss a r sadd ss a
......
@@ -5,21 +5,41 @@ start_server {tags {"pubsub network"}} {
set db 9
}
test "Pub/Sub PING" { foreach resp {2 3} {
set rd1 [redis_deferring_client] set rd1 [redis_deferring_client]
subscribe $rd1 somechannel if {[lsearch $::denytags "resp3"] >= 0} {
# While subscribed to non-zero channels PING works in Pub/Sub mode. if {$resp == 3} {continue}
$rd1 ping } elseif {$::force_resp3} {
$rd1 ping "foo" if {$resp == 2} {continue}
set reply1 [$rd1 read] }
set reply2 [$rd1 read]
unsubscribe $rd1 somechannel $rd1 hello $resp
# Now we are unsubscribed, PING should just return PONG. $rd1 read
$rd1 ping
set reply3 [$rd1 read] test "Pub/Sub PING on RESP$resp" {
subscribe $rd1 somechannel
# While subscribed to non-zero channels PING works in Pub/Sub mode.
$rd1 ping
$rd1 ping "foo"
# In RESP3, the SUBSCRIBEd client can issue any command and get a reply, so the PINGs are standard
# In RESP2, only a handful of commands are allowed after a client is SUBSCRIBED (PING is one of them).
# For some reason, the reply in that case is an array with two elements: "pong" and argv[1] or an empty string
# God knows why. Done in commit 2264b981
if {$resp == 3} {
assert_equal {PONG} [$rd1 read]
assert_equal {foo} [$rd1 read]
} else {
assert_equal {pong {}} [$rd1 read]
assert_equal {pong foo} [$rd1 read]
}
unsubscribe $rd1 somechannel
# Now we are unsubscribed, PING should just return PONG.
$rd1 ping
assert_equal {PONG} [$rd1 read]
}
$rd1 close
}
test "PUBLISH/SUBSCRIBE basics" { test "PUBLISH/SUBSCRIBE basics" {
set rd1 [redis_deferring_client] set rd1 [redis_deferring_client]
...@@ -146,6 +166,30 @@ start_server {tags {"pubsub network"}} { ...@@ -146,6 +166,30 @@ start_server {tags {"pubsub network"}} {
$rd1 close $rd1 close
} }
test "PubSub messages with CLIENT REPLY OFF" {
set rd [redis_deferring_client]
$rd hello 3
$rd read ;# Discard the hello reply
# Test that the subscribe/psubscribe notification is ok
$rd client reply off
assert_equal {1} [subscribe $rd channel]
assert_equal {2} [psubscribe $rd ch*]
# Test that the publish notification is ok
$rd client reply off
assert_equal 2 [r publish channel hello]
assert_equal {message channel hello} [$rd read]
assert_equal {pmessage ch* channel hello} [$rd read]
# Test that the unsubscribe/punsubscribe notification is ok
$rd client reply off
assert_equal {1} [unsubscribe $rd channel]
assert_equal {0} [punsubscribe $rd ch*]
$rd close
}
test "PUNSUBSCRIBE from non-subscribed channels" { test "PUNSUBSCRIBE from non-subscribed channels" {
set rd1 [redis_deferring_client] set rd1 [redis_deferring_client]
assert_equal {0 0 0} [punsubscribe $rd1 {foo.* bar.* quux.*}] assert_equal {0 0 0} [punsubscribe $rd1 {foo.* bar.* quux.*}]
...@@ -206,6 +250,7 @@ start_server {tags {"pubsub network"}} { ...@@ -206,6 +250,7 @@ start_server {tags {"pubsub network"}} {
test "Keyspace notifications: we receive keyspace notifications" { test "Keyspace notifications: we receive keyspace notifications" {
r config set notify-keyspace-events KA r config set notify-keyspace-events KA
set rd1 [redis_deferring_client] set rd1 [redis_deferring_client]
$rd1 CLIENT REPLY OFF ;# Make sure it works even if replies are silenced
assert_equal {1} [psubscribe $rd1 *]
r set foo bar
assert_equal "pmessage * __keyspace@${db}__:foo set" [$rd1 read]
@@ -215,6 +260,7 @@ start_server {tags {"pubsub network"}} {
test "Keyspace notifications: we receive keyevent notifications" {
r config set notify-keyspace-events EA
set rd1 [redis_deferring_client]
$rd1 CLIENT REPLY SKIP ;# Make sure it works even if replies are silenced
assert_equal {1} [psubscribe $rd1 *]
r set foo bar
assert_equal "pmessage * __keyevent@${db}__:set foo" [$rd1 read]
@@ -224,6 +270,8 @@ start_server {tags {"pubsub network"}} {
test "Keyspace notifications: we can receive both kind of events" {
r config set notify-keyspace-events KEA
set rd1 [redis_deferring_client]
$rd1 CLIENT REPLY ON ;# Just coverage
assert_equal {OK} [$rd1 read]
assert_equal {1} [psubscribe $rd1 *]
r set foo bar
assert_equal "pmessage * __keyspace@${db}__:foo set" [$rd1 read]
...
@@ -40,7 +40,7 @@ start_server {tags {"pubsubshard external:skip"}} {
$rd2 close
}
test "SPUBLISH/SSUBSCRIBE after UNSUBSCRIBE without arguments" {
set rd1 [redis_deferring_client]
assert_equal {1} [ssubscribe $rd1 {chan1}]
assert_equal {2} [ssubscribe $rd1 {chan2}]
@@ -54,7 +54,7 @@ start_server {tags {"pubsubshard external:skip"}} {
$rd1 close
}
test "SSUBSCRIBE to one channel more than once" {
set rd1 [redis_deferring_client]
assert_equal {1 1 1} [ssubscribe $rd1 {chan1 chan1 chan1}]
assert_equal 1 [r SPUBLISH chan1 hello]
@@ -64,7 +64,7 @@ start_server {tags {"pubsubshard external:skip"}} {
$rd1 close
}
test "SUNSUBSCRIBE from non-subscribed channels" {
set rd1 [redis_deferring_client]
assert_equal {0} [sunsubscribe $rd1 {foo}]
assert_equal {0} [sunsubscribe $rd1 {bar}]
@@ -105,6 +105,33 @@ start_server {tags {"pubsubshard external:skip"}} {
assert_equal "chan1 1" [r pubsub numsub chan1]
assert_equal "chan1" [r pubsub shardchannels]
assert_equal "chan1" [r pubsub channels]
$rd1 close
$rd2 close
}
test "PubSubShard with CLIENT REPLY OFF" {
set rd [redis_deferring_client]
$rd hello 3
$rd read ;# Discard the hello reply
# Test that the ssubscribe notification is ok
$rd client reply off
$rd ping
assert_equal {1} [ssubscribe $rd channel]
# Test that the spublish notification is ok
$rd client reply off
$rd ping
assert_equal 1 [r spublish channel hello]
assert_equal {smessage channel hello} [$rd read]
# Test that sunsubscribe notification is ok
$rd client reply off
$rd ping
assert_equal {0} [sunsubscribe $rd channel]
$rd close
}
}
...
@@ -119,6 +119,10 @@ start_server {tags {"scripting"}} {
r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey
} {myval}
test {EVALSHA_RO - Can we call a SHA1 if already defined?} {
r evalsha_ro fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey
} {myval}
test {EVALSHA - Can we call a SHA1 in uppercase?} {
r evalsha FD758D1589D044DD850A6F05D52F2EEFD27F033F 1 mykey
} {myval}
@@ -213,41 +217,45 @@ start_server {tags {"scripting"}} {
} {*execution time*}
}
test {EVAL - Scripts do not block on blpop command} {
r lpush l 1
r lpop l
run_script {return redis.pcall('blpop','l',0)} 1 l
} {}
test {EVAL - Scripts do not block on brpop command} {
r lpush l 1
r lpop l
run_script {return redis.pcall('brpop','l',0)} 1 l
} {}
test {EVAL - Scripts do not block on brpoplpush command} {
r lpush empty_list1{t} 1
r lpop empty_list1{t}
run_script {return redis.pcall('brpoplpush','empty_list1{t}', 'empty_list2{t}',0)} 2 empty_list1{t} empty_list2{t}
} {}
test {EVAL - Scripts do not block on blmove command} {
r lpush empty_list1{t} 1
r lpop empty_list1{t}
run_script {return redis.pcall('blmove','empty_list1{t}', 'empty_list2{t}', 'LEFT', 'LEFT', 0)} 2 empty_list1{t} empty_list2{t}
} {}
test {EVAL - Scripts do not block on bzpopmin command} {
r zadd empty_zset 10 foo
r zmpop 1 empty_zset MIN
run_script {return redis.pcall('bzpopmin','empty_zset', 0)} 1 empty_zset
} {}
test {EVAL - Scripts do not block on bzpopmax command} {
r zadd empty_zset 10 foo
r zmpop 1 empty_zset MIN
run_script {return redis.pcall('bzpopmax','empty_zset', 0)} 1 empty_zset
} {}
test {EVAL - Scripts do not block on wait} {
run_script {return redis.pcall('wait','1','0')} 0
} {0}
test {EVAL - Scripts can't run XREAD and XREADGROUP with BLOCK option} {
r del s
@@ -703,6 +711,7 @@ start_server {tags {"scripting"}} {
assert_equal $res $expected_list
} {} {resp3}
if {!$::log_req_res} { # this test creates a huge nested array which python can't handle (RecursionError: maximum recursion depth exceeded in comparison)
test {Script return recursive object} {
r readraw 1
set res [run_script {local a = {}; local b = {a}; a[1] = b; return a} 0]
@@ -718,6 +727,7 @@ start_server {tags {"scripting"}} {
# make sure the connection is still valid
assert_equal [r ping] {PONG}
}
}
test {Script check unpack with massive arguments} {
run_script {
@@ -1257,9 +1267,10 @@ start_server {tags {"scripting needs:debug"}} {
for {set client_proto 2} {$client_proto <= 3} {incr client_proto} {
if {[lsearch $::denytags "resp3"] >= 0} {
if {$client_proto == 3} {continue}
} elseif {$::force_resp3} {
if {$client_proto == 2} {continue}
}
r hello $client_proto
set extra "RESP$i/$client_proto"
r readraw 1
@@ -1367,6 +1378,7 @@ start_server {tags {"scripting needs:debug"}} {
}
r readraw 0
r hello 2
}
}
...
@@ -3,12 +3,17 @@ start_server {tags {"shutdown external:skip"}} {
for {set i 0} {$i < 20} {incr i} {
r set $i $i
}
r config set rdb-key-save-delay 10000000
# Child is dumping rdb
r bgsave
wait_for_condition 1000 10 {
[s rdb_bgsave_in_progress] eq 1
} else {
fail "bgsave did not start in time"
}
after 100 ;# give the child a bit of time for the file to be created
set dir [lindex [r config get dir] 1]
set child_pid [get_child_pid 0]
set temp_rdb [file join [lindex [r config get dir] 1] temp-${child_pid}.rdb]
...
start_server {tags {"tracking network"}} { # logreqres:skip because it seems many of these tests rely heavily on RESP2
start_server {tags {"tracking network logreqres:skip"}} {
# Create a deferred client we'll use to redirect invalidation
# messages to.
set rd_redirection [redis_deferring_client]
@@ -780,6 +781,122 @@ start_server {tags {"tracking network"}} {
r debug pause-cron 0
} {OK} {needs:debug}
foreach resp {3 2} {
test "RESP$resp based basic invalidation with client reply off" {
# This entire test is mostly irrelevant for RESP2, but we run it anyway just for some extra coverage.
clean_all
$rd hello $resp
$rd read
$rd client tracking on
$rd read
$rd_sg set foo bar
$rd get foo
$rd read
$rd client reply off
$rd_sg set foo bar2
if {$resp == 3} {
assert_equal {invalidate foo} [$rd read]
} elseif {$resp == 2} { } ;# Just coverage
# Verify things didn't get messed up and no unexpected reply was pushed to the client.
$rd client reply on
assert_equal {OK} [$rd read]
$rd ping
assert_equal {PONG} [$rd read]
}
}
test {RESP3 based basic redirect invalidation with client reply off} {
clean_all
set rd_redir [redis_deferring_client]
$rd_redir hello 3
$rd_redir read
$rd_redir client id
set rd_redir_id [$rd_redir read]
$rd client tracking on redirect $rd_redir_id
$rd read
$rd_sg set foo bar
$rd get foo
$rd read
$rd_redir client reply off
$rd_sg set foo bar2
assert_equal {invalidate foo} [$rd_redir read]
# Verify things didn't get messed up and no unexpected reply was pushed to the client.
$rd_redir client reply on
assert_equal {OK} [$rd_redir read]
$rd_redir ping
assert_equal {PONG} [$rd_redir read]
$rd_redir close
}
test {RESP3 based basic tracking-redir-broken with client reply off} {
clean_all
$rd hello 3
$rd read
$rd client tracking on redirect $redir_id
$rd read
$rd_sg set foo bar
$rd get foo
$rd read
$rd client reply off
$rd_redirection quit
$rd_redirection read
$rd_sg set foo bar2
set res [lsearch -exact [$rd read] "tracking-redir-broken"]
assert_morethan_equal $res 0
# Verify things didn't get messed up and no unexpected reply was pushed to the client.
$rd client reply on
assert_equal {OK} [$rd read]
$rd ping
assert_equal {PONG} [$rd read]
}
$rd_redirection close
$rd_sg close
$rd close
}
# Just some extra coverage for --log-req-res, because we do not
# run the full tracking unit in that mode
start_server {tags {"tracking network"}} {
test {Coverage: Basic CLIENT CACHING} {
set rd_redirection [redis_deferring_client]
$rd_redirection client id
set redir_id [$rd_redirection read]
assert_equal {OK} [r CLIENT TRACKING on OPTIN REDIRECT $redir_id]
assert_equal {OK} [r CLIENT CACHING yes]
r CLIENT TRACKING off
} {OK}
test {Coverage: Basic CLIENT REPLY} {
r CLIENT REPLY on
} {OK}
test {Coverage: Basic CLIENT TRACKINGINFO} {
r CLIENT TRACKINGINFO
} {flags off redirect -1 prefixes {}}
test {Coverage: Basic CLIENT GETREDIR} {
r CLIENT GETREDIR
} {-1}
}
@@ -9,6 +9,10 @@ start_server {tags {"incr"}} {
r incr novar
} {2}
test {DECR against key created by incr} {
r decr novar
} {1}
test {INCR against key originally set with SET} {
r set novar 100
r incr novar
...
@@ -558,9 +558,10 @@ foreach {type large} [array get largevalue] {
foreach resp {3 2} {
if {[lsearch $::denytags "resp3"] >= 0} {
if {$resp == 3} {continue}
} elseif {$::force_resp3} {
if {$resp == 2} {continue}
}
r hello $resp
# Make sure we can distinguish between an empty array and a null response
r readraw 1
@@ -589,6 +590,7 @@ foreach {type large} [array get largevalue] {
}
r readraw 0
r hello 2
}
test {Variadic RPUSH/LPUSH} {
@@ -2196,8 +2198,8 @@ foreach {pop} {BLPOP BLMPOP_RIGHT} {
$rd1 BLPOP mylist 0
wait_for_blocked_clients_count 1
# pipeline on other client a list push and a blocking pop
# we should expect the fairness to be kept and have $rd1
# being unblocked
set buf ""
append buf "LPUSH mylist 1\r\n"
@@ -2254,7 +2256,7 @@ foreach {pop} {BLPOP BLMPOP_RIGHT} {
$rd3 close
}
test "Blocking command accounted only once in commandstats" {
# cleanup first
r del mylist
@@ -2277,7 +2279,7 @@ foreach {pop} {BLPOP BLMPOP_RIGHT} {
$rd close
}
test "Blocking command accounted only once in commandstats after timeout" {
# cleanup first
r del mylist
...
@@ -234,6 +234,15 @@ start_server {tags {"string"}} {
list [r msetnx x1{t} xxx y2{t} yyy] [r get x1{t}] [r get y2{t}]
} {1 xxx yyy}
test {MSETNX with not existing keys - same key twice} {
r del x1{t}
list [r msetnx x1{t} xxx x1{t} yyy] [r get x1{t}]
} {1 yyy}
test {MSETNX with already existing keys - same key twice} {
list [r msetnx x1{t} xxx x1{t} zzz] [r get x1{t}]
} {0 yyy}
test "STRLEN against non-existing key" { test "STRLEN against non-existing key" {
assert_equal 0 [r strlen notakey] assert_equal 0 [r strlen notakey]
} }
...@@ -459,6 +468,13 @@ start_server {tags {"string"}} { ...@@ -459,6 +468,13 @@ start_server {tags {"string"}} {
assert_equal [string range $bin $_start $_end] [r getrange bin $start $end] assert_equal [string range $bin $_start $_end] [r getrange bin $start $end]
} }
} }
test "Coverage: SUBSTR" {
r set key abcde
assert_equal "a" [r substr key 0 0]
assert_equal "abcd" [r substr key 0 3]
assert_equal "bcde" [r substr key -4 -1]
}
if {[string match {*jemalloc*} [s mem_allocator]]} {
test {trim on SET with big value} {
...
@@ -431,6 +431,10 @@ start_server {tags {"zset"}} {
}
test "ZRANK/ZREVRANK basics - $encoding" {
set nullres {$-1}
if {$::force_resp3} {
set nullres {_}
}
r del zranktmp
r zadd zranktmp 10 x
r zadd zranktmp 20 y
@@ -442,11 +446,15 @@ start_server {tags {"zset"}} {
assert_equal 1 [r zrevrank zranktmp y]
assert_equal 0 [r zrevrank zranktmp z]
r readraw 1
assert_equal $nullres [r zrank zranktmp foo]
assert_equal $nullres [r zrevrank zranktmp foo]
r readraw 0
# withscores
set nullres {*-1}
if {$::force_resp3} {
set nullres {_}
}
assert_equal {0 10} [r zrank zranktmp x withscore]
assert_equal {1 20} [r zrank zranktmp y withscore]
assert_equal {2 30} [r zrank zranktmp z withscore]
@@ -454,8 +462,8 @@ start_server {tags {"zset"}} {
assert_equal {1 20} [r zrevrank zranktmp y withscore]
assert_equal {0 30} [r zrevrank zranktmp z withscore]
r readraw 1
assert_equal $nullres [r zrank zranktmp foo withscore]
assert_equal $nullres [r zrevrank zranktmp foo withscore]
r readraw 0
}
@@ -1243,11 +1251,12 @@ start_server {tags {"zset"}} {
if {[lsearch $::denytags "resp3"] >= 0} {
if {$resp == 3} {continue}
} elseif {$::force_resp3} {
if {$resp == 2} {continue}
}
r hello $resp
$rd hello $resp
$rd read
test "ZPOPMIN/ZPOPMAX readraw in RESP$resp" { test "ZPOPMIN/ZPOPMAX readraw in RESP$resp" {
r del zset{t} r del zset{t}
...@@ -1401,6 +1410,7 @@ start_server {tags {"zset"}} { ...@@ -1401,6 +1410,7 @@ start_server {tags {"zset"}} {
} }
$rd close $rd close
r hello 2
}
test {ZINTERSTORE regression with two sets, intset+hashtable} {
...
@@ -18,20 +18,20 @@ start_server {} {
fail "Replication not started."
}
}
test {WAIT out of range timeout (milliseconds)} {
# Timeout is parsed as milliseconds by getLongLongFromObjectOrReply().
# Verify we get out of range message if value is beyond LLONG_MAX
# (decimal value equals to 0x8000000000000000)
assert_error "*or out of range*" {$master wait 2 9223372036854775808}
# expected to fail by later overflow condition after addition
# of mstime(). (decimal value equals to 0x7FFFFFFFFFFFFFFF)
assert_error "*timeout is out of range*" {$master wait 2 9223372036854775807}
assert_error "*timeout is negative*" {$master wait 2 -1}
}
test {WAIT should acknowledge 1 additional copy of the data} {
$master set foo 0
$master incr foo
@@ -68,4 +68,365 @@ start_server {} {
exec kill -SIGCONT $slave_pid
assert {[$master wait 1 1000] == 1}
}
test {WAIT replica multiple clients unblock - reuse last result} {
set rd [redis_deferring_client -1]
set rd2 [redis_deferring_client -1]
exec kill -SIGSTOP $slave_pid
$rd incr foo
$rd read
$rd2 incr foo
$rd2 read
$rd wait 1 0
$rd2 wait 1 0
wait_for_blocked_clients_count 2 100 10 -1
exec kill -SIGCONT $slave_pid
assert_equal [$rd read] {1}
assert_equal [$rd2 read] {1}
$rd ping
assert_equal [$rd read] {PONG}
$rd2 ping
assert_equal [$rd2 read] {PONG}
$rd close
$rd2 close
}
}}
tags {"wait aof network external:skip"} {
start_server {overrides {appendonly {yes} auto-aof-rewrite-percentage {0}}} {
set master [srv 0 client]
test {WAITAOF local copy before fsync} {
r config set appendfsync no
$master incr foo
assert_equal [$master waitaof 1 0 50] {0 0} ;# exits on timeout
r config set appendfsync everysec
}
test {WAITAOF local copy everysec} {
$master incr foo
assert_equal [$master waitaof 1 0 0] {1 0}
}
test {WAITAOF local copy with appendfsync always} {
r config set appendfsync always
$master incr foo
assert_equal [$master waitaof 1 0 0] {1 0}
r config set appendfsync everysec
}
test {WAITAOF local wait and then stop aof} {
set rd [redis_deferring_client]
$rd incr foo
$rd read
$rd waitaof 1 0 0
wait_for_blocked_client
r config set appendonly no ;# this should release the blocked client as an error
assert_error {ERR WAITAOF cannot be used when numlocal is set but appendonly is disabled.} {$rd read}
$rd close
}
test {WAITAOF local on server with aof disabled} {
$master incr foo
assert_error {ERR WAITAOF cannot be used when numlocal is set but appendonly is disabled.} {$master waitaof 1 0 0}
}
$master config set appendonly yes
waitForBgrewriteaof $master
start_server {overrides {appendonly {yes} auto-aof-rewrite-percentage {0}}} {
set master_host [srv -1 host]
set master_port [srv -1 port]
set replica [srv 0 client]
set replica_host [srv 0 host]
set replica_port [srv 0 port]
set replica_pid [srv 0 pid]
# make sure the master always fsyncs first (easier to test)
$master config set appendfsync always
$replica config set appendfsync no
test {WAITAOF on demoted master gets unblocked with an error} {
set rd [redis_deferring_client]
$rd incr foo
$rd read
$rd waitaof 0 1 0
wait_for_blocked_client
$replica replicaof $master_host $master_port
assert_error {UNBLOCKED force unblock from blocking operation,*} {$rd read}
$rd close
}
wait_for_ofs_sync $master $replica
test {WAITAOF replica copy before fsync} {
$master incr foo
assert_equal [$master waitaof 0 1 50] {1 0} ;# exits on timeout
}
$replica config set appendfsync everysec
test {WAITAOF replica copy everysec} {
$master incr foo
assert_equal [$master waitaof 0 1 0] {1 1}
}
test {WAITAOF replica copy appendfsync always} {
$replica config set appendfsync always
$master incr foo
assert_equal [$master waitaof 0 1 0] {1 1}
$replica config set appendfsync everysec
}
test {WAITAOF replica copy if replica is blocked} {
exec kill -SIGSTOP $replica_pid
$master incr foo
assert_equal [$master waitaof 0 1 50] {1 0} ;# exits on timeout
exec kill -SIGCONT $replica_pid
assert_equal [$master waitaof 0 1 0] {1 1}
}
test {WAITAOF replica multiple clients unblock - reuse last result} {
set rd [redis_deferring_client -1]
set rd2 [redis_deferring_client -1]
exec kill -SIGSTOP $replica_pid
$rd incr foo
$rd read
$rd2 incr foo
$rd2 read
$rd waitaof 0 1 0
$rd2 waitaof 0 1 0
wait_for_blocked_clients_count 2 100 10 -1
exec kill -SIGCONT $replica_pid
assert_equal [$rd read] {1 1}
assert_equal [$rd2 read] {1 1}
$rd ping
assert_equal [$rd read] {PONG}
$rd2 ping
assert_equal [$rd2 read] {PONG}
$rd close
$rd2 close
}
test {WAITAOF on promoted replica} {
$replica replicaof no one
$replica incr foo
assert_equal [$replica waitaof 1 0 0] {1 0}
}
test {WAITAOF master that loses a replica and backlog is dropped} {
$master config set repl-backlog-ttl 1
after 2000 ;# wait for backlog to expire
$master incr foo
assert_equal [$master waitaof 1 0 0] {1 0}
}
test {WAITAOF master without backlog, wait is released when the replica finishes full-sync} {
set rd [redis_deferring_client -1]
$rd incr foo
$rd read
$rd waitaof 0 1 0
wait_for_blocked_client -1
$replica replicaof $master_host $master_port
assert_equal [$rd read] {1 1}
$rd close
}
test {WAITAOF master isn't configured to do AOF} {
$master config set appendonly no
$master incr foo
assert_equal [$master waitaof 0 1 0] {0 1}
}
test {WAITAOF replica isn't configured to do AOF} {
$master config set appendonly yes
waitForBgrewriteaof $master
$replica config set appendonly no
$master incr foo
assert_equal [$master waitaof 1 0 0] {1 0}
}
test {WAITAOF both local and replica got AOF enabled at runtime} {
$replica config set appendonly yes
waitForBgrewriteaof $replica
$master incr foo
assert_equal [$master waitaof 1 1 0] {1 1}
}
test {WAITAOF master sends PING after last write} {
$master config set repl-ping-replica-period 1
$master incr foo
after 1200 ;# wait for PING
$master get foo
assert_equal [$master waitaof 1 1 0] {1 1}
$master config set repl-ping-replica-period 10
}
test {WAITAOF master client didn't send any write command} {
$master config set repl-ping-replica-period 1
set client [redis_client -1]
after 1200 ;# wait for PING
assert_equal [$master waitaof 1 1 0] {1 1}
$client close
$master config set repl-ping-replica-period 10
}
test {WAITAOF master client didn't send any command} {
$master config set repl-ping-replica-period 1
set client [redis [srv -1 "host"] [srv -1 "port"] 0 $::tls]
after 1200 ;# wait for PING
assert_equal [$master waitaof 1 1 0] {1 1}
$client close
$master config set repl-ping-replica-period 10
}
foreach fsync {no everysec always} {
test "WAITAOF when replica switches between masters, fsync: $fsync" {
# test a case where a replica is moved from one master to the other
# between two replication streams with different offsets that should
# not be mixed. done to smoke-test race conditions with bio thread.
start_server {overrides {appendonly {yes} auto-aof-rewrite-percentage {0}}} {
start_server {overrides {appendonly {yes} auto-aof-rewrite-percentage {0}}} {
set master2 [srv -1 client]
set master2_host [srv -1 host]
set master2_port [srv -1 port]
set replica2 [srv 0 client]
set replica2_host [srv 0 host]
set replica2_port [srv 0 port]
set replica2_pid [srv 0 pid]
$replica2 replicaof $master2_host $master2_port
wait_for_ofs_sync $master2 $replica2
$master config set appendfsync $fsync
$master2 config set appendfsync $fsync
$replica config set appendfsync $fsync
$replica2 config set appendfsync $fsync
if {$fsync eq "no"} {
after 2000 ;# wait for any previous fsync to finish
# can't afford "no" on the masters
$master config set appendfsync always
$master2 config set appendfsync always
} elseif {$fsync eq "everysec"} {
after 990 ;# hoping to hit a race
}
# add some writes and block a client on each master
set rd [redis_deferring_client -3]
set rd2 [redis_deferring_client -1]
$rd set boo 11
$rd2 set boo 22
$rd read
$rd2 read
$rd waitaof 1 1 0
$rd2 waitaof 1 1 0
if {$fsync eq "no"} {
# since appendfsync is disabled in the replicas, the client
# will get released only with full sync
wait_for_blocked_client -1
wait_for_blocked_client -3
}
# switch between the two replicas
$replica2 replicaof $master_host $master_port
$replica replicaof $master2_host $master2_port
assert_equal [$rd read] {1 1}
assert_equal [$rd2 read] {1 1}
$rd close
$rd2 close
assert_equal [$replica get boo] 22
assert_equal [$replica2 get boo] 11
}
}
}
}
}
}
}
start_server {tags {"failover external:skip"}} {
start_server {} {
start_server {} {
set master [srv 0 client]
set master_host [srv 0 host]
set master_port [srv 0 port]
set replica1 [srv -1 client]
set replica1_pid [srv -1 pid]
set replica2 [srv -2 client]
test {setup replication for following tests} {
$replica1 replicaof $master_host $master_port
$replica2 replicaof $master_host $master_port
wait_for_sync $replica1
wait_for_sync $replica2
}
test {WAIT and WAITAOF replica multiple clients unblock - reuse last result} {
set rd [redis_deferring_client]
set rd2 [redis_deferring_client]
$master config set appendonly yes
$replica1 config set appendonly yes
$replica2 config set appendonly yes
$master config set appendfsync always
$replica1 config set appendfsync no
$replica2 config set appendfsync no
waitForBgrewriteaof $master
waitForBgrewriteaof $replica1
waitForBgrewriteaof $replica2
exec kill -SIGSTOP $replica1_pid
$rd incr foo
$rd read
$rd waitaof 0 1 0
# rd2 has a newer repl_offset
$rd2 incr foo
$rd2 read
$rd2 wait 2 0
wait_for_blocked_clients_count 2
exec kill -SIGCONT $replica1_pid
# WAIT will unblock the client first.
assert_equal [$rd2 read] {2}
# Make $replica1 catch up the repl_aof_off, then WAITAOF will unblock the client.
$replica1 config set appendfsync always
$master incr foo
assert_equal [$rd read] {1 1}
$rd ping
assert_equal [$rd read] {PONG}
$rd2 ping
assert_equal [$rd2 read] {PONG}
$rd close
$rd2 close
}
}
}
}
@@ -2,6 +2,7 @@
import glob
import json
import os
import argparse
ARG_TYPES = {
"string": "ARG_TYPE_STRING",
@@ -35,29 +36,6 @@ GROUPS = {
"bitmap": "COMMAND_GROUP_BITMAP",
}
RESP2_TYPES = {
"simple-string": "RESP2_SIMPLE_STRING",
"error": "RESP2_ERROR",
"integer": "RESP2_INTEGER",
"bulk-string": "RESP2_BULK_STRING",
"null-bulk-string": "RESP2_NULL_BULK_STRING",
"array": "RESP2_ARRAY",
"null-array": "RESP2_NULL_ARRAY",
}
RESP3_TYPES = {
"simple-string": "RESP3_SIMPLE_STRING",
"error": "RESP3_ERROR",
"integer": "RESP3_INTEGER",
"double": "RESP3_DOUBLE",
"bulk-string": "RESP3_BULK_STRING",
"array": "RESP3_ARRAY",
"map": "RESP3_MAP",
"set": "RESP3_SET",
"bool": "RESP3_BOOL",
"null": "RESP3_NULL",
}
def get_optional_desc_string(desc, field, force_uppercase=False):
v = desc.get(field, None)
@@ -194,7 +172,6 @@ class Argument(object):
self.type = self.desc["type"]
self.key_spec_index = self.desc.get("key_spec_index", None)
self.subargs = []
self.subargs_name = None
if self.type in ["oneof", "block"]:
self.display = None
for subdesc in self.desc["arguments"]:
@@ -264,6 +241,75 @@ class Argument(object):
f.write("};\n\n")
def to_c_name(str):
return str.replace(":", "").replace(".", "_").replace("$", "_")\
.replace("^", "_").replace("*", "_").replace("-", "_")
class ReplySchema(object):
def __init__(self, name, desc):
self.name = to_c_name(name)
self.schema = {}
if desc.get("type") == "object":
if desc.get("properties") and desc.get("additionalProperties") is None:
print("%s: Any object that has properties should have the additionalProperties field" % self.name)
exit(1)
elif desc.get("type") == "array":
if desc.get("items") and isinstance(desc["items"], list) and any([desc.get(k) is None for k in ["minItems", "maxItems"]]):
print("%s: Any array that has items should have the minItems and maxItems fields" % self.name)
exit(1)
for k, v in desc.items():
if isinstance(v, dict):
self.schema[k] = ReplySchema("%s_%s" % (self.name, k), v)
elif isinstance(v, list):
self.schema[k] = []
for i, subdesc in enumerate(v):
self.schema[k].append(ReplySchema("%s_%s_%i" % (self.name, k,i), subdesc))
else:
self.schema[k] = v
def write(self, f):
def struct_code(name, k, v):
if isinstance(v, ReplySchema):
t = "JSON_TYPE_OBJECT"
vstr = ".value.object=&%s" % name
elif isinstance(v, list):
t = "JSON_TYPE_ARRAY"
vstr = ".value.array={.objects=%s,.length=%d}" % (name, len(v))
elif isinstance(v, bool):
t = "JSON_TYPE_BOOLEAN"
vstr = ".value.boolean=%d" % int(v)
elif isinstance(v, str):
t = "JSON_TYPE_STRING"
vstr = ".value.string=\"%s\"" % v
elif isinstance(v, int):
t = "JSON_TYPE_INTEGER"
vstr = ".value.integer=%d" % v
return "%s,\"%s\",%s" % (t, k, vstr)
for k, v in self.schema.items():
if isinstance(v, ReplySchema):
v.write(f)
elif isinstance(v, list):
for i, schema in enumerate(v):
schema.write(f)
name = to_c_name("%s_%s" % (self.name, k))
f.write("/* %s array reply schema */\n" % name)
f.write("struct jsonObject *%s[] = {\n" % name)
for i, schema in enumerate(v):
f.write("&%s,\n" % schema.name)
f.write("};\n\n")
f.write("/* %s reply schema */\n" % self.name)
f.write("struct jsonObjectElement %s_elements[] = {\n" % self.name)
for k, v in self.schema.items():
name = to_c_name("%s_%s" % (self.name, k))
f.write("{%s},\n" % struct_code(name, k, v))
f.write("};\n\n")
f.write("struct jsonObject %s = {%s_elements,.length=%d};\n\n" % (self.name, self.name, len(self.schema)))
class Command(object):
def __init__(self, name, desc):
self.name = name.upper()
@@ -273,9 +319,11 @@ class Command(object):
self.subcommands = []
self.args = []
for arg_desc in self.desc.get("arguments", []):
self.args.append(Argument(self.fullname(), arg_desc))
verify_no_dup_names(self.fullname(), self.args)
self.reply_schema = None
if "reply_schema" in self.desc:
self.reply_schema = ReplySchema(self.reply_schema_name(), self.desc["reply_schema"])
def fullname(self):
return self.name.replace("-", "_").replace(":", "")
@@ -296,6 +344,9 @@ class Command(object):
def arg_table_name(self):
return "%s_Args" % (self.fullname().replace(" ", "_"))
def reply_schema_name(self):
return "%s_ReplySchema" % (self.fullname().replace(" ", "_"))
def struct_name(self):
return "%s_Command" % (self.fullname().replace(" ", "_"))
@@ -377,6 +428,9 @@ class Command(object):
if self.args:
s += ".args=%s," % self.arg_table_name()
if self.reply_schema and args.with_reply_schema:
s += ".reply_schema=&%s," % self.reply_schema_name()
return s[:-1]
def write_internal_structs(self, f):
@@ -423,6 +477,9 @@ class Command(object):
f.write("{0}\n")
f.write("};\n\n")
if self.reply_schema and args.with_reply_schema:
self.reply_schema.write(f)
class Subcommand(Command):
def __init__(self, name, desc):
@@ -447,6 +504,10 @@ def create_command(name, desc):
# Figure out where the sources are
srcdir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../src")
parser = argparse.ArgumentParser()
parser.add_argument('--with-reply-schema', action='store_true')
args = parser.parse_args()
# Create all command objects
print("Processing json files...")
for filename in glob.glob('%s/commands/*.json' % srcdir):
@@ -481,8 +542,9 @@ if check_command_error_counter != 0:
print("Error: There are errors in the commands check, please check the above logs.")
exit(1)
print("Generating commands.c...") commands_filename = "commands_with_reply_schema" if args.with_reply_schema else "commands"
with open("%s/commands.c" % srcdir, "w") as f: print("Generating %s.c..." % commands_filename)
with open("%s/%s.c" % (srcdir, commands_filename), "w") as f:
f.write("/* Automatically generated by %s, do not edit. */\n\n" % os.path.basename(__file__)) f.write("/* Automatically generated by %s, do not edit. */\n\n" % os.path.basename(__file__))
f.write("#include \"server.h\"\n") f.write("#include \"server.h\"\n")
f.write( f.write(
......
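
For context, here is a small usage sketch of the ReplySchema class added above (the command name and schema fragment are made up for illustration and are not part of the commit; it assumes the class is importable from the generator script):

import io

# Hypothetical reply_schema fragment fed through the generator's ReplySchema class.
schema = ReplySchema("EXAMPLE_ReplySchema", {
    "oneOf": [
        {"type": "string", "description": "The value of the key."},
        {"type": "null", "description": "Key does not exist."}
    ]
})
buf = io.StringIO()
schema.write(buf)      # emits the nested jsonObjectElement/jsonObject C initializers
print(buf.getvalue())
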
function validate_schema(command_schema) {
var error_status = false
const Ajv = require("ajv/dist/2019")
const ajv = new Ajv({strict: true, strictTuples: false})
let json = require('../src/commands/'+ command_schema);
for (var item in json) {
const schema = json[item].reply_schema
if (schema == undefined)
continue;
try {
ajv.compile(schema)
} catch (error) {
console.error(command_schema + " : " + error.toString())
error_status = true
}
}
return error_status
}
const schema_directory_path = './src/commands'
const path = require('path')
var fs = require('fs');
var files = fs.readdirSync(schema_directory_path);
jsonFiles = files.filter(el => path.extname(el) === '.json')
var error_status = false
jsonFiles.forEach(function(file){
if (validate_schema(file))
error_status = true
})
if (error_status)
process.exit(1)
#!/usr/bin/env python3
import os
import glob
import json
import sys
import jsonschema
import subprocess
import redis
import time
import argparse
import multiprocessing
import collections
import io
import signal
import traceback
from datetime import timedelta
from functools import partial
try:
from jsonschema import Draft201909Validator as schema_validator
except ImportError:
from jsonschema import Draft7Validator as schema_validator
"""
The purpose of this file is to validate the reply_schema values of COMMAND DOCS.
Basically, this is what it does:
1. Goes over req-res files, generated by redis-servers, spawned by the testsuite (see logreqres.c)
2. For each request-response pair, it validates the response against the request's reply_schema (obtained from COMMAND DOCS)
This script spins up a redis-server and a redis-cli in order to obtain COMMAND DOCS.
In order to use this file you must run the redis testsuite with the following flags:
./runtest --dont-clean --force-resp3 --log-req-res
And then:
./utils/req-res-log-validator.py
The script will fail only if:
1. One or more of the replies doesn't comply with its schema.
2. One or more of the commands in COMMAND DOCS doesn't have the reply_schema field (with --fail-missing-reply-schemas)
3. The testsuite didn't execute all of the commands (with --fail-commands-not-all-hit)
Future validations:
1. Fail the script if one or more of the branches of the reply schema (e.g. oneOf, anyOf) was not hit.
"""
IGNORED_COMMANDS = [
"sync",
"psync",
"monitor",
"subscribe",
"unsubscribe",
"ssubscribe",
"sunsubscribe",
"psubscribe",
"punsubscribe",
"debug",
"pfdebug",
"lolwut",
]
class Request(object):
"""
This class represents a Redis request (AKA command, argv)
"""
def __init__(self, f, docs, line_counter):
"""
Read lines from `f` (generated by logreqres.c) and populates the argv array
"""
self.command = None
self.schema = None
self.argv = []
while True:
line = f.readline()
line_counter[0] += 1
if not line:
break
length = int(line)
arg = str(f.read(length))
f.read(2) # read \r\n
line_counter[0] += 1
if arg == "__argv_end__":
break
self.argv.append(arg)
if not self.argv:
return
self.command = self.argv[0].lower()
doc = docs.get(self.command, {})
if not doc and len(self.argv) > 1:
self.command = f"{self.argv[0].lower()}|{self.argv[1].lower()}"
doc = docs.get(self.command, {})
if not doc:
self.command = None
return
self.schema = doc.get("reply_schema")
def __str__(self):
return json.dumps(self.argv)
class Response(object):
"""
This class represents a Redis response in RESP3
"""
def __init__(self, f, line_counter):
"""
Read lines from `f` (generated by logreqres.c) and build the JSON representing the response in RESP3
"""
self.error = False
self.queued = False
self.json = None
line = f.readline()[:-2]
line_counter[0] += 1
if line[0] == '+':
self.json = line[1:]
if self.json == "QUEUED":
self.queued = True
elif line[0] == '-':
self.json = line[1:]
self.error = True
elif line[0] == '$':
self.json = str(f.read(int(line[1:])))
f.read(2) # read \r\n
line_counter[0] += 1
elif line[0] == ':':
self.json = int(line[1:])
elif line[0] == ',':
self.json = float(line[1:])
elif line[0] == '_':
self.json = None
elif line[0] == '#':
self.json = line[1] == 't'
elif line[0] == '!':
self.json = str(f.read(int(line[1:])))
f.read(2) # read \r\n
line_counter[0] += 1
self.error = True
elif line[0] == '=':
self.json = str(f.read(int(line[1:])))[4:] # skip "txt:" or "mkd:"
f.read(2) # read \r\n
line_counter[0] += 1 + self.json.count("\r\n")
elif line[0] == '(':
self.json = line[1:] # big-number is actually a string
elif line[0] in ['*', '~', '>']: # unfortunately JSON doesn't tell the difference between a list and a set
self.json = []
count = int(line[1:])
for i in range(count):
ele = Response(f, line_counter)
self.json.append(ele.json)
elif line[0] in ['%', '|']:
self.json = {}
count = int(line[1:])
for i in range(count):
field = Response(f, line_counter)
# Redis allows fields to be non-strings but JSON doesn't.
# Luckily, for any kind of response we can validate, the fields are
# always strings (example: XINFO STREAM)
# The reason we can't always convert to string is because of DEBUG PROTOCOL MAP
# which anyway doesn't have a schema
if isinstance(field.json, str):
field = field.json
value = Response(f, line_counter)
self.json[field] = value.json
if line[0] == '|':
# We don't care about the attributes, read the real response
real_res = Response(f, line_counter)
self.__dict__.update(real_res.__dict__)
def __str__(self):
return json.dumps(self.json)
def process_file(docs, path):
"""
This function processes a single file generated by logreqres.c
"""
line_counter = [0] # A list with one integer: to force python to pass it by reference
command_counter = dict()
print(f"Processing {path} ...")
# Convert file to StringIO in order to minimize IO operations
with open(path, "r", newline="\r\n", encoding="latin-1") as f:
content = f.read()
with io.StringIO(content) as fakefile:
while True:
try:
req = Request(fakefile, docs, line_counter)
if not req.argv:
# EOF
break
res = Response(fakefile, line_counter)
except json.decoder.JSONDecodeError as err:
print(f"JSON decoder error while processing {path}:{line_counter[0]}: {err}")
print(traceback.format_exc())
raise
except Exception as err:
print(f"General error while processing {path}:{line_counter[0]}: {err}")
print(traceback.format_exc())
raise
if not req.command:
# Unknown command
continue
command_counter[req.command] = command_counter.get(req.command, 0) + 1
if res.error or res.queued:
continue
try:
jsonschema.validate(instance=res.json, schema=req.schema, cls=schema_validator)
except (jsonschema.ValidationError, jsonschema.exceptions.SchemaError) as err:
print(f"JSON schema validation error on {path}: {err}")
print(f"argv: {req.argv}")
try:
print(f"Response: {res}")
except UnicodeDecodeError as err:
print("Response: (unprintable)")
print(f"Schema: {json.dumps(req.schema, indent=2)}")
print(traceback.format_exc())
raise
return command_counter
def fetch_schemas(cli, port, args, docs):
redis_proc = subprocess.Popen(args, stdout=subprocess.PIPE)
while True:
try:
print('Connecting to Redis...')
r = redis.Redis(port=port)
r.ping()
break
except Exception as e:
time.sleep(0.1)
pass
print('Connected')
cli_proc = subprocess.Popen([cli, '-p', str(port), '--json', 'command', 'docs'], stdout=subprocess.PIPE)
stdout, stderr = cli_proc.communicate()
docs_response = json.loads(stdout)
for name, doc in docs_response.items():
if "subcommands" in doc:
for subname, subdoc in doc["subcommands"].items():
docs[subname] = subdoc
else:
docs[name] = doc
redis_proc.terminate()
redis_proc.wait()
if __name__ == '__main__':
# Figure out where the sources are
srcdir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../src")
testdir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../tests")
parser = argparse.ArgumentParser()
parser.add_argument('--server', type=str, default='%s/redis-server' % srcdir)
parser.add_argument('--port', type=int, default=6534)
parser.add_argument('--cli', type=str, default='%s/redis-cli' % srcdir)
parser.add_argument('--module', type=str, action='append', default=[])
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--fail-commands-not-all-hit', action='store_true')
parser.add_argument('--fail-missing-reply-schemas', action='store_true')
args = parser.parse_args()
docs = dict()
# Fetch schemas from a Redis instance
print('Starting Redis server')
redis_args = [args.server, '--port', str(args.port)]
for module in args.module:
redis_args += ['--loadmodule', 'tests/modules/%s.so' % module]
fetch_schemas(args.cli, args.port, redis_args, docs)
missing_schema = [k for k, v in docs.items()
if "reply_schema" not in v and k not in IGNORED_COMMANDS]
if missing_schema:
print("WARNING! The following commands are missing a reply_schema:")
for k in sorted(missing_schema):
print(f" {k}")
if args.fail_missing_reply_schemas:
print("ERROR! at least one command does not have a reply_schema")
sys.exit(1)
# Fetch schemas from a sentinel
print('Starting Redis sentinel')
# Sentinel needs a config file to start
config_file = "tmpsentinel.conf"
open(config_file, 'a').close()
sentinel_args = [args.server, config_file, '--port', str(args.port), "--sentinel"]
fetch_schemas(args.cli, args.port, sentinel_args, docs)
os.unlink(config_file)
start = time.time()
# Obtain all the files to process
paths = []
for path in glob.glob('%s/tmp/*/*.reqres' % testdir):
paths.append(path)
for path in glob.glob('%s/cluster/tmp/*/*.reqres' % testdir):
paths.append(path)
for path in glob.glob('%s/sentinel/tmp/*/*.reqres' % testdir):
paths.append(path)
counter = collections.Counter()
# Spin several processes to handle the files in parallel
with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
func = partial(process_file, docs)
# pool.map blocks until all the files have been processed
for result in pool.map(func, paths):
counter.update(result)
command_counter = dict(counter)
elapsed = time.time() - start
print(f"Done. ({timedelta(seconds=elapsed)})")
print("Hits per command:")
for k, v in sorted(command_counter.items()):
print(f" {k}: {v}")
# We don't care about SENTINEL commands
not_hit = set(filter(lambda x: not x.startswith("sentinel"),
set(docs.keys()) - set(command_counter.keys()) - set(IGNORED_COMMANDS)))
if not_hit:
if args.verbose:
print("WARNING! The following commands were not hit at all:")
for k in sorted(not_hit):
print(f" {k}")
if args.fail_commands_not_all_hit:
print("ERROR! at least one command was not hit by the tests")
sys.exit(1)
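
For reference, the core check that process_file() performs for every request/response pair boils down to one jsonschema call; a self-contained sketch with a made-up schema and reply (not taken from any real command):

import jsonschema

reply_schema = {"type": "array", "items": {"type": "string"}}  # made-up reply_schema
decoded_reply = ["a", "b", "c"]                                # made-up RESP3 reply, already decoded to JSON
# Raises jsonschema.ValidationError if the reply does not match the schema.
jsonschema.validate(instance=decoded_reply, schema=reply_schema,
                    cls=jsonschema.Draft201909Validator)
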
jsonschema==4.17.3
redis==4.5.1
\ No newline at end of file