Unverified Commit 49816941 authored by chendianqiang, committed by GitHub

Merge pull request #2 from antirez/unstable

merge from redis
parents 68ceb466 f311a529
@@ -4,7 +4,7 @@ set ::valgrind_errors {}
proc start_server_error {config_file error} {
set err {}
-append err "Cant' start the Redis server\n"
+append err "Can't start the Redis server\n"
append err "CONFIGURATION:"
append err [exec cat $config_file]
append err "\nERROR:"
...
set ::num_tests 0
set ::num_passed 0
set ::num_failed 0
+set ::num_skipped 0
+set ::num_aborted 0
set ::tests_failed {}
proc fail {msg} {
@@ -68,10 +70,26 @@ proc test {name code {okpattern undefined}} {
# abort if tagged with a tag to deny
foreach tag $::denytags {
if {[lsearch $::tags $tag] >= 0} {
+incr ::num_aborted
+send_data_packet $::test_server_fd ignore $name
return
}
}
+# abort if test name in skiptests
+if {[lsearch $::skiptests $name] >= 0} {
+incr ::num_skipped
+send_data_packet $::test_server_fd skip $name
+return
+}
+# abort if test name is not in the --only list (when given)
+if {[llength $::only_tests] > 0 && [lsearch $::only_tests $name] < 0} {
+incr ::num_skipped
+send_data_packet $::test_server_fd skip $name
+return
+}
# check if tagged with at least 1 tag to allow when there *is* a list
# of tags to allow, because default policy is to run everything
if {[llength $::allowtags] > 0} {
@@ -82,6 +100,8 @@ proc test {name code {okpattern undefined}} {
}
}
if {$matched < 1} {
+incr ::num_aborted
+send_data_packet $::test_server_fd ignore $name
return
}
}
...
@@ -91,6 +91,14 @@ proc wait_for_sync r {
}
}
+proc wait_for_ofs_sync {r1 r2} {
+wait_for_condition 50 100 {
+[status $r1 master_repl_offset] eq [status $r2 master_repl_offset]
+} else {
+fail "replica didn't sync in time"
+}
+}
# Random integer between 0 and max (excluded).
proc randomInt {max} {
expr {int(rand()*$max)}
...
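For reference, the new wait_for_ofs_sync helper is exercised further down in this patch by the consumer-group last-ID propagation test for streams, roughly as in this excerpt from that test:

    wait_for_ofs_sync $master $slave
    # Turn slave into master
    $slave slaveof no one
    set item [$slave xreadgroup group mygroup myconsumer COUNT 1 STREAMS stream >]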
@@ -34,6 +34,7 @@ set ::all_tests {
unit/multi
unit/quit
unit/aofrw
+unit/acl
integration/block-repl
integration/replication
integration/replication-2
@@ -74,7 +75,11 @@ set ::stack_logging 0
set ::verbose 0
set ::quiet 0
set ::denytags {}
+set ::skiptests {}
set ::allowtags {}
+set ::only_tests {}
+set ::single_tests {}
+set ::skip_till ""
set ::external 0; # If "1" this means, we are running against external instance
set ::file ""; # If set, runs only the tests in this comma separated list
set ::curfile ""; # Hold the filename of the current suite
@@ -85,6 +90,8 @@ set ::last_progress [clock seconds]
set ::active_servers {} ; # Pids of active Redis instances.
set ::dont_clean 0
set ::wait_server 0
+set ::stop_on_failure 0
+set ::loop 0
# Set to 1 when we are running in client mode. The Redis test uses a
# server-client model to run tests simultaneously. The server instance
@@ -255,6 +262,8 @@ proc accept_test_clients {fd addr port} {
# testing: just used to signal that a given test started.
# ok: a test was executed with success.
# err: a test was executed with an error.
+# skip: a test was skipped by skipfile or individual test options.
+# ignore: a test was skipped by a group tag.
# exception: there was a runtime exception while executing the test.
# done: all the specified test file was processed, this test client is
# ready to accept a new task.
@@ -283,11 +292,24 @@ proc read_from_test_client fd {
puts "\[[colorstr green $status]\]: $data"
}
set ::active_clients_task($fd) "(OK) $data"
+} elseif {$status eq {skip}} {
+if {!$::quiet} {
+puts "\[[colorstr yellow $status]\]: $data"
+}
+} elseif {$status eq {ignore}} {
+if {!$::quiet} {
+puts "\[[colorstr cyan $status]\]: $data"
+}
} elseif {$status eq {err}} {
set err "\[[colorstr red $status]\]: $data"
puts $err
lappend ::failed_tests $err
set ::active_clients_task($fd) "(ERR) $data"
+if {$::stop_on_failure} {
+puts -nonewline "(Test stopped, press enter to continue)"
+flush stdout
+gets stdin
+}
} elseif {$status eq {exception}} {
puts "\[[colorstr red $status]\]: $data"
kill_clients
@@ -350,6 +372,9 @@ proc signal_idle_client fd {
send_data_packet $fd run [lindex $::all_tests $::next_test]
lappend ::active_clients $fd
incr ::next_test
+if {$::loop && $::next_test == [llength $::all_tests]} {
+set ::next_test 0
+}
} else {
lappend ::idle_clients $fd
if {[llength $::active_clients] == 0} {
@@ -412,13 +437,19 @@ proc print_help_screen {} {
"--stack-logging Enable OSX leaks/malloc stack logging."
"--accurate Run slow randomized tests for more iterations."
"--quiet Don't show individual tests."
-"--single <unit> Just execute the specified unit (see next option)."
+"--single <unit> Just execute the specified unit (see next option). this option can be repeated."
"--list-tests List all the available test units."
+"--only <test> Just execute the specified test by test name. this option can be repeated."
+"--skip-till <unit> Skip all units until (and including) the specified one."
"--clients <num> Number of test clients (default 16)."
"--timeout <sec> Test timeout in seconds (default 10 min)."
"--force-failure Force the execution of a test that always fails."
-"--dont-clean don't delete redis log files after the run"
-"--wait-server wait after server is started (so that you can attach a debugger)"
+"--config <k> <v> Extra config file argument."
+"--skipfile <file> Name of a file containing test names that should be skipped (one per line)."
+"--dont-clean Don't delete redis log files after the run."
+"--stop Blocks once the first test fails."
+"--loop Execute the specified set of tests forever."
+"--wait-server Wait after server is started (so that you can attach a debugger)."
"--help Print this help screen."
} "\n"]
}
@@ -436,6 +467,17 @@ for {set j 0} {$j < [llength $argv]} {incr j} {
}
}
incr j
+} elseif {$opt eq {--config}} {
+set arg2 [lindex $argv [expr $j+2]]
+lappend ::global_overrides $arg
+lappend ::global_overrides $arg2
+incr j 2
+} elseif {$opt eq {--skipfile}} {
+incr j
+set fp [open $arg r]
+set file_data [read $fp]
+close $fp
+set ::skiptests [split $file_data "\n"]
} elseif {$opt eq {--valgrind}} {
set ::valgrind 1
} elseif {$opt eq {--stack-logging}} {
@@ -456,7 +498,13 @@ for {set j 0} {$j < [llength $argv]} {incr j} {
} elseif {$opt eq {--force-failure}} {
set ::force_failure 1
} elseif {$opt eq {--single}} {
-set ::all_tests $arg
+lappend ::single_tests $arg
+incr j
+} elseif {$opt eq {--only}} {
+lappend ::only_tests $arg
+incr j
+} elseif {$opt eq {--skiptill}} {
+set ::skip_till $arg
incr j
} elseif {$opt eq {--list-tests}} {
foreach t $::all_tests {
@@ -476,6 +524,10 @@ for {set j 0} {$j < [llength $argv]} {incr j} {
set ::dont_clean 1
} elseif {$opt eq {--wait-server}} {
set ::wait_server 1
+} elseif {$opt eq {--stop}} {
+set ::stop_on_failure 1
+} elseif {$opt eq {--loop}} {
+set ::loop 1
} elseif {$opt eq {--timeout}} {
set ::timeout $arg
incr j
@@ -488,6 +540,30 @@ for {set j 0} {$j < [llength $argv]} {incr j} {
}
}
+# If the --skip-till option was given, we populate the list of single tests
+# to run with everything *after* the specified unit.
+if {$::skip_till != ""} {
+set skipping 1
+foreach t $::all_tests {
+if {$skipping == 0} {
+lappend ::single_tests $t
+}
+if {$t == $::skip_till} {
+set skipping 0
+}
+}
+if {$skipping} {
+puts "test $::skip_till not found"
+exit 0
+}
+}
+# Override the list of tests with the specific tests we want to run
+# in case there was some filter, that is --single or --skip-till options.
+if {[llength $::single_tests] > 0} {
+set ::all_tests $::single_tests
+}
proc attach_to_replication_stream {} {
set s [socket [srv 0 "host"] [srv 0 "port"]]
fconfigure $s -translation binary
...
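Taken together, the new test_helper options map onto three globals: --skipfile fills ::skiptests, --only fills ::only_tests, and --single/--skip-till fill ::single_tests (which then replaces ::all_tests). The following is a minimal, illustrative sketch of the per-test decision added to proc test earlier in this diff, with made-up sample values; it is not part of the patch:

    # Illustrative sketch only; the sample values below are made up.
    set ::skiptests  {{MULTI with SELECT}}   ;# as read from a --skipfile
    set ::only_tests {}                      ;# one entry per --only occurrence

    proc should_run {name} {
        # skipped explicitly by name
        if {[lsearch $::skiptests $name] >= 0} { return 0 }
        # when --only is used, run nothing but the listed tests
        if {[llength $::only_tests] > 0 && [lsearch $::only_tests $name] < 0} {
            return 0
        }
        return 1
    }

    puts [should_run {MULTI with SELECT}]    ;# -> 0
    puts [should_run {XADD can add entries}] ;# -> 1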
start_server {tags {"acl"}} {
test {Connections start with the default user} {
r ACL WHOAMI
} {default}
test {It is possible to create new users} {
r ACL setuser newuser
}
test {New users start disabled} {
r ACL setuser newuser >passwd1
catch {r AUTH newuser passwd1} err
set err
} {*WRONGPASS*}
test {Enabling the user allows the login} {
r ACL setuser newuser on +acl
r AUTH newuser passwd1
r ACL WHOAMI
} {newuser}
test {Only the set of correct passwords work} {
r ACL setuser newuser >passwd2
catch {r AUTH newuser passwd1} e
assert {$e eq "OK"}
catch {r AUTH newuser passwd2} e
assert {$e eq "OK"}
catch {r AUTH newuser passwd3} e
set e
} {*WRONGPASS*}
test {It is possible to remove passwords from the set of valid ones} {
r ACL setuser newuser <passwd1
catch {r AUTH newuser passwd1} e
set e
} {*WRONGPASS*}
test {By default users are not able to access any command} {
catch {r SET foo bar} e
set e
} {*NOPERM*}
test {By default users are not able to access any key} {
r ACL setuser newuser +set
catch {r SET foo bar} e
set e
} {*NOPERM*key*}
test {It's possible to allow the access of a subset of keys} {
r ACL setuser newuser allcommands ~foo:* ~bar:*
r SET foo:1 a
r SET bar:2 b
catch {r SET zap:3 c} e
r ACL setuser newuser allkeys; # Undo keys ACL
set e
} {*NOPERM*key*}
test {Users can be configured to authenticate with any password} {
r ACL setuser newuser nopass
r AUTH newuser zipzapblabla
} {OK}
test {ACLs can exclude single commands} {
r ACL setuser newuser -ping
r INCR mycounter ; # Should not raise an error
catch {r PING} e
set e
} {*NOPERM*}
test {ACLs can include or exclude whole classes of commands} {
r ACL setuser newuser -@all +@set +acl
r SADD myset a b c; # Should not raise an error
r ACL setuser newuser +@all -@string
r SADD myset a b c; # Again should not raise an error
# String commands instead should raise an error
catch {r SET foo bar} e
r ACL setuser newuser allcommands; # Undo commands ACL
set e
} {*NOPERM*}
test {ACLs can include single subcommands} {
r ACL setuser newuser +@all -client
r ACL setuser newuser +client|id +client|setname
r CLIENT ID; # Should not fail
r CLIENT SETNAME foo ; # Should not fail
catch {r CLIENT KILL type master} e
set e
} {*NOPERM*}
# Note that the order of the generated ACL rules is not stable in Redis
# so we need to match the different parts and not as a whole string.
test {ACL GETUSER is able to translate back command permissions} {
# Subtractive
r ACL setuser newuser reset +@all ~* -@string +incr -debug +debug|digest
set cmdstr [dict get [r ACL getuser newuser] commands]
assert_match {*+@all*} $cmdstr
assert_match {*-@string*} $cmdstr
assert_match {*+incr*} $cmdstr
assert_match {*-debug +debug|digest**} $cmdstr
# Additive
r ACL setuser newuser reset +@string -incr +acl +debug|digest +debug|segfault
set cmdstr [dict get [r ACL getuser newuser] commands]
assert_match {*-@all*} $cmdstr
assert_match {*+@string*} $cmdstr
assert_match {*-incr*} $cmdstr
assert_match {*+debug|digest*} $cmdstr
assert_match {*+debug|segfault*} $cmdstr
assert_match {*+acl*} $cmdstr
}
}
@@ -2,14 +2,14 @@ start_server {tags {"auth"}} {
test {AUTH fails if there is no password configured server side} {
catch {r auth foo} err
set _ $err
-} {ERR*no password*}
+} {ERR*any password*}
}
start_server {tags {"auth"} overrides {requirepass foobar}} {
test {AUTH fails when a wrong password is given} {
catch {r auth wrong!} err
set _ $err
-} {ERR*invalid password}
+} {WRONGPASS*}
test {Arbitrary command gives an error when AUTH is required} {
catch {r set foo bar} err
...
@@ -33,7 +33,7 @@ start_server {tags {"dump"}} {
set now [clock milliseconds]
r restore foo [expr $now+3000] $encoded absttl
set ttl [r pttl foo]
-assert {$ttl >= 2998 && $ttl <= 3000}
+assert {$ttl >= 2900 && $ttl <= 3100}
r get foo
} {bar}
@@ -44,7 +44,7 @@ start_server {tags {"dump"}} {
r config set maxmemory-policy allkeys-lru
r restore foo 0 $encoded idletime 1000
set idle [r object idletime foo]
-assert {$idle >= 1000 && $idle <= 1002}
+assert {$idle >= 1000 && $idle <= 1010}
r get foo
} {bar}
@@ -362,7 +362,7 @@ start_server {tags {"dump"}} {
r -1 lpush list a b c d
$second config set requirepass foobar2
catch {r -1 migrate $second_host $second_port list 9 5000 AUTH foobar} err
-assert_match {*invalid password*} $err
+assert_match {*WRONGPASS*} $err
}
}
}
proc cmdstat {cmd} {
if {[regexp "\r\ncmdstat_$cmd:(.*?)\r\n" [r info commandstats] _ value]} {
set _ $value
}
}
start_server {tags {"introspection"}} { start_server {tags {"introspection"}} {
test {TTL and TYPYE do not alter the last access time of a key} { test {TTL and TYPYE do not alter the last access time of a key} {
r set foo bar r set foo bar
...@@ -20,4 +26,55 @@ start_server {tags {"introspection"}} { ...@@ -20,4 +26,55 @@ start_server {tags {"introspection"}} {
r set key2 2 r set key2 2
r touch key0 key1 key2 key3 r touch key0 key1 key2 key3
} 2 } 2
test {command stats for GEOADD} {
r config resetstat
r GEOADD foo 0 0 bar
assert_match {*calls=1,*} [cmdstat geoadd]
assert_match {} [cmdstat zadd]
}
test {command stats for EXPIRE} {
r config resetstat
r SET foo bar
r EXPIRE foo 0
assert_match {*calls=1,*} [cmdstat expire]
assert_match {} [cmdstat del]
}
test {command stats for BRPOP} {
r config resetstat
r LPUSH list foo
r BRPOP list 0
assert_match {*calls=1,*} [cmdstat brpop]
assert_match {} [cmdstat rpop]
}
test {command stats for MULTI} {
r config resetstat
r MULTI
r set foo bar
r GEOADD foo2 0 0 bar
r EXPIRE foo2 0
r EXEC
assert_match {*calls=1,*} [cmdstat multi]
assert_match {*calls=1,*} [cmdstat exec]
assert_match {*calls=1,*} [cmdstat set]
assert_match {*calls=1,*} [cmdstat expire]
assert_match {*calls=1,*} [cmdstat geoadd]
}
test {command stats for scripts} {
r config resetstat
r set mykey myval
r eval {
redis.call('set', KEYS[1], 0)
redis.call('expire', KEYS[1], 0)
redis.call('geoadd', KEYS[1], 0, 0, "bar")
} 1 mykey
assert_match {*calls=1,*} [cmdstat eval]
assert_match {*calls=2,*} [cmdstat set]
assert_match {*calls=1,*} [cmdstat expire]
assert_match {*calls=1,*} [cmdstat geoadd]
}
}
start_server {tags {"introspection"}} {
test {CLIENT LIST} {
r client list
-} {*addr=*:* fd=* age=* idle=* flags=N db=9 sub=0 psub=0 multi=-1 qbuf=0 qbuf-free=* obl=0 oll=0 omem=0 events=r cmd=client*}
+} {*addr=*:* fd=* age=* idle=* flags=N db=9 sub=0 psub=0 multi=-1 qbuf=26 qbuf-free=* obl=0 oll=0 omem=0 events=r cmd=client*}
test {MONITOR can log executed commands} {
set rd [redis_deferring_client]
...
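A side note on the proc cmdstat helper added above: it extracts a single command's field string from INFO commandstats with a non-greedy regexp. Below is a self-contained sketch of the same extraction, run against a hand-written payload (the numbers are illustrative, not real server output):

    # Standalone illustration of the cmdstat regexp against a made-up payload.
    set info "# Commandstats\r\ncmdstat_geoadd:calls=1,usec=42,usec_per_call=42.00\r\n"
    if {[regexp "\r\ncmdstat_geoadd:(.*?)\r\n" $info _ value]} {
        puts $value   ;# -> calls=1,usec=42,usec_per_call=42.00
    }
    # The tests above then glob-match this string, e.g. assert_match {*calls=1,*}.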
@@ -143,9 +143,11 @@ start_server {tags {"maxmemory"}} {
}
}
-proc test_slave_buffers {cmd_count payload_len limit_memory pipeline} {
+proc test_slave_buffers {test_name cmd_count payload_len limit_memory pipeline} {
start_server {tags {"maxmemory"}} {
start_server {} {
+set slave_pid [s process_id]
+test "$test_name" {
set slave [srv 0 client]
set slave_host [srv 0 host]
set slave_port [srv 0 port]
@@ -158,8 +160,10 @@ proc test_slave_buffers {cmd_count payload_len limit_memory pipeline} {
$master setrange "key:$j" 100000 asdf
}
+# make sure master doesn't disconnect slave because of timeout
+$master config set repl-timeout 300 ;# 5 minutes
$master config set maxmemory-policy allkeys-random
-$master config set client-output-buffer-limit "slave 100000000 100000000 60"
+$master config set client-output-buffer-limit "replica 100000000 100000000 300"
$master config set repl-backlog-size [expr {10*1024}]
$slave slaveof $master_host $master_port
@@ -182,9 +186,9 @@ proc test_slave_buffers {cmd_count payload_len limit_memory pipeline} {
# put the slave to sleep
set rd_slave [redis_deferring_client]
-$rd_slave debug sleep 60
-# send some 10mb woth of commands that don't increase the memory usage
+exec kill -SIGSTOP $slave_pid
+# send some 10mb worth of commands that don't increase the memory usage
if {$pipeline == 1} {
set rd_master [redis_deferring_client -1]
for {set k 0} {$k < $cmd_count} {incr k} {
@@ -218,19 +222,21 @@ proc test_slave_buffers {cmd_count payload_len limit_memory pipeline} {
set delta_no_repl [expr {$killed_used_no_repl - $used_no_repl}]
assert {$killed_slave_buf == 0}
assert {$delta_no_repl > -50*1024 && $delta_no_repl < 50*1024} ;# 1 byte unaccounted for, with 1M commands will consume some 1MB
+}
+# unfreeze slave process (after the 'test' succeeded or failed, but before we attempt to terminate the server)
+exec kill -SIGCONT $slave_pid
}
}
}
-test {slave buffer are counted correctly} {
+# test that slave buffer are counted correctly
# we wanna use many small commands, and we don't wanna wait long
# so we need to use a pipeline (redis_deferring_client)
# that may cause query buffer to fill and induce eviction, so we disable it
-test_slave_buffers 1000000 10 0 1
-}
+test_slave_buffers {slave buffer are counted correctly} 1000000 10 0 1
-test {slave buffer don't induce eviction} {
+# test that slave buffer don't induce eviction
# test again with fewer (and bigger) commands without pipeline, but with eviction
-test_slave_buffers 100000 100 1 0
-}
+test_slave_buffers "replica buffer don't induce eviction" 100000 100 1 0
@@ -90,6 +90,7 @@ start_server {tags {"defrag"}} {
test "Active defrag big keys" {
r flushdb
r config resetstat
+r config set save "" ;# prevent bgsave from interfering with save below
r config set activedefrag no
r config set active-defrag-max-scan-fields 1000
r config set active-defrag-threshold-lower 5
@@ -201,7 +202,7 @@ start_server {tags {"defrag"}} {
assert {$frag < 1.1}
# due to high fragmentation, 10hz, and active-defrag-cycle-max set to 75,
# we expect max latency to be not much higher than 75ms
-assert {$max_latency <= 80}
+assert {$max_latency <= 120}
}
# verify the data isn't corrupted or changed
set newdigest [r debug digest]
...
@@ -29,7 +29,7 @@ start_server {} {
set m_usedmemory [info_memory $master used_memory]
set s_usedmemory [info_memory $slave used_memory]
if { $s_usedmemory > $m_usedmemory + 10*1024*1024 } {
-fail "the used_memory of slave is too larger than master.Master:$m_usedmemory Slave:$s_usedmemory"
+fail "the used_memory of replica is much larger than master. Master:$m_usedmemory Replica:$s_usedmemory"
} }
} }
}} }}
@@ -148,9 +148,11 @@ start_server {tags {"scripting"}} {
test {EVAL - Scripts can't run certain commands} {
set e {}
+r debug lua-always-replicate-commands 0
catch {
r eval "redis.pcall('randomkey'); return redis.pcall('set','x','ciao')" 0
} e
+r debug lua-always-replicate-commands 1
set e
} {*not allowed after*}
@@ -299,9 +301,12 @@ start_server {tags {"scripting"}} {
} {b534286061d4b9e4026607613b95c06c06015ae8 loaded}
test "In the context of Lua the output of random commands gets ordered" {
+r debug lua-always-replicate-commands 0
r del myset
r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz
-r eval {return redis.call('smembers',KEYS[1])} 1 myset
+set res [r eval {return redis.call('smembers',KEYS[1])} 1 myset]
+r debug lua-always-replicate-commands 1
+set res
} {a aa aaa azz b c d e f g h i l m n o p q r s t u v z}
test "SORT is normally not alpha re-ordered for the scripting engine" {
@@ -537,7 +542,7 @@ foreach cmdrepl {0 1} {
r debug lua-always-replicate-commands 1
}
-test "Before the slave connects we issue two EVAL commands $rt" {
+test "Before the replica connects we issue two EVAL commands $rt" {
# One with an error, but still executing a command.
# SHA is: 67164fc43fa971f76fd1aaeeaf60c1c178d25876
catch {
@@ -548,13 +553,13 @@ foreach cmdrepl {0 1} {
r eval {return redis.call('incr',KEYS[1])} 1 x
} {2}
-test "Connect a slave to the master instance $rt" {
+test "Connect a replica to the master instance $rt" {
r -1 slaveof [srv 0 host] [srv 0 port]
wait_for_condition 50 100 {
[s -1 role] eq {slave} &&
[string match {*master_link_status:up*} [r -1 info replication]]
} else {
-fail "Can't turn the instance into a slave"
+fail "Can't turn the instance into a replica"
}
}
@@ -587,7 +592,7 @@ foreach cmdrepl {0 1} {
wait_for_condition 50 100 {
[r -1 lrange a 0 -1] eq [r lrange a 0 -1]
} else {
-fail "Expected list 'a' in slave and master to be the same, but they are respectively '[r -1 lrange a 0 -1]' and '[r lrange a 0 -1]'"
+fail "Expected list 'a' in replica and master to be the same, but they are respectively '[r -1 lrange a 0 -1]' and '[r lrange a 0 -1]'"
}
set res
} {a 1}
@@ -622,7 +627,7 @@ foreach cmdrepl {0 1} {
wait_for_condition 50 100 {
[r -1 debug digest] eq [r debug digest]
} else {
-fail "Master-Slave desync after Lua script using SELECT."
+fail "Master-Replica desync after Lua script using SELECT."
}
}
}
@@ -631,13 +636,13 @@ foreach cmdrepl {0 1} {
start_server {tags {"scripting repl"}} {
start_server {overrides {appendonly yes aof-use-rdb-preamble no}} {
-test "Connect a slave to the master instance" {
+test "Connect a replica to the master instance" {
r -1 slaveof [srv 0 host] [srv 0 port]
wait_for_condition 50 100 {
[s -1 role] eq {slave} &&
[string match {*master_link_status:up*} [r -1 info replication]]
} else {
-fail "Can't turn the instance into a slave"
+fail "Can't turn the instance into a replica"
}
}
@@ -655,11 +660,13 @@ start_server {tags {"scripting repl"}} {
} {1}
test "Redis.set_repl() must be issued after replicate_commands()" {
+r debug lua-always-replicate-commands 0
catch {
r eval {
redis.set_repl(redis.REPL_ALL);
} 0
} e
+r debug lua-always-replicate-commands 1
set e
} {*only after turning on*}
@@ -689,7 +696,7 @@ start_server {tags {"scripting repl"}} {
wait_for_condition 50 100 {
[r -1 mget a b c d] eq {1 {} {} 4}
} else {
-fail "Only a and c should be replicated to slave"
+fail "Only a and c should be replicated to replica"
}
# Master should have everything right now
@@ -728,7 +735,7 @@ start_server {tags {"scripting repl"}} {
wait_for_condition 50 100 {
[r get time] eq [r -1 get time]
} else {
-fail "Time key does not match between master and slave"
+fail "Time key does not match between master and replica"
}
}
}
...
@@ -9,6 +9,17 @@ start_server {
set err
} {BUSYGROUP*}
+test {XGROUP CREATE: automatic stream creation fails without MKSTREAM} {
+r DEL mystream
+catch {r XGROUP CREATE mystream mygroup $} err
+set err
+} {ERR*}
+test {XGROUP CREATE: automatic stream creation works with MKSTREAM} {
+r DEL mystream
+r XGROUP CREATE mystream mygroup $ MKSTREAM
+} {OK}
test {XREADGROUP will return only new elements} {
r XADD mystream * a 1
r XADD mystream * b 2
@@ -96,4 +107,142 @@ start_server {
set c [llength [lindex [r xreadgroup group g1 c2 streams events >] 0 1]]
assert {$c == 5}
}
test {XREADGROUP will not report data on empty history. Bug #5577} {
r del events
r xadd events * a 1
r xadd events * b 2
r xadd events * c 3
r xgroup create events mygroup 0
# Current local PEL should be empty
set res [r xpending events mygroup - + 10]
assert {[llength $res] == 0}
# So XREADGROUP should read an empty history as well
set res [r xreadgroup group mygroup myconsumer count 3 streams events 0]
assert {[llength [lindex $res 0 1]] == 0}
# We should fetch all the elements in the stream asking for >
set res [r xreadgroup group mygroup myconsumer count 3 streams events >]
assert {[llength [lindex $res 0 1]] == 3}
# Now the history is populated with three not acked entries
set res [r xreadgroup group mygroup myconsumer count 3 streams events 0]
assert {[llength [lindex $res 0 1]] == 3}
}
test {XREADGROUP history reporting of deleted entries. Bug #5570} {
r del mystream
r XGROUP CREATE mystream mygroup $ MKSTREAM
r XADD mystream 1 field1 A
r XREADGROUP GROUP mygroup myconsumer STREAMS mystream >
r XADD mystream MAXLEN 1 2 field1 B
r XREADGROUP GROUP mygroup myconsumer STREAMS mystream >
# Now we have two pending entries, however one should be deleted
# and one should be ok (we should only see "B")
set res [r XREADGROUP GROUP mygroup myconsumer STREAMS mystream 0-1]
assert {[lindex $res 0 1 0] == {1-0 {}}}
assert {[lindex $res 0 1 1] == {2-0 {field1 B}}}
}
test {XCLAIM can claim PEL items from another consumer} {
# Add 3 items into the stream, and create a consumer group
r del mystream
set id1 [r XADD mystream * a 1]
set id2 [r XADD mystream * b 2]
set id3 [r XADD mystream * c 3]
r XGROUP CREATE mystream mygroup 0
# Client 1 reads item 1 from the stream without acknowledgements.
# Client 2 then claims pending item 1 from the PEL of client 1
set reply [
r XREADGROUP GROUP mygroup client1 count 1 STREAMS mystream >
]
assert {[llength [lindex $reply 0 1 0 1]] == 2}
assert {[lindex $reply 0 1 0 1] eq {a 1}}
r debug sleep 0.2
set reply [
r XCLAIM mystream mygroup client2 10 $id1
]
assert {[llength [lindex $reply 0 1]] == 2}
assert {[lindex $reply 0 1] eq {a 1}}
# Client 1 reads another 2 items from stream
r XREADGROUP GROUP mygroup client1 count 2 STREAMS mystream >
r debug sleep 0.2
# Delete item 2 from the stream. Now client 1 has PEL that contains
# only item 3. Try to use client 2 to claim the deleted item 2
# from the PEL of client 1, this should return nil
r XDEL mystream $id2
set reply [
r XCLAIM mystream mygroup client2 10 $id2
]
assert {[llength $reply] == 1}
assert_equal "" [lindex $reply 0]
# Delete item 3 from the stream. Now client 1 has PEL that is empty.
# Try to use client 2 to claim the deleted item 3 from the PEL
# of client 1, this should return nil
r debug sleep 0.2
r XDEL mystream $id3
set reply [
r XCLAIM mystream mygroup client2 10 $id3
]
assert {[llength $reply] == 1}
assert_equal "" [lindex $reply 0]
}
start_server {} {
set master [srv -1 client]
set master_host [srv -1 host]
set master_port [srv -1 port]
set slave [srv 0 client]
foreach noack {0 1} {
test "Consumer group last ID propagation to slave (NOACK=$noack)" {
$slave slaveof $master_host $master_port
wait_for_condition 50 100 {
[s 0 master_link_status] eq {up}
} else {
fail "Replication not started."
}
$master del stream
$master xadd stream * a 1
$master xadd stream * a 2
$master xadd stream * a 3
$master xgroup create stream mygroup 0
# Consume the first two items on the master
for {set j 0} {$j < 2} {incr j} {
if {$noack} {
set item [$master xreadgroup group mygroup \
myconsumer COUNT 1 NOACK STREAMS stream >]
} else {
set item [$master xreadgroup group mygroup \
myconsumer COUNT 1 STREAMS stream >]
}
set id [lindex $item 0 1 0 0]
if {$noack == 0} {
assert {[$master xack stream mygroup $id] eq "1"}
}
}
wait_for_ofs_sync $master $slave
# Turn slave into master
$slave slaveof no one
set item [$slave xreadgroup group mygroup myconsumer \
COUNT 1 STREAMS stream >]
# The consumed entry should be the third
set myentry [lindex $item 0 1 0 1]
assert {$myentry eq {a 3}}
}
}
}
}
@@ -317,3 +317,97 @@ start_server {
assert_equal [r xrevrange teststream2 1234567891245 -] {{1234567891240-0 {key1 value2}} {1234567891230-0 {key1 value1}}}
}
}
start_server {tags {"stream"} overrides {appendonly yes}} {
test {XADD with MAXLEN > xlen can propagate correctly} {
for {set j 0} {$j < 100} {incr j} {
r XADD mystream * xitem v
}
r XADD mystream MAXLEN 200 * xitem v
incr j
assert {[r xlen mystream] == $j}
r debug loadaof
r XADD mystream * xitem v
incr j
assert {[r xlen mystream] == $j}
}
}
start_server {tags {"stream"} overrides {appendonly yes}} {
test {XADD with ~ MAXLEN can propagate correctly} {
for {set j 0} {$j < 100} {incr j} {
r XADD mystream * xitem v
}
r XADD mystream MAXLEN ~ $j * xitem v
incr j
assert {[r xlen mystream] == $j}
r config set stream-node-max-entries 1
r debug loadaof
r XADD mystream * xitem v
incr j
assert {[r xlen mystream] == $j}
}
}
start_server {tags {"stream"} overrides {appendonly yes stream-node-max-entries 10}} {
test {XTRIM with ~ MAXLEN can propagate correctly} {
for {set j 0} {$j < 100} {incr j} {
r XADD mystream * xitem v
}
r XTRIM mystream MAXLEN ~ 85
assert {[r xlen mystream] == 89}
r config set stream-node-max-entries 1
r debug loadaof
r XADD mystream * xitem v
incr j
assert {[r xlen mystream] == 90}
}
}
start_server {tags {"xsetid"}} {
test {XADD can CREATE an empty stream} {
r XADD mystream MAXLEN 0 * a b
assert {[dict get [r xinfo stream mystream] length] == 0}
}
test {XSETID can set a specific ID} {
r XSETID mystream "200-0"
assert {[dict get [r xinfo stream mystream] last-generated-id] == "200-0"}
}
test {XSETID cannot SETID with smaller ID} {
r XADD mystream * a b
catch {r XSETID mystream "1-1"} err
r XADD mystream MAXLEN 0 * a b
set err
} {ERR*smaller*}
test {XSETID cannot SETID on non-existent key} {
catch {r XSETID stream 1-1} err
set _ $err
} {ERR no such key}
}
start_server {tags {"stream"} overrides {appendonly yes aof-use-rdb-preamble no}} {
test {Empty stream can be rewrite into AOF correctly} {
r XADD mystream MAXLEN 0 * a b
assert {[dict get [r xinfo stream mystream] length] == 0}
r bgrewriteaof
waitForBgrewriteaof r
r debug loadaof
assert {[dict get [r xinfo stream mystream] length] == 0}
}
test {Stream can be rewrite into AOF correctly after XDEL lastid} {
r XSETID mystream 0-0
r XADD mystream 1-1 a b
r XADD mystream 2-2 a b
assert {[dict get [r xinfo stream mystream] length] == 2}
r XDEL mystream 2-2
r bgrewriteaof
waitForBgrewriteaof r
r debug loadaof
assert {[dict get [r xinfo stream mystream] length] == 1}
assert {[dict get [r xinfo stream mystream] last-generated-id] == "2-2"}
}
}
@@ -388,7 +388,7 @@ start_server {tags {"zset"}} {
0 omega}
}
-test "ZRANGEBYLEX/ZREVRANGEBYLEX/ZCOUNT basics" {
+test "ZRANGEBYLEX/ZREVRANGEBYLEX/ZLEXCOUNT basics" {
create_default_lex_zset
# inclusive range
@@ -417,6 +417,22 @@ start_server {tags {"zset"}} {
assert_equal {} [r zrevrangebylex zset (hill (omega]
}
test "ZLEXCOUNT advanced" {
create_default_lex_zset
assert_equal 9 [r zlexcount zset - +]
assert_equal 0 [r zlexcount zset + -]
assert_equal 0 [r zlexcount zset + \[c]
assert_equal 0 [r zlexcount zset \[c -]
assert_equal 8 [r zlexcount zset \[bar +]
assert_equal 5 [r zlexcount zset \[bar \[foo]
assert_equal 4 [r zlexcount zset \[bar (foo]
assert_equal 4 [r zlexcount zset (bar \[foo]
assert_equal 3 [r zlexcount zset (bar (foo]
assert_equal 5 [r zlexcount zset - (foo]
assert_equal 1 [r zlexcount zset (maxstring +]
}
test "ZRANGEBYSLEX with LIMIT" { test "ZRANGEBYSLEX with LIMIT" {
create_default_lex_zset create_default_lex_zset
assert_equal {alpha bar} [r zrangebylex zset - \[cool LIMIT 0 2] assert_equal {alpha bar} [r zrangebylex zset - \[cool LIMIT 0 2]
...@@ -1185,4 +1201,30 @@ start_server {tags {"zset"}} { ...@@ -1185,4 +1201,30 @@ start_server {tags {"zset"}} {
stressers ziplist stressers ziplist
stressers skiplist stressers skiplist
} }
test {ZSET skiplist order consistency when elements are moved} {
set original_max [lindex [r config get zset-max-ziplist-entries] 1]
r config set zset-max-ziplist-entries 0
for {set times 0} {$times < 10} {incr times} {
r del zset
for {set j 0} {$j < 1000} {incr j} {
r zadd zset [randomInt 50] ele-[randomInt 10]
}
# Make sure that element ordering is correct
set prev_element {}
set prev_score -1
foreach {element score} [r zrange zset 0 -1 WITHSCORES] {
# Assert that elements are in increasing ordering
assert {
$prev_score < $score ||
($prev_score == $score &&
[string compare $prev_element $element] == -1)
}
set prev_element $element
set prev_score $score
}
}
r config set zset-max-ziplist-entries $original_max
}
}
#!/usr/bin/env tclsh
-if {[llength $::argv] != 2} {
-puts "Usage: $::argv0 <branch> <version>"
+if {[llength $::argv] != 2 && [llength $::argv] != 3} {
+puts "Usage: $::argv0 <branch> <version> \[<num-commits>\]"
exit 1
}
set branch [lindex $::argv 0]
set ver [lindex $::argv 1]
+if {[llength $::argv] == 3} {
+set count [lindex $::argv 2]
+} else {
+set count 100
+}
set template {
================================================================================
@@ -21,7 +26,7 @@ append template "\n\n"
set date [clock format [clock seconds]]
set template [string map [list %ver% $ver %date% $date] $template]
-append template [exec git log $branch~100..$branch "--format=format:%an in commit %h:%n %s" --shortstat]
+append template [exec git log $branch~$count..$branch "--format=format:%an in commit %h:%n %s" --shortstat]
#Older, more verbose version.
#
...
The utilities in this directory plot the distribution of SRANDMEMBER to
evaluate how fair it is.
See http://theshfl.com/redis_sets for more information on the topic that led
to this investigation.
showdist.rb -- shows the distribution of the frequency elements are returned.
The x axis is the number of times elements were returned, and
the y axis is how many elements were returned with such
frequency.
showfreq.rb -- shows the frequency each element was returned.
The x axis is the element number.
The y axis is the times it was returned.
require 'redis'
r = Redis.new
r.select(9)
r.del("myset");
r.sadd("myset",(0..999).to_a)
freq = {}
100.times {
res = r.pipelined {
1000.times {
r.srandmember("myset")
}
}
res.each{|ele|
freq[ele] = 0 if freq[ele] == nil
freq[ele] += 1
}
}
# Convert into frequency distribution
dist = {}
freq.each{|item,count|
dist[count] = 0 if dist[count] == nil
dist[count] += 1
}
min = dist.keys.min
max = dist.keys.max
(min..max).each{|x|
count = dist[x]
count = 0 if count == nil
puts "#{x} -> #{"*"*count}"
}
require 'redis'
r = Redis.new
r.select(9)
r.del("myset");
r.sadd("myset",(0..999).to_a)
freq = {}
500.times {
res = r.pipelined {
1000.times {
r.srandmember("myset")
}
}
res.each{|ele|
freq[ele] = 0 if freq[ele] == nil
freq[ele] += 1
}
}
# Print the frequency each element was yielded, to process it with gnuplot
freq.each{|item,count|
puts "#{item} #{count}"
}