Commit 846d8b3e authored by Pieter Noordhuis

merge antirez/smallkeys

parents 178d6903 22194a7f
@@ -87,7 +87,7 @@ staticsymbols:
	tclsh utils/build-static-symbols.tcl > staticsymbols.h

test:
-	tclsh8.5 tests/test_helper.tcl
+	tclsh8.5 tests/test_helper.tcl --tags "${TAGS}"

bench:
	./redis-benchmark
...
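The new TAGS variable on the test target is passed straight through to the test runner's --tags option, so a subset of the suite can be selected from make. A hedged usage sketch (the tag names are only examples, taken from the test files changed later in this commit):

	make test TAGS="basic -slow"

Here "basic" only allows tests tagged basic, while the leading dash on "slow" excludes anything tagged slow.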
@@ -4,6 +4,7 @@ Redis TODO and Roadmap
VERSION 2.2 TODO (Optimizations and latency)
============================================

+* Support for syslog(3).
* Lower the CPU usage.
* Lower the RAM usage everywhere possible.
* Specially encoded Sets (like Hashes).
...
This diff is collapsed.
@@ -195,6 +195,26 @@ appendonly no
appendfsync everysec
# appendfsync no

+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync none". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no", which is the safest pick from the point of view of durability.
+no-appendfsync-on-rewrite no

################################ VIRTUAL MEMORY ###############################
# Virtual Memory allows Redis to work with datasets bigger than the actual
...
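Acting on the new comment in a latency-sensitive deployment would mean flipping the directive; a minimal redis.conf sketch (illustrative values, combining directives that already appear in this file, trading some durability during rewrites for smoother fsync behaviour):

	appendonly yes
	appendfsync everysec
	no-appendfsync-on-rewrite yes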
@@ -7,6 +7,7 @@ static struct redisFunctionSym symsTable[] = {
{"addReplyBulk",(unsigned long)addReplyBulk},
{"addReplyBulkCString",(unsigned long)addReplyBulkCString},
{"addReplyBulkLen",(unsigned long)addReplyBulkLen},
+{"addReplyBulkSds",(unsigned long)addReplyBulkSds},
{"addReplyDouble",(unsigned long)addReplyDouble},
{"addReplyLongLong",(unsigned long)addReplyLongLong},
{"addReplySds",(unsigned long)addReplySds},
@@ -45,21 +46,27 @@ static struct redisFunctionSym symsTable[] = {
{"createSortOperation",(unsigned long)createSortOperation},
{"createStringObject",(unsigned long)createStringObject},
{"createStringObjectFromLongLong",(unsigned long)createStringObjectFromLongLong},
+{"createVmPointer",(unsigned long)createVmPointer},
{"createZsetObject",(unsigned long)createZsetObject},
{"daemonize",(unsigned long)daemonize},
+{"dbAdd",(unsigned long)dbAdd},
+{"dbDelete",(unsigned long)dbDelete},
+{"dbExists",(unsigned long)dbExists},
+{"dbRandomKey",(unsigned long)dbRandomKey},
+{"dbReplace",(unsigned long)dbReplace},
{"dbsizeCommand",(unsigned long)dbsizeCommand},
{"debugCommand",(unsigned long)debugCommand},
{"decrCommand",(unsigned long)decrCommand},
{"decrRefCount",(unsigned long)decrRefCount},
{"decrbyCommand",(unsigned long)decrbyCommand},
{"delCommand",(unsigned long)delCommand},
-{"deleteIfSwapped",(unsigned long)deleteIfSwapped},
{"deleteIfVolatile",(unsigned long)deleteIfVolatile},
-{"deleteKey",(unsigned long)deleteKey},
{"dictEncObjKeyCompare",(unsigned long)dictEncObjKeyCompare},
{"dictListDestructor",(unsigned long)dictListDestructor},
{"dictObjKeyCompare",(unsigned long)dictObjKeyCompare},
{"dictRedisObjectDestructor",(unsigned long)dictRedisObjectDestructor},
+{"dictSdsDestructor",(unsigned long)dictSdsDestructor},
+{"dictSdsKeyCompare",(unsigned long)dictSdsKeyCompare},
{"dictVanillaFree",(unsigned long)dictVanillaFree},
{"discardCommand",(unsigned long)discardCommand},
{"dontWaitForSwappedKey",(unsigned long)dontWaitForSwappedKey},
@@ -196,6 +203,7 @@ static struct redisFunctionSym symsTable[] = {
{"pushGenericCommand",(unsigned long)pushGenericCommand},
{"qsortCompareSetsByCardinality",(unsigned long)qsortCompareSetsByCardinality},
{"qsortCompareZsetopsrcByCardinality",(unsigned long)qsortCompareZsetopsrcByCardinality},
+{"qsortRedisCommands",(unsigned long)qsortRedisCommands},
{"queueIOJob",(unsigned long)queueIOJob},
{"queueMultiCommand",(unsigned long)queueMultiCommand},
{"randomkeyCommand",(unsigned long)randomkeyCommand},
@@ -245,7 +253,6 @@ static struct redisFunctionSym symsTable[] = {
{"scardCommand",(unsigned long)scardCommand},
{"sdiffCommand",(unsigned long)sdiffCommand},
{"sdiffstoreCommand",(unsigned long)sdiffstoreCommand},
-{"sdsDictKeyCompare",(unsigned long)sdsDictKeyCompare},
{"sdscatrepr",(unsigned long)sdscatrepr},
{"segvHandler",(unsigned long)segvHandler},
{"selectCommand",(unsigned long)selectCommand},
@@ -269,6 +276,7 @@ static struct redisFunctionSym symsTable[] = {
{"slaveofCommand",(unsigned long)slaveofCommand},
{"smoveCommand",(unsigned long)smoveCommand},
{"sortCommand",(unsigned long)sortCommand},
+{"sortCommandTable",(unsigned long)sortCommandTable},
{"sortCompare",(unsigned long)sortCompare},
{"spawnIOThread",(unsigned long)spawnIOThread},
{"spopCommand",(unsigned long)spopCommand},
@@ -290,6 +298,7 @@ static struct redisFunctionSym symsTable[] = {
{"syncWithMaster",(unsigned long)syncWithMaster},
{"syncWrite",(unsigned long)syncWrite},
{"touchWatchedKey",(unsigned long)touchWatchedKey},
+{"touchWatchedKeysOnFlush",(unsigned long)touchWatchedKeysOnFlush},
{"tryFreeOneObjectFromFreelist",(unsigned long)tryFreeOneObjectFromFreelist},
{"tryObjectEncoding",(unsigned long)tryObjectEncoding},
{"tryResizeHashTables",(unsigned long)tryResizeHashTables},
...
-set defaults [list [list appendonly yes] [list appendfilename appendonly.aof]]
+set defaults { appendonly {yes} appendfilename {appendonly.aof} }
set server_path [tmpdir server.aof]
set aof_path "$server_path/appendonly.aof"
@@ -16,65 +16,67 @@ proc create_aof {code} {
proc start_server_aof {overrides code} {
    upvar defaults defaults srv srv server_path server_path
-   set _defaults $defaults
-   set srv [start_server default.conf [lappend _defaults $overrides]]
+   set config [concat $defaults $overrides]
+   set srv [start_server [list overrides $config]]
    uplevel 1 $code
    kill_server $srv
}

+tags {"aof"} {
    ## Test the server doesn't start when the AOF contains an unfinished MULTI
    create_aof {
        append_to_aof [formatCommand set foo hello]
        append_to_aof [formatCommand multi]
        append_to_aof [formatCommand set bar world]
    }

    start_server_aof [list dir $server_path] {
        test {Unfinished MULTI: Server should not have been started} {
            is_alive $srv
        } {0}

        test {Unfinished MULTI: Server should have logged an error} {
            exec cat [dict get $srv stdout] | tail -n1
        } {*Unexpected end of file reading the append only file*}
    }

    ## Test that the server exits when the AOF contains a short read
    create_aof {
        append_to_aof [formatCommand set foo hello]
        append_to_aof [string range [formatCommand set bar world] 0 end-1]
    }

    start_server_aof [list dir $server_path] {
        test {Short read: Server should not have been started} {
            is_alive $srv
        } {0}

        test {Short read: Server should have logged an error} {
            exec cat [dict get $srv stdout] | tail -n1
        } {*Bad file format reading the append only file*}
    }

    ## Test that redis-check-aof indeed sees this AOF is not valid
    test {Short read: Utility should confirm the AOF is not valid} {
        catch {
            exec ./redis-check-aof $aof_path
        } str
        set _ $str
    } {*not valid*}

    test {Short read: Utility should be able to fix the AOF} {
        exec echo y | ./redis-check-aof --fix $aof_path
    } {*Successfully truncated AOF*}

    ## Test that the server can be started using the truncated AOF
    start_server_aof [list dir $server_path] {
        test {Fixed AOF: Server should have been started} {
            is_alive $srv
        } {1}

        test {Fixed AOF: Keyspace should contain values that were parsable} {
            set client [redis [dict get $srv host] [dict get $srv port]]
            list [$client get foo] [$client get bar]
        } {hello {}}
    }
+}
-start_server default.conf {} {
+start_server {tags {"repl"}} {
    r set mykey foo
-   start_server default.conf {} {
+   start_server {} {
        test {Second server should have role master at first} {
            s role
        } {master}
...
+set ::global_overrides {}
+set ::tags {}

proc error_and_quit {config_file error} {
    puts "!!COULD NOT START REDIS-SERVER\n"
    puts "CONFIGURATION:"
@@ -27,11 +30,15 @@ proc kill_server config {
    set pid [dict get $config pid]

    # check for leaks
+   if {![dict exists $config "skipleaks"]} {
        catch {
            if {[string match {*Darwin*} [exec uname -a]]} {
+               tags {"leaks"} {
                    test "Check for memory leaks (pid $pid)" {
                        exec leaks $pid
                    } {*0 leaks*}
+               }
            }
        }
+   }
@@ -78,9 +85,35 @@ proc ping_server {host port} {
    return $retval
}

-set ::global_overrides {}
-proc start_server {filename overrides {code undefined}} {
-    set data [split [exec cat "tests/assets/$filename"] "\n"]
+# doesn't really belong here, but highly coupled to code in start_server
+proc tags {tags code} {
+    set ::tags [concat $::tags $tags]
+    uplevel 1 $code
+    set ::tags [lrange $::tags 0 end-[llength $tags]]
+}
+
+proc start_server {options {code undefined}} {
+    # setup defaults
+    set baseconfig "default.conf"
+    set overrides {}
+    set tags {}
+
+    # parse options
+    foreach {option value} $options {
+        switch $option {
+            "config" {
+                set baseconfig $value }
+            "overrides" {
+                set overrides $value }
+            "tags" {
+                set tags $value
+                set ::tags [concat $::tags $value] }
+            default {
+                error "Unknown option $option" }
+        }
+    }
+
+    set data [split [exec cat "tests/assets/$baseconfig"] "\n"]
    set config {}
    foreach line $data {
        if {[string length $line] > 0 && [string index $line 0] ne "#"} {
@@ -98,9 +131,7 @@ proc start_server {filename overrides {code undefined}} {
    dict set config port [incr ::port]

    # apply overrides from global space and arguments
-   foreach override [concat $::global_overrides $overrides] {
-       set directive [lrange $override 0 0]
-       set arguments [lrange $override 1 end]
+   foreach {directive arguments} [concat $::global_overrides $overrides] {
        dict set config $directive $arguments
    }
@@ -177,19 +208,40 @@ proc start_server {filename overrides {code undefined}} {
        lappend ::servers $srv

        # execute provided block
+       set curnum $::testnum
        catch { uplevel 1 $code } err
+       if {$curnum == $::testnum} {
+           # don't check for leaks when no tests were executed
+           dict set srv "skipleaks" 1
+       }

        # pop the server object
        set ::servers [lrange $::servers 0 end-1]

-       kill_server $srv
-       if {[string length $err] > 0} {
+       # allow an exception to bubble up the call chain but still kill this
+       # server, because we want to reuse the ports when the tests are re-run
+       if {$err eq "exception"} {
+           puts [format "Logged warnings (pid %d):" [dict get $srv "pid"]]
+           set warnings [warnings_from_file [dict get $srv "stdout"]]
+           if {[string length $warnings] > 0} {
+               puts "$warnings"
+           } else {
+               puts "(none)"
+           }
+           # kill this server without checking for leaks
+           dict set srv "skipleaks" 1
+           kill_server $srv
+           error "exception"
+       } elseif {[string length $err] > 0} {
            puts "Error executing the suite, aborting..."
            puts $err
            exit 1
        }
+
+       set ::tags [lrange $::tags 0 end-[llength $tags]]
+       kill_server $srv
    } else {
+       set ::tags [lrange $::tags 0 end-[llength $tags]]
        set _ $srv
    }
}
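Taken together, the reworked start_server and the new tags helper let a test file declare its tags and config overrides in one options dict and nest finer-grained tags inside it. A hypothetical sketch (the "example" tag and the test body are illustrative; the option names, the tags helper and the r/test commands come from this diff):

	start_server {tags {"example"} overrides {appendonly yes}} {
	    tags {"slow"} {
	        test {SET then GET should round-trip} {
	            r set foo bar
	            r get foo
	        } {bar}
	    }
	}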
@@ -3,20 +3,34 @@ set ::failed 0
set ::testnum 0

proc test {name code okpattern} {
+   # abort if tagged with a tag to deny
+   foreach tag $::denytags {
+       if {[lsearch $::tags $tag] >= 0} {
+           return
+       }
+   }
+
+   # check if tagged with at least 1 tag to allow when there *is* a list
+   # of tags to allow, because default policy is to run everything
+   if {[llength $::allowtags] > 0} {
+       set matched 0
+       foreach tag $::allowtags {
+           if {[lsearch $::tags $tag] >= 0} {
+               incr matched
+           }
+       }
+       if {$matched < 1} {
+           return
+       }
+   }
+
    incr ::testnum
-   # if {$::testnum < $::first || $::testnum > $::last} return
    puts -nonewline [format "#%03d %-68s " $::testnum $name]
    flush stdout
    if {[catch {set retval [uplevel 1 $code]} error]} {
-       puts "ERROR\n\nLogged warnings:"
-       foreach file [glob tests/tmp/server.[pid].*/stdout] {
-           set warnings [warnings_from_file $file]
-           if {[string length $warnings] > 0} {
-               puts $warnings
-           }
-       }
-       puts "Script died with $error"
-       exit 1
+       puts "EXCEPTION"
+       puts "\nCaught error: $error"
+       error "exception"
    }
    if {$okpattern eq $retval || [string match $okpattern $retval]} {
        puts "PASSED"
...
@@ -13,9 +13,10 @@ set ::host 127.0.0.1
set ::port 16379
set ::traceleaks 0
set ::valgrind 0
+set ::denytags {}
+set ::allowtags {}

proc execute_tests name {
-   set cur $::testnum
    source "tests/$name.tcl"
}
@@ -92,4 +93,31 @@ proc main {} {
    cleanup
}

-main
+# parse arguments
+for {set j 0} {$j < [llength $argv]} {incr j} {
+    set opt [lindex $argv $j]
+    set arg [lindex $argv [expr $j+1]]
+    if {$opt eq {--tags}} {
+        foreach tag $arg {
+            if {[string index $tag 0] eq "-"} {
+                lappend ::denytags [string range $tag 1 end]
+            } else {
+                lappend ::allowtags $tag
+            }
+        }
+        incr j
+    } else {
+        puts "Wrong argument: $opt"
+        exit 1
+    }
+}
+
+if {[catch { main } err]} {
+    if {[string length $err] > 0} {
+        # only display error when not generated by the test suite
+        if {$err ne "exception"} {
+            puts $err
+        }
+        exit 1
+    }
+}
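With the argument parsing above, tags listed after --tags are allowed and tags prefixed with a dash are denied. A sketch of invoking the suite directly (tag names follow the test files in this commit):

	tclsh8.5 tests/test_helper.tcl --tags "list hash -slow"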
-start_server default.conf {{requirepass foobar}} {
+start_server {tags {"auth"} overrides {requirepass foobar}} {
    test {AUTH fails when a wrong password is given} {
        catch {r auth wrong!} err
        format $err
...
-start_server default.conf {} {
+start_server {tags {"basic"}} {
    test {DEL all keys to start with a clean DB} {
        foreach key [r keys *] {r del $key}
        r dbsize
@@ -52,46 +52,48 @@ start_server default.conf {} {
        r get foo
    } [string repeat "abcd" 1000000]

+   tags {"slow"} {
        test {Very big payload random access} {
            set err {}
            array set payload {}
            for {set j 0} {$j < 100} {incr j} {
                set size [expr 1+[randomInt 100000]]
                set buf [string repeat "pl-$j" $size]
                set payload($j) $buf
                r set bigpayload_$j $buf
            }
            for {set j 0} {$j < 1000} {incr j} {
                set index [randomInt 100]
                set buf [r get bigpayload_$index]
                if {$buf != $payload($index)} {
                    set err "Values differ: I set '$payload($index)' but I read back '$buf'"
                    break
                }
            }
            unset payload
            set _ $err
        } {}

        test {SET 10000 numeric keys and access all them in reverse order} {
            set err {}
            for {set x 0} {$x < 10000} {incr x} {
                r set $x $x
            }
            set sum 0
            for {set x 9999} {$x >= 0} {incr x -1} {
                set val [r get $x]
                if {$val ne $x} {
                    set err "Element at position $x is $val instead of $x"
                    break
                }
            }
            set _ $err
        } {}

        test {DBSIZE should be 10101 now} {
            r dbsize
        } {10101}
+   }

    test {INCR against non existing key} {
        set res {}
...
-start_server default.conf {} {
+start_server {tags {"cas"}} {
    test {EXEC works on WATCHed key not modified} {
        r watch x y z
        r watch k
...
-start_server default.conf {} {
+start_server {tags {"expire"}} {
    test {EXPIRE - don't set timeouts multiple times} {
        r set x foobar
        set v1 [r expire x 5]
@@ -12,10 +12,12 @@ start_server default.conf {} {
        r get x
    } {foobar}

+   tags {"slow"} {
        test {EXPIRE - After 6 seconds the key should no longer be here} {
            after 6000
            list [r get x] [r exists x]
        } {{} 0}
+   }

    test {EXPIRE - Delete on write policy} {
        r del x
@@ -46,10 +48,12 @@ start_server default.conf {} {
        r get y
    } {foo}

+   tags {"slow"} {
        test {SETEX - Wait for the key to expire} {
            after 3000
            r get y
        } {}
+   }

    test {SETEX - Wrong time parameter} {
        catch {r setex z -10 foo} e
...
-start_server default.conf {} {
+start_server {} {
    test {SAVE - make sure there are all the types as values} {
        # Wait for a background saving in progress to terminate
        waitForBgsave r
@@ -12,20 +12,22 @@ start_server default.conf {} {
        r save
    } {OK}

+   tags {"slow"} {
        foreach fuzztype {binary alpha compr} {
            test "FUZZ stresser with data model $fuzztype" {
                set err 0
                for {set i 0} {$i < 10000} {incr i} {
                    set fuzz [randstring 0 512 $fuzztype]
                    r set foo $fuzz
                    set got [r get foo]
                    if {$got ne $fuzz} {
                        set err [list $fuzz $got]
                        break
                    }
                }
                set _ $err
            } {0}
        }
+   }

    test {BGSAVE} {
...
-start_server default.conf {} {
+start_server {} {
    test {Handle an empty query well} {
        set fd [r channel]
        puts -nonewline $fd "\r\n"
...
-start_server default.conf {} {
+start_server {tags {"sort"}} {
    test {SORT ALPHA against integer encoded strings} {
        r del mylist
        r lpush mylist 2
@@ -8,130 +8,132 @@ start_server default.conf {} {
        r sort mylist alpha
    } {1 10 2 3}

+   tags {"slow"} {
+       set res {}
        test {Create a random list and a random set} {
            set tosort {}
            array set seenrand {}
            for {set i 0} {$i < 10000} {incr i} {
                while 1 {
                    # Make sure all the weights are different because
                    # Redis does not use a stable sort but Tcl does.
                    randpath {
                        set rint [expr int(rand()*1000000)]
                    } {
                        set rint [expr rand()]
                    }
                    if {![info exists seenrand($rint)]} break
                }
                set seenrand($rint) x
                r lpush tosort $i
                r sadd tosort-set $i
                r set weight_$i $rint
                r hset wobj_$i weight $rint
                lappend tosort [list $i $rint]
            }
            set sorted [lsort -index 1 -real $tosort]
-           set res {}
            for {set i 0} {$i < 10000} {incr i} {
                lappend res [lindex $sorted $i 0]
            }
            format {}
        } {}

        test {SORT with BY against the newly created list} {
            r sort tosort {BY weight_*}
        } $res

        test {SORT with BY (hash field) against the newly created list} {
            r sort tosort {BY wobj_*->weight}
        } $res

        test {SORT with GET (key+hash) with sanity check of each element (list)} {
            set err {}
            set l1 [r sort tosort GET # GET weight_*]
            set l2 [r sort tosort GET # GET wobj_*->weight]
            foreach {id1 w1} $l1 {id2 w2} $l2 {
                set realweight [r get weight_$id1]
                if {$id1 != $id2} {
                    set err "ID mismatch $id1 != $id2"
                    break
                }
                if {$realweight != $w1 || $realweight != $w2} {
                    set err "Weights mismatch! w1: $w1 w2: $w2 real: $realweight"
                    break
                }
            }
            set _ $err
        } {}

        test {SORT with BY, but against the newly created set} {
            r sort tosort-set {BY weight_*}
        } $res

        test {SORT with BY (hash field), but against the newly created set} {
            r sort tosort-set {BY wobj_*->weight}
        } $res

        test {SORT with BY and STORE against the newly created list} {
            r sort tosort {BY weight_*} store sort-res
            r lrange sort-res 0 -1
        } $res

        test {SORT with BY (hash field) and STORE against the newly created list} {
            r sort tosort {BY wobj_*->weight} store sort-res
            r lrange sort-res 0 -1
        } $res

        test {SORT direct, numeric, against the newly created list} {
            r sort tosort
        } [lsort -integer $res]

        test {SORT decreasing sort} {
            r sort tosort {DESC}
        } [lsort -decreasing -integer $res]

        test {SORT speed, sorting 10000 elements list using BY, 100 times} {
            set start [clock clicks -milliseconds]
            for {set i 0} {$i < 100} {incr i} {
                set sorted [r sort tosort {BY weight_* LIMIT 0 10}]
            }
            set elapsed [expr [clock clicks -milliseconds]-$start]
            puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
            flush stdout
            format {}
        } {}

        test {SORT speed, as above but against hash field} {
            set start [clock clicks -milliseconds]
            for {set i 0} {$i < 100} {incr i} {
                set sorted [r sort tosort {BY wobj_*->weight LIMIT 0 10}]
            }
            set elapsed [expr [clock clicks -milliseconds]-$start]
            puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
            flush stdout
            format {}
        } {}

        test {SORT speed, sorting 10000 elements list directly, 100 times} {
            set start [clock clicks -milliseconds]
            for {set i 0} {$i < 100} {incr i} {
                set sorted [r sort tosort {LIMIT 0 10}]
            }
            set elapsed [expr [clock clicks -milliseconds]-$start]
            puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
            flush stdout
            format {}
        } {}

        test {SORT speed, pseudo-sorting 10000 elements list, BY <const>, 100 times} {
            set start [clock clicks -milliseconds]
            for {set i 0} {$i < 100} {incr i} {
                set sorted [r sort tosort {BY nokey LIMIT 0 10}]
            }
            set elapsed [expr [clock clicks -milliseconds]-$start]
            puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
            flush stdout
            format {}
        } {}
+   }

    test {SORT regression for issue #19, sorting floats} {
        r flushdb
...
-start_server default.conf {} {
+start_server {tags {"hash"}} {
    test {HSET/HLEN - Small hash creation} {
        array set smallhash {}
        for {set i 0} {$i < 8} {incr i} {
...
-start_server default.conf {} {
+start_server {tags {"list"}} {
    test {Basic LPUSH, RPUSH, LLENGTH, LINDEX} {
        set res [r lpush mylist a]
        append res [r lpush mylist b]
...
-start_server default.conf {} {
+start_server {tags {"set"}} {
    test {SADD, SCARD, SISMEMBER, SMEMBERS basics} {
        r sadd myset foo
        r sadd myset bar
...