ruanhaishen / redis · Commits · 2e5b3f08

Commit 2e5b3f08, authored Jul 28, 2024 by YaacovHazan

    Merge remote-tracking branch 'upstream/unstable' into HEAD

Parents: 0637b4ea, 94b9072e
Changes: 56 files
tests/integration/redis-cli.tcl
@@ -68,10 +68,10 @@ start_server {tags {"cli"}} {
        set _ [format_output [read_cli $fd]]
    }

    # Note: prompt may be affected by the local history, if failed, please
    # try using `rm ~/.rediscli_history` to delete it and then retry.
    file delete ./.rediscli_history_test

    proc test_interactive_cli_with_prompt {name code} {
        set ::env(FAKETTY_WITH_PROMPT) 1
        set ::env(REDISCLI_HISTFILE) ".rediscli_history_test"
        test_interactive_cli $name $code
        unset ::env(FAKETTY_WITH_PROMPT)
    }
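The new helper wraps the existing test_interactive_cli proc, exporting FAKETTY_WITH_PROMPT so the spawned redis-cli renders its prompt and pointing REDISCLI_HISTFILE at a throwaway history file. A hypothetical usage sketch (the test name, command, and the write_cli call are illustrative assumptions, not lines from this diff):

    # Assumed usage shape: drive an interactive redis-cli on a fake TTY with the
    # prompt enabled, reading output through the existing read_cli helper.
    test_interactive_cli_with_prompt "prompt is redrawn after a simple SET" {
        write_cli $fd "set testkey testvalue"   ;# write_cli assumed from this file
        assert_match {*OK*} [format_output [read_cli $fd]]
    }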
tests/integration/replication.tcl
@@ -110,24 +110,23 @@ start_server {tags {"repl external:skip"}} {
        $A config resetstat
        set rd [redis_deferring_client]
        $rd brpoplpush a b 5
        wait_for_blocked_client
        r lpush a foo
        wait_for_condition 50 100 {
            [$A debug digest] eq [$B debug digest]
        } else {
            fail "Master and replica have different digest: [$A debug digest] VS [$B debug digest]"
        }
        wait_for_ofs_sync $B $A
        assert_equal [$A debug digest] [$B debug digest]
        assert_match {*calls=1,*} [cmdrstat rpoplpush $A]
        assert_match {} [cmdrstat lmove $A]
        assert_equal [$rd read] {foo}
        $rd close
    }

    test {BRPOPLPUSH replication, list exists} {
        $A config resetstat
        set rd [redis_deferring_client]
        r lpush c 1
        r lpush c 2
        r lpush c 3
        $rd brpoplpush c d 5
        after 1000
        assert_equal [r brpoplpush c d 5] {1}
        wait_for_ofs_sync $B $A
        assert_equal [$A debug digest] [$B debug digest]
        assert_match {*calls=1,*} [cmdrstat rpoplpush $A]
        assert_match {} [cmdrstat lmove $A]
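The wait_for_ofs_sync helper used above comes from the shared test support code; it waits until both instances report the same replication offset before digests are compared. A sketch of that idea, assuming it is built on the suite's wait_for_condition and status helpers (the real definition lives in tests/support/util.tcl and may differ):

    # Assumed shape: poll INFO on both servers until master_repl_offset matches,
    # otherwise fail the test once the retries are exhausted.
    proc wait_for_ofs_sync_sketch {r1 r2} {
        wait_for_condition 50 100 {
            [status $r1 master_repl_offset] eq [status $r2 master_repl_offset]
        } else {
            fail "replication offsets of the two instances never converged"
        }
    }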
@@ -139,24 +138,24 @@ start_server {tags {"repl external:skip"}} {
        $A config resetstat
        set rd [redis_deferring_client]
        $rd blmove a b $wherefrom $whereto 5
        $rd flush
        wait_for_blocked_client
        r lpush a foo
        wait_for_condition 50 100 {
            [$A debug digest] eq [$B debug digest]
        } else {
            fail "Master and replica have different digest: [$A debug digest] VS [$B debug digest]"
        }
        wait_for_ofs_sync $B $A
        assert_equal [$A debug digest] [$B debug digest]
        assert_match {*calls=1,*} [cmdrstat lmove $A]
        assert_match {} [cmdrstat rpoplpush $A]
        assert_equal [$rd read] {foo}
        $rd close
    }

    test "BLMOVE ($wherefrom, $whereto) replication, list exists" {
        $A config resetstat
        set rd [redis_deferring_client]
        r lpush c 1
        r lpush c 2
        r lpush c 3
        $rd blmove c d $wherefrom $whereto 5
        after 1000
        r blmove c d $wherefrom $whereto 5
        wait_for_ofs_sync $B $A
        assert_equal [$A debug digest] [$B debug digest]
        assert_match {*calls=1,*} [cmdrstat lmove $A]
        assert_match {} [cmdrstat rpoplpush $A]
@@ -167,6 +166,7 @@ start_server {tags {"repl external:skip"}} {
    test {BLPOP followed by role change, issue #2473} {
        set rd [redis_deferring_client]
        $rd blpop foo 0 ;# Block while B is a master
        wait_for_blocked_client

        # Turn B into master of A
        $A slaveof no one
@@ -182,7 +182,7 @@ start_server {tags {"repl external:skip"}} {
        # If the client is still attached to the instance, we'll get
        # a desync between the two instances.
        $A rpush foo a b c
        after 100
        wait_for_ofs_sync $B $A
        wait_for_condition 50 100 {
            [$A debug digest] eq [$B debug digest] &&
@@ -192,6 +192,9 @@ start_server {tags {"repl external:skip"}} {
            fail "Master and replica have different digest: [$A debug digest] VS [$B debug digest]"
        }
        assert_match {*calls=1,*,rejected_calls=0,failed_calls=1*} [cmdrstat blpop $B]
        assert_error {UNBLOCKED*} {$rd read}
        $rd close
    }
}
}
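The cmdrstat helper used throughout these assertions extracts one command's commandstats entry from INFO. Roughly, the value matched against patterns like {*calls=1,*} is a line of the shape shown below; the counters are illustrative, and the stand-in proc is only a sketch of what the real helper in the test support code presumably does:

    # Example commandstats line the patterns are matched against (values made up):
    #   cmdstat_rpoplpush:calls=1,usec=12,usec_per_call=12.00,rejected_calls=0,failed_calls=0
    proc cmdrstat_sketch {cmd r} {
        set value {}
        regexp "\r\ncmdstat_$cmd:(.*?)\r\n" [$r info commandstats] _ value
        return $value
    }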
tests/support/server.tcl
@@ -369,10 +369,14 @@ proc run_external_server_test {code overrides} {
    r flushall
    r function flush

    # store overrides
    # store configs
    set saved_config {}
    foreach {param val} [r config get *] {
        dict set saved_config $param $val
    }

    # apply overrides
    foreach {param val} $overrides {
        dict set saved_config $param [lindex [r config get $param] 1]
        r config set $param $val

        # If we enable appendonly, wait for for rewrite to complete. This is
@@ -400,7 +404,8 @@ proc run_external_server_test {code overrides} {
    # restore overrides
    dict for {param val} $saved_config {
        r config set $param $val
        # some may fail, specifically immutable ones.
        catch {r config set $param $val}
    }

    set srv [lpop ::servers]
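With this change the proc snapshots every config via CONFIG GET * before applying overrides, then replays the whole snapshot afterwards, tolerating immutable parameters. A hypothetical call site (the chosen override is illustrative, not taken from the diff):

    # Run a test body against the external server with a temporary override;
    # when the body returns, the saved config dict is replayed to undo it.
    run_external_server_test {
        assert_equal {maxmemory-policy allkeys-lru} [r config get maxmemory-policy]
    } {maxmemory-policy allkeys-lru}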
tests/support/util.tcl
@@ -293,7 +293,6 @@ proc findKeyWithType {r type} {
proc createComplexDataset {r ops {opt {}}} {
    set useexpire [expr {[lsearch -exact $opt useexpire] != -1}]
    # TODO: Remove usehexpire on next commit, when RDB will support replication
    set usehexpire [expr {[lsearch -exact $opt usehexpire] != -1}]

    if {[lsearch -exact $opt usetag] != -1} {
@@ -486,8 +485,9 @@ proc find_available_port {start count} {
        set port $start
    }
    set fd1 -1
    if {[catch {set fd1 [socket -server 127.0.0.1 $port]}] ||
        [catch {set fd2 [socket -server 127.0.0.1 [expr $port+10000]]}]} {
    proc dummy_accept {chan addr port} {}
    if {[catch {set fd1 [socket -server dummy_accept -myaddr 127.0.0.1 $port]}] ||
        [catch {set fd2 [socket -server dummy_accept -myaddr 127.0.0.1 [expr $port+10000]]}]} {
        if {$fd1 != -1} {
            close $fd1
        }
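The fix matters because Tcl's socket -server expects an accept command as its first argument, so the earlier call was passing 127.0.0.1 in that position rather than binding to it. A standalone sketch of the corrected pattern, separate from the diff (throwaway callback, port 0 meaning any free port):

    # Open a listener on 127.0.0.1 whose accept callback just closes incoming
    # connections, print the port the OS picked, then release the socket.
    proc demo_accept {chan addr port} {
        close $chan
    }
    set listener [socket -server demo_accept -myaddr 127.0.0.1 0]
    puts "listening on port [lindex [fconfigure $listener -sockname] 2]"
    close $listener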
@@ -1155,8 +1155,7 @@ proc system_backtrace_supported {} {
    # libmusl does not support backtrace. Also return 0 on
    # static binaries (ldd exit code 1) where we can't detect libmusl
    catch {set ldd [exec ldd src/redis-server]}
    if {![catch {set ldd [exec ldd src/redis-server]}]} {
        if {![string match {*libc.*musl*} $ldd]} {
            return 1
        }
tests/test_helper.tcl
@@ -30,7 +30,7 @@ set test_dirs {
foreach test_dir $test_dirs {
    set files [glob -nocomplain $dir/tests/$test_dir/*.tcl]
    foreach file $files {
    foreach file [lsort $files] {
        lappend ::all_tests $test_dir/[file root [file tail $file]]
    }
}
tests/unit/client-eviction.tcl
@@ -93,6 +93,11 @@ start_server {} {
        set n [expr $maxmemory_clients_actual / 2]
        $rr write [join [list "*1\r\n\$$n\r\n" [string repeat v $n]] ""]
        $rr flush
        wait_for_condition 100 10 {
            [client_field $cname tot-mem] >= $n
        } else {
            fail "Failed to fill qbuf for test"
        }
        set tot_mem [client_field $cname tot-mem]
        assert {$tot_mem >= $n && $tot_mem < $maxmemory_clients_actual}
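The bytes written above form the start of a RESP command: a *1 array header, a $<n> bulk-string header, and n payload bytes, with the final CRLF deliberately missing so the data stays queued in the client's query buffer. A small self-contained illustration of the same framing (the proc name is made up for this sketch):

    # Build the same intentionally incomplete RESP frame for an arbitrary size.
    proc partial_bulk_frame {n} {
        # "*1\r\n"   -> array with a single element
        # "$<n>\r\n" -> a bulk string of n bytes follows
        # n payload bytes but no trailing "\r\n", so the command never completes
        return [join [list "*1\r\n\$$n\r\n" [string repeat v $n]] ""]
    }
    puts [string length [partial_bulk_frame 8]]   ;# 8 payload bytes plus the headers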
tests/unit/functions.tcl
@@ -294,6 +294,31 @@ start_server {tags {"scripting"}} {
        assert_match {} [r function list]
    }

    test {FUNCTION - async function flush rebuilds Lua VM without causing race condition between main and lazyfree thread} {
        # LAZYFREE_THRESHOLD is 64
        for {set i 0} {$i < 1000} {incr i} {
            r function load [get_function_code lua test$i test$i {local a = 1 while true do a = a + 1 end}]
        }
        assert_morethan [s used_memory_vm_functions] 100000
        r config resetstat
        r function flush async
        assert_lessthan [s used_memory_vm_functions] 40000

        # Wait for the completion of lazy free for both functions and engines.
        set start_time [clock seconds]
        while {1} {
            # Tests for race conditions between async function flushes and main thread Lua VM operations.
            r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}]
            if {[s lazyfreed_objects] == 1001 || [expr {[clock seconds] - $start_time}] > 5} {
                break
            }
        }
        if {[s lazyfreed_objects] != 1001} {
            error "Timeout or unexpected number of lazyfreed_objects: [s lazyfreed_objects]"
        }
        assert_match {{library_name test engine LUA functions {{name test description {} flags {}}}}} [r function list]
    }
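The get_function_code helper is defined in the shared test support code; judging by the literal FUNCTION LOAD payload used in tests/unit/scripting.tcl later in this commit, the library source it builds presumably has the following shape (a sketch under that assumption, not the real proc):

    # Assumed shape: wrap a Lua body in a library header plus a register_function call.
    proc get_function_code_sketch {engine lib func body} {
        return "#!$engine name=$lib\nredis.register_function('$func', function(KEYS, ARGV)\n$body\nend)"
    }
    # e.g. get_function_code_sketch lua test0 test0 {local a = 1 while true do a = a + 1 end}
    # would produce:
    #   #!lua name=test0
    #   redis.register_function('test0', function(KEYS, ARGV)
    #   local a = 1 while true do a = a + 1 end
    #   end)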
    test {FUNCTION - test function wrong argument} {
        catch {r function flush bad_arg} e
        assert_match {*only supports SYNC|ASYNC*} $e
tests/unit/introspection-2.tcl
@@ -133,6 +133,10 @@ start_server {tags {"introspection"}} {
        assert_equal {{k1 {RO access}} {k2 {OW update}}} [r command getkeysandflags sort k1 store k2]
    }

    test {COMMAND GETKEYSANDFLAGS invalid args} {
        assert_error "ERR Invalid arguments*" {r command getkeysandflags ZINTERSTORE zz 1443677133621497600 asdf}
    }

    test {COMMAND GETKEYS MEMORY USAGE} {
        assert_equal {key} [r command getkeys memory usage key]
    }
tests/unit/introspection.tcl
@@ -648,7 +648,7 @@ start_server {tags {"introspection"}} {
        # Run a dummy server on used_port so we know we can't configure redis to
        # use it. It's ok for this to fail because that means used_port is invalid
        # anyway
        catch {socket -server dummy_accept -myaddr 127.0.0.1 $used_port} e
        catch {set sockfd [socket -server dummy_accept -myaddr 127.0.0.1 $used_port]} e
        if {$::verbose} {
            puts "dummy_accept: $e"
        }

        # Try to listen on the used port, pass some more configs to make sure the
@@ -670,6 +670,7 @@ start_server {tags {"introspection"}} {
        set r1 [redis_client]
        assert_equal [$r1 ping] "PONG"
        $r1 close
        close $sockfd
    }

    test {CONFIG SET duplicate configs} {
tests/unit/memefficiency.tcl
@@ -37,6 +37,18 @@ start_server {tags {"memefficiency external:skip"}} {
}

run_solo {defrag} {
    proc wait_for_defrag_stop {maxtries delay} {
        wait_for_condition $maxtries $delay {
            [s active_defrag_running] eq 0
        } else {
            after 120 ;# serverCron only updates the info once in 100ms
            puts [r info memory]
            puts [r info stats]
            puts [r memory malloc-stats]
            fail "defrag didn't stop."
        }
    }

    proc test_active_defrag {type} {
        if {[string match {*jemalloc*} [s mem_allocator]] &&
            [r debug mallctl arenas.page] <= 8192} {

            test "Active defrag main dictionary: $type" {
@@ -90,14 +102,7 @@ run_solo {defrag} {
                r config set active-defrag-cycle-max 75

                # Wait for the active defrag to stop working.
                wait_for_condition 2000 100 {
                    [s active_defrag_running] eq 0
                } else {
                    after 120 ;# serverCron only updates the info once in 100ms
                    puts [r info memory]
                    puts [r memory malloc-stats]
                    fail "defrag didn't stop."
                }
                wait_for_defrag_stop 2000 100

                # Test the fragmentation is lower.
                after 120 ;# serverCron only updates the info once in 100ms
@@ -180,9 +185,10 @@ run_solo {defrag} {
            test "Active defrag eval scripts: $type" {
                r flushdb
                r script flush sync
                r config resetstat
                r config set hz 100
                r config set activedefrag no
                wait_for_defrag_stop 500 100
                r config resetstat
                r config set active-defrag-threshold-lower 5
                r config set active-defrag-cycle-min 65
                r config set active-defrag-cycle-max 75
@@ -240,14 +246,7 @@ run_solo {defrag} {
                }

                # wait for the active defrag to stop working
                wait_for_condition 500 100 {
                    [s active_defrag_running] eq 0
                } else {
                    after 120 ;# serverCron only updates the info once in 100ms
                    puts [r info memory]
                    puts [r memory malloc-stats]
                    fail "defrag didn't stop."
                }
                wait_for_defrag_stop 500 100

                # test the fragmentation is lower
                after 120 ;# serverCron only updates the info once in 100ms
@@ -265,9 +264,10 @@ run_solo {defrag} {
            test "Active defrag big keys: $type" {
                r flushdb
                r config resetstat
                r config set hz 100
                r config set activedefrag no
                wait_for_defrag_stop 500 100
                r config resetstat
                r config set active-defrag-max-scan-fields 1000
                r config set active-defrag-threshold-lower 5
                r config set active-defrag-cycle-min 65
@@ -362,14 +362,7 @@ run_solo {defrag} {
                }

                # wait for the active defrag to stop working
                wait_for_condition 500 100 {
                    [s active_defrag_running] eq 0
                } else {
                    after 120 ;# serverCron only updates the info once in 100ms
                    puts [r info memory]
                    puts [r memory malloc-stats]
                    fail "defrag didn't stop."
                }
                wait_for_defrag_stop 500 100

                # test the fragmentation is lower
                after 120 ;# serverCron only updates the info once in 100ms
@@ -406,9 +399,10 @@ run_solo {defrag} {
            test "Active defrag pubsub: $type" {
                r flushdb
                r config resetstat
                r config set hz 100
                r config set activedefrag no
                wait_for_defrag_stop 500 100
                r config resetstat
                r config set active-defrag-threshold-lower 5
                r config set active-defrag-cycle-min 65
                r config set active-defrag-cycle-max 75
@@ -467,14 +461,7 @@ run_solo {defrag} {
                }

                # wait for the active defrag to stop working
                wait_for_condition 500 100 {
                    [s active_defrag_running] eq 0
                } else {
                    after 120 ;# serverCron only updates the info once in 100ms
                    puts [r info memory]
                    puts [r memory malloc-stats]
                    fail "defrag didn't stop."
                }
                wait_for_defrag_stop 500 100

                # test the fragmentation is lower
                after 120 ;# serverCron only updates the info once in 100ms
@@ -505,9 +492,10 @@ run_solo {defrag} {
            test "Active Defrag HFE: $type" {
                r flushdb
                r config resetstat
                r config set hz 100
                r config set activedefrag no
                wait_for_defrag_stop 500 100
                r config resetstat
                # TODO: Lower the threshold after defraging the ebuckets.
                # Now just to ensure that the reference is updated correctly.
                r config set active-defrag-threshold-lower 12
@@ -581,14 +569,7 @@ run_solo {defrag} {
                }

                # wait for the active defrag to stop working
                wait_for_condition 500 100 {
                    [s active_defrag_running] eq 0
                } else {
                    after 120 ;# serverCron only updates the info once in 100ms
                    puts [r info memory]
                    puts [r memory malloc-stats]
                    fail "defrag didn't stop."
                }
                wait_for_defrag_stop 500 100

                # test the fragmentation is lower
                after 120 ;# serverCron only updates the info once in 100ms
@@ -605,9 +586,10 @@ run_solo {defrag} {
            if {$type eq "standalone"} { ;# skip in cluster mode
                test "Active defrag big list: $type" {
                    r flushdb
                    r config resetstat
                    r config set hz 100
                    r config set activedefrag no
                    wait_for_defrag_stop 500 100
                    r config resetstat
                    r config set active-defrag-max-scan-fields 1000
                    r config set active-defrag-threshold-lower 5
                    r config set active-defrag-cycle-min 65
@@ -661,15 +643,7 @@ run_solo {defrag} {
                    }

                    # wait for the active defrag to stop working
                    wait_for_condition 500 100 {
                        [s active_defrag_running] eq 0
                    } else {
                        after 120 ;# serverCron only updates the info once in 100ms
                        puts [r info memory]
                        puts [r info stats]
                        puts [r memory malloc-stats]
                        fail "defrag didn't stop."
                    }
                    wait_for_defrag_stop 500 100

                    # test the fragmentation is lower
                    after 120 ;# serverCron only updates the info once in 100ms
@@ -721,9 +695,10 @@ run_solo {defrag} {
                # this test is more consistent on a fresh server with no history
                start_server {tags {"defrag"} overrides {save ""}} {
                    r flushdb
                    r config resetstat
                    r config set hz 100
                    r config set activedefrag no
                    wait_for_defrag_stop 500 100
                    r config resetstat
                    r config set active-defrag-max-scan-fields 1000
                    r config set active-defrag-threshold-lower 5
                    r config set active-defrag-cycle-min 65
@@ -789,15 +764,7 @@ run_solo {defrag} {
                    }

                    # wait for the active defrag to stop working
                    wait_for_condition 500 100 {
                        [s active_defrag_running] eq 0
                    } else {
                        after 120 ;# serverCron only updates the info once in 100ms
                        puts [r info memory]
                        puts [r info stats]
                        puts [r memory malloc-stats]
                        fail "defrag didn't stop."
                    }
                    wait_for_defrag_stop 500 100

                    # test the fragmentation is lower
                    after 120 ;# serverCron only updates the info once in 100ms
tests/unit/moduleapi/blockonbackground.tcl
set testmodule [file normalize tests/modules/blockonbackground.so]

source tests/support/util.tcl

proc latency_percentiles_usec {cmd} {
    return [latencyrstat_percentiles $cmd r]
}
tests/unit/other.tcl
@@ -75,6 +75,20 @@ start_server {tags {"other"}} {
        r flushall
        assert_equal [s rdb_changes_since_last_save] 0
    }

    test {FLUSHALL and bgsave} {
        r config set save "3600 1 300 100 60 10000"
        r set x y
        r bgsave
        r set x y
        r multi
        r debug sleep 1
        # by the time we'll get to run flushall, the child will finish,
        # but the parent will be unaware of it, and it could wrongly set the dirty counter.
        r flushall
        r exec
        assert_equal [s rdb_changes_since_last_save] 0
    }
}

test {BGSAVE} {
tests/unit/pubsub.tcl
@@ -374,7 +374,7 @@ start_server {tags {"pubsub network"}} {
        assert_equal "pmessage * __keyspace@${db}__:myhash hexpire" [$rd1 read]
        assert_equal "pmessage * __keyspace@${db}__:myhash hexpire" [$rd1 read]
        assert_equal "pmessage * __keyspace@${db}__:myhash hpersist" [$rd1 read]
        assert_equal "pmessage * __keyspace@${db}__:myhash hexpired" [$rd1 read]
        assert_equal "pmessage * __keyspace@${db}__:myhash hdel" [$rd1 read]

        # Test that we will get `hexpired` notification when
        # a hash field is removed by active expire.
tests/unit/scripting.tcl
@@ -266,6 +266,7 @@ start_server {tags {"scripting"}} {
    } {0}

    test {EVAL - Scripts do not block on waitaof} {
        r config set appendonly no
        run_script {return redis.pcall('waitaof','0','1','0')} 0
    } {0 0}
@@ -1875,6 +1876,27 @@ start_server {tags {"scripting needs:debug"}} {
    r debug set-disable-deny-scripts 0
}

start_server {tags {"scripting"}} {
    test "Verify Lua performs GC correctly after script loading" {
        set dummy_script "--[string repeat x 10]\nreturn "
        set n 50000
        for {set i 0} {$i < $n} {incr i} {
            set script "$dummy_script[format "%06d" $i]"
            if {$is_eval} {
                r script load $script
            } else {
                r function load "#!lua name=test$i\nredis.register_function('test$i', function(KEYS, ARGV)\n$script\nend)"
            }
        }
        if {$is_eval} {
            assert_lessthan [s used_memory_lua] 17500000
        } else {
            assert_lessthan [s used_memory_vm_functions] 14500000
        }
    }
}
} ;# foreach is_eval
tests/unit/type/hash-field-expire.tcl
@@ -19,12 +19,12 @@ set P_OK 1
############################### AUX FUNCS ######################################

proc get_hashes_with_expiry_fields {r} {
proc get_stat_subexpiry {r} {
    set input_string [r info keyspace]
    set hash_count 0
    foreach line [split $input_string \n] {
        if {[regexp {hashes_with_expiry_fields=(\d+)} $line -> value]} {
        if {[regexp {subexpiry=(\d+)} $line -> value]} {
            return $value
        }
    }
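The renamed helper scans INFO keyspace for a subexpiry counter instead of the old hashes_with_expiry_fields field. For illustration, the regexp would match a db line roughly like the one below; the exact line and counters are assumptions for the sketch, not output captured from a server:

    # Illustrative only: run a canned keyspace line through the same pattern.
    set line "db0:keys=3,expires=1,avg_ttl=0,subexpiry=2"
    if {[regexp {subexpiry=(\d+)} $line -> value]} {
        puts "hash fields with TTLs reported: $value"   ;# prints 2
    }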
@@ -163,9 +163,6 @@ start_server {tags {"external:skip needs:debug"}} {
    }

    test "Lazy Expire - fields are lazy deleted ($type)" {
        # TODO remove the SELECT once dbid will be embedded inside dict/listpack
        r select 0
        r debug set-active-expire 0
        r del myhash
@@ -349,10 +346,12 @@ start_server {tags {"external:skip needs:debug"}} {
    test "Test HRANDFIELD deletes all expired fields ($type)" {
        r debug set-active-expire 0
        r flushall
        r config resetstat
        r hset myhash f1 v1 f2 v2 f3 v3 f4 v4 f5 v5
        r hpexpire myhash 1 FIELDS 2 f1 f2
        after 5
        assert_equal [lsort [r hrandfield myhash 5]] "f3 f4 f5"
        assert_equal [s expired_subkeys] 2
        r hpexpire myhash 1 FIELDS 3 f3 f4 f5
        after 5
        assert_equal [lsort [r hrandfield myhash 5]] ""
@@ -476,39 +475,48 @@ start_server {tags {"external:skip needs:debug"}} {
        r hset h5 1 1 2 22 3 333 4 4444 5 55555
        r hset h6 01 01 02 02 03 03 04 04 05 05 06 06
        r hset h18 01 01 02 02 03 03 04 04 05 05 06 06 07 07 08 08 09 09 10 10 11 11 12 12 13 13 14 14 15 15 16 16 17 17 18 18
        r hpexpire h1 100 NX FIELDS 1 01
        r hpexpire h2 100 NX FIELDS 1 01
        r hpexpire h2 100 NX FIELDS 1 02
        r hpexpire h3 100 NX FIELDS 1 01
        r hpexpire h4 100 NX FIELDS 1 2
        r hpexpire h5 100 NX FIELDS 1 3
        r hpexpire h6 100 NX FIELDS 1 05
        r hpexpire h18 100 NX FIELDS 17 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17
        r hpexpire h1 1 NX FIELDS 1 01
        r hpexpire h2 1 NX FIELDS 1 01
        r hpexpire h2 1 NX FIELDS 1 02
        r hpexpire h3 1 NX FIELDS 1 01
        r hpexpire h4 1 NX FIELDS 1 2
        r hpexpire h5 1 NX FIELDS 1 3
        r hpexpire h6 1 NX FIELDS 1 05
        r hpexpire h18 1 NX FIELDS 17 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17
        after 150
        after 5

        # Verify HDEL not ignore expired field. It is too much overhead to check
        # if the field is expired before deletion.
        assert_equal [r HDEL h1 01] "1"

        # Verify HGET ignore expired field
        r config resetstat
        assert_equal [r HGET h2 01] ""
        assert_equal [s expired_subkeys] 1
        assert_equal [r HGET h2 02] ""
        assert_equal [s expired_subkeys] 2
        assert_equal [r HGET h3 01] ""
        assert_equal [r HGET h3 02] "02"
        assert_equal [r HGET h3 03] "03"
        assert_equal [s expired_subkeys] 3

        # Verify HINCRBY ignore expired field
        assert_equal [r HINCRBY h4 2 1] "1"
        assert_equal [s expired_subkeys] 4
        assert_equal [r HINCRBY h4 3 1] "100"

        # Verify HSTRLEN ignore expired field
        assert_equal [r HSTRLEN h5 3] "0"
        assert_equal [s expired_subkeys] 5
        assert_equal [r HSTRLEN h5 4] "4"
        assert_equal [lsort [r HKEYS h6]] "01 02 03 04 06"
        assert_equal [s expired_subkeys] 5

        # Verify HEXISTS ignore expired field
        assert_equal [r HEXISTS h18 07] "0"
        assert_equal [s expired_subkeys] 6
        assert_equal [r HEXISTS h18 18] "1"

        # Verify HVALS ignore expired field
        assert_equal [lsort [r HVALS h18]] "18"
        assert_equal [s expired_subkeys] 6

        # Restore to support active expire
        r debug set-active-expire 1
    }
@@ -898,14 +906,14 @@ start_server {tags {"external:skip needs:debug"}} {
        r hset myhash f1 v1 f2 v2 f3 v3 f4 v4 f5 v5
        r hpexpire myhash 100 FIELDS 3 f1 f2 f3
        assert_match [get_hashes_with_expiry_fields r] 1
        assert_match [get_stat_subexpiry r] 1
        r hset myhash2 f1 v1 f2 v2 f3 v3 f4 v4 f5 v5
        assert_match [get_hashes_with_expiry_fields r] 1
        assert_match [get_stat_subexpiry r] 1
        r hpexpire myhash2 100 FIELDS 3 f1 f2 f3
        assert_match [get_hashes_with_expiry_fields r] 2
        assert_match [get_stat_subexpiry r] 2
        wait_for_condition 50 50 {
            [get_hashes_with_expiry_fields r] == 0
            [get_stat_subexpiry r] == 0
        } else {
            fail "Hash field expiry statistics failed"
}
@@ -1039,11 +1047,8 @@ start_server {tags {"external:skip needs:debug"}} {
        r hpexpire h1 100000 NX FIELDS 3 f3 f4 f5
        r hexpire h1 100000 FIELDS 1 f6

        # Verify HRANDFIELD deletes expired fields and propagates it
        r hset h2 f1 v1 f2 v2
        r hpexpire h2 1 FIELDS 2 f1 f2
        after 5
        assert_equal [r hrandfield h4 2] ""
        after 200
        assert_aof_content $aof {
@@ -1060,8 +1065,6 @@ start_server {tags {"external:skip needs:debug"}} {
        }
        array set keyAndFields1 [dumpAllHashes r]
        # Let some time pass and reload data from AOF
        after 2000
        r debug loadaof
        array set keyAndFields2 [dumpAllHashes r]
utils/generate-module-api-doc.rb
@@ -137,18 +137,8 @@ def is_func_line(src, i)
  src[i-1] =~ /\*\//
end

puts "---\n"
puts "title: \"Modules API reference\"\n"
puts "linkTitle: \"API reference\"\n"
puts "weight: 1\n"
puts "description: >\n"
puts " Reference for the Redis Modules API\n"
puts "aliases:\n"
puts " - /topics/modules-api-ref\n"
puts "---\n"
puts "\n"
puts "<!-- This file is generated from module.c using\n"
puts " utils/generate-module-api-doc.rb -->\n\n"
puts "redis/redis: utils/generate-module-api-doc.rb -->\n\n"
src = File.open(File.dirname(__FILE__)+"/../src/module.c").to_a

# Build function index