Commit bf759cc9 authored by Oran Agra

Merge remote-tracking branch 'antirez/unstable' into jemalloc_purge_bg

parents 2e19b941 ee1cef18
/* tracking.c - Client side caching: keys tracking and invalidation
*
* Copyright (c) 2019, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "server.h"
/* The tracking table consists of 2^24 radix trees (each tree, and the
* table itself, are allocated lazily, only when needed), tracking the
* clients that may have certain keys in their local, client side, cache.
*
* Keys are grouped into 2^24 slots, in a way similar to Redis Cluster hash
* slots, however here the function we use is crc64, taking the least
* significant 24 bits of the output.
*
* When a client enables tracking with "CLIENT TRACKING on", each key served to
* the client is hashed to one of these slots, and Redis remembers which
* clients may have keys from that slot. Later, when a key in a given slot is
* modified, all the clients that may have local copies of keys in that slot
* will receive an invalidation message. There is no distinction of database
* number: a single table is used.
*
* Clients will normally keep frequently requested objects in memory, removing
* them when invalidation messages are received. A strategy clients may use is
* to just cache objects in a dictionary, associating with each cached object
* an incremental epoch, or just a timestamp. When invalidation messages are
* received, clients may store, in a different table, the timestamp (or epoch)
* of the invalidation of the given slot: later, when accessing objects, the
* eviction of stale objects may be performed in a lazy way by checking if the
* cached object timestamp is older than the invalidation timestamp for the
* slot the object hashes to.
*
* The output space of the 24 bit hash function is large (more than 16 million
* possible slots), so clients that want to use fewer resources may use
* only the most significant bits instead of the full 24 bits. */
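/* A minimal sketch of the client side scheme described above (illustrative
* pseudocode, not part of Redis; all names are hypothetical):
*
*   slot = crc64(0,key,keylen) & 0xFFFFFF;
*   entry = cacheLookup(key);
*   if (entry && entry->cached_at <= invalidated_at[slot]) {
*       cacheDelete(key);   // Stale: the slot was invalidated later.
*       entry = NULL;
*   }
*   if (entry == NULL) entry = fetchFromRedisAndCache(key);
*/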
#define TRACKING_TABLE_SIZE (1<<24)
rax **TrackingTable = NULL;
unsigned long TrackingTableUsedSlots = 0;
robj *TrackingChannelName;
/* Remove the tracking state from the client 'c'. Note that there is not much
* for us to do here, other than decrementing the counter of the clients in
* tracking mode, because we just store the ID of the client in the tracking
* table, so we'll remove the ID reference in a lazy way. Otherwise when a
* client with many entries in the table is removed, it would cost a lot of
* time to do the cleanup. */
void disableTracking(client *c) {
if (c->flags & CLIENT_TRACKING) {
server.tracking_clients--;
c->flags &= ~(CLIENT_TRACKING|CLIENT_TRACKING_BROKEN_REDIR);
}
}
/* Enable the tracking state for the client 'c', and as a side effect allocates
* the tracking table if needed. If the 'redirect_to' argument is non zero, the
* invalidation messages for this client will be sent to the client ID
* specified by the 'redirect_to' argument. Note that if that client
* eventually gets freed, we'll send a message to the original client to
* inform it of the condition. Multiple clients can redirect the invalidation
* messages to the same client ID. */
void enableTracking(client *c, uint64_t redirect_to) {
if (c->flags & CLIENT_TRACKING) return;
c->flags |= CLIENT_TRACKING;
c->flags &= ~CLIENT_TRACKING_BROKEN_REDIR;
c->client_tracking_redirection = redirect_to;
server.tracking_clients++;
if (TrackingTable == NULL) {
TrackingTable = zcalloc(sizeof(rax*) * TRACKING_TABLE_SIZE);
TrackingChannelName = createStringObject("__redis__:invalidate",20);
}
}
/* This function is called after the execution of a readonly command in the
* case the client 'c' has keys tracking enabled. It will populate the
* tracking invalidation table according to the keys the user fetched, so that
* Redis will know which clients should receive an invalidation
* message when certain groups of keys are modified. */
void trackingRememberKeys(client *c) {
int numkeys;
int *keys = getKeysFromCommand(c->cmd,c->argv,c->argc,&numkeys);
if (keys == NULL) return;
for(int j = 0; j < numkeys; j++) {
int idx = keys[j];
sds sdskey = c->argv[idx]->ptr;
uint64_t hash = crc64(0,
(unsigned char*)sdskey,sdslen(sdskey))&(TRACKING_TABLE_SIZE-1);
if (TrackingTable[hash] == NULL) {
TrackingTable[hash] = raxNew();
TrackingTableUsedSlots++;
}
raxTryInsert(TrackingTable[hash],
(unsigned char*)&c->id,sizeof(c->id),NULL,NULL);
}
getKeysFreeResult(keys);
}
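/* Send a tracking invalidation message to the client 'c' for the given
* caching slot. If the client configured redirection, the message is
* delivered to the redirected client instead; if that client no longer
* exists, the original connection is notified with a
* "tracking-redir-broken" push (RESP3 only). */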
void sendTrackingMessage(client *c, long long hash) {
int using_redirection = 0;
if (c->client_tracking_redirection) {
client *redir = lookupClientByID(c->client_tracking_redirection);
if (!redir) {
/* We need to signal to the original connection that we
* are unable to send invalidation messages to the redirected
* connection, because the client no longer exists. */
if (c->resp > 2) {
addReplyPushLen(c,3);
addReplyBulkCBuffer(c,"tracking-redir-broken",21);
addReplyLongLong(c,c->client_tracking_redirection);
}
return;
}
c = redir;
using_redirection = 1;
}
/* Only send such info for clients in RESP version 3 or more. However
* if redirection is active, and the connection we redirect to is
* in Pub/Sub mode, we can support the feature with RESP 2 as well,
* by sending Pub/Sub messages in the __redis__:invalidate channel. */
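/* For example (a sketch of the resulting wire format), a RESP3 client
* receives a push reply like: ">2\r\n$10\r\ninvalidate\r\n:<slot>\r\n". */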
if (c->resp > 2) {
addReplyPushLen(c,2);
addReplyBulkCBuffer(c,"invalidate",10);
addReplyLongLong(c,hash);
} else if (using_redirection && c->flags & CLIENT_PUBSUB) {
robj *msg = createStringObjectFromLongLong(hash);
addReplyPubsubMessage(c,TrackingChannelName,msg);
decrRefCount(msg);
}
}
/* Invalidates a caching slot: this is actually the low level implementation
* of the API that Redis calls externally, that is trackingInvalidateKey(). */
void trackingInvalidateSlot(uint64_t slot) {
if (TrackingTable == NULL || TrackingTable[slot] == NULL) return;
raxIterator ri;
raxStart(&ri,TrackingTable[slot]);
raxSeek(&ri,"^",NULL,0);
while(raxNext(&ri)) {
uint64_t id;
memcpy(&id,ri.key,ri.key_len);
client *c = lookupClientByID(id);
if (c == NULL || !(c->flags & CLIENT_TRACKING)) continue;
sendTrackingMessage(c,slot);
}
raxStop(&ri);
/* Free the tracking table: we'll create the radix tree and populate it
* again if more keys are modified in this caching slot. */
raxFree(TrackingTable[slot]);
TrackingTable[slot] = NULL;
TrackingTableUsedSlots--;
}
/* This function is called from signalModifiedKey() or other places in Redis
* when a key changes value. In the context of keys tracking, our task here is
* to send a notification to every client that may have keys about such caching
* slot. */
void trackingInvalidateKey(robj *keyobj) {
if (TrackingTable == NULL || TrackingTableUsedSlots == 0) return;
sds sdskey = keyobj->ptr;
uint64_t hash = crc64(0,
(unsigned char*)sdskey,sdslen(sdskey))&(TRACKING_TABLE_SIZE-1);
trackingInvalidateSlot(hash);
}
/* This function is called when one or all the Redis databases are flushed
* (dbid == -1 in case of FLUSHALL). Caching slots are not specific to
* each DB but are global: currently what we do is to send a special
* notification to clients with tracking enabled, invalidating the caching
* slot "-1", which means, "all the keys", in order to avoid flooding clients
* with many invalidation messages for all the keys they may hold.
*
* However trying to flush the tracking table here is very costly:
* we would need to scan 16 million caching slots in the table to check
* if they are used, which would introduce a big delay. So what we do is to really
* flush the table in the case of FLUSHALL. When a FLUSHDB is called instead
* we just send the invalidation message to all the clients, but don't
* flush the table: it will slowly get garbage collected as more keys
* are modified in the used caching slots. */
void trackingInvalidateKeysOnFlush(int dbid) {
if (server.tracking_clients) {
listNode *ln;
listIter li;
listRewind(server.clients,&li);
while ((ln = listNext(&li)) != NULL) {
client *c = listNodeValue(ln);
if (c->flags & CLIENT_TRACKING) {
sendTrackingMessage(c,-1);
}
}
}
/* In case of FLUSHALL, reclaim all the memory used by tracking. */
if (dbid == -1 && TrackingTable) {
for (int j = 0; j < TRACKING_TABLE_SIZE && TrackingTableUsedSlots > 0; j++) {
if (TrackingTable[j] != NULL) {
raxFree(TrackingTable[j]);
TrackingTable[j] = NULL;
TrackingTableUsedSlots--;
}
}
/* If there are no clients with tracking enabled, we can even
* reclaim the memory used by the table itself. The code assumes
* the table is allocated only if there is at least one client alive
* with tracking enabled. */
if (server.tracking_clients == 0) {
zfree(TrackingTable);
TrackingTable = NULL;
}
}
}
/* Tracking forces Redis to remember information about which clients may have
* keys about certain caching slots. In workloads where there are a lot of
* reads, but keys are hardly modified, the amount of information we have
* to remember server side could be a lot: each of the 16 million caching
* slots may end up with a radix tree containing many entries.
*
* So Redis allows the user to configure a maximum fill rate for the
* invalidation table. This function makes sure that we don't go over the
* specified fill rate: if we are over, we can just evict information about
* random caching slots, and send invalidation messages to clients as if
* those keys were modified. */
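/* For example, with tracking_table_max_fill set to 10 (a percentage),
* eviction starts once more than (2^24/100)*10, i.e. about 1.6 million
* slots, are in use. */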
void trackingLimitUsedSlots(void) {
static unsigned int timeout_counter = 0;
if (server.tracking_table_max_fill == 0) return; /* No limits set. */
unsigned int max_slots =
(TRACKING_TABLE_SIZE/100) * server.tracking_table_max_fill;
if (TrackingTableUsedSlots <= max_slots) {
timeout_counter = 0;
return; /* Limit not reached. */
}
/* We have to invalidate a few slots to reach the limit again. The effort
* we do here is proportional to the number of times we entered this
* function and found that we are still over the limit. */
int effort = 100 * (timeout_counter+1);
/* Let's start at a random position, and perform linear probing, in order
* to improve cache locality. However once we are able to find a used
* slot, jump again randomly, in order to avoid creating big holes in the
* table (that would make this function use more resources later). */
while(effort > 0) {
unsigned int idx = rand() % TRACKING_TABLE_SIZE;
do {
effort--;
idx = (idx+1) % TRACKING_TABLE_SIZE;
if (TrackingTable[idx] != NULL) {
trackingInvalidateSlot(idx);
if (TrackingTableUsedSlots <= max_slots) {
timeout_counter = 0;
return; /* Return ASAP: we are again under the limit. */
} else {
break; /* Jump to next random position. */
}
}
} while(effort > 0);
}
timeout_counter++;
}
/* This is just used in order to access the amount of used slots in the
* tracking table. */
unsigned long long trackingGetUsedSlots(void) {
return TrackingTableUsedSlots;
}
@@ -294,6 +294,26 @@ size_t zmalloc_get_rss(void) {
return t_info.resident_size;
}
#elif defined(__FreeBSD__)
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <unistd.h>
size_t zmalloc_get_rss(void) {
struct kinfo_proc info;
size_t infolen = sizeof(info);
int mib[4];
mib[0] = CTL_KERN;
mib[1] = KERN_PROC;
mib[2] = KERN_PROC_PID;
mib[3] = getpid();
if (sysctl(mib, 4, &info, &infolen, NULL, 0) == 0)
/* ki_rssize is reported in pages; multiply by the page size for bytes. */
return (size_t)info.ki_rssize * getpagesize();
return 0L;
}
#else
size_t zmalloc_get_rss(void) {
/* If we can't get the RSS in an OS-specific way for this system just
......
@@ -115,3 +115,17 @@ start_server_and_kill_it [list "dir" $server_path] {
}
}
}
start_server {} {
test {Test FLUSHALL aborts bgsave} {
r config set rdb-key-save-delay 1000
r debug populate 1000
r bgsave
assert_equal [s rdb_bgsave_in_progress] 1
r flushall
after 200
assert_equal [s rdb_bgsave_in_progress] 0
# make sure the server is still writable
r set x xx
}
}
\ No newline at end of file
proc start_bg_complex_data {host port db ops} {
set tclsh [info nameofexecutable]
exec $tclsh tests/helpers/bg_complex_data.tcl $host $port $db $ops &
}
proc stop_bg_complex_data {handle} {
catch {exec /bin/kill -9 $handle}
}
start_server {tags {"repl"}} {
start_server {} {
......
# Creates a master-slave pair and breaks the link continuously to force
# partial resync attempts, all this while flooding the master with
# write queries.
@@ -17,7 +8,7 @@ proc stop_bg_complex_data {handle} {
# If reconnect is > 0, the test actually tries to break the connection and
# reconnect with the master, otherwise just the initial synchronization is
# checked for consistency.
proc test_psync {descr duration backlog_size backlog_ttl delay cond mdl sdl reconnect} {
start_server {tags {"repl"}} {
start_server {} {
@@ -28,8 +19,9 @@ proc test_psync {descr duration backlog_size backlog_ttl delay cond diskless rec
$master config set repl-backlog-size $backlog_size
$master config set repl-backlog-ttl $backlog_ttl
$master config set repl-diskless-sync $mdl
$master config set repl-diskless-sync-delay 1
$slave config set repl-diskless-load $sdl
set load_handle0 [start_bg_complex_data $master_host $master_port 9 100000]
set load_handle1 [start_bg_complex_data $master_host $master_port 11 100000]
@@ -54,7 +46,7 @@ proc test_psync {descr duration backlog_size backlog_ttl delay cond diskless rec
}
}
test "Test replication partial resync: $descr (diskless: $diskless, reconnect: $reconnect)" {
test "Test replication partial resync: $descr (diskless: $mdl, $sdl, reconnect: $reconnect)" {
# Now while the clients are writing data, break the master-slave
# link multiple times.
if ($reconnect) {
@@ -132,23 +124,25 @@ proc test_psync {descr duration backlog_size backlog_ttl delay cond diskless rec
}
}
foreach mdl {no yes} {
foreach sdl {disabled swapdb} {
test_psync {no reconnection, just sync} 6 1000000 3600 0 {
} $mdl $sdl 0
test_psync {ok psync} 6 100000000 3600 0 {
assert {[s -1 sync_partial_ok] > 0}
} $mdl $sdl 1
test_psync {no backlog} 6 100 3600 0.5 {
assert {[s -1 sync_partial_err] > 0}
} $mdl $sdl 1
test_psync {ok after delay} 3 100000000 3600 3 {
assert {[s -1 sync_partial_ok] > 0}
} $mdl $sdl 1
test_psync {backlog expired} 3 100000000 1 3 {
assert {[s -1 sync_partial_err] > 0}
} $mdl $sdl 1
}
}
@@ -183,26 +183,34 @@ start_server {tags {"repl"}} {
}
}
foreach mdl {no yes} {
foreach sdl {disabled swapdb} {
start_server {tags {"repl"}} {
set master [srv 0 client]
$master config set repl-diskless-sync $mdl
$master config set repl-diskless-sync-delay 1
set master_host [srv 0 host]
set master_port [srv 0 port]
set slaves {}
start_server {} {
lappend slaves [srv 0 client]
start_server {} {
lappend slaves [srv 0 client]
start_server {} {
lappend slaves [srv 0 client]
test "Connect multiple replicas at the same time (issue #141), diskless=$dl" {
test "Connect multiple replicas at the same time (issue #141), master diskless=$mdl, replica diskless=$sdl" {
# start load handles only inside the test, so that the test can be skipped
set load_handle0 [start_bg_complex_data $master_host $master_port 9 100000000]
set load_handle1 [start_bg_complex_data $master_host $master_port 11 100000000]
set load_handle2 [start_bg_complex_data $master_host $master_port 12 100000000]
set load_handle3 [start_write_load $master_host $master_port 8]
set load_handle4 [start_write_load $master_host $master_port 4]
after 5000 ;# wait for some data to accumulate so that we have RDB part for the fork
# Send SLAVEOF commands to slaves
[lindex $slaves 0] config set repl-diskless-load $sdl
[lindex $slaves 1] config set repl-diskless-load $sdl
[lindex $slaves 2] config set repl-diskless-load $sdl
[lindex $slaves 0] slaveof $master_host $master_port
[lindex $slaves 1] slaveof $master_host $master_port
[lindex $slaves 2] slaveof $master_host $master_port
@@ -220,7 +228,7 @@ foreach dl {no yes} {
}
}
if {$retry == 0} {
error "assertion:Replicas not correctly synchronized"
error "assertion:Slaves not correctly synchronized"
}
# Wait that slaves acknowledge they are online so
@@ -231,13 +239,13 @@ foreach dl {no yes} {
[lindex [[lindex $slaves 1] role] 3] eq {connected} &&
[lindex [[lindex $slaves 2] role] 3] eq {connected}
} else {
fail "Replicas still not connected after some time"
fail "Slaves still not connected after some time"
}
# Stop the write load
stop_bg_complex_data $load_handle0
stop_bg_complex_data $load_handle1
stop_bg_complex_data $load_handle2
stop_write_load $load_handle3
stop_write_load $load_handle4
@@ -248,7 +256,7 @@ foreach dl {no yes} {
[$master dbsize] == [[lindex $slaves 1] dbsize] &&
[$master dbsize] == [[lindex $slaves 2] dbsize]
} else {
fail "Different number of keys between masted and replica after too long time."
fail "Different number of keys between master and replica after too long time."
}
# Check digests
@@ -265,15 +273,16 @@ foreach dl {no yes} {
}
}
}
}
}
start_server {tags {"repl"}} {
set master [srv 0 client]
set master_host [srv 0 host]
set master_port [srv 0 port]
start_server {} {
test "Master stream is correctly processed while the replica has a script in -BUSY state" {
set load_handle0 [start_write_load $master_host $master_port 3]
set slave [srv 0 client]
$slave config set lua-time-limit 500
$slave slaveof $master_host $master_port
@@ -309,3 +318,151 @@ start_server {tags {"repl"}} {
}
}
}
test {slave fails full sync and diskless load swapdb recovers it} {
start_server {tags {"repl"}} {
set slave [srv 0 client]
set slave_host [srv 0 host]
set slave_port [srv 0 port]
set slave_log [srv 0 stdout]
start_server {} {
set master [srv 0 client]
set master_host [srv 0 host]
set master_port [srv 0 port]
# Put different data sets on the master and slave
# we need to put large keys on the master since the slave replies to INFO only once per 2mb read
$slave debug populate 2000 slave 10
$master debug populate 200 master 100000
$master config set rdbcompression no
# Set master and slave to use diskless replication
$master config set repl-diskless-sync yes
$master config set repl-diskless-sync-delay 0
$slave config set repl-diskless-load swapdb
# Set master with a slow rdb generation, so that we can easily disconnect it mid sync
# 10ms per key, with 200 keys is 2 seconds
$master config set rdb-key-save-delay 10000
# Start the replication process...
$slave slaveof $master_host $master_port
# wait for the slave to start reading the rdb
wait_for_condition 50 100 {
[s -1 loading] eq 1
} else {
fail "Replica didn't get into loading mode"
}
# make sure that the next sync will not start immediately so that we can catch the slave in between syncs
$master config set repl-diskless-sync-delay 5
# for faster server shutdown, make rdb saving fast again (the fork already uses the slow one)
$master config set rdb-key-save-delay 0
# wait for the slave to do flushdb (key count drop)
wait_for_condition 50 100 {
2000 != [scan [regexp -inline {keys\=([\d]*)} [$slave info keyspace]] keys=%d]
} else {
fail "Replica didn't flush"
}
# make sure we're still loading
assert_equal [s -1 loading] 1
# kill the slave connection on the master
set killed [$master client kill type slave]
# wait for loading to stop (fail)
wait_for_condition 50 100 {
[s -1 loading] eq 0
} else {
fail "Replica didn't disconnect"
}
# make sure the original keys were restored
assert_equal [$slave dbsize] 2000
}
}
}
test {diskless loading short read} {
start_server {tags {"repl"}} {
set replica [srv 0 client]
set replica_host [srv 0 host]
set replica_port [srv 0 port]
start_server {} {
set master [srv 0 client]
set master_host [srv 0 host]
set master_port [srv 0 port]
# Set master and replica to use diskless replication
$master config set repl-diskless-sync yes
$master config set rdbcompression no
$replica config set repl-diskless-load swapdb
# Try to fill the master with all types of data types / encodings
for {set k 0} {$k < 3} {incr k} {
for {set i 0} {$i < 10} {incr i} {
r set "$k int_$i" [expr {int(rand()*10000)}]
r expire "$k int_$i" [expr {int(rand()*10000)}]
r set "$k string_$i" [string repeat A [expr {int(rand()*1000000)}]]
r hset "$k hash_small" [string repeat A [expr {int(rand()*10)}]] 0[string repeat A [expr {int(rand()*10)}]]
r hset "$k hash_large" [string repeat A [expr {int(rand()*10000)}]] [string repeat A [expr {int(rand()*1000000)}]]
r sadd "$k set_small" [string repeat A [expr {int(rand()*10)}]]
r sadd "$k set_large" [string repeat A [expr {int(rand()*1000000)}]]
r zadd "$k zset_small" [expr {rand()}] [string repeat A [expr {int(rand()*10)}]]
r zadd "$k zset_large" [expr {rand()}] [string repeat A [expr {int(rand()*1000000)}]]
r lpush "$k list_small" [string repeat A [expr {int(rand()*10)}]]
r lpush "$k list_large" [string repeat A [expr {int(rand()*1000000)}]]
for {set j 0} {$j < 10} {incr j} {
r xadd "$k stream" * foo "asdf" bar "1234"
}
r xgroup create "$k stream" "mygroup_$i" 0
r xreadgroup GROUP "mygroup_$i" Alice COUNT 1 STREAMS "$k stream" >
}
}
# Start the replication process...
$master config set repl-diskless-sync-delay 0
$replica replicaof $master_host $master_port
# kill the replication at various points
set attempts 3
if {$::accurate} { set attempts 10 }
for {set i 0} {$i < $attempts} {incr i} {
# wait for the replica to start reading the rdb
# using the log file since the replica only responds to INFO once per 2mb read
wait_for_log_message -1 "*Loading DB in memory*" 5 2000 1
# add some additional random sleep so that we kill the master at a different place each time
after [expr {int(rand()*100)}]
# kill the replica connection on the master
set killed [$master client kill type replica]
if {[catch {
set res [wait_for_log_message -1 "*Internal error in RDB*" 5 100 10]
if {$::verbose} {
puts $res
}
}]} {
puts "failed triggering short read"
# force the replica to try another full sync
$master client kill type replica
$master set asdf asdf
# the side effect of resizing the backlog is that it is flushed (16k is the min size)
$master config set repl-backlog-size [expr {16384 + $i}]
}
# wait for loading to stop (fail)
wait_for_condition 100 10 {
[s -1 loading] eq 0
} else {
fail "Replica didn't disconnect"
}
}
# enable fast shutdown
$master config set rdb-key-save-delay 0
}
}
}
@@ -13,12 +13,28 @@ endif
.SUFFIXES: .c .so .xo .o
all: commandfilter.so testrdb.so fork.so infotest.so propagate.so
.c.xo:
$(CC) -I../../src $(CFLAGS) $(SHOBJ_CFLAGS) -fPIC -c $< -o $@
commandfilter.xo: ../../src/redismodule.h
fork.xo: ../../src/redismodule.h
testrdb.xo: ../../src/redismodule.h
infotest.xo: ../../src/redismodule.h
propagate.xo: ../../src/redismodule.h
commandfilter.so: commandfilter.xo
$(LD) -o $@ $< $(SHOBJ_LDFLAGS) $(LIBS) -lc
fork.so: fork.xo
$(LD) -o $@ $< $(SHOBJ_LDFLAGS) $(LIBS) -lc
testrdb.so: testrdb.xo
$(LD) -o $@ $< $(SHOBJ_LDFLAGS) $(LIBS) -lc
infotest.so: infotest.xo
$(LD) -o $@ $< $(SHOBJ_LDFLAGS) $(LIBS) -lc
propagate.so: propagate.xo
$(LD) -o $@ $< $(SHOBJ_LDFLAGS) $(LIBS) -lc
#define REDISMODULE_EXPERIMENTAL_API
#include "redismodule.h"
#include <string.h>
#include <assert.h>
#include <unistd.h>
#define UNUSED(V) ((void) V)
int child_pid = -1;
int exited_with_code = -1;
void done_handler(int exitcode, int bysignal, void *user_data) {
child_pid = -1;
exited_with_code = exitcode;
assert(user_data==(void*)0xdeadbeef);
UNUSED(bysignal);
}
int fork_create(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
{
long long code_to_exit_with;
if (argc != 2) {
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
RedisModule_StringToLongLong(argv[1], &code_to_exit_with);
exited_with_code = -1;
child_pid = RedisModule_Fork(done_handler, (void*)0xdeadbeef);
if (child_pid < 0) {
RedisModule_ReplyWithError(ctx, "Fork failed");
return REDISMODULE_OK;
} else if (child_pid > 0) {
/* parent */
RedisModule_ReplyWithLongLong(ctx, child_pid);
return REDISMODULE_OK;
}
/* child */
RedisModule_Log(ctx, "notice", "fork child started");
usleep(200000);
RedisModule_Log(ctx, "notice", "fork child exiting");
RedisModule_ExitFromChild(code_to_exit_with);
/* unreachable */
return 0;
}
int fork_exitcode(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
{
UNUSED(argv);
UNUSED(argc);
RedisModule_ReplyWithLongLong(ctx, exited_with_code);
return REDISMODULE_OK;
}
int fork_kill(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
{
UNUSED(argv);
UNUSED(argc);
if (RedisModule_KillForkChild(child_pid) != REDISMODULE_OK)
RedisModule_ReplyWithError(ctx, "KillForkChild failed");
else
RedisModule_ReplyWithLongLong(ctx, 1);
child_pid = -1;
return REDISMODULE_OK;
}
int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
UNUSED(argv);
UNUSED(argc);
if (RedisModule_Init(ctx,"fork",1,REDISMODULE_APIVER_1)== REDISMODULE_ERR)
return REDISMODULE_ERR;
if (RedisModule_CreateCommand(ctx,"fork.create", fork_create,"",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;
if (RedisModule_CreateCommand(ctx,"fork.exitcode", fork_exitcode,"",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;
if (RedisModule_CreateCommand(ctx,"fork.kill", fork_kill,"",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;
return REDISMODULE_OK;
}
#include "redismodule.h"
#include <string.h>
void InfoFunc(RedisModuleInfoCtx *ctx, int for_crash_report) {
RedisModule_InfoAddSection(ctx, "");
RedisModule_InfoAddFieldLongLong(ctx, "global", -2);
RedisModule_InfoAddSection(ctx, "Spanish");
RedisModule_InfoAddFieldCString(ctx, "uno", "one");
RedisModule_InfoAddFieldLongLong(ctx, "dos", 2);
RedisModule_InfoAddSection(ctx, "Italian");
RedisModule_InfoAddFieldLongLong(ctx, "due", 2);
RedisModule_InfoAddFieldDouble(ctx, "tre", 3.3);
RedisModule_InfoAddSection(ctx, "keyspace");
RedisModule_InfoBeginDictField(ctx, "db0");
RedisModule_InfoAddFieldLongLong(ctx, "keys", 3);
RedisModule_InfoAddFieldLongLong(ctx, "expires", 1);
RedisModule_InfoEndDictField(ctx);
if (for_crash_report) {
RedisModule_InfoAddSection(ctx, "Klingon");
RedisModule_InfoAddFieldCString(ctx, "one", "wa’");
RedisModule_InfoAddFieldCString(ctx, "two", "cha’");
RedisModule_InfoAddFieldCString(ctx, "three", "wej");
}
}
int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
REDISMODULE_NOT_USED(argc);
if (RedisModule_Init(ctx,"infotest",1,REDISMODULE_APIVER_1)
== REDISMODULE_ERR) return REDISMODULE_ERR;
if (RedisModule_RegisterInfoFunc(ctx, InfoFunc) == REDISMODULE_ERR) return REDISMODULE_ERR;
return REDISMODULE_OK;
}
/* This module is used to test the propagation (replication + AOF) of
* commands, via the RedisModule_Replicate() interface, in asynchronous
* contexts, such as callbacks not implementing commands, and thread safe
* contexts.
*
* We create a timer callback and a threads using a thread safe context.
* Using both we try to propagate counters increments, and later we check
* if the replica contains the changes as expected.
*
* -----------------------------------------------------------------------------
*
* Copyright (c) 2019, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#define REDISMODULE_EXPERIMENTAL_API
#include "redismodule.h"
#include <pthread.h>
/* Timer callback. */
void timerHandler(RedisModuleCtx *ctx, void *data) {
REDISMODULE_NOT_USED(data);
static int times = 0;
RedisModule_Replicate(ctx,"INCR","c","timer");
times++;
if (times < 10)
RedisModule_CreateTimer(ctx,100,timerHandler,NULL);
else
times = 0;
}
/* The thread entry point. */
void *threadMain(void *arg) {
REDISMODULE_NOT_USED(arg);
RedisModuleCtx *ctx = RedisModule_GetThreadSafeContext(NULL);
RedisModule_SelectDb(ctx,9); /* Tests run in database number 9. */
for (int i = 0; i < 10; i++) {
RedisModule_ThreadSafeContextLock(ctx);
RedisModule_Replicate(ctx,"INCR","c","thread");
RedisModule_ThreadSafeContextUnlock(ctx);
}
RedisModule_FreeThreadSafeContext(ctx);
return NULL;
}
int propagateTestCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
{
REDISMODULE_NOT_USED(argv);
REDISMODULE_NOT_USED(argc);
RedisModuleTimerID timer_id =
RedisModule_CreateTimer(ctx,100,timerHandler,NULL);
REDISMODULE_NOT_USED(timer_id);
pthread_t tid;
if (pthread_create(&tid,NULL,threadMain,NULL) != 0)
return RedisModule_ReplyWithError(ctx,"-ERR Can't start thread");
REDISMODULE_NOT_USED(tid);
RedisModule_ReplyWithSimpleString(ctx,"OK");
return REDISMODULE_OK;
}
int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
REDISMODULE_NOT_USED(argc);
if (RedisModule_Init(ctx,"propagate-test",1,REDISMODULE_APIVER_1)
== REDISMODULE_ERR) return REDISMODULE_ERR;
if (RedisModule_CreateCommand(ctx,"propagate-test",
propagateTestCommand,
"",1,1,1) == REDISMODULE_ERR)
return REDISMODULE_ERR;
return REDISMODULE_OK;
}
#include "redismodule.h"
#include <string.h>
#include <assert.h>
/* Module configuration: whether and when to save aux data
* (0: no aux, 1: after RDB only, 2: both before and after). */
long long conf_aux_count = 0;
/* Registered type */
RedisModuleType *testrdb_type = NULL;
/* Global values to store and persist to aux */
RedisModuleString *before_str = NULL;
RedisModuleString *after_str = NULL;
void *testrdb_type_load(RedisModuleIO *rdb, int encver) {
int count = RedisModule_LoadSigned(rdb);
if (RedisModule_IsIOError(rdb))
return NULL;
assert(count==1);
assert(encver==1);
RedisModuleString *str = RedisModule_LoadString(rdb);
return str;
}
void testrdb_type_save(RedisModuleIO *rdb, void *value) {
RedisModuleString *str = (RedisModuleString*)value;
RedisModule_SaveSigned(rdb, 1);
RedisModule_SaveString(rdb, str);
}
void testrdb_aux_save(RedisModuleIO *rdb, int when) {
if (conf_aux_count==1) assert(when == REDISMODULE_AUX_AFTER_RDB);
if (conf_aux_count==0) assert(0);
if (when == REDISMODULE_AUX_BEFORE_RDB) {
if (before_str) {
RedisModule_SaveSigned(rdb, 1);
RedisModule_SaveString(rdb, before_str);
} else {
RedisModule_SaveSigned(rdb, 0);
}
} else {
if (after_str) {
RedisModule_SaveSigned(rdb, 1);
RedisModule_SaveString(rdb, after_str);
} else {
RedisModule_SaveSigned(rdb, 0);
}
}
}
int testrdb_aux_load(RedisModuleIO *rdb, int encver, int when) {
assert(encver == 1);
if (conf_aux_count==1) assert(when == REDISMODULE_AUX_AFTER_RDB);
if (conf_aux_count==0) assert(0);
RedisModuleCtx *ctx = RedisModule_GetContextFromIO(rdb);
if (when == REDISMODULE_AUX_BEFORE_RDB) {
if (before_str)
RedisModule_FreeString(ctx, before_str);
before_str = NULL;
int count = RedisModule_LoadSigned(rdb);
if (RedisModule_IsIOError(rdb))
return REDISMODULE_ERR;
if (count)
before_str = RedisModule_LoadString(rdb);
} else {
if (after_str)
RedisModule_FreeString(ctx, after_str);
after_str = NULL;
int count = RedisModule_LoadSigned(rdb);
if (RedisModule_IsIOError(rdb))
return REDISMODULE_ERR;
if (count)
after_str = RedisModule_LoadString(rdb);
}
if (RedisModule_IsIOError(rdb))
return REDISMODULE_ERR;
return REDISMODULE_OK;
}
void testrdb_type_free(void *value) {
if (value)
RedisModule_FreeString(NULL, (RedisModuleString*)value);
}
int testrdb_set_before(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
{
if (argc != 2) {
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
if (before_str)
RedisModule_FreeString(ctx, before_str);
before_str = argv[1];
RedisModule_RetainString(ctx, argv[1]);
RedisModule_ReplyWithLongLong(ctx, 1);
return REDISMODULE_OK;
}
int testrdb_get_before(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
{
REDISMODULE_NOT_USED(argv);
if (argc != 1){
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
if (before_str)
RedisModule_ReplyWithString(ctx, before_str);
else
RedisModule_ReplyWithStringBuffer(ctx, "", 0);
return REDISMODULE_OK;
}
int testrdb_set_after(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
{
if (argc != 2){
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
if (after_str)
RedisModule_FreeString(ctx, after_str);
after_str = argv[1];
RedisModule_RetainString(ctx, argv[1]);
RedisModule_ReplyWithLongLong(ctx, 1);
return REDISMODULE_OK;
}
int testrdb_get_after(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
{
REDISMODULE_NOT_USED(argv);
if (argc != 1){
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
if (after_str)
RedisModule_ReplyWithString(ctx, after_str);
else
RedisModule_ReplyWithStringBuffer(ctx, "", 0);
return REDISMODULE_OK;
}
int testrdb_set_key(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
{
if (argc != 3){
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
RedisModuleString *str = RedisModule_ModuleTypeGetValue(key);
if (str)
RedisModule_FreeString(ctx, str);
RedisModule_ModuleTypeSetValue(key, testrdb_type, argv[2]);
RedisModule_RetainString(ctx, argv[2]);
RedisModule_CloseKey(key);
RedisModule_ReplyWithLongLong(ctx, 1);
return REDISMODULE_OK;
}
int testrdb_get_key(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
{
if (argc != 2){
RedisModule_WrongArity(ctx);
return REDISMODULE_OK;
}
RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
RedisModuleString *str = RedisModule_ModuleTypeGetValue(key);
RedisModule_CloseKey(key);
RedisModule_ReplyWithString(ctx, str);
return REDISMODULE_OK;
}
int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
REDISMODULE_NOT_USED(argv);
REDISMODULE_NOT_USED(argc);
if (RedisModule_Init(ctx,"testrdb",1,REDISMODULE_APIVER_1) == REDISMODULE_ERR)
return REDISMODULE_ERR;
RedisModule_SetModuleOptions(ctx, REDISMODULE_OPTIONS_HANDLE_IO_ERRORS);
if (argc > 0)
RedisModule_StringToLongLong(argv[0], &conf_aux_count);
if (conf_aux_count==0) {
RedisModuleTypeMethods datatype_methods = {
.version = 1,
.rdb_load = testrdb_type_load,
.rdb_save = testrdb_type_save,
.aof_rewrite = NULL,
.digest = NULL,
.free = testrdb_type_free,
};
testrdb_type = RedisModule_CreateDataType(ctx, "test__rdb", 1, &datatype_methods);
if (testrdb_type == NULL)
return REDISMODULE_ERR;
} else {
RedisModuleTypeMethods datatype_methods = {
.version = REDISMODULE_TYPE_METHOD_VERSION,
.rdb_load = testrdb_type_load,
.rdb_save = testrdb_type_save,
.aof_rewrite = NULL,
.digest = NULL,
.free = testrdb_type_free,
.aux_load = testrdb_aux_load,
.aux_save = testrdb_aux_save,
.aux_save_triggers = (conf_aux_count == 1 ?
REDISMODULE_AUX_AFTER_RDB :
REDISMODULE_AUX_BEFORE_RDB | REDISMODULE_AUX_AFTER_RDB)
};
testrdb_type = RedisModule_CreateDataType(ctx, "test__rdb", 1, &datatype_methods);
if (testrdb_type == NULL)
return REDISMODULE_ERR;
}
if (RedisModule_CreateCommand(ctx,"testrdb.set.before", testrdb_set_before,"deny-oom",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;
if (RedisModule_CreateCommand(ctx,"testrdb.get.before", testrdb_get_before,"",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;
if (RedisModule_CreateCommand(ctx,"testrdb.set.after", testrdb_set_after,"deny-oom",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;
if (RedisModule_CreateCommand(ctx,"testrdb.get.after", testrdb_get_after,"",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;
if (RedisModule_CreateCommand(ctx,"testrdb.set.key", testrdb_set_key,"deny-oom",1,1,1) == REDISMODULE_ERR)
return REDISMODULE_ERR;
if (RedisModule_CreateCommand(ctx,"testrdb.get.key", testrdb_get_key,"",1,1,1) == REDISMODULE_ERR)
return REDISMODULE_ERR;
return REDISMODULE_OK;
}
@@ -15,6 +15,12 @@ proc assert {condition} {
}
}
proc assert_no_match {pattern value} {
if {[string match $pattern $value]} {
error "assertion:Expected '$value' to not match '$pattern'"
}
}
proc assert_match {pattern value} {
if {![string match $pattern $value]} {
error "assertion:Expected '$value' to match '$pattern'"
......
@@ -99,6 +99,25 @@ proc wait_for_ofs_sync {r1 r2} {
}
}
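# Wait until a line matching 'pattern' appears among the last 'last_lines'
# lines of the log of server 'srv_idx', retrying up to 'maxtries' times
# with 'delay' milliseconds between attempts. Returns the matching line,
# or fails the test if no match is found in time.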
proc wait_for_log_message {srv_idx pattern last_lines maxtries delay} {
set retry $maxtries
set stdout [srv $srv_idx stdout]
while {$retry} {
set result [exec tail -$last_lines < $stdout]
set result [split $result "\n"]
foreach line $result {
if {[string match $pattern $line]} {
return $line
}
}
incr retry -1
after $delay
}
if {$retry == 0} {
fail "log message of '$pattern' not found"
}
}
# Random integer between 0 and max (excluded).
proc randomInt {max} {
expr {int(rand()*$max)}
@@ -399,3 +418,15 @@ proc lshuffle {list} {
}
return $slist
}
# Execute a background process writing complex data for the specified number
# of ops to the specified Redis instance.
proc start_bg_complex_data {host port db ops} {
set tclsh [info nameofexecutable]
exec $tclsh tests/helpers/bg_complex_data.tcl $host $port $db $ops &
}
# Stop a process generating write load executed with start_bg_complex_data.
proc stop_bg_complex_data {handle} {
catch {exec /bin/kill -9 $handle}
}
@@ -35,6 +35,32 @@ start_server {tags {"acl"}} {
set e
} {*WRONGPASS*}
test {Test password hashes can be added} {
r ACL setuser newuser #34344e4d60c2b6d639b7bd22e18f2b0b91bc34bf0ac5f9952744435093cfb4e6
catch {r AUTH newuser passwd4} e
assert {$e eq "OK"}
}
test {Test password hashes validate input} {
# Validate Length
catch {r ACL setuser newuser #34344e4d60c2b6d639b7bd22e18f2b0b91bc34bf0ac5f9952744435093cfb4e} e
# Validate character outside set
catch {r ACL setuser newuser #34344e4d60c2b6d639b7bd22e18f2b0b91bc34bf0ac5f9952744435093cfb4eq} e
set e
} {*Error in ACL SETUSER modifier*}
test {ACL GETUSER returns the password hash instead of the actual password} {
set passstr [dict get [r ACL getuser newuser] passwords]
assert_match {*34344e4d60c2b6d639b7bd22e18f2b0b91bc34bf0ac5f9952744435093cfb4e6*} $passstr
assert_no_match {*passwd4*} $passstr
}
test {Test hashed passwords removal} {
r ACL setuser newuser !34344e4d60c2b6d639b7bd22e18f2b0b91bc34bf0ac5f9952744435093cfb4e6
set passstr [dict get [r ACL getuser newuser] passwords]
assert_no_match {*34344e4d60c2b6d639b7bd22e18f2b0b91bc34bf0ac5f9952744435093cfb4e6*} $passstr
}
test {By default users are not able to access any command} {
catch {r SET foo bar} e
set e
@@ -67,7 +93,7 @@ start_server {tags {"acl"}} {
set e
} {*NOPERM*}
test {ACLs can include or exclude whole classes of commands} {
r ACL setuser newuser -@all +@set +acl
r SADD myset a b c; # Should not raise an error
r ACL setuser newuser +@all -@string
......
@@ -61,6 +61,7 @@ set regression_vectors {
{939895 151 59.149620271823181 65.204186651485145}
{1412 156 149.29737817929004 15.95807862745508}
{564862 149 84.062063109158544 -65.685403922426232}
{1546032440391 16751 -1.8175081637769495 20.665668878082954}
}
set rv_idx 0
@@ -274,8 +275,19 @@ start_server {tags {"geo"}} {
foreach place $diff {
set mydist [geo_distance $lon $lat $search_lon $search_lat]
set mydist [expr $mydist/1000]
if {($mydist / $radius_km) > 0.999} {
incr rounding_errors
continue
}
if {$mydist < $radius_m} {
# This is a false positive for Redis since, given the
# same points, the higher precision calculation provided
# by Tcl shows the point to be within range.
incr rounding_errors
continue
}
}
# Make sure this is a real error and not a rounding issue.
if {[llength $diff] == $rounding_errors} {
set res $res2; # Error silenced
......
set testmodule [file normalize tests/modules/fork.so]
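# Return the number of lines in the server log that match 'pattern'.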
proc count_log_message {pattern} {
set result [exec grep -c $pattern < [srv 0 stdout]]
}
start_server {tags {"modules"}} {
r module load $testmodule
test {Module fork} {
# the argument to fork.create is the exitcode on termination
r fork.create 3
wait_for_condition 20 100 {
[r fork.exitcode] != -1
} else {
fail "fork didn't terminate"
}
r fork.exitcode
} {3}
test {Module fork kill} {
r fork.create 3
after 20
r fork.kill
after 100
assert {[count_log_message "fork child started"] eq "2"}
assert {[count_log_message "Received SIGUSR1 in child"] eq "1"}
assert {[count_log_message "fork child exiting"] eq "1"}
}
}
set testmodule [file normalize tests/modules/infotest.so]
# Return value for INFO property
proc field {info property} {
if {[regexp "\r\n$property:(.*?)\r\n" $info _ value]} {
set _ $value
}
}
start_server {tags {"modules"}} {
r module load $testmodule log-key 0
test {module info all} {
set info [r info all]
# info all does not contain modules
assert { ![string match "*Spanish*" $info] }
assert { ![string match "*infotest_*" $info] }
assert { [string match "*used_memory*" $info] }
}
test {module info everything} {
set info [r info everything]
# info everything contains all default sections, but not ones for crash report
assert { [string match "*infotest_global*" $info] }
assert { [string match "*Spanish*" $info] }
assert { [string match "*Italian*" $info] }
assert { [string match "*used_memory*" $info] }
assert { ![string match "*Klingon*" $info] }
field $info infotest_dos
} {2}
test {module info modules} {
set info [r info modules]
# info modules contains only the module sections
assert { [string match "*Spanish*" $info] }
assert { [string match "*infotest_global*" $info] }
assert { ![string match "*used_memory*" $info] }
}
test {module info one module} {
set info [r info INFOTEST]
# info for a specific module contains only that module's fields
assert { [string match "*Spanish*" $info] }
assert { ![string match "*used_memory*" $info] }
field $info infotest_global
} {-2}
test {module info one section} {
set info [r info INFOTEST_SPANISH]
assert { ![string match "*used_memory*" $info] }
assert { ![string match "*Italian*" $info] }
assert { ![string match "*infotest_global*" $info] }
field $info infotest_uno
} {one}
test {module info dict} {
set info [r info infotest_keyspace]
set keyspace [field $info infotest_db0]
set keys [scan [regexp -inline {keys\=([\d]*)} $keyspace] keys=%d]
} {3}
# TODO: test crash report.
}
set testmodule [file normalize tests/modules/propagate.so]
tags "modules" {
test {Modules can propagate in async and threaded contexts} {
start_server {} {
set replica [srv 0 client]
set replica_host [srv 0 host]
set replica_port [srv 0 port]
start_server [list overrides [list loadmodule "$testmodule"]] {
set master [srv 0 client]
set master_host [srv 0 host]
set master_port [srv 0 port]
# Start the replication process...
$replica replicaof $master_host $master_port
wait_for_sync $replica
after 1000
$master propagate-test
wait_for_condition 5000 10 {
([$replica get timer] eq "10") && \
([$replica get thread] eq "10")
} else {
fail "The two counters don't match the expected value."
}
}
}
}
}
set testmodule [file normalize tests/modules/testrdb.so]
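# Restart the server via DEBUG RESTART and wait (up to ~5 seconds) until it
# responds to PING again.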
proc restart_and_wait {} {
catch {
r debug restart
}
# wait for the server to come back up
set retry 50
while {$retry} {
if {[catch { r ping }]} {
after 100
} else {
break
}
incr retry -1
}
}
tags "modules" {
start_server [list overrides [list loadmodule "$testmodule"]] {
test {modules are able to persist types} {
r testrdb.set.key key1 value1
assert_equal "value1" [r testrdb.get.key key1]
r debug reload
assert_equal "value1" [r testrdb.get.key key1]
}
test {module globals are lost without aux} {
r testrdb.set.before global1
assert_equal "global1" [r testrdb.get.before]
restart_and_wait
assert_equal "" [r testrdb.get.before]
}
}
start_server [list overrides [list loadmodule "$testmodule 2"]] {
test {modules are able to persist globals before and after} {
r testrdb.set.before global1
r testrdb.set.after global2
assert_equal "global1" [r testrdb.get.before]
assert_equal "global2" [r testrdb.get.after]
restart_and_wait
assert_equal "global1" [r testrdb.get.before]
assert_equal "global2" [r testrdb.get.after]
}
}
start_server [list overrides [list loadmodule "$testmodule 1"]] {
test {modules are able to persist globals just after} {
r testrdb.set.after global2
assert_equal "global2" [r testrdb.get.after]
restart_and_wait
assert_equal "global2" [r testrdb.get.after]
}
}
tags {repl} {
test {diskless loading short read with module} {
start_server [list overrides [list loadmodule "$testmodule"]] {
set replica [srv 0 client]
set replica_host [srv 0 host]
set replica_port [srv 0 port]
start_server [list overrides [list loadmodule "$testmodule"]] {
set master [srv 0 client]
set master_host [srv 0 host]
set master_port [srv 0 port]
# Set master and replica to use diskless replication
$master config set repl-diskless-sync yes
$master config set rdbcompression no
$replica config set repl-diskless-load swapdb
for {set k 0} {$k < 30} {incr k} {
r testrdb.set.key key$k [string repeat A [expr {int(rand()*1000000)}]]
}
# Start the replication process...
$master config set repl-diskless-sync-delay 0
$replica replicaof $master_host $master_port
# kill the replication at various points
set attempts 3
if {$::accurate} { set attempts 10 }
for {set i 0} {$i < $attempts} {incr i} {
# wait for the replica to start reading the rdb
# using the log file since the replica only responds to INFO once per 2mb read
wait_for_log_message -1 "*Loading DB in memory*" 5 2000 1
# add some additional random sleep so that we kill the master on a different place each time
after [expr {int(rand()*100)}]
# kill the replica connection on the master
set killed [$master client kill type replica]
if {[catch {
set res [wait_for_log_message -1 "*Internal error in RDB*" 5 100 10]
if {$::verbose} {
puts $res
}
}]} {
puts "failed triggering short read"
# force the replica to try another full sync
$master client kill type replica
$master set asdf asdf
# the side effect of resizing the backlog is that it is flushed (16k is the min size)
$master config set repl-backlog-size [expr {16384 + $i}]
}
# wait for loading to stop (fail)
wait_for_condition 100 10 {
[s -1 loading] eq 0
} else {
fail "Replica didn't disconnect"
}
}
# enable fast shutdown
$master config set rdb-key-save-delay 0
}
}
}
}
}
@@ -306,4 +306,18 @@ start_server {tags {"multi"}} {
}
close_replication_stream $repl
}
test {DISCARD should not fail during OOM} {
set rd [redis_deferring_client]
$rd config set maxmemory 1
assert {[$rd read] eq {OK}}
r multi
catch {r set x 1} e
assert_match {OOM*} $e
r discard
$rd config set maxmemory 0
assert {[$rd read] eq {OK}}
$rd close
r ping
} {PONG}
}