Commit c31055db authored by Oran Agra

Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed

The test creates keys with various encodings, DUMPs them, corrupts the payload,
and RESTOREs it.
It utilizes the recently added use-exit-on-panic config to distinguish between
assertions and segfaults.
If the restore succeeds, it runs random commands on the key to attempt to
trigger a crash.
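
In outline, one fuzz cycle looks roughly like this (a condensed sketch of the Tcl
test added below, not the exact test code; `corrupt_payload`, `string2printable`
and `generate_fuzzy_traffic_on_key` are helpers from the test suite, and `r` is
the suite's client handle):

```tcl
set k [r randomkey]
set dump [corrupt_payload [r dump $k]]
if {[catch {r restore "_$k" 0 $dump REPLACE} err]} {
    # an "ERR ..." reply means the corrupt payload was safely rejected;
    # anything else suggests the server terminated during RESTORE
    if {![string match "ERR*" $err]} {
        puts "server terminated in RESTORE, payload: [string2printable $dump]"
    }
} else {
    # RESTORE accepted the payload: run random commands on the new key,
    # then touch it again to detect a crash caused by the traffic
    generate_fuzzy_traffic_on_key "_$k" 1
    r del "_$k"
}
```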

It runs in two modes, one with deep sanitization enabled and one without.
In the first we don't expect any assertions or segfaults; in the second we
expect assertions, but no segfaults.
We also check for leaks and invalid reads using valgrind, and if we find any
we print the commands that led to the issue.
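
Concretely, each run ends with assertions over per-phase termination counters
kept by the fuzzer; condensed from the checks at the bottom of the test below:

```tcl
# deep-sanitization runs: a corrupt payload must be rejected by RESTORE
# itself, so no termination is tolerated in either phase
assert_equal $stat_terminated_in_restore 0
assert_equal $stat_terminated_in_traffic 0
# in both modes, any termination must come from an assertion (clean exit),
# never from a signal such as SIGSEGV
assert_equal $stat_terminated_by_signal 0
```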

Changes in the code (other than the test):
- Replace a few NPD (null pointer dereference) flows and divisions by zero with
  assertions, so that they don't fail the test (since we set the server to use
  `exit` rather than `abort` on assertion failures).
- Fix quite a lot of flows in rdb.c that could have led to memory leaks in the
  RESTORE command (since it now responds with an error rather than panicking).
- Add a DEBUG flag, SET-SKIP-CHECKSUM-VALIDATION, so that the test doesn't need
  to bother with faking a valid checksum (see the snippet after this list).
- Remove a pile of code in serverLogObjectDebugInfo that is actually unsafe to
  run in the crash report (see comments in the code).
- Fix a missing boundary check in lzf_decompress.
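
For example, the fuzzer below sets these once per server start, so corrupted
DUMP payloads reach the deserialization code instead of being rejected by the
CRC64 footer check (values taken from the test; `r` is the suite's client):

```tcl
# pick the sanitization mode under test and disable checksum validation
# for RDB / RESTORE payloads (1 = skip the check, 0 = validate)
r config set sanitize-dump-payload no
r debug set-skip-checksum-validation 1
```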

Test suite infra improvements:
- Allow running valgrind checks before the process terminates.
- Rotate log files when restarting servers.
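
Both show up in the fuzzer below; a rough sketch using the updated helpers (the
meaning of the boolean arguments is inferred from how the test calls them):

```tcl
# scan the live server's stderr for valgrind errors after each RESTORE,
# without waiting for the process to terminate (the second argument appears
# to say whether termination has already happened)
set valgrind_errors [find_valgrind_errors [srv 0 stderr] false]
if {$valgrind_errors != ""} {
    puts "valgrind found an issue"
}
# restart the server; the new third argument makes restart_server rename
# the old stdout/stderr logs instead of appending to them
restart_server 0 true true
```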
parent 01c13bdd
......@@ -4981,6 +4981,9 @@ int verifyDumpPayload(unsigned char *p, size_t len) {
rdbver = (footer[1] << 8) | footer[0];
if (rdbver > RDB_VERSION) return C_ERR;
if (server.skip_checksum_validation)
return C_OK;
/* Verify CRC64 */
crc = crc64(0,p,len-8);
memrev64ifbe(&crc);
......
......@@ -960,6 +960,7 @@ void scanGenericCommand(client *c, robj *o, unsigned long cursor) {
* value, or skip it if it was not filtered: we only match keys. */
if (o && (o->type == OBJ_ZSET || o->type == OBJ_HASH)) {
node = nextnode;
serverAssert(node); /* assertion for valgrind (avoid NPD) */
nextnode = listNextNode(node);
if (filter) {
kobj = listNodeValue(node);
......
......@@ -403,6 +403,7 @@ void debugCommand(client *c) {
"SDSLEN <key> -- Show low level SDS string info representing key and value.",
"SEGFAULT -- Crash the server with sigsegv.",
"SET-ACTIVE-EXPIRE <0|1> -- Setting it to 0 disables expiring keys in background when they are not accessed (otherwise the Redis behavior). Setting it to 1 reenables back the default.",
"SET-SKIP-CHECKSUM-VALIDATION <0|1> -- Enables or disables checksum checks for rdb or RESTORE payload.",
"AOF-FLUSH-SLEEP <microsec> -- Server will sleep before flushing the AOF, this is used for testing",
"SLEEP <seconds> -- Stop the server for <seconds>. Decimals allowed.",
"STRUCTSIZE -- Return the size of different Redis core C structures.",
......@@ -722,6 +723,11 @@ NULL
{
server.active_expire_enabled = atoi(c->argv[2]->ptr);
addReply(c,shared.ok);
} else if (!strcasecmp(c->argv[1]->ptr,"set-skip-checksum-validation") &&
c->argc == 3)
{
server.skip_checksum_validation = atoi(c->argv[2]->ptr);
addReply(c,shared.ok);
} else if (!strcasecmp(c->argv[1]->ptr,"aof-flush-sleep") &&
c->argc == 3)
{
......@@ -880,6 +886,14 @@ void serverLogObjectDebugInfo(const robj *o) {
serverLog(LL_WARNING,"Object type: %d", o->type);
serverLog(LL_WARNING,"Object encoding: %d", o->encoding);
serverLog(LL_WARNING,"Object refcount: %d", o->refcount);
#if UNSAFE_CRASH_REPORT
/* This code is now disabled. o->ptr may be unreliable to print. in some
* cases a ziplist could have already been freed by realloc, but not yet
* updated to o->ptr. in other cases the call to ziplistLen may need to
* iterate on all the items in the list (and possibly crash again).
* For some cases it may be ok to crash here again, but these could cause
* invalid memory access which will bother valgrind and also possibly cause
* random memory portion to be "leaked" into the logfile. */
if (o->type == OBJ_STRING && sdsEncodedObject(o)) {
serverLog(LL_WARNING,"Object raw string len: %zu", sdslen(o->ptr));
if (sdslen(o->ptr) < 4096) {
......@@ -900,6 +914,7 @@ void serverLogObjectDebugInfo(const robj *o) {
} else if (o->type == OBJ_STREAM) {
serverLog(LL_WARNING,"Stream size: %d", (int) streamLength(o));
}
#endif
}
void _serverAssertPrintObject(const robj *o) {
......
......@@ -34,6 +34,7 @@
#include "intset.h"
#include "zmalloc.h"
#include "endianconv.h"
#include "redisassert.h"
/* Note that these encodings are ordered, so:
* INTSET_ENC_INT16 < INTSET_ENC_INT32 < INTSET_ENC_INT64. */
......@@ -258,7 +259,9 @@ uint8_t intsetFind(intset *is, int64_t value) {
/* Return random member */
int64_t intsetRandom(intset *is) {
return _intsetGet(is,rand()%intrev32ifbe(is->length));
uint32_t len = intrev32ifbe(is->length);
assert(len); /* avoid division by zero on corrupt intset payload. */
return _intsetGet(is,rand()%len);
}
/* Get the value at the given position. When this position is
......
......@@ -536,6 +536,7 @@ unsigned char *lpGet(unsigned char *p, int64_t *count, unsigned char *intbuf) {
int64_t val;
uint64_t uval, negstart, negmax;
assert(p); /* assertion for valgrind (avoid NPD) */
if (LP_ENCODING_IS_7BIT_UINT(p[0])) {
negstart = UINT64_MAX; /* 7 bit ints are always positive. */
negmax = 0;
......
......@@ -65,9 +65,10 @@ lzf_decompress (const void *const in_data, unsigned int in_len,
u8 const *const in_end = ip + in_len;
u8 *const out_end = op + out_len;
do
while (ip < in_end)
{
unsigned int ctrl = *ip++;
unsigned int ctrl;
ctrl = *ip++;
if (ctrl < (1 << 5)) /* literal run */
{
......@@ -182,7 +183,6 @@ lzf_decompress (const void *const in_data, unsigned int in_len,
#endif
}
}
while (ip < in_end);
return op - (u8 *)out_data;
}
......
......@@ -674,6 +674,7 @@ void addReplyLongLong(client *c, long long ll) {
}
void addReplyAggregateLen(client *c, long length, int prefix) {
serverAssert(length >= 0);
if (prefix == '*' && length < OBJ_SHARED_BULKHDR_LEN)
addReply(c,shared.mbulkhdr[length]);
else
......
......@@ -34,6 +34,7 @@
#include "ziplist.h"
#include "util.h" /* for ll2string */
#include "lzf.h"
#include "redisassert.h"
#if defined(REDIS_TEST) || defined(REDIS_TEST_VERBOSE)
#include <stdio.h> /* for printf (debug printing), snprintf (genstr) */
......@@ -1289,7 +1290,8 @@ int quicklistIndex(const quicklist *quicklist, const long long idx,
quicklistDecompressNodeForUse(entry->node);
entry->zi = ziplistIndex(entry->node->zl, entry->offset);
ziplistGet(entry->zi, &entry->value, &entry->sz, &entry->longval);
if (!ziplistGet(entry->zi, &entry->value, &entry->sz, &entry->longval))
assert(0); /* This can happen on corrupt ziplist with fake entry count. */
/* The caller will use our result, so we don't re-compress here.
* The caller can recompress or delete the node as needed. */
return 1;
......
......@@ -399,8 +399,9 @@ void *rdbLoadLzfStringObject(rio *rdb, int flags, size_t *lenptr) {
/* Load the compressed representation and uncompress it to target. */
if (rioRead(rdb,c,clen) == 0) goto err;
if (lzf_decompress(c,clen,val,len) == 0) {
if (lzf_decompress(c,clen,val,len) != len) {
rdbExitReportCorruptRDB("Invalid LZF compressed string");
goto err;
}
zfree(c);
......@@ -504,6 +505,8 @@ void *rdbGenericLoadStringObject(rio *rdb, int flags, size_t *lenptr) {
unsigned long long len;
len = rdbLoadLen(rdb,&isencoded);
if (len == RDB_LENERR) return NULL;
if (isencoded) {
switch(len) {
case RDB_ENC_INT8:
......@@ -518,7 +521,6 @@ void *rdbGenericLoadStringObject(rio *rdb, int flags, size_t *lenptr) {
}
}
if (len == RDB_LENERR) return NULL;
if (plain || sds) {
void *buf = plain ? zmalloc(len) : sdsnewlen(SDS_NOINIT,len);
if (lenptr) *lenptr = len;
......@@ -604,7 +606,7 @@ int rdbLoadDoubleValue(rio *rdb, double *val) {
default:
if (rioRead(rdb,buf,len) == 0) return -1;
buf[len] = '\0';
sscanf(buf, "%lg", val);
if (sscanf(buf, "%lg", val)!=1) return -1;
return 0;
}
}
......@@ -1572,7 +1574,12 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) {
/* This will also be called when the set was just converted
* to a regular hash table encoded set. */
if (o->encoding == OBJ_ENCODING_HT) {
dictAdd((dict*)o->ptr,sdsele,NULL);
if (dictAdd((dict*)o->ptr,sdsele,NULL) != DICT_OK) {
rdbExitReportCorruptRDB("Duplicate set members detected");
decrRefCount(o);
sdsfree(sdsele);
return NULL;
}
} else {
sdsfree(sdsele);
}
......@@ -1693,7 +1700,11 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) {
/* Add pair to hash table */
ret = dictAdd((dict*)o->ptr, field, value);
if (ret == DICT_ERR) {
rdbExitReportCorruptRDB("Duplicate keys detected");
rdbExitReportCorruptRDB("Duplicate hash fields detected");
sdsfree(value);
sdsfree(field);
decrRefCount(o);
return NULL;
}
}
......@@ -1843,6 +1854,9 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) {
if (sdslen(nodekey) != sizeof(streamID)) {
rdbExitReportCorruptRDB("Stream node key entry is not the "
"size of a stream ID");
sdsfree(nodekey);
decrRefCount(o);
return NULL;
}
/* Load the listpack. */
......@@ -1870,14 +1884,22 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) {
* deletion we should remove the radix tree key if the
* resulting listpack is empty. */
rdbExitReportCorruptRDB("Empty listpack inside stream");
sdsfree(nodekey);
decrRefCount(o);
zfree(lp);
return NULL;
}
/* Insert the key in the radix tree. */
int retval = raxInsert(s->rax,
(unsigned char*)nodekey,sizeof(streamID),lp,NULL);
sdsfree(nodekey);
if (!retval)
if (!retval) {
rdbExitReportCorruptRDB("Listpack re-added with existing key");
decrRefCount(o);
zfree(lp);
return NULL;
}
}
/* Load total number of items inside the stream. */
s->length = rdbLoadLen(rdb,NULL);
......@@ -1922,9 +1944,13 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) {
}
streamCG *cgroup = streamCreateCG(s,cgname,sdslen(cgname),&cg_id);
if (cgroup == NULL)
if (cgroup == NULL) {
rdbExitReportCorruptRDB("Duplicated consumer group name %s",
cgname);
decrRefCount(o);
sdsfree(cgname);
return NULL;
}
sdsfree(cgname);
/* Load the global PEL for this consumer group, however we'll
......@@ -1954,9 +1980,13 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) {
streamFreeNACK(nack);
return NULL;
}
if (!raxInsert(cgroup->pel,rawid,sizeof(rawid),nack,NULL))
if (!raxInsert(cgroup->pel,rawid,sizeof(rawid),nack,NULL)) {
rdbExitReportCorruptRDB("Duplicated global PEL entry "
"loading stream consumer group");
decrRefCount(o);
streamFreeNACK(nack);
return NULL;
}
}
/* Now that we loaded our global PEL, we need to load the
......@@ -2003,18 +2033,24 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) {
return NULL;
}
streamNACK *nack = raxFind(cgroup->pel,rawid,sizeof(rawid));
if (nack == raxNotFound)
if (nack == raxNotFound) {
rdbExitReportCorruptRDB("Consumer entry not found in "
"group global PEL");
decrRefCount(o);
return NULL;
}
/* Set the NACK consumer, that was left to NULL when
* loading the global PEL. Then set the same shared
* NACK structure also in the consumer-specific PEL. */
nack->consumer = consumer;
if (!raxInsert(consumer->pel,rawid,sizeof(rawid),nack,NULL))
if (!raxInsert(consumer->pel,rawid,sizeof(rawid),nack,NULL)) {
rdbExitReportCorruptRDB("Duplicated consumer PEL entry "
" loading a stream consumer "
"group");
decrRefCount(o);
return NULL;
}
}
}
}
......@@ -2034,8 +2070,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) {
if (mt == NULL) {
moduleTypeNameByID(name,moduleid);
serverLog(LL_WARNING,"The RDB file contains module data I can't load: no matching module '%s'", name);
exit(1);
rdbExitReportCorruptRDB("The RDB file contains module data I can't load: no matching module '%s'", name);
return NULL;
}
RedisModuleIO io;
robj keyobj;
......@@ -2054,20 +2090,26 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) {
if (io.ver == 2) {
uint64_t eof = rdbLoadLen(rdb,NULL);
if (eof == RDB_LENERR) {
o = createModuleObject(mt,ptr); /* creating just in order to easily destroy */
decrRefCount(o);
if (ptr) {
o = createModuleObject(mt,ptr); /* creating just in order to easily destroy */
decrRefCount(o);
}
return NULL;
}
if (eof != RDB_MODULE_OPCODE_EOF) {
serverLog(LL_WARNING,"The RDB file contains module data for the module '%s' that is not terminated by the proper module value EOF marker", name);
exit(1);
rdbExitReportCorruptRDB("The RDB file contains module data for the module '%s' that is not terminated by the proper module value EOF marker", name);
if (ptr) {
o = createModuleObject(mt,ptr); /* creating just in order to easily destroy */
decrRefCount(o);
}
return NULL;
}
}
if (ptr == NULL) {
moduleTypeNameByID(name,moduleid);
serverLog(LL_WARNING,"The RDB file contains module data for the module type '%s', that the responsible module is not able to load. Check for modules log above for additional clues.", name);
exit(1);
rdbExitReportCorruptRDB("The RDB file contains module data for the module type '%s', that the responsible module is not able to load. Check for modules log above for additional clues.", name);
return NULL;
}
o = createModuleObject(mt,ptr);
} else {
......@@ -2441,7 +2483,7 @@ int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) {
uint64_t cksum, expected = rdb->cksum;
if (rioRead(rdb,&cksum,8) == 0) goto eoferr;
if (server.rdb_checksum) {
if (server.rdb_checksum && !server.skip_checksum_validation) {
memrev64ifbe(&cksum);
if (cksum == 0) {
serverLog(LL_WARNING,"RDB file was saved with checksum disabled: no check performed.");
......
......@@ -2506,6 +2506,7 @@ void initServerConfig(void) {
server.tlsfd_count = 0;
server.sofd = -1;
server.active_expire_enabled = 1;
server.skip_checksum_validation = 0;
server.client_max_querybuf_len = PROTO_MAX_QUERYBUF_LEN;
server.saveparams = NULL;
server.loading = 0;
......
......@@ -1245,6 +1245,7 @@ struct redisServer {
int active_expire_effort; /* From 1 (default) to 10, active effort. */
int active_defrag_enabled;
int sanitize_dump_payload; /* Enables deep sanitization for ziplist and listpack in RDB and RESTORE. */
int skip_checksum_validation; /* Disables checksum validation for RDB and RESTORE payload. */
int jemalloc_bg_thread; /* Enable jemalloc background thread */
size_t active_defrag_ignore_bytes; /* minimum amount of fragmentation waste to start active defrag */
int active_defrag_threshold_lower; /* minimum percentage of fragmentation to start active defrag */
......
......@@ -728,6 +728,7 @@ void lmoveGenericCommand(client *c, int wherefrom, int whereto) {
if (checkType(c,dobj,OBJ_LIST)) return;
value = listTypePop(sobj,wherefrom);
serverAssert(value); /* assertion for valgrind (avoid NPD) */
/* We saved touched key, and protect it, since lmoveHandlePush
* may change the client command argument vector (it does not
* currently). */
......
......@@ -787,6 +787,7 @@ int streamIteratorGetID(streamIterator *si, streamID *id, int64_t *numfields) {
*numfields = lpGetInteger(si->lp_ele);
si->lp_ele = lpNext(si->lp,si->lp_ele);
}
serverAssert(*numfields>=0);
/* If current >= start, and the entry is not marked as
* deleted, emit it. */
......@@ -2879,6 +2880,7 @@ void xinfoReplyWithStreamInfo(client *c, stream *s) {
addReplyStreamID(c,&id);
/* Consumer name. */
serverAssert(nack->consumer); /* assertion for valgrind (avoid NPD) */
addReplyBulkCBuffer(c,nack->consumer->name,
sdslen(nack->consumer->name));
......
......@@ -15,3 +15,12 @@
Memcheck:Value8
fun:lzf_compress
}
{
<negative size allocation, see integration/corrupt-dump>
Memcheck:FishyValue
malloc(size)
fun:malloc
fun:ztrymalloc_usable
fun:ztrymalloc
}
......@@ -155,7 +155,7 @@ proc log_crashes {} {
set logs [glob */err.txt]
foreach log $logs {
set res [find_valgrind_errors $log]
set res [find_valgrind_errors $log true]
if {$res != ""} {
puts $res
incr ::failed
......
# tests of corrupt ziplist payload with valid CRC
tags {"dump" "corruption"} {
proc generate_collections {suffix elements} {
set rd [redis_deferring_client]
for {set j 0} {$j < $elements} {incr j} {
# add both string values and integers
if {$j % 2 == 0} {set val $j} else {set val "_$j"}
$rd hset hash$suffix $j $val
$rd lpush list$suffix $val
$rd zadd zset$suffix $j $val
$rd sadd set$suffix $val
$rd xadd stream$suffix * item 1 value $val
}
for {set j 0} {$j < $elements * 5} {incr j} {
$rd read ; # Discard replies
}
$rd close
}
# generate keys with various types and encodings
proc generate_types {} {
r config set list-max-ziplist-size 5
r config set hash-max-ziplist-entries 5
r config set zset-max-ziplist-entries 5
r config set stream-node-max-entries 5
# create small (ziplist / listpack encoded) objects with 3 items
generate_collections "" 3
# add some metadata to the stream
r xgroup create stream mygroup 0
set records [r xreadgroup GROUP mygroup Alice COUNT 2 STREAMS stream >]
r xack stream mygroup [lindex [lindex [lindex [lindex $records 0] 1] 0] 0]
# create other non-collection types
r incr int
r set string str
# create bigger objects with 10 items (more than a single ziplist / listpack)
generate_collections big 10
# make sure our big stream also has a listpack record that has different
# field names than the master record
r xadd streambig * item 1 value 1
r xadd streambig * item 1 unique value
}
proc corrupt_payload {payload} {
set len [string length $payload]
set count 1 ;# usually corrupt only one byte
if {rand() > 0.9} { set count 2 }
while { $count > 0 } {
set idx [expr {int(rand() * $len)}]
set ch [binary format c [expr {int(rand()*255)}]]
set payload [string replace $payload $idx $idx $ch]
incr count -1
}
return $payload
}
# fuzzy tester for corrupt RESTORE payloads
# valgrind will make sure there were no leaks in the rdb loader error handling code
foreach sanitize_dump {no yes} {
if {$::accurate} {
set min_duration [expr {60 * 10}] ;# run at least 10 minutes
set min_cycles 1000 ;# run at least 1k cycles (max 16 minutes)
} else {
set min_duration 10 ; # run at least 10 seconds
set min_cycles 10 ; # run at least 10 cycles
}
test "Fuzzer corrupt restore payloads - sanitize_dump: $sanitize_dump" {
if {$min_duration * 2 > $::timeout} {
fail "insufficient timeout"
}
# start a server, fill with data and save an RDB file once (avoid re-save)
start_server [list overrides [list "save" "" use-exit-on-panic yes crash-memcheck-enabled no loglevel verbose] ] {
set stdout [srv 0 stdout]
r config set sanitize-dump-payload $sanitize_dump
r debug set-skip-checksum-validation 1
set start_time [clock seconds]
generate_types
r save
set cycle 0
set stat_terminated_in_restore 0
set stat_terminated_in_traffic 0
set stat_terminated_by_signal 0
set stat_successful_restore 0
set stat_rejected_restore 0
set stat_traffic_commands_sent 0
# repeatedly DUMP a random key, corrupt it and try RESTORE into a new key
while true {
set k [r randomkey]
set dump [r dump $k]
set dump [corrupt_payload $dump]
set printable_dump [string2printable $dump]
set restore_failed false
set report_and_restart false
set sent {}
# RESTORE can fail, but hopefully not terminate
if { [catch { r restore "_$k" 0 $dump REPLACE } err] } {
set restore_failed true
# skip if return failed with an error response.
if {[string match "ERR*" $err]} {
incr stat_rejected_restore
} else {
set report_and_restart true
incr stat_terminated_in_restore
write_log_line 0 "corrupt payload: $printable_dump"
if {$sanitize_dump == 1} {
puts "Server crashed in RESTORE with payload: $printable_dump"
}
}
} else {
r ping ;# an attempt to check if the server didn't terminate (this will throw an error that will terminate the tests)
}
set print_commands false
if {!$restore_failed} {
# if RESTORE didn't fail or terminate, run some random traffic on the new key
incr stat_successful_restore
if { [ catch {
set sent [generate_fuzzy_traffic_on_key "_$k" 1] ;# traffic for 1 second
incr stat_traffic_commands_sent [llength $sent]
r del "_$k" ;# in case the server terminated, here's where we'll detect it.
} err ] } {
# if the server terminated update stats and restart it
set report_and_restart true
incr stat_terminated_in_traffic
set by_signal [count_log_message 0 "crashed by signal"]
incr stat_terminated_by_signal $by_signal
if {$by_signal != 0 || $sanitize_dump == 1 } {
puts "Server crashed (by signal: $by_signal), with payload: $printable_dump"
set print_commands true
}
}
}
# check valgrind report for invalid reads after each RESTORE
# payload so that we have a report that is easier to reproduce
set valgrind_errors [find_valgrind_errors [srv 0 stderr] false]
if {$valgrind_errors != ""} {
puts "valgrind found an issue for payload: $printable_dump"
set report_and_restart true
set print_commands true
}
if {$report_and_restart} {
if {$print_commands} {
puts "violating commands:"
foreach cmd $sent {
foreach arg $cmd {
puts -nonewline "[string2printable $arg] "
}
puts ""
}
}
# restart the server and re-apply debug configuration
write_log_line 0 "corrupt payload: $printable_dump"
restart_server 0 true true
r config set sanitize-dump-payload $sanitize_dump
r debug set-skip-checksum-validation 1
}
incr cycle
if { ([clock seconds]-$start_time) >= $min_duration && $cycle >= $min_cycles} {
break
}
}
if {$::verbose} {
puts "Done $cycle cycles in [expr {[clock seconds]-$start_time}] seconds."
puts "RESTORE: successful: $stat_successful_restore, rejected: $stat_rejected_restore"
puts "Total commands sent in traffic: $stat_traffic_commands_sent, crashes during traffic: $stat_terminated_in_traffic ($stat_terminated_by_signal by signal)."
}
}
# if we run sanitization we never expect the server to crash at runtime
if { $sanitize_dump == 1} {
assert_equal $stat_terminated_in_restore 0
assert_equal $stat_terminated_in_traffic 0
}
# make sure all terminations were due to an assertion and not a SIGSEGV
assert_equal $stat_terminated_by_signal 0
}
}
} ;# tags
......@@ -280,7 +280,7 @@ start_server {} {
set sync_partial_err [status $R($master_id) sync_partial_err]
catch {
$R($slave_id) config rewrite
restart_server [expr {0-$slave_id}] true
restart_server [expr {0-$slave_id}] true false
set R($slave_id) [srv [expr {0-$slave_id}] client]
}
# note: just waiting for connected_slaves==4 has a race condition since
......@@ -329,7 +329,7 @@ start_server {} {
catch {
$R($slave_id) config rewrite
restart_server [expr {0-$slave_id}] true
restart_server [expr {0-$slave_id}] true false
set R($slave_id) [srv [expr {0-$slave_id}] client]
}
......
......@@ -164,7 +164,7 @@ test {client freed during loading} {
# 100mb of rdb, 100k keys will load in more than 1 second
r debug populate 100000 key 1000
restart_server 0 false
restart_server 0 false false
# make sure it's still loading
assert_equal [s loading] 1
......
......@@ -13,7 +13,7 @@ proc start_server_error {config_file error} {
}
proc check_valgrind_errors stderr {
set res [find_valgrind_errors $stderr]
set res [find_valgrind_errors $stderr true]
if {$res != ""} {
send_data_packet $::test_server_fd err "Valgrind error: $res\n"
}
......@@ -437,7 +437,7 @@ proc start_server {options {code undefined}} {
while 1 {
# check that the server actually started and is ready for connections
if {[exec grep -i "Ready to accept" | wc -l < $stdout] > 0} {
if {[count_message_lines $stdout "Ready to accept"] > 0} {
break
}
after 10
......@@ -511,13 +511,19 @@ proc start_server {options {code undefined}} {
}
}
proc restart_server {level wait_ready} {
proc restart_server {level wait_ready rotate_logs} {
set srv [lindex $::servers end+$level]
kill_server $srv
set pid [dict get $srv "pid"]
set stdout [dict get $srv "stdout"]
set stderr [dict get $srv "stderr"]
set config_file [dict get $srv "config_file"]
if {$rotate_logs} {
set ts [clock format [clock seconds] -format %y%m%d%H%M%S]
file rename $stdout $stdout.$ts.$pid
file rename $stderr $stderr.$ts.$pid
}
set prev_ready_count [count_message_lines $stdout "Ready to accept"]
# if we're inside a test, write the test name to the server log file
if {[info exists ::cur_test]} {
......@@ -526,7 +532,7 @@ proc restart_server {level wait_ready} {
close $fd
}
set prev_ready_count [exec grep -i "Ready to accept" | wc -l < $stdout]
set config_file [dict get $srv "config_file"]
set pid [spawn_server $config_file $stdout $stderr]
......@@ -541,7 +547,7 @@ proc restart_server {level wait_ready} {
if {$wait_ready} {
while 1 {
# check that the server actually started and is ready for connections
if {[exec grep -i "Ready to accept" | wc -l < $stdout] > $prev_ready_count + 1} {
if {[count_message_lines $stdout "Ready to accept"] > $prev_ready_count} {
break
}
after 10
......